diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000 |
commit | 5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch) | |
tree | a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/misc | |
parent | Initial commit. (diff) | |
download | linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.tar.xz linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.zip |
Adding upstream version 5.10.209.upstream/5.10.209upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
494 files changed, 263254 insertions, 0 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig new file mode 100644 index 000000000..fafa8b0d8 --- /dev/null +++ b/drivers/misc/Kconfig @@ -0,0 +1,484 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Misc strange devices +# + +menu "Misc devices" + +config SENSORS_LIS3LV02D + tristate + depends on INPUT + +config AD525X_DPOT + tristate "Analog Devices Digital Potentiometers" + depends on (I2C || SPI) && SYSFS + help + If you say yes here, you get support for the Analog Devices + AD5258, AD5259, AD5251, AD5252, AD5253, AD5254, AD5255 + AD5160, AD5161, AD5162, AD5165, AD5200, AD5201, AD5203, + AD5204, AD5206, AD5207, AD5231, AD5232, AD5233, AD5235, + AD5260, AD5262, AD5263, AD5290, AD5291, AD5292, AD5293, + AD7376, AD8400, AD8402, AD8403, ADN2850, AD5241, AD5242, + AD5243, AD5245, AD5246, AD5247, AD5248, AD5280, AD5282, + ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173, AD5270, + AD5271, AD5272, AD5274 + digital potentiometer chips. + + See Documentation/misc-devices/ad525x_dpot.rst for the + userspace interface. + + This driver can also be built as a module. If so, the module + will be called ad525x_dpot. + +config AD525X_DPOT_I2C + tristate "support I2C bus connection" + depends on AD525X_DPOT && I2C + help + Say Y here if you have a digital potentiometers hooked to an I2C bus. + + To compile this driver as a module, choose M here: the + module will be called ad525x_dpot-i2c. + +config AD525X_DPOT_SPI + tristate "support SPI bus connection" + depends on AD525X_DPOT && SPI_MASTER + help + Say Y here if you have a digital potentiometers hooked to an SPI bus. + + If unsure, say N (but it's safe to say "Y"). + + To compile this driver as a module, choose M here: the + module will be called ad525x_dpot-spi. + +config ATMEL_TCLIB + bool "Atmel AT32/AT91 Timer/Counter Library" + depends on ARCH_AT91 + help + Select this if you want a library to allocate the Timer/Counter + blocks found on many Atmel processors. This facilitates using + these blocks by different drivers despite processor differences. + +config DUMMY_IRQ + tristate "Dummy IRQ handler" + help + This module accepts a single 'irq' parameter, which it should register for. + The sole purpose of this module is to help with debugging of systems on + which spurious IRQs would happen on disabled IRQ vector. + +config IBM_ASM + tristate "Device driver for IBM RSA service processor" + depends on X86 && PCI && INPUT + depends on SERIAL_8250 || SERIAL_8250=n + help + This option enables device driver support for in-band access to the + IBM RSA (Condor) service processor in eServer xSeries systems. + The ibmasm device driver allows user space application to access + ASM (Advanced Systems Management) functions on the service + processor. The driver is meant to be used in conjunction with + a user space API. + The ibmasm driver also enables the OS to use the UART on the + service processor board as a regular serial port. To make use of + this feature serial driver support (CONFIG_SERIAL_8250) must be + enabled. + + WARNING: This software may not be supported or function + correctly on your IBM server. Please consult the IBM ServerProven + website <https://www-03.ibm.com/systems/info/x86servers/serverproven/compat/us/> + for information on the specific driver level and support statement + for your IBM server. 
+ +config IBMVMC + tristate "IBM Virtual Management Channel support" + depends on PPC_PSERIES + help + This is the IBM POWER Virtual Management Channel + + This driver is to be used for the POWER Virtual + Management Channel virtual adapter on the PowerVM + platform. It provides both request/response and + async message support through the /dev/ibmvmc node. + + To compile this driver as a module, choose M here: the + module will be called ibmvmc. + +config PHANTOM + tristate "Sensable PHANToM (PCI)" + depends on PCI + help + Say Y here if you want to build a driver for Sensable PHANToM device. + + This driver is only for PCI PHANToMs. + + If you choose to build module, its name will be phantom. If unsure, + say N here. + +config INTEL_MID_PTI + tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard" + depends on PCI && TTY && (X86_INTEL_MID || COMPILE_TEST) + help + The PTI (Parallel Trace Interface) driver directs + trace data routed from various parts in the system out + through an Intel Penwell PTI port and out of the mobile + device for analysis with a debugging tool (Lauterbach or Fido). + + You should select this driver if the target kernel is meant for + an Intel Atom (non-netbook) mobile device containing a MIPI + P1149.7 standard implementation. + +config TIFM_CORE + tristate "TI Flash Media interface support" + depends on PCI + help + If you want support for Texas Instruments(R) Flash Media adapters + you should select this option and then also choose an appropriate + host adapter, such as 'TI Flash Media PCI74xx/PCI76xx host adapter + support', if you have a TI PCI74xx compatible card reader, for + example. + You will also have to select some flash card format drivers. MMC/SD + cards are supported via 'MMC/SD Card support: TI Flash Media MMC/SD + Interface support (MMC_TIFM_SD)'. + + To compile this driver as a module, choose M here: the module will + be called tifm_core. + +config TIFM_7XX1 + tristate "TI Flash Media PCI74xx/PCI76xx host adapter support" + depends on PCI && TIFM_CORE + default TIFM_CORE + help + This option enables support for Texas Instruments(R) PCI74xx and + PCI76xx families of Flash Media adapters, found in many laptops. + To make actual use of the device, you will have to select some + flash card format drivers, as outlined in the TIFM_CORE Help. + + To compile this driver as a module, choose M here: the module will + be called tifm_7xx1. + +config ICS932S401 + tristate "Integrated Circuits ICS932S401" + depends on I2C + help + If you say yes here you get support for the Integrated Circuits + ICS932S401 clock control chips. + + This driver can also be built as a module. If so, the module + will be called ics932s401. + +config ATMEL_SSC + tristate "Device driver for Atmel SSC peripheral" + depends on HAS_IOMEM && (ARCH_AT91 || COMPILE_TEST) + help + This option enables device driver support for Atmel Synchronized + Serial Communication peripheral (SSC). + + The SSC peripheral supports a wide variety of serial frame based + communications, i.e. I2S, SPI, etc. + + If unsure, say N. + +config ENCLOSURE_SERVICES + tristate "Enclosure Services" + help + Provides support for intelligent enclosures (bays which + contain storage devices). You also need either a host + driver (SCSI/ATA) which supports enclosures + or a SCSI enclosure device (SES) to use these services. 
+ +config SGI_XP + tristate "Support communication between SGI SSIs" + depends on NET + depends on (IA64_SGI_UV || X86_UV) && SMP + depends on X86_64 || BROKEN + select SGI_GRU if X86_64 && SMP + help + An SGI machine can be divided into multiple Single System + Images which act independently of each other and have + hardware based memory protection from the others. Enabling + this feature will allow for direct communication between SSIs + based on a network adapter and DMA messaging. + +config CS5535_MFGPT + tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support" + depends on MFD_CS5535 + help + This driver provides access to MFGPT functionality for other + drivers that need timers. MFGPTs are available in the CS5535 and + CS5536 companion chips that are found in AMD Geode and several + other platforms. They have a better resolution and max interval + than the generic PIT, and are suitable for use as high-res timers. + You probably don't want to enable this manually; other drivers that + make use of it should enable it. + +config CS5535_MFGPT_DEFAULT_IRQ + int + depends on CS5535_MFGPT + default 7 + help + MFGPTs on the CS5535 require an interrupt. The selected IRQ + can be overridden as a module option as well as by driver that + use the cs5535_mfgpt_ API; however, different architectures might + want to use a different IRQ by default. This is here for + architectures to set as necessary. + +config CS5535_CLOCK_EVENT_SRC + tristate "CS5535/CS5536 high-res timer (MFGPT) events" + depends on GENERIC_CLOCKEVENTS && CS5535_MFGPT + help + This driver provides a clock event source based on the MFGPT + timer(s) in the CS5535 and CS5536 companion chips. + MFGPTs have a better resolution and max interval than the + generic PIT, and are suitable for use as high-res timers. + +config HP_ILO + tristate "Channel interface driver for the HP iLO processor" + depends on PCI + help + The channel interface driver allows applications to communicate + with iLO management processors present on HP ProLiant servers. + Upon loading, the driver creates /dev/hpilo/dXccbN files, which + can be used to gather data from the management processor, via + read and write system calls. + + To compile this driver as a module, choose M here: the + module will be called hpilo. + +config QCOM_COINCELL + tristate "Qualcomm coincell charger support" + depends on MFD_SPMI_PMIC || COMPILE_TEST + help + This driver supports the coincell block found inside of + Qualcomm PMICs. The coincell charger provides a means to + charge a coincell battery or backup capacitor which is used + to maintain PMIC register and RTC state in the absence of + external power. + +config QCOM_FASTRPC + tristate "Qualcomm FastRPC" + depends on ARCH_QCOM || COMPILE_TEST + depends on RPMSG + select DMA_SHARED_BUFFER + help + Provides a communication mechanism that allows for clients to + make remote method invocations across processor boundary to + applications DSP processor. Say M if you want to enable this + module. + +config SGI_GRU + tristate "SGI GRU driver" + depends on X86_UV && SMP + select MMU_NOTIFIER + help + The GRU is a hardware resource located in the system chipset. The GRU + contains memory that can be mmapped into the user address space. This memory is + used to communicate with the GRU to perform functions such as load/store, + scatter/gather, bcopy, AMOs, etc. The GRU is directly accessed by user + instructions using user virtual addresses. GRU instructions (ex., bcopy) use + user virtual addresses for operands. 
+ + If you are not running on a SGI UV system, say N. + +config SGI_GRU_DEBUG + bool "SGI GRU driver debug" + depends on SGI_GRU + help + This option enables additional debugging code for the SGI GRU driver. + If you are unsure, say N. + +config APDS9802ALS + tristate "Medfield Avago APDS9802 ALS Sensor module" + depends on I2C + help + If you say yes here you get support for the ALS APDS9802 ambient + light sensor. + + This driver can also be built as a module. If so, the module + will be called apds9802als. + +config ISL29003 + tristate "Intersil ISL29003 ambient light sensor" + depends on I2C && SYSFS + help + If you say yes here you get support for the Intersil ISL29003 + ambient light sensor. + + This driver can also be built as a module. If so, the module + will be called isl29003. + +config ISL29020 + tristate "Intersil ISL29020 ambient light sensor" + depends on I2C + help + If you say yes here you get support for the Intersil ISL29020 + ambient light sensor. + + This driver can also be built as a module. If so, the module + will be called isl29020. + +config SENSORS_TSL2550 + tristate "Taos TSL2550 ambient light sensor" + depends on I2C && SYSFS + help + If you say yes here you get support for the Taos TSL2550 + ambient light sensor. + + This driver can also be built as a module. If so, the module + will be called tsl2550. + +config SENSORS_BH1770 + tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor" + depends on I2C + help + Say Y here if you want to build a driver for BH1770GLC (ROHM) or + SFH7770 (Osram) combined ambient light and proximity sensor chip. + + To compile this driver as a module, choose M here: the + module will be called bh1770glc. If unsure, say N here. + +config SENSORS_APDS990X + tristate "APDS990X combined als and proximity sensors" + depends on I2C + help + Say Y here if you want to build a driver for Avago APDS990x + combined ambient light and proximity sensor chip. + + To compile this driver as a module, choose M here: the + module will be called apds990x. If unsure, say N here. + +config HMC6352 + tristate "Honeywell HMC6352 compass" + depends on I2C + help + This driver provides support for the Honeywell HMC6352 compass, + providing configuration and heading data via sysfs. + +config DS1682 + tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm" + depends on I2C + help + If you say yes here you get support for Dallas Semiconductor + DS1682 Total Elapsed Time Recorder. + + This driver can also be built as a module. If so, the module + will be called ds1682. + +config VMWARE_BALLOON + tristate "VMware Balloon Driver" + depends on VMWARE_VMCI && X86 && HYPERVISOR_GUEST + select MEMORY_BALLOON + help + This is VMware physical memory management driver which acts + like a "balloon" that can be inflated to reclaim physical pages + by reserving them in the guest and invalidating them in the + monitor, freeing up the underlying machine pages so they can + be allocated to other guests. The balloon can also be deflated + to allow the guest to use more physical memory. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called vmw_balloon. + +config PCH_PHUB + tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB" + select GENERIC_NET_UTILS + depends on PCI && (X86_32 || MIPS || COMPILE_TEST) + help + This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of + Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded + processor. 
The Topcliff has MAC address and Option ROM data in SROM. + This driver can access MAC address and Option ROM data in SROM. + + This driver also can be used for LAPIS Semiconductor's IOH, + ML7213/ML7223/ML7831. + ML7213 which is for IVI(In-Vehicle Infotainment) use. + ML7223 IOH is for MP(Media Phone) use. + ML7831 IOH is for general purpose use. + ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series. + ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH. + + To compile this driver as a module, choose M here: the module will + be called pch_phub. + +config LATTICE_ECP3_CONFIG + tristate "Lattice ECP3 FPGA bitstream configuration via SPI" + depends on SPI && SYSFS + select FW_LOADER + default n + help + This option enables support for bitstream configuration (programming + or loading) of the Lattice ECP3 FPGA family via SPI. + + If unsure, say N. + +config SRAM + bool "Generic on-chip SRAM driver" + depends on HAS_IOMEM + select GENERIC_ALLOCATOR + select SRAM_EXEC if ARM + help + This driver allows you to declare a memory region to be managed by + the genalloc API. It is supposed to be used for small on-chip SRAM + areas found on many SoCs. + +config SRAM_EXEC + bool + +config PCI_ENDPOINT_TEST + depends on PCI + select CRC32 + tristate "PCI Endpoint Test driver" + help + Enable this configuration option to enable the host side test driver + for PCI Endpoint. + +config XILINX_SDFEC + tristate "Xilinx SDFEC 16" + depends on HAS_IOMEM + help + This option enables support for the Xilinx SDFEC (Soft Decision + Forward Error Correction) driver. This enables a char driver + for the SDFEC. + + You may select this driver if your design instantiates the + SDFEC(16nm) hardened block. To compile this as a module choose M. + + If unsure, say N. + +config MISC_RTSX + tristate + default MISC_RTSX_PCI || MISC_RTSX_USB + +config PVPANIC + tristate "pvpanic device support" + depends on HAS_IOMEM && (ACPI || OF) + help + This driver provides support for the pvpanic device. pvpanic is + a paravirtualized device provided by QEMU; it lets a virtual machine + (guest) communicate panic events to the host. + +config HISI_HIKEY_USB + tristate "USB GPIO Hub on HiSilicon Hikey 960/970 Platform" + depends on (OF && GPIOLIB) || COMPILE_TEST + depends on USB_ROLE_SWITCH + help + If you say yes here this adds support for the on-board USB GPIO hub + found on HiKey 960/970 boards, which is necessary to support + switching between the dual-role USB-C port and the USB-A host ports + using only one USB controller. + +source "drivers/misc/c2port/Kconfig" +source "drivers/misc/eeprom/Kconfig" +source "drivers/misc/cb710/Kconfig" +source "drivers/misc/ti-st/Kconfig" +source "drivers/misc/lis3lv02d/Kconfig" +source "drivers/misc/altera-stapl/Kconfig" +source "drivers/misc/mei/Kconfig" +source "drivers/misc/vmw_vmci/Kconfig" +source "drivers/misc/genwqe/Kconfig" +source "drivers/misc/echo/Kconfig" +source "drivers/misc/cxl/Kconfig" +source "drivers/misc/ocxl/Kconfig" +source "drivers/misc/cardreader/Kconfig" +source "drivers/misc/habanalabs/Kconfig" +source "drivers/misc/uacce/Kconfig" +endmenu diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile new file mode 100644 index 000000000..d23231e73 --- /dev/null +++ b/drivers/misc/Makefile @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for misc devices that really don't fit anywhere else. 
+# + +obj-$(CONFIG_IBM_ASM) += ibmasm/ +obj-$(CONFIG_IBMVMC) += ibmvmc.o +obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o +obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o +obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o +obj-$(CONFIG_INTEL_MID_PTI) += pti.o +obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o +obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o +obj-$(CONFIG_DUMMY_IRQ) += dummy-irq.o +obj-$(CONFIG_ICS932S401) += ics932s401.o +obj-$(CONFIG_LKDTM) += lkdtm/ +obj-$(CONFIG_TIFM_CORE) += tifm_core.o +obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o +obj-$(CONFIG_PHANTOM) += phantom.o +obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o +obj-$(CONFIG_QCOM_FASTRPC) += fastrpc.o +obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o +obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o +obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o +obj-$(CONFIG_KGDB_TESTS) += kgdbts.o +obj-$(CONFIG_SGI_XP) += sgi-xp/ +obj-$(CONFIG_SGI_GRU) += sgi-gru/ +obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o +obj-$(CONFIG_HP_ILO) += hpilo.o +obj-$(CONFIG_APDS9802ALS) += apds9802als.o +obj-$(CONFIG_ISL29003) += isl29003.o +obj-$(CONFIG_ISL29020) += isl29020.o +obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o +obj-$(CONFIG_DS1682) += ds1682.o +obj-$(CONFIG_C2PORT) += c2port/ +obj-$(CONFIG_HMC6352) += hmc6352.o +obj-y += eeprom/ +obj-y += cb710/ +obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o +obj-$(CONFIG_PCH_PHUB) += pch_phub.o +obj-y += ti-st/ +obj-y += lis3lv02d/ +obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/ +obj-$(CONFIG_INTEL_MEI) += mei/ +obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/ +obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o +obj-$(CONFIG_SRAM) += sram.o +obj-$(CONFIG_SRAM_EXEC) += sram-exec.o +obj-$(CONFIG_GENWQE) += genwqe/ +obj-$(CONFIG_ECHO) += echo/ +obj-$(CONFIG_CXL_BASE) += cxl/ +obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o +obj-$(CONFIG_OCXL) += ocxl/ +obj-y += cardreader/ +obj-$(CONFIG_PVPANIC) += pvpanic.o +obj-$(CONFIG_HABANA_AI) += habanalabs/ +obj-$(CONFIG_UACCE) += uacce/ +obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o +obj-$(CONFIG_HISI_HIKEY_USB) += hisi_hikey_usb.o diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c new file mode 100644 index 000000000..bd869ec5e --- /dev/null +++ b/drivers/misc/ad525x_dpot-i2c.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for the Analog Devices digital potentiometers (I2C bus) + * + * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc. 
+ */ + +#include <linux/i2c.h> +#include <linux/module.h> + +#include "ad525x_dpot.h" + +/* I2C bus functions */ +static int write_d8(void *client, u8 val) +{ + return i2c_smbus_write_byte(client, val); +} + +static int write_r8d8(void *client, u8 reg, u8 val) +{ + return i2c_smbus_write_byte_data(client, reg, val); +} + +static int write_r8d16(void *client, u8 reg, u16 val) +{ + return i2c_smbus_write_word_data(client, reg, val); +} + +static int read_d8(void *client) +{ + return i2c_smbus_read_byte(client); +} + +static int read_r8d8(void *client, u8 reg) +{ + return i2c_smbus_read_byte_data(client, reg); +} + +static int read_r8d16(void *client, u8 reg) +{ + return i2c_smbus_read_word_data(client, reg); +} + +static const struct ad_dpot_bus_ops bops = { + .read_d8 = read_d8, + .read_r8d8 = read_r8d8, + .read_r8d16 = read_r8d16, + .write_d8 = write_d8, + .write_r8d8 = write_r8d8, + .write_r8d16 = write_r8d16, +}; + +static int ad_dpot_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct ad_dpot_bus_data bdata = { + .client = client, + .bops = &bops, + }; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_WORD_DATA)) { + dev_err(&client->dev, "SMBUS Word Data not Supported\n"); + return -EIO; + } + + return ad_dpot_probe(&client->dev, &bdata, id->driver_data, id->name); +} + +static int ad_dpot_i2c_remove(struct i2c_client *client) +{ + return ad_dpot_remove(&client->dev); +} + +static const struct i2c_device_id ad_dpot_id[] = { + {"ad5258", AD5258_ID}, + {"ad5259", AD5259_ID}, + {"ad5251", AD5251_ID}, + {"ad5252", AD5252_ID}, + {"ad5253", AD5253_ID}, + {"ad5254", AD5254_ID}, + {"ad5255", AD5255_ID}, + {"ad5241", AD5241_ID}, + {"ad5242", AD5242_ID}, + {"ad5243", AD5243_ID}, + {"ad5245", AD5245_ID}, + {"ad5246", AD5246_ID}, + {"ad5247", AD5247_ID}, + {"ad5248", AD5248_ID}, + {"ad5280", AD5280_ID}, + {"ad5282", AD5282_ID}, + {"adn2860", ADN2860_ID}, + {"ad5273", AD5273_ID}, + {"ad5161", AD5161_ID}, + {"ad5171", AD5171_ID}, + {"ad5170", AD5170_ID}, + {"ad5172", AD5172_ID}, + {"ad5173", AD5173_ID}, + {"ad5272", AD5272_ID}, + {"ad5274", AD5274_ID}, + {} +}; +MODULE_DEVICE_TABLE(i2c, ad_dpot_id); + +static struct i2c_driver ad_dpot_i2c_driver = { + .driver = { + .name = "ad_dpot", + }, + .probe = ad_dpot_i2c_probe, + .remove = ad_dpot_i2c_remove, + .id_table = ad_dpot_id, +}; + +module_i2c_driver(ad_dpot_i2c_driver); + +MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>"); +MODULE_DESCRIPTION("digital potentiometer I2C bus driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c new file mode 100644 index 000000000..aea931dd2 --- /dev/null +++ b/drivers/misc/ad525x_dpot-spi.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for the Analog Devices digital potentiometers (SPI bus) + * + * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc. 
+ */ + +#include <linux/spi/spi.h> +#include <linux/module.h> + +#include "ad525x_dpot.h" + +/* SPI bus functions */ +static int write8(void *client, u8 val) +{ + u8 data = val; + + return spi_write(client, &data, 1); +} + +static int write16(void *client, u8 reg, u8 val) +{ + u8 data[2] = {reg, val}; + + return spi_write(client, data, 2); +} + +static int write24(void *client, u8 reg, u16 val) +{ + u8 data[3] = {reg, val >> 8, val}; + + return spi_write(client, data, 3); +} + +static int read8(void *client) +{ + int ret; + u8 data; + + ret = spi_read(client, &data, 1); + if (ret < 0) + return ret; + + return data; +} + +static int read16(void *client, u8 reg) +{ + int ret; + u8 buf_rx[2]; + + write16(client, reg, 0); + ret = spi_read(client, buf_rx, 2); + if (ret < 0) + return ret; + + return (buf_rx[0] << 8) | buf_rx[1]; +} + +static int read24(void *client, u8 reg) +{ + int ret; + u8 buf_rx[3]; + + write24(client, reg, 0); + ret = spi_read(client, buf_rx, 3); + if (ret < 0) + return ret; + + return (buf_rx[1] << 8) | buf_rx[2]; +} + +static const struct ad_dpot_bus_ops bops = { + .read_d8 = read8, + .read_r8d8 = read16, + .read_r8d16 = read24, + .write_d8 = write8, + .write_r8d8 = write16, + .write_r8d16 = write24, +}; +static int ad_dpot_spi_probe(struct spi_device *spi) +{ + struct ad_dpot_bus_data bdata = { + .client = spi, + .bops = &bops, + }; + + return ad_dpot_probe(&spi->dev, &bdata, + spi_get_device_id(spi)->driver_data, + spi_get_device_id(spi)->name); +} + +static int ad_dpot_spi_remove(struct spi_device *spi) +{ + return ad_dpot_remove(&spi->dev); +} + +static const struct spi_device_id ad_dpot_spi_id[] = { + {"ad5160", AD5160_ID}, + {"ad5161", AD5161_ID}, + {"ad5162", AD5162_ID}, + {"ad5165", AD5165_ID}, + {"ad5200", AD5200_ID}, + {"ad5201", AD5201_ID}, + {"ad5203", AD5203_ID}, + {"ad5204", AD5204_ID}, + {"ad5206", AD5206_ID}, + {"ad5207", AD5207_ID}, + {"ad5231", AD5231_ID}, + {"ad5232", AD5232_ID}, + {"ad5233", AD5233_ID}, + {"ad5235", AD5235_ID}, + {"ad5260", AD5260_ID}, + {"ad5262", AD5262_ID}, + {"ad5263", AD5263_ID}, + {"ad5290", AD5290_ID}, + {"ad5291", AD5291_ID}, + {"ad5292", AD5292_ID}, + {"ad5293", AD5293_ID}, + {"ad7376", AD7376_ID}, + {"ad8400", AD8400_ID}, + {"ad8402", AD8402_ID}, + {"ad8403", AD8403_ID}, + {"adn2850", ADN2850_ID}, + {"ad5270", AD5270_ID}, + {"ad5271", AD5271_ID}, + {} +}; +MODULE_DEVICE_TABLE(spi, ad_dpot_spi_id); + +static struct spi_driver ad_dpot_spi_driver = { + .driver = { + .name = "ad_dpot", + }, + .probe = ad_dpot_spi_probe, + .remove = ad_dpot_spi_remove, + .id_table = ad_dpot_spi_id, +}; + +module_spi_driver(ad_dpot_spi_driver); + +MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>"); +MODULE_DESCRIPTION("digital potentiometer SPI bus driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:ad_dpot"); diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c new file mode 100644 index 000000000..6f164522b --- /dev/null +++ b/drivers/misc/ad525x_dpot.c @@ -0,0 +1,762 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * ad525x_dpot: Driver for the Analog Devices digital potentiometers + * Copyright (c) 2009-2010 Analog Devices, Inc. 
+ * Author: Michael Hennerich <michael.hennerich@analog.com> + * + * DEVID #Wipers #Positions Resistor Options (kOhm) + * AD5258 1 64 1, 10, 50, 100 + * AD5259 1 256 5, 10, 50, 100 + * AD5251 2 64 1, 10, 50, 100 + * AD5252 2 256 1, 10, 50, 100 + * AD5255 3 512 25, 250 + * AD5253 4 64 1, 10, 50, 100 + * AD5254 4 256 1, 10, 50, 100 + * AD5160 1 256 5, 10, 50, 100 + * AD5161 1 256 5, 10, 50, 100 + * AD5162 2 256 2.5, 10, 50, 100 + * AD5165 1 256 100 + * AD5200 1 256 10, 50 + * AD5201 1 33 10, 50 + * AD5203 4 64 10, 100 + * AD5204 4 256 10, 50, 100 + * AD5206 6 256 10, 50, 100 + * AD5207 2 256 10, 50, 100 + * AD5231 1 1024 10, 50, 100 + * AD5232 2 256 10, 50, 100 + * AD5233 4 64 10, 50, 100 + * AD5235 2 1024 25, 250 + * AD5260 1 256 20, 50, 200 + * AD5262 2 256 20, 50, 200 + * AD5263 4 256 20, 50, 200 + * AD5290 1 256 10, 50, 100 + * AD5291 1 256 20, 50, 100 (20-TP) + * AD5292 1 1024 20, 50, 100 (20-TP) + * AD5293 1 1024 20, 50, 100 + * AD7376 1 128 10, 50, 100, 1M + * AD8400 1 256 1, 10, 50, 100 + * AD8402 2 256 1, 10, 50, 100 + * AD8403 4 256 1, 10, 50, 100 + * ADN2850 3 512 25, 250 + * AD5241 1 256 10, 100, 1M + * AD5246 1 128 5, 10, 50, 100 + * AD5247 1 128 5, 10, 50, 100 + * AD5245 1 256 5, 10, 50, 100 + * AD5243 2 256 2.5, 10, 50, 100 + * AD5248 2 256 2.5, 10, 50, 100 + * AD5242 2 256 20, 50, 200 + * AD5280 1 256 20, 50, 200 + * AD5282 2 256 20, 50, 200 + * ADN2860 3 512 25, 250 + * AD5273 1 64 1, 10, 50, 100 (OTP) + * AD5171 1 64 5, 10, 50, 100 (OTP) + * AD5170 1 256 2.5, 10, 50, 100 (OTP) + * AD5172 2 256 2.5, 10, 50, 100 (OTP) + * AD5173 2 256 2.5, 10, 50, 100 (OTP) + * AD5270 1 1024 20, 50, 100 (50-TP) + * AD5271 1 256 20, 50, 100 (50-TP) + * AD5272 1 1024 20, 50, 100 (50-TP) + * AD5274 1 256 20, 50, 100 (50-TP) + * + * See Documentation/misc-devices/ad525x_dpot.rst for more info. + * + * derived from ad5258.c + * Copyright (c) 2009 Cyber Switching, Inc. 
+ * Author: Chris Verges <chrisv@cyberswitching.com> + * + * derived from ad5252.c + * Copyright (c) 2006-2011 Michael Hennerich <michael.hennerich@analog.com> + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/slab.h> + +#include "ad525x_dpot.h" + +/* + * Client data (each client gets its own) + */ + +struct dpot_data { + struct ad_dpot_bus_data bdata; + struct mutex update_lock; + unsigned int rdac_mask; + unsigned int max_pos; + unsigned long devid; + unsigned int uid; + unsigned int feat; + unsigned int wipers; + u16 rdac_cache[MAX_RDACS]; + DECLARE_BITMAP(otp_en_mask, MAX_RDACS); +}; + +static inline int dpot_read_d8(struct dpot_data *dpot) +{ + return dpot->bdata.bops->read_d8(dpot->bdata.client); +} + +static inline int dpot_read_r8d8(struct dpot_data *dpot, u8 reg) +{ + return dpot->bdata.bops->read_r8d8(dpot->bdata.client, reg); +} + +static inline int dpot_read_r8d16(struct dpot_data *dpot, u8 reg) +{ + return dpot->bdata.bops->read_r8d16(dpot->bdata.client, reg); +} + +static inline int dpot_write_d8(struct dpot_data *dpot, u8 val) +{ + return dpot->bdata.bops->write_d8(dpot->bdata.client, val); +} + +static inline int dpot_write_r8d8(struct dpot_data *dpot, u8 reg, u16 val) +{ + return dpot->bdata.bops->write_r8d8(dpot->bdata.client, reg, val); +} + +static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val) +{ + return dpot->bdata.bops->write_r8d16(dpot->bdata.client, reg, val); +} + +static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg) +{ + unsigned int ctrl = 0; + int value; + + if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) { + + if (dpot->feat & F_RDACS_WONLY) + return dpot->rdac_cache[reg & DPOT_RDAC_MASK]; + if (dpot->uid == DPOT_UID(AD5291_ID) || + dpot->uid == DPOT_UID(AD5292_ID) || + dpot->uid == DPOT_UID(AD5293_ID)) { + + value = dpot_read_r8d8(dpot, + DPOT_AD5291_READ_RDAC << 2); + + if (dpot->uid == DPOT_UID(AD5291_ID)) + value = value >> 2; + + return value; + } else if (dpot->uid == DPOT_UID(AD5270_ID) || + dpot->uid == DPOT_UID(AD5271_ID)) { + + value = dpot_read_r8d8(dpot, + DPOT_AD5270_1_2_4_READ_RDAC << 2); + + if (value < 0) + return value; + + if (dpot->uid == DPOT_UID(AD5271_ID)) + value = value >> 2; + + return value; + } + + ctrl = DPOT_SPI_READ_RDAC; + } else if (reg & DPOT_ADDR_EEPROM) { + ctrl = DPOT_SPI_READ_EEPROM; + } + + if (dpot->feat & F_SPI_16BIT) + return dpot_read_r8d8(dpot, ctrl); + else if (dpot->feat & F_SPI_24BIT) + return dpot_read_r8d16(dpot, ctrl); + + return -EFAULT; +} + +static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg) +{ + int value; + unsigned int ctrl = 0; + + switch (dpot->uid) { + case DPOT_UID(AD5246_ID): + case DPOT_UID(AD5247_ID): + return dpot_read_d8(dpot); + case DPOT_UID(AD5245_ID): + case DPOT_UID(AD5241_ID): + case DPOT_UID(AD5242_ID): + case DPOT_UID(AD5243_ID): + case DPOT_UID(AD5248_ID): + case DPOT_UID(AD5280_ID): + case DPOT_UID(AD5282_ID): + ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ? + 0 : DPOT_AD5282_RDAC_AB; + return dpot_read_r8d8(dpot, ctrl); + case DPOT_UID(AD5170_ID): + case DPOT_UID(AD5171_ID): + case DPOT_UID(AD5273_ID): + return dpot_read_d8(dpot); + case DPOT_UID(AD5172_ID): + case DPOT_UID(AD5173_ID): + ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ? 
+ 0 : DPOT_AD5172_3_A0; + return dpot_read_r8d8(dpot, ctrl); + case DPOT_UID(AD5272_ID): + case DPOT_UID(AD5274_ID): + dpot_write_r8d8(dpot, + (DPOT_AD5270_1_2_4_READ_RDAC << 2), 0); + + value = dpot_read_r8d16(dpot, DPOT_AD5270_1_2_4_RDAC << 2); + if (value < 0) + return value; + /* + * AD5272/AD5274 returns high byte first, however + * underling smbus expects low byte first. + */ + value = swab16(value); + + if (dpot->uid == DPOT_UID(AD5274_ID)) + value = value >> 2; + return value; + default: + if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256)) + return dpot_read_r8d16(dpot, (reg & 0xF8) | + ((reg & 0x7) << 1)); + else + return dpot_read_r8d8(dpot, reg); + } +} + +static s32 dpot_read(struct dpot_data *dpot, u8 reg) +{ + if (dpot->feat & F_SPI) + return dpot_read_spi(dpot, reg); + else + return dpot_read_i2c(dpot, reg); +} + +static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value) +{ + unsigned int val = 0; + + if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD | DPOT_ADDR_OTP))) { + if (dpot->feat & F_RDACS_WONLY) + dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value; + + if (dpot->feat & F_AD_APPDATA) { + if (dpot->feat & F_SPI_8BIT) { + val = ((reg & DPOT_RDAC_MASK) << + DPOT_MAX_POS(dpot->devid)) | + value; + return dpot_write_d8(dpot, val); + } else if (dpot->feat & F_SPI_16BIT) { + val = ((reg & DPOT_RDAC_MASK) << + DPOT_MAX_POS(dpot->devid)) | + value; + return dpot_write_r8d8(dpot, val >> 8, + val & 0xFF); + } else + BUG(); + } else { + if (dpot->uid == DPOT_UID(AD5291_ID) || + dpot->uid == DPOT_UID(AD5292_ID) || + dpot->uid == DPOT_UID(AD5293_ID)) { + + dpot_write_r8d8(dpot, DPOT_AD5291_CTRLREG << 2, + DPOT_AD5291_UNLOCK_CMD); + + if (dpot->uid == DPOT_UID(AD5291_ID)) + value = value << 2; + + return dpot_write_r8d8(dpot, + (DPOT_AD5291_RDAC << 2) | + (value >> 8), value & 0xFF); + } else if (dpot->uid == DPOT_UID(AD5270_ID) || + dpot->uid == DPOT_UID(AD5271_ID)) { + dpot_write_r8d8(dpot, + DPOT_AD5270_1_2_4_CTRLREG << 2, + DPOT_AD5270_1_2_4_UNLOCK_CMD); + + if (dpot->uid == DPOT_UID(AD5271_ID)) + value = value << 2; + + return dpot_write_r8d8(dpot, + (DPOT_AD5270_1_2_4_RDAC << 2) | + (value >> 8), value & 0xFF); + } + val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK); + } + } else if (reg & DPOT_ADDR_EEPROM) { + val = DPOT_SPI_EEPROM | (reg & DPOT_RDAC_MASK); + } else if (reg & DPOT_ADDR_CMD) { + switch (reg) { + case DPOT_DEC_ALL_6DB: + val = DPOT_SPI_DEC_ALL_6DB; + break; + case DPOT_INC_ALL_6DB: + val = DPOT_SPI_INC_ALL_6DB; + break; + case DPOT_DEC_ALL: + val = DPOT_SPI_DEC_ALL; + break; + case DPOT_INC_ALL: + val = DPOT_SPI_INC_ALL; + break; + } + } else if (reg & DPOT_ADDR_OTP) { + if (dpot->uid == DPOT_UID(AD5291_ID) || + dpot->uid == DPOT_UID(AD5292_ID)) { + return dpot_write_r8d8(dpot, + DPOT_AD5291_STORE_XTPM << 2, 0); + } else if (dpot->uid == DPOT_UID(AD5270_ID) || + dpot->uid == DPOT_UID(AD5271_ID)) { + return dpot_write_r8d8(dpot, + DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0); + } + } else + BUG(); + + if (dpot->feat & F_SPI_16BIT) + return dpot_write_r8d8(dpot, val, value); + else if (dpot->feat & F_SPI_24BIT) + return dpot_write_r8d16(dpot, val, value); + + return -EFAULT; +} + +static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value) +{ + /* Only write the instruction byte for certain commands */ + unsigned int tmp = 0, ctrl = 0; + + switch (dpot->uid) { + case DPOT_UID(AD5246_ID): + case DPOT_UID(AD5247_ID): + return dpot_write_d8(dpot, value); + + case DPOT_UID(AD5245_ID): + case DPOT_UID(AD5241_ID): + case DPOT_UID(AD5242_ID): + case 
DPOT_UID(AD5243_ID): + case DPOT_UID(AD5248_ID): + case DPOT_UID(AD5280_ID): + case DPOT_UID(AD5282_ID): + ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ? + 0 : DPOT_AD5282_RDAC_AB; + return dpot_write_r8d8(dpot, ctrl, value); + case DPOT_UID(AD5171_ID): + case DPOT_UID(AD5273_ID): + if (reg & DPOT_ADDR_OTP) { + tmp = dpot_read_d8(dpot); + if (tmp >> 6) /* Ready to Program? */ + return -EFAULT; + ctrl = DPOT_AD5273_FUSE; + } + return dpot_write_r8d8(dpot, ctrl, value); + case DPOT_UID(AD5172_ID): + case DPOT_UID(AD5173_ID): + ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ? + 0 : DPOT_AD5172_3_A0; + if (reg & DPOT_ADDR_OTP) { + tmp = dpot_read_r8d16(dpot, ctrl); + if (tmp >> 14) /* Ready to Program? */ + return -EFAULT; + ctrl |= DPOT_AD5170_2_3_FUSE; + } + return dpot_write_r8d8(dpot, ctrl, value); + case DPOT_UID(AD5170_ID): + if (reg & DPOT_ADDR_OTP) { + tmp = dpot_read_r8d16(dpot, tmp); + if (tmp >> 14) /* Ready to Program? */ + return -EFAULT; + ctrl = DPOT_AD5170_2_3_FUSE; + } + return dpot_write_r8d8(dpot, ctrl, value); + case DPOT_UID(AD5272_ID): + case DPOT_UID(AD5274_ID): + dpot_write_r8d8(dpot, DPOT_AD5270_1_2_4_CTRLREG << 2, + DPOT_AD5270_1_2_4_UNLOCK_CMD); + + if (reg & DPOT_ADDR_OTP) + return dpot_write_r8d8(dpot, + DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0); + + if (dpot->uid == DPOT_UID(AD5274_ID)) + value = value << 2; + + return dpot_write_r8d8(dpot, (DPOT_AD5270_1_2_4_RDAC << 2) | + (value >> 8), value & 0xFF); + default: + if (reg & DPOT_ADDR_CMD) + return dpot_write_d8(dpot, reg); + + if (dpot->max_pos > 256) + return dpot_write_r8d16(dpot, (reg & 0xF8) | + ((reg & 0x7) << 1), value); + else + /* All other registers require instruction + data bytes */ + return dpot_write_r8d8(dpot, reg, value); + } +} + +static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value) +{ + if (dpot->feat & F_SPI) + return dpot_write_spi(dpot, reg, value); + else + return dpot_write_i2c(dpot, reg, value); +} + +/* sysfs functions */ + +static ssize_t sysfs_show_reg(struct device *dev, + struct device_attribute *attr, + char *buf, u32 reg) +{ + struct dpot_data *data = dev_get_drvdata(dev); + s32 value; + + if (reg & DPOT_ADDR_OTP_EN) + return sprintf(buf, "%s\n", + test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask) ? + "enabled" : "disabled"); + + + mutex_lock(&data->update_lock); + value = dpot_read(data, reg); + mutex_unlock(&data->update_lock); + + if (value < 0) + return -EINVAL; + /* + * Let someone else deal with converting this ... + * the tolerance is a two-byte value where the MSB + * is a sign + integer value, and the LSB is a + * decimal value. See page 18 of the AD5258 + * datasheet (Rev. A) for more details. 
+ */ + + if (reg & DPOT_REG_TOL) + return sprintf(buf, "0x%04x\n", value & 0xFFFF); + else + return sprintf(buf, "%u\n", value & data->rdac_mask); +} + +static ssize_t sysfs_set_reg(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count, u32 reg) +{ + struct dpot_data *data = dev_get_drvdata(dev); + unsigned long value; + int err; + + if (reg & DPOT_ADDR_OTP_EN) { + if (sysfs_streq(buf, "enabled")) + set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask); + else + clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask); + + return count; + } + + if ((reg & DPOT_ADDR_OTP) && + !test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask)) + return -EPERM; + + err = kstrtoul(buf, 10, &value); + if (err) + return err; + + if (value > data->rdac_mask) + value = data->rdac_mask; + + mutex_lock(&data->update_lock); + dpot_write(data, reg, value); + if (reg & DPOT_ADDR_EEPROM) + msleep(26); /* Sleep while the EEPROM updates */ + else if (reg & DPOT_ADDR_OTP) + msleep(400); /* Sleep while the OTP updates */ + mutex_unlock(&data->update_lock); + + return count; +} + +static ssize_t sysfs_do_cmd(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count, u32 reg) +{ + struct dpot_data *data = dev_get_drvdata(dev); + + mutex_lock(&data->update_lock); + dpot_write(data, reg, 0); + mutex_unlock(&data->update_lock); + + return count; +} + +/* ------------------------------------------------------------------------- */ + +#define DPOT_DEVICE_SHOW(_name, _reg) static ssize_t \ +show_##_name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + return sysfs_show_reg(dev, attr, buf, _reg); \ +} + +#define DPOT_DEVICE_SET(_name, _reg) static ssize_t \ +set_##_name(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + return sysfs_set_reg(dev, attr, buf, count, _reg); \ +} + +#define DPOT_DEVICE_SHOW_SET(name, reg) \ +DPOT_DEVICE_SHOW(name, reg) \ +DPOT_DEVICE_SET(name, reg) \ +static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name) + +#define DPOT_DEVICE_SHOW_ONLY(name, reg) \ +DPOT_DEVICE_SHOW(name, reg) \ +static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, NULL) + +DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0); +DPOT_DEVICE_SHOW_SET(eeprom0, DPOT_ADDR_EEPROM | DPOT_RDAC0); +DPOT_DEVICE_SHOW_ONLY(tolerance0, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC0); +DPOT_DEVICE_SHOW_SET(otp0, DPOT_ADDR_OTP | DPOT_RDAC0); +DPOT_DEVICE_SHOW_SET(otp0en, DPOT_ADDR_OTP_EN | DPOT_RDAC0); + +DPOT_DEVICE_SHOW_SET(rdac1, DPOT_ADDR_RDAC | DPOT_RDAC1); +DPOT_DEVICE_SHOW_SET(eeprom1, DPOT_ADDR_EEPROM | DPOT_RDAC1); +DPOT_DEVICE_SHOW_ONLY(tolerance1, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC1); +DPOT_DEVICE_SHOW_SET(otp1, DPOT_ADDR_OTP | DPOT_RDAC1); +DPOT_DEVICE_SHOW_SET(otp1en, DPOT_ADDR_OTP_EN | DPOT_RDAC1); + +DPOT_DEVICE_SHOW_SET(rdac2, DPOT_ADDR_RDAC | DPOT_RDAC2); +DPOT_DEVICE_SHOW_SET(eeprom2, DPOT_ADDR_EEPROM | DPOT_RDAC2); +DPOT_DEVICE_SHOW_ONLY(tolerance2, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC2); +DPOT_DEVICE_SHOW_SET(otp2, DPOT_ADDR_OTP | DPOT_RDAC2); +DPOT_DEVICE_SHOW_SET(otp2en, DPOT_ADDR_OTP_EN | DPOT_RDAC2); + +DPOT_DEVICE_SHOW_SET(rdac3, DPOT_ADDR_RDAC | DPOT_RDAC3); +DPOT_DEVICE_SHOW_SET(eeprom3, DPOT_ADDR_EEPROM | DPOT_RDAC3); +DPOT_DEVICE_SHOW_ONLY(tolerance3, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC3); +DPOT_DEVICE_SHOW_SET(otp3, DPOT_ADDR_OTP | DPOT_RDAC3); +DPOT_DEVICE_SHOW_SET(otp3en, DPOT_ADDR_OTP_EN | DPOT_RDAC3); + +DPOT_DEVICE_SHOW_SET(rdac4, DPOT_ADDR_RDAC | DPOT_RDAC4); +DPOT_DEVICE_SHOW_SET(eeprom4, 
DPOT_ADDR_EEPROM | DPOT_RDAC4); +DPOT_DEVICE_SHOW_ONLY(tolerance4, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC4); +DPOT_DEVICE_SHOW_SET(otp4, DPOT_ADDR_OTP | DPOT_RDAC4); +DPOT_DEVICE_SHOW_SET(otp4en, DPOT_ADDR_OTP_EN | DPOT_RDAC4); + +DPOT_DEVICE_SHOW_SET(rdac5, DPOT_ADDR_RDAC | DPOT_RDAC5); +DPOT_DEVICE_SHOW_SET(eeprom5, DPOT_ADDR_EEPROM | DPOT_RDAC5); +DPOT_DEVICE_SHOW_ONLY(tolerance5, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC5); +DPOT_DEVICE_SHOW_SET(otp5, DPOT_ADDR_OTP | DPOT_RDAC5); +DPOT_DEVICE_SHOW_SET(otp5en, DPOT_ADDR_OTP_EN | DPOT_RDAC5); + +static const struct attribute *dpot_attrib_wipers[] = { + &dev_attr_rdac0.attr, + &dev_attr_rdac1.attr, + &dev_attr_rdac2.attr, + &dev_attr_rdac3.attr, + &dev_attr_rdac4.attr, + &dev_attr_rdac5.attr, + NULL +}; + +static const struct attribute *dpot_attrib_eeprom[] = { + &dev_attr_eeprom0.attr, + &dev_attr_eeprom1.attr, + &dev_attr_eeprom2.attr, + &dev_attr_eeprom3.attr, + &dev_attr_eeprom4.attr, + &dev_attr_eeprom5.attr, + NULL +}; + +static const struct attribute *dpot_attrib_otp[] = { + &dev_attr_otp0.attr, + &dev_attr_otp1.attr, + &dev_attr_otp2.attr, + &dev_attr_otp3.attr, + &dev_attr_otp4.attr, + &dev_attr_otp5.attr, + NULL +}; + +static const struct attribute *dpot_attrib_otp_en[] = { + &dev_attr_otp0en.attr, + &dev_attr_otp1en.attr, + &dev_attr_otp2en.attr, + &dev_attr_otp3en.attr, + &dev_attr_otp4en.attr, + &dev_attr_otp5en.attr, + NULL +}; + +static const struct attribute *dpot_attrib_tolerance[] = { + &dev_attr_tolerance0.attr, + &dev_attr_tolerance1.attr, + &dev_attr_tolerance2.attr, + &dev_attr_tolerance3.attr, + &dev_attr_tolerance4.attr, + &dev_attr_tolerance5.attr, + NULL +}; + +/* ------------------------------------------------------------------------- */ + +#define DPOT_DEVICE_DO_CMD(_name, _cmd) static ssize_t \ +set_##_name(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + return sysfs_do_cmd(dev, attr, buf, count, _cmd); \ +} \ +static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, NULL, set_##_name) + +DPOT_DEVICE_DO_CMD(inc_all, DPOT_INC_ALL); +DPOT_DEVICE_DO_CMD(dec_all, DPOT_DEC_ALL); +DPOT_DEVICE_DO_CMD(inc_all_6db, DPOT_INC_ALL_6DB); +DPOT_DEVICE_DO_CMD(dec_all_6db, DPOT_DEC_ALL_6DB); + +static struct attribute *ad525x_attributes_commands[] = { + &dev_attr_inc_all.attr, + &dev_attr_dec_all.attr, + &dev_attr_inc_all_6db.attr, + &dev_attr_dec_all_6db.attr, + NULL +}; + +static const struct attribute_group ad525x_group_commands = { + .attrs = ad525x_attributes_commands, +}; + +static int ad_dpot_add_files(struct device *dev, + unsigned int features, unsigned int rdac) +{ + int err = sysfs_create_file(&dev->kobj, + dpot_attrib_wipers[rdac]); + if (features & F_CMD_EEP) + err |= sysfs_create_file(&dev->kobj, + dpot_attrib_eeprom[rdac]); + if (features & F_CMD_TOL) + err |= sysfs_create_file(&dev->kobj, + dpot_attrib_tolerance[rdac]); + if (features & F_CMD_OTP) { + err |= sysfs_create_file(&dev->kobj, + dpot_attrib_otp_en[rdac]); + err |= sysfs_create_file(&dev->kobj, + dpot_attrib_otp[rdac]); + } + + if (err) + dev_err(dev, "failed to register sysfs hooks for RDAC%d\n", + rdac); + + return err; +} + +static inline void ad_dpot_remove_files(struct device *dev, + unsigned int features, unsigned int rdac) +{ + sysfs_remove_file(&dev->kobj, + dpot_attrib_wipers[rdac]); + if (features & F_CMD_EEP) + sysfs_remove_file(&dev->kobj, + dpot_attrib_eeprom[rdac]); + if (features & F_CMD_TOL) + sysfs_remove_file(&dev->kobj, + dpot_attrib_tolerance[rdac]); + if (features & F_CMD_OTP) { + 
sysfs_remove_file(&dev->kobj, + dpot_attrib_otp_en[rdac]); + sysfs_remove_file(&dev->kobj, + dpot_attrib_otp[rdac]); + } +} + +int ad_dpot_probe(struct device *dev, + struct ad_dpot_bus_data *bdata, unsigned long devid, + const char *name) +{ + + struct dpot_data *data; + int i, err = 0; + + data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + dev_set_drvdata(dev, data); + mutex_init(&data->update_lock); + + data->bdata = *bdata; + data->devid = devid; + + data->max_pos = 1 << DPOT_MAX_POS(devid); + data->rdac_mask = data->max_pos - 1; + data->feat = DPOT_FEAT(devid); + data->uid = DPOT_UID(devid); + data->wipers = DPOT_WIPERS(devid); + + for (i = DPOT_RDAC0; i < MAX_RDACS; i++) + if (data->wipers & (1 << i)) { + err = ad_dpot_add_files(dev, data->feat, i); + if (err) + goto exit_remove_files; + /* power-up midscale */ + if (data->feat & F_RDACS_WONLY) + data->rdac_cache[i] = data->max_pos / 2; + } + + if (data->feat & F_CMD_INC) + err = sysfs_create_group(&dev->kobj, &ad525x_group_commands); + + if (err) { + dev_err(dev, "failed to register sysfs hooks\n"); + goto exit_free; + } + + dev_info(dev, "%s %d-Position Digital Potentiometer registered\n", + name, data->max_pos); + + return 0; + +exit_remove_files: + for (i = DPOT_RDAC0; i < MAX_RDACS; i++) + if (data->wipers & (1 << i)) + ad_dpot_remove_files(dev, data->feat, i); + +exit_free: + kfree(data); + dev_set_drvdata(dev, NULL); +exit: + dev_err(dev, "failed to create client for %s ID 0x%lX\n", + name, devid); + return err; +} +EXPORT_SYMBOL(ad_dpot_probe); + +int ad_dpot_remove(struct device *dev) +{ + struct dpot_data *data = dev_get_drvdata(dev); + int i; + + for (i = DPOT_RDAC0; i < MAX_RDACS; i++) + if (data->wipers & (1 << i)) + ad_dpot_remove_files(dev, data->feat, i); + + kfree(data); + + return 0; +} +EXPORT_SYMBOL(ad_dpot_remove); + + +MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, " + "Michael Hennerich <michael.hennerich@analog.com>"); +MODULE_DESCRIPTION("Digital potentiometer driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h new file mode 100644 index 000000000..ee8dc9f5a --- /dev/null +++ b/drivers/misc/ad525x_dpot.h @@ -0,0 +1,214 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Driver for the Analog Devices digital potentiometers + * + * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc. 
+ */ + +#ifndef _AD_DPOT_H_ +#define _AD_DPOT_H_ + +#include <linux/types.h> + +#define DPOT_CONF(features, wipers, max_pos, uid) \ + (((features) << 18) | (((wipers) & 0xFF) << 10) | \ + ((max_pos & 0xF) << 6) | (uid & 0x3F)) + +#define DPOT_UID(conf) (conf & 0x3F) +#define DPOT_MAX_POS(conf) ((conf >> 6) & 0xF) +#define DPOT_WIPERS(conf) ((conf >> 10) & 0xFF) +#define DPOT_FEAT(conf) (conf >> 18) + +#define BRDAC0 (1 << 0) +#define BRDAC1 (1 << 1) +#define BRDAC2 (1 << 2) +#define BRDAC3 (1 << 3) +#define BRDAC4 (1 << 4) +#define BRDAC5 (1 << 5) +#define MAX_RDACS 6 + +#define F_CMD_INC (1 << 0) /* Features INC/DEC ALL, 6dB */ +#define F_CMD_EEP (1 << 1) /* Features EEPROM */ +#define F_CMD_OTP (1 << 2) /* Features OTP */ +#define F_CMD_TOL (1 << 3) /* RDACS feature Tolerance REG */ +#define F_RDACS_RW (1 << 4) /* RDACS are Read/Write */ +#define F_RDACS_WONLY (1 << 5) /* RDACS are Write only */ +#define F_AD_APPDATA (1 << 6) /* RDAC Address append to data */ +#define F_SPI_8BIT (1 << 7) /* All SPI XFERS are 8-bit */ +#define F_SPI_16BIT (1 << 8) /* All SPI XFERS are 16-bit */ +#define F_SPI_24BIT (1 << 9) /* All SPI XFERS are 24-bit */ + +#define F_RDACS_RW_TOL (F_RDACS_RW | F_CMD_EEP | F_CMD_TOL) +#define F_RDACS_RW_EEP (F_RDACS_RW | F_CMD_EEP) +#define F_SPI (F_SPI_8BIT | F_SPI_16BIT | F_SPI_24BIT) + +enum dpot_devid { + AD5258_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0), /* I2C */ + AD5259_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 8, 1), + AD5251_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC1 | BRDAC3, 6, 2), + AD5252_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC1 | BRDAC3, 8, 3), + AD5253_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 4), + AD5254_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 5), + AD5255_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC0 | BRDAC1 | BRDAC2, 9, 6), + AD5160_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 7), /* SPI */ + AD5161_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 8), + AD5162_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 9), + AD5165_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 10), + AD5200_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 11), + AD5201_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 5, 12), + AD5203_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 13), + AD5204_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 14), + AD5206_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3 | BRDAC4 | BRDAC5, + 8, 15), + AD5207_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 16), + AD5231_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT, + BRDAC0, 10, 17), + AD5232_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 18), + AD5233_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 19), + AD5235_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT, + BRDAC0 | BRDAC1, 10, 20), + AD5260_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 21), + AD5262_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 22), + AD5263_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 
23), + AD5290_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 24), + AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT | F_CMD_OTP, + BRDAC0, 8, 25), + AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT | F_CMD_OTP, + BRDAC0, 10, 26), + AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27), + AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 7, 28), + AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0, 8, 29), + AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 30), + AD8403_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2, 8, 31), + ADN2850_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT, + BRDAC0 | BRDAC1, 10, 32), + AD5241_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 33), + AD5242_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 34), + AD5243_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 35), + AD5245_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 36), + AD5246_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 37), + AD5247_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 38), + AD5248_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 39), + AD5280_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 40), + AD5282_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 41), + ADN2860_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC0 | BRDAC1 | BRDAC2, 9, 42), + AD5273_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 43), + AD5171_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 44), + AD5170_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 45), + AD5172_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 46), + AD5173_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 47), + AD5270_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP | F_SPI_16BIT, + BRDAC0, 10, 48), + AD5271_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP | F_SPI_16BIT, + BRDAC0, 8, 49), + AD5272_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 10, 50), + AD5274_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 51), +}; + +#define DPOT_RDAC0 0 +#define DPOT_RDAC1 1 +#define DPOT_RDAC2 2 +#define DPOT_RDAC3 3 +#define DPOT_RDAC4 4 +#define DPOT_RDAC5 5 + +#define DPOT_RDAC_MASK 0x1F + +#define DPOT_REG_TOL 0x18 +#define DPOT_TOL_RDAC0 (DPOT_REG_TOL | DPOT_RDAC0) +#define DPOT_TOL_RDAC1 (DPOT_REG_TOL | DPOT_RDAC1) +#define DPOT_TOL_RDAC2 (DPOT_REG_TOL | DPOT_RDAC2) +#define DPOT_TOL_RDAC3 (DPOT_REG_TOL | DPOT_RDAC3) +#define DPOT_TOL_RDAC4 (DPOT_REG_TOL | DPOT_RDAC4) +#define DPOT_TOL_RDAC5 (DPOT_REG_TOL | DPOT_RDAC5) + +/* RDAC-to-EEPROM Interface Commands */ +#define DPOT_ADDR_RDAC (0x0 << 5) +#define DPOT_ADDR_EEPROM (0x1 << 5) +#define DPOT_ADDR_OTP (0x1 << 6) +#define DPOT_ADDR_CMD (0x1 << 7) +#define DPOT_ADDR_OTP_EN (0x1 << 9) + +#define DPOT_DEC_ALL_6DB (DPOT_ADDR_CMD | (0x4 << 3)) +#define DPOT_INC_ALL_6DB (DPOT_ADDR_CMD | (0x9 << 3)) +#define DPOT_DEC_ALL (DPOT_ADDR_CMD | (0x6 << 3)) +#define DPOT_INC_ALL (DPOT_ADDR_CMD | (0xB << 3)) + +#define DPOT_SPI_RDAC 0xB0 +#define DPOT_SPI_EEPROM 0x30 +#define DPOT_SPI_READ_RDAC 0xA0 +#define DPOT_SPI_READ_EEPROM 0x90 +#define DPOT_SPI_DEC_ALL_6DB 0x50 +#define DPOT_SPI_INC_ALL_6DB 0xD0 +#define DPOT_SPI_DEC_ALL 0x70 +#define DPOT_SPI_INC_ALL 0xF0 + +/* AD5291/2/3 use special commands */ +#define DPOT_AD5291_RDAC 0x01 +#define DPOT_AD5291_READ_RDAC 0x02 +#define DPOT_AD5291_STORE_XTPM 0x03 +#define DPOT_AD5291_CTRLREG 0x06 +#define DPOT_AD5291_UNLOCK_CMD 0x03 + +/* AD5270/1/2/4 use special commands */ +#define DPOT_AD5270_1_2_4_RDAC 0x01 +#define DPOT_AD5270_1_2_4_READ_RDAC 0x02 +#define 
DPOT_AD5270_1_2_4_STORE_XTPM 0x03 +#define DPOT_AD5270_1_2_4_CTRLREG 0x07 +#define DPOT_AD5270_1_2_4_UNLOCK_CMD 0x03 + +#define DPOT_AD5282_RDAC_AB 0x80 + +#define DPOT_AD5273_FUSE 0x80 +#define DPOT_AD5170_2_3_FUSE 0x20 +#define DPOT_AD5170_2_3_OW 0x08 +#define DPOT_AD5172_3_A0 0x08 +#define DPOT_AD5170_2FUSE 0x80 + +struct dpot_data; + +struct ad_dpot_bus_ops { + int (*read_d8)(void *client); + int (*read_r8d8)(void *client, u8 reg); + int (*read_r8d16)(void *client, u8 reg); + int (*write_d8)(void *client, u8 val); + int (*write_r8d8)(void *client, u8 reg, u8 val); + int (*write_r8d16)(void *client, u8 reg, u16 val); +}; + +struct ad_dpot_bus_data { + void *client; + const struct ad_dpot_bus_ops *bops; +}; + +int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata, + unsigned long devid, const char *name); +int ad_dpot_remove(struct device *dev); + +#endif diff --git a/drivers/misc/altera-stapl/Kconfig b/drivers/misc/altera-stapl/Kconfig new file mode 100644 index 000000000..6c4c6575e --- /dev/null +++ b/drivers/misc/altera-stapl/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +comment "Altera FPGA firmware download module (requires I2C)" + depends on !I2C + +config ALTERA_STAPL + tristate "Altera FPGA firmware download module" + depends on I2C + help + An Altera FPGA module. Say Y when you want to support this tool. diff --git a/drivers/misc/altera-stapl/Makefile b/drivers/misc/altera-stapl/Makefile new file mode 100644 index 000000000..dd0f81896 --- /dev/null +++ b/drivers/misc/altera-stapl/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +altera-stapl-objs = altera-lpt.o altera-jtag.o altera-comp.o altera.o + +obj-$(CONFIG_ALTERA_STAPL) += altera-stapl.o diff --git a/drivers/misc/altera-stapl/altera-comp.c b/drivers/misc/altera-stapl/altera-comp.c new file mode 100644 index 000000000..4a63f51cc --- /dev/null +++ b/drivers/misc/altera-stapl/altera-comp.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * altera-comp.c + * + * altera FPGA driver + * + * Copyright (C) Altera Corporation 1998-2001 + * Copyright (C) 2010 NetUP Inc. + * Copyright (C) 2010 Igor M. 
Liplianin <liplianin@netup.ru> + */ + +#include <linux/kernel.h> +#include "altera-exprt.h" + +#define SHORT_BITS 16 +#define CHAR_BITS 8 +#define DATA_BLOB_LENGTH 3 +#define MATCH_DATA_LENGTH 8192 +#define ALTERA_REQUEST_SIZE 1024 +#define ALTERA_BUFFER_SIZE (MATCH_DATA_LENGTH + ALTERA_REQUEST_SIZE) + +static u32 altera_bits_req(u32 n) +{ + u32 result = SHORT_BITS; + + if (n == 0) + result = 1; + else { + /* Look for the highest non-zero bit position */ + while ((n & (1 << (SHORT_BITS - 1))) == 0) { + n <<= 1; + --result; + } + } + + return result; +} + +static u32 altera_read_packed(u8 *buffer, u32 bits, u32 *bits_avail, + u32 *in_index) +{ + u32 result = 0; + u32 shift = 0; + u32 databyte = 0; + + while (bits > 0) { + databyte = buffer[*in_index]; + result |= (((databyte >> (CHAR_BITS - *bits_avail)) + & (0xff >> (CHAR_BITS - *bits_avail))) << shift); + + if (bits <= *bits_avail) { + result &= (0xffff >> (SHORT_BITS - (bits + shift))); + *bits_avail -= bits; + bits = 0; + } else { + ++(*in_index); + shift += *bits_avail; + bits -= *bits_avail; + *bits_avail = CHAR_BITS; + } + } + + return result; +} + +u32 altera_shrink(u8 *in, u32 in_length, u8 *out, u32 out_length, s32 version) +{ + u32 i, j, data_length = 0L; + u32 offset, length; + u32 match_data_length = MATCH_DATA_LENGTH; + u32 bits_avail = CHAR_BITS; + u32 in_index = 0L; + + if (version > 0) + --match_data_length; + + for (i = 0; i < out_length; ++i) + out[i] = 0; + + /* Read number of bytes in data. */ + for (i = 0; i < sizeof(in_length); ++i) { + data_length = data_length | ( + altera_read_packed(in, + CHAR_BITS, + &bits_avail, + &in_index) << (i * CHAR_BITS)); + } + + if (data_length > out_length) { + data_length = 0L; + return data_length; + } + + i = 0; + while (i < data_length) { + /* A 0 bit indicates literal data. */ + if (altera_read_packed(in, 1, &bits_avail, + &in_index) == 0) { + for (j = 0; j < DATA_BLOB_LENGTH; ++j) { + if (i < data_length) { + out[i] = (u8)altera_read_packed(in, + CHAR_BITS, + &bits_avail, + &in_index); + i++; + } + } + } else { + /* A 1 bit indicates offset/length to follow. */ + offset = altera_read_packed(in, altera_bits_req((s16) + (i > match_data_length ? + match_data_length : i)), + &bits_avail, + &in_index); + length = altera_read_packed(in, CHAR_BITS, + &bits_avail, + &in_index); + for (j = 0; j < length; ++j) { + if (i < data_length) { + out[i] = out[i - offset]; + i++; + } + } + } + } + + return data_length; +} diff --git a/drivers/misc/altera-stapl/altera-exprt.h b/drivers/misc/altera-stapl/altera-exprt.h new file mode 100644 index 000000000..6a8b696ce --- /dev/null +++ b/drivers/misc/altera-stapl/altera-exprt.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * altera-exprt.h + * + * altera FPGA driver + * + * Copyright (C) Altera Corporation 1998-2001 + * Copyright (C) 2010 NetUP Inc. + * Copyright (C) 2010 Igor M. 
Liplianin <liplianin@netup.ru> + */ + +#ifndef ALTERA_EXPRT_H +#define ALTERA_EXPRT_H + + +u32 altera_shrink(u8 *in, u32 in_length, u8 *out, u32 out_length, s32 version); +int netup_jtag_io_lpt(void *device, int tms, int tdi, int read_tdo); + +#endif /* ALTERA_EXPRT_H */ diff --git a/drivers/misc/altera-stapl/altera-jtag.c b/drivers/misc/altera-stapl/altera-jtag.c new file mode 100644 index 000000000..27e8e0c9e --- /dev/null +++ b/drivers/misc/altera-stapl/altera-jtag.c @@ -0,0 +1,1007 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * altera-jtag.c + * + * altera FPGA driver + * + * Copyright (C) Altera Corporation 1998-2001 + * Copyright (C) 2010 NetUP Inc. + * Copyright (C) 2010 Igor M. Liplianin <liplianin@netup.ru> + */ + +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/slab.h> +#include <misc/altera.h> +#include "altera-exprt.h" +#include "altera-jtag.h" + +#define alt_jtag_io(a, b, c)\ + astate->config->jtag_io(astate->config->dev, a, b, c); + +#define alt_malloc(a) kzalloc(a, GFP_KERNEL); + +/* + * This structure shows, for each JTAG state, which state is reached after + * a single TCK clock cycle with TMS high or TMS low, respectively. This + * describes all possible state transitions in the JTAG state machine. + */ +struct altera_jtag_machine { + enum altera_jtag_state tms_high; + enum altera_jtag_state tms_low; +}; + +static const struct altera_jtag_machine altera_transitions[] = { + /* RESET */ { RESET, IDLE }, + /* IDLE */ { DRSELECT, IDLE }, + /* DRSELECT */ { IRSELECT, DRCAPTURE }, + /* DRCAPTURE */ { DREXIT1, DRSHIFT }, + /* DRSHIFT */ { DREXIT1, DRSHIFT }, + /* DREXIT1 */ { DRUPDATE, DRPAUSE }, + /* DRPAUSE */ { DREXIT2, DRPAUSE }, + /* DREXIT2 */ { DRUPDATE, DRSHIFT }, + /* DRUPDATE */ { DRSELECT, IDLE }, + /* IRSELECT */ { RESET, IRCAPTURE }, + /* IRCAPTURE */ { IREXIT1, IRSHIFT }, + /* IRSHIFT */ { IREXIT1, IRSHIFT }, + /* IREXIT1 */ { IRUPDATE, IRPAUSE }, + /* IRPAUSE */ { IREXIT2, IRPAUSE }, + /* IREXIT2 */ { IRUPDATE, IRSHIFT }, + /* IRUPDATE */ { DRSELECT, IDLE } +}; + +/* + * This table contains the TMS value to be used to take the NEXT STEP on + * the path to the desired state. The array index is the current state, + * and the bit position is the desired endstate. To find out which state + * is used as the intermediate state, look up the TMS value in the + * altera_transitions[] table. 
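+ *
+ * For example: to reach IRSHIFT (11) from IDLE (1), bit 11 of
+ * altera_jtag_path_map[1] = 0xFFFD is set, so TMS is driven high and the
+ * machine steps to DRSELECT; repeating the lookup from each new state
+ * gives TMS = 1 (to IRSELECT), TMS = 0 (to IRCAPTURE) and TMS = 0
+ * (to IRSHIFT), i.e. four TCK cycles in total.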
+ */ +static const u16 altera_jtag_path_map[16] = { + /* RST RTI SDRS CDR SDR E1DR PDR E2DR */ + 0x0001, 0xFFFD, 0xFE01, 0xFFE7, 0xFFEF, 0xFF0F, 0xFFBF, 0xFFFF, + /* UDR SIRS CIR SIR E1IR PIR E2IR UIR */ + 0xFEFD, 0x0001, 0xF3FF, 0xF7FF, 0x87FF, 0xDFFF, 0xFFFF, 0x7FFD +}; + +/* Flag bits for alt_jtag_io() function */ +#define TMS_HIGH 1 +#define TMS_LOW 0 +#define TDI_HIGH 1 +#define TDI_LOW 0 +#define READ_TDO 1 +#define IGNORE_TDO 0 + +int altera_jinit(struct altera_state *astate) +{ + struct altera_jtag *js = &astate->js; + + /* initial JTAG state is unknown */ + js->jtag_state = ILLEGAL_JTAG_STATE; + + /* initialize to default state */ + js->drstop_state = IDLE; + js->irstop_state = IDLE; + js->dr_pre = 0; + js->dr_post = 0; + js->ir_pre = 0; + js->ir_post = 0; + js->dr_length = 0; + js->ir_length = 0; + + js->dr_pre_data = NULL; + js->dr_post_data = NULL; + js->ir_pre_data = NULL; + js->ir_post_data = NULL; + js->dr_buffer = NULL; + js->ir_buffer = NULL; + + return 0; +} + +int altera_set_drstop(struct altera_jtag *js, enum altera_jtag_state state) +{ + js->drstop_state = state; + + return 0; +} + +int altera_set_irstop(struct altera_jtag *js, enum altera_jtag_state state) +{ + js->irstop_state = state; + + return 0; +} + +int altera_set_dr_pre(struct altera_jtag *js, + u32 count, u32 start_index, + u8 *preamble_data) +{ + int status = 0; + u32 i; + u32 j; + + if (count > js->dr_pre) { + kfree(js->dr_pre_data); + js->dr_pre_data = (u8 *)alt_malloc((count + 7) >> 3); + if (js->dr_pre_data == NULL) + status = -ENOMEM; + else + js->dr_pre = count; + } else + js->dr_pre = count; + + if (status == 0) { + for (i = 0; i < count; ++i) { + j = i + start_index; + + if (preamble_data == NULL) + js->dr_pre_data[i >> 3] |= (1 << (i & 7)); + else { + if (preamble_data[j >> 3] & (1 << (j & 7))) + js->dr_pre_data[i >> 3] |= + (1 << (i & 7)); + else + js->dr_pre_data[i >> 3] &= + ~(u32)(1 << (i & 7)); + + } + } + } + + return status; +} + +int altera_set_ir_pre(struct altera_jtag *js, u32 count, u32 start_index, + u8 *preamble_data) +{ + int status = 0; + u32 i; + u32 j; + + if (count > js->ir_pre) { + kfree(js->ir_pre_data); + js->ir_pre_data = (u8 *)alt_malloc((count + 7) >> 3); + if (js->ir_pre_data == NULL) + status = -ENOMEM; + else + js->ir_pre = count; + + } else + js->ir_pre = count; + + if (status == 0) { + for (i = 0; i < count; ++i) { + j = i + start_index; + if (preamble_data == NULL) + js->ir_pre_data[i >> 3] |= (1 << (i & 7)); + else { + if (preamble_data[j >> 3] & (1 << (j & 7))) + js->ir_pre_data[i >> 3] |= + (1 << (i & 7)); + else + js->ir_pre_data[i >> 3] &= + ~(u32)(1 << (i & 7)); + + } + } + } + + return status; +} + +int altera_set_dr_post(struct altera_jtag *js, u32 count, u32 start_index, + u8 *postamble_data) +{ + int status = 0; + u32 i; + u32 j; + + if (count > js->dr_post) { + kfree(js->dr_post_data); + js->dr_post_data = (u8 *)alt_malloc((count + 7) >> 3); + + if (js->dr_post_data == NULL) + status = -ENOMEM; + else + js->dr_post = count; + + } else + js->dr_post = count; + + if (status == 0) { + for (i = 0; i < count; ++i) { + j = i + start_index; + + if (postamble_data == NULL) + js->dr_post_data[i >> 3] |= (1 << (i & 7)); + else { + if (postamble_data[j >> 3] & (1 << (j & 7))) + js->dr_post_data[i >> 3] |= + (1 << (i & 7)); + else + js->dr_post_data[i >> 3] &= + ~(u32)(1 << (i & 7)); + + } + } + } + + return status; +} + +int altera_set_ir_post(struct altera_jtag *js, u32 count, u32 start_index, + u8 *postamble_data) +{ + int status = 0; + u32 i; + u32 j; + + if (count > 
js->ir_post) { + kfree(js->ir_post_data); + js->ir_post_data = (u8 *)alt_malloc((count + 7) >> 3); + if (js->ir_post_data == NULL) + status = -ENOMEM; + else + js->ir_post = count; + + } else + js->ir_post = count; + + if (status != 0) + return status; + + for (i = 0; i < count; ++i) { + j = i + start_index; + + if (postamble_data == NULL) + js->ir_post_data[i >> 3] |= (1 << (i & 7)); + else { + if (postamble_data[j >> 3] & (1 << (j & 7))) + js->ir_post_data[i >> 3] |= (1 << (i & 7)); + else + js->ir_post_data[i >> 3] &= + ~(u32)(1 << (i & 7)); + + } + } + + return status; +} + +static void altera_jreset_idle(struct altera_state *astate) +{ + struct altera_jtag *js = &astate->js; + int i; + /* Go to Test Logic Reset (no matter what the starting state may be) */ + for (i = 0; i < 5; ++i) + alt_jtag_io(TMS_HIGH, TDI_LOW, IGNORE_TDO); + + /* Now step to Run Test / Idle */ + alt_jtag_io(TMS_LOW, TDI_LOW, IGNORE_TDO); + js->jtag_state = IDLE; +} + +int altera_goto_jstate(struct altera_state *astate, + enum altera_jtag_state state) +{ + struct altera_jtag *js = &astate->js; + int tms; + int count = 0; + int status = 0; + + if (js->jtag_state == ILLEGAL_JTAG_STATE) + /* initialize JTAG chain to known state */ + altera_jreset_idle(astate); + + if (js->jtag_state == state) { + /* + * We are already in the desired state. + * If it is a stable state, loop here. + * Otherwise do nothing (no clock cycles). + */ + if ((state == IDLE) || (state == DRSHIFT) || + (state == DRPAUSE) || (state == IRSHIFT) || + (state == IRPAUSE)) { + alt_jtag_io(TMS_LOW, TDI_LOW, IGNORE_TDO); + } else if (state == RESET) + alt_jtag_io(TMS_HIGH, TDI_LOW, IGNORE_TDO); + + } else { + while ((js->jtag_state != state) && (count < 9)) { + /* Get TMS value to take a step toward desired state */ + tms = (altera_jtag_path_map[js->jtag_state] & + (1 << state)) + ? TMS_HIGH : TMS_LOW; + + /* Take a step */ + alt_jtag_io(tms, TDI_LOW, IGNORE_TDO); + + if (tms) + js->jtag_state = + altera_transitions[js->jtag_state].tms_high; + else + js->jtag_state = + altera_transitions[js->jtag_state].tms_low; + + ++count; + } + } + + if (js->jtag_state != state) + status = -EREMOTEIO; + + return status; +} + +int altera_wait_cycles(struct altera_state *astate, + s32 cycles, + enum altera_jtag_state wait_state) +{ + struct altera_jtag *js = &astate->js; + int tms; + s32 count; + int status = 0; + + if (js->jtag_state != wait_state) + status = altera_goto_jstate(astate, wait_state); + + if (status == 0) { + /* + * Set TMS high to loop in RESET state + * Set TMS low to loop in any other stable state + */ + tms = (wait_state == RESET) ? TMS_HIGH : TMS_LOW; + + for (count = 0L; count < cycles; count++) + alt_jtag_io(tms, TDI_LOW, IGNORE_TDO); + + } + + return status; +} + +int altera_wait_msecs(struct altera_state *astate, + s32 microseconds, enum altera_jtag_state wait_state) +/* + * Causes JTAG hardware to sit in the specified stable + * state for the specified duration of real time. If + * no JTAG operations have been performed yet, then only + * a delay is performed. This permits the WAIT USECS + * statement to be used in VECTOR programs without causing + * any JTAG operations. + * Returns 0 for success, else appropriate error code. 
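+ * Note that, despite the "msecs" in the function name, the delay
+ * argument is expressed in microseconds and is implemented with
+ * udelay() below.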
+ */ +{ + struct altera_jtag *js = &astate->js; + int status = 0; + + if ((js->jtag_state != ILLEGAL_JTAG_STATE) && + (js->jtag_state != wait_state)) + status = altera_goto_jstate(astate, wait_state); + + if (status == 0) + /* Wait for specified time interval */ + udelay(microseconds); + + return status; +} + +static void altera_concatenate_data(u8 *buffer, + u8 *preamble_data, + u32 preamble_count, + u8 *target_data, + u32 start_index, + u32 target_count, + u8 *postamble_data, + u32 postamble_count) +/* + * Copies preamble data, target data, and postamble data + * into one buffer for IR or DR scans. + */ +{ + u32 i, j, k; + + for (i = 0L; i < preamble_count; ++i) { + if (preamble_data[i >> 3L] & (1L << (i & 7L))) + buffer[i >> 3L] |= (1L << (i & 7L)); + else + buffer[i >> 3L] &= ~(u32)(1L << (i & 7L)); + + } + + j = start_index; + k = preamble_count + target_count; + for (; i < k; ++i, ++j) { + if (target_data[j >> 3L] & (1L << (j & 7L))) + buffer[i >> 3L] |= (1L << (i & 7L)); + else + buffer[i >> 3L] &= ~(u32)(1L << (i & 7L)); + + } + + j = 0L; + k = preamble_count + target_count + postamble_count; + for (; i < k; ++i, ++j) { + if (postamble_data[j >> 3L] & (1L << (j & 7L))) + buffer[i >> 3L] |= (1L << (i & 7L)); + else + buffer[i >> 3L] &= ~(u32)(1L << (i & 7L)); + + } +} + +static int alt_jtag_drscan(struct altera_state *astate, + int start_state, + int count, + u8 *tdi, + u8 *tdo) +{ + int i = 0; + int tdo_bit = 0; + int status = 1; + + /* First go to DRSHIFT state */ + switch (start_state) { + case 0: /* IDLE */ + alt_jtag_io(1, 0, 0); /* DRSELECT */ + alt_jtag_io(0, 0, 0); /* DRCAPTURE */ + alt_jtag_io(0, 0, 0); /* DRSHIFT */ + break; + + case 1: /* DRPAUSE */ + alt_jtag_io(1, 0, 0); /* DREXIT2 */ + alt_jtag_io(1, 0, 0); /* DRUPDATE */ + alt_jtag_io(1, 0, 0); /* DRSELECT */ + alt_jtag_io(0, 0, 0); /* DRCAPTURE */ + alt_jtag_io(0, 0, 0); /* DRSHIFT */ + break; + + case 2: /* IRPAUSE */ + alt_jtag_io(1, 0, 0); /* IREXIT2 */ + alt_jtag_io(1, 0, 0); /* IRUPDATE */ + alt_jtag_io(1, 0, 0); /* DRSELECT */ + alt_jtag_io(0, 0, 0); /* DRCAPTURE */ + alt_jtag_io(0, 0, 0); /* DRSHIFT */ + break; + + default: + status = 0; + } + + if (status) { + /* loop in the SHIFT-DR state */ + for (i = 0; i < count; i++) { + tdo_bit = alt_jtag_io( + (i == count - 1), + tdi[i >> 3] & (1 << (i & 7)), + (tdo != NULL)); + + if (tdo != NULL) { + if (tdo_bit) + tdo[i >> 3] |= (1 << (i & 7)); + else + tdo[i >> 3] &= ~(u32)(1 << (i & 7)); + + } + } + + alt_jtag_io(0, 0, 0); /* DRPAUSE */ + } + + return status; +} + +static int alt_jtag_irscan(struct altera_state *astate, + int start_state, + int count, + u8 *tdi, + u8 *tdo) +{ + int i = 0; + int tdo_bit = 0; + int status = 1; + + /* First go to IRSHIFT state */ + switch (start_state) { + case 0: /* IDLE */ + alt_jtag_io(1, 0, 0); /* DRSELECT */ + alt_jtag_io(1, 0, 0); /* IRSELECT */ + alt_jtag_io(0, 0, 0); /* IRCAPTURE */ + alt_jtag_io(0, 0, 0); /* IRSHIFT */ + break; + + case 1: /* DRPAUSE */ + alt_jtag_io(1, 0, 0); /* DREXIT2 */ + alt_jtag_io(1, 0, 0); /* DRUPDATE */ + alt_jtag_io(1, 0, 0); /* DRSELECT */ + alt_jtag_io(1, 0, 0); /* IRSELECT */ + alt_jtag_io(0, 0, 0); /* IRCAPTURE */ + alt_jtag_io(0, 0, 0); /* IRSHIFT */ + break; + + case 2: /* IRPAUSE */ + alt_jtag_io(1, 0, 0); /* IREXIT2 */ + alt_jtag_io(1, 0, 0); /* IRUPDATE */ + alt_jtag_io(1, 0, 0); /* DRSELECT */ + alt_jtag_io(1, 0, 0); /* IRSELECT */ + alt_jtag_io(0, 0, 0); /* IRCAPTURE */ + alt_jtag_io(0, 0, 0); /* IRSHIFT */ + break; + + default: + status = 0; + } + + if (status) { + /* loop in the 
SHIFT-IR state */ + for (i = 0; i < count; i++) { + tdo_bit = alt_jtag_io( + (i == count - 1), + tdi[i >> 3] & (1 << (i & 7)), + (tdo != NULL)); + if (tdo != NULL) { + if (tdo_bit) + tdo[i >> 3] |= (1 << (i & 7)); + else + tdo[i >> 3] &= ~(u32)(1 << (i & 7)); + + } + } + + alt_jtag_io(0, 0, 0); /* IRPAUSE */ + } + + return status; +} + +static void altera_extract_target_data(u8 *buffer, + u8 *target_data, + u32 start_index, + u32 preamble_count, + u32 target_count) +/* + * Copies target data from scan buffer, filtering out + * preamble and postamble data. + */ +{ + u32 i; + u32 j; + u32 k; + + j = preamble_count; + k = start_index + target_count; + for (i = start_index; i < k; ++i, ++j) { + if (buffer[j >> 3] & (1 << (j & 7))) + target_data[i >> 3] |= (1 << (i & 7)); + else + target_data[i >> 3] &= ~(u32)(1 << (i & 7)); + + } +} + +int altera_irscan(struct altera_state *astate, + u32 count, + u8 *tdi_data, + u32 start_index) +/* Shifts data into instruction register */ +{ + struct altera_jtag *js = &astate->js; + int start_code = 0; + u32 alloc_chars = 0; + u32 shift_count = js->ir_pre + count + js->ir_post; + int status = 0; + enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE; + + switch (js->jtag_state) { + case ILLEGAL_JTAG_STATE: + case RESET: + case IDLE: + start_code = 0; + start_state = IDLE; + break; + + case DRSELECT: + case DRCAPTURE: + case DRSHIFT: + case DREXIT1: + case DRPAUSE: + case DREXIT2: + case DRUPDATE: + start_code = 1; + start_state = DRPAUSE; + break; + + case IRSELECT: + case IRCAPTURE: + case IRSHIFT: + case IREXIT1: + case IRPAUSE: + case IREXIT2: + case IRUPDATE: + start_code = 2; + start_state = IRPAUSE; + break; + + default: + status = -EREMOTEIO; + break; + } + + if (status == 0) + if (js->jtag_state != start_state) + status = altera_goto_jstate(astate, start_state); + + if (status == 0) { + if (shift_count > js->ir_length) { + alloc_chars = (shift_count + 7) >> 3; + kfree(js->ir_buffer); + js->ir_buffer = (u8 *)alt_malloc(alloc_chars); + if (js->ir_buffer == NULL) + status = -ENOMEM; + else + js->ir_length = alloc_chars * 8; + + } + } + + if (status == 0) { + /* + * Copy preamble data, IR data, + * and postamble data into a buffer + */ + altera_concatenate_data(js->ir_buffer, + js->ir_pre_data, + js->ir_pre, + tdi_data, + start_index, + count, + js->ir_post_data, + js->ir_post); + /* Do the IRSCAN */ + alt_jtag_irscan(astate, + start_code, + shift_count, + js->ir_buffer, + NULL); + + /* alt_jtag_irscan() always ends in IRPAUSE state */ + js->jtag_state = IRPAUSE; + } + + if (status == 0) + if (js->irstop_state != IRPAUSE) + status = altera_goto_jstate(astate, js->irstop_state); + + + return status; +} + +int altera_swap_ir(struct altera_state *astate, + u32 count, + u8 *in_data, + u32 in_index, + u8 *out_data, + u32 out_index) +/* Shifts data into instruction register, capturing output data */ +{ + struct altera_jtag *js = &astate->js; + int start_code = 0; + u32 alloc_chars = 0; + u32 shift_count = js->ir_pre + count + js->ir_post; + int status = 0; + enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE; + + switch (js->jtag_state) { + case ILLEGAL_JTAG_STATE: + case RESET: + case IDLE: + start_code = 0; + start_state = IDLE; + break; + + case DRSELECT: + case DRCAPTURE: + case DRSHIFT: + case DREXIT1: + case DRPAUSE: + case DREXIT2: + case DRUPDATE: + start_code = 1; + start_state = DRPAUSE; + break; + + case IRSELECT: + case IRCAPTURE: + case IRSHIFT: + case IREXIT1: + case IRPAUSE: + case IREXIT2: + case IRUPDATE: + start_code = 2; + start_state 
= IRPAUSE; + break; + + default: + status = -EREMOTEIO; + break; + } + + if (status == 0) + if (js->jtag_state != start_state) + status = altera_goto_jstate(astate, start_state); + + if (status == 0) { + if (shift_count > js->ir_length) { + alloc_chars = (shift_count + 7) >> 3; + kfree(js->ir_buffer); + js->ir_buffer = (u8 *)alt_malloc(alloc_chars); + if (js->ir_buffer == NULL) + status = -ENOMEM; + else + js->ir_length = alloc_chars * 8; + + } + } + + if (status == 0) { + /* + * Copy preamble data, IR data, + * and postamble data into a buffer + */ + altera_concatenate_data(js->ir_buffer, + js->ir_pre_data, + js->ir_pre, + in_data, + in_index, + count, + js->ir_post_data, + js->ir_post); + + /* Do the IRSCAN */ + alt_jtag_irscan(astate, + start_code, + shift_count, + js->ir_buffer, + js->ir_buffer); + + /* alt_jtag_irscan() always ends in IRPAUSE state */ + js->jtag_state = IRPAUSE; + } + + if (status == 0) + if (js->irstop_state != IRPAUSE) + status = altera_goto_jstate(astate, js->irstop_state); + + + if (status == 0) + /* Now extract the returned data from the buffer */ + altera_extract_target_data(js->ir_buffer, + out_data, out_index, + js->ir_pre, count); + + return status; +} + +int altera_drscan(struct altera_state *astate, + u32 count, + u8 *tdi_data, + u32 start_index) +/* Shifts data into data register (ignoring output data) */ +{ + struct altera_jtag *js = &astate->js; + int start_code = 0; + u32 alloc_chars = 0; + u32 shift_count = js->dr_pre + count + js->dr_post; + int status = 0; + enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE; + + switch (js->jtag_state) { + case ILLEGAL_JTAG_STATE: + case RESET: + case IDLE: + start_code = 0; + start_state = IDLE; + break; + + case DRSELECT: + case DRCAPTURE: + case DRSHIFT: + case DREXIT1: + case DRPAUSE: + case DREXIT2: + case DRUPDATE: + start_code = 1; + start_state = DRPAUSE; + break; + + case IRSELECT: + case IRCAPTURE: + case IRSHIFT: + case IREXIT1: + case IRPAUSE: + case IREXIT2: + case IRUPDATE: + start_code = 2; + start_state = IRPAUSE; + break; + + default: + status = -EREMOTEIO; + break; + } + + if (status == 0) + if (js->jtag_state != start_state) + status = altera_goto_jstate(astate, start_state); + + if (status == 0) { + if (shift_count > js->dr_length) { + alloc_chars = (shift_count + 7) >> 3; + kfree(js->dr_buffer); + js->dr_buffer = (u8 *)alt_malloc(alloc_chars); + if (js->dr_buffer == NULL) + status = -ENOMEM; + else + js->dr_length = alloc_chars * 8; + + } + } + + if (status == 0) { + /* + * Copy preamble data, DR data, + * and postamble data into a buffer + */ + altera_concatenate_data(js->dr_buffer, + js->dr_pre_data, + js->dr_pre, + tdi_data, + start_index, + count, + js->dr_post_data, + js->dr_post); + /* Do the DRSCAN */ + alt_jtag_drscan(astate, start_code, shift_count, + js->dr_buffer, NULL); + /* alt_jtag_drscan() always ends in DRPAUSE state */ + js->jtag_state = DRPAUSE; + } + + if (status == 0) + if (js->drstop_state != DRPAUSE) + status = altera_goto_jstate(astate, js->drstop_state); + + return status; +} + +int altera_swap_dr(struct altera_state *astate, u32 count, + u8 *in_data, u32 in_index, + u8 *out_data, u32 out_index) +/* Shifts data into data register, capturing output data */ +{ + struct altera_jtag *js = &astate->js; + int start_code = 0; + u32 alloc_chars = 0; + u32 shift_count = js->dr_pre + count + js->dr_post; + int status = 0; + enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE; + + switch (js->jtag_state) { + case ILLEGAL_JTAG_STATE: + case RESET: + case IDLE: + start_code = 
0; + start_state = IDLE; + break; + + case DRSELECT: + case DRCAPTURE: + case DRSHIFT: + case DREXIT1: + case DRPAUSE: + case DREXIT2: + case DRUPDATE: + start_code = 1; + start_state = DRPAUSE; + break; + + case IRSELECT: + case IRCAPTURE: + case IRSHIFT: + case IREXIT1: + case IRPAUSE: + case IREXIT2: + case IRUPDATE: + start_code = 2; + start_state = IRPAUSE; + break; + + default: + status = -EREMOTEIO; + break; + } + + if (status == 0) + if (js->jtag_state != start_state) + status = altera_goto_jstate(astate, start_state); + + if (status == 0) { + if (shift_count > js->dr_length) { + alloc_chars = (shift_count + 7) >> 3; + kfree(js->dr_buffer); + js->dr_buffer = (u8 *)alt_malloc(alloc_chars); + + if (js->dr_buffer == NULL) + status = -ENOMEM; + else + js->dr_length = alloc_chars * 8; + + } + } + + if (status == 0) { + /* + * Copy preamble data, DR data, + * and postamble data into a buffer + */ + altera_concatenate_data(js->dr_buffer, + js->dr_pre_data, + js->dr_pre, + in_data, + in_index, + count, + js->dr_post_data, + js->dr_post); + + /* Do the DRSCAN */ + alt_jtag_drscan(astate, + start_code, + shift_count, + js->dr_buffer, + js->dr_buffer); + + /* alt_jtag_drscan() always ends in DRPAUSE state */ + js->jtag_state = DRPAUSE; + } + + if (status == 0) + if (js->drstop_state != DRPAUSE) + status = altera_goto_jstate(astate, js->drstop_state); + + if (status == 0) + /* Now extract the returned data from the buffer */ + altera_extract_target_data(js->dr_buffer, + out_data, + out_index, + js->dr_pre, + count); + + return status; +} + +void altera_free_buffers(struct altera_state *astate) +{ + struct altera_jtag *js = &astate->js; + /* If the JTAG interface was used, reset it to TLR */ + if (js->jtag_state != ILLEGAL_JTAG_STATE) + altera_jreset_idle(astate); + + kfree(js->dr_pre_data); + js->dr_pre_data = NULL; + + kfree(js->dr_post_data); + js->dr_post_data = NULL; + + kfree(js->dr_buffer); + js->dr_buffer = NULL; + + kfree(js->ir_pre_data); + js->ir_pre_data = NULL; + + kfree(js->ir_post_data); + js->ir_post_data = NULL; + + kfree(js->ir_buffer); + js->ir_buffer = NULL; +} diff --git a/drivers/misc/altera-stapl/altera-jtag.h b/drivers/misc/altera-stapl/altera-jtag.h new file mode 100644 index 000000000..90235b31e --- /dev/null +++ b/drivers/misc/altera-stapl/altera-jtag.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * altera-jtag.h + * + * altera FPGA driver + * + * Copyright (C) Altera Corporation 1998-2001 + * Copyright (C) 2010 NetUP Inc. + * Copyright (C) 2010 Igor M. 
Liplianin <liplianin@netup.ru> + */ + +#ifndef ALTERA_JTAG_H +#define ALTERA_JTAG_H + +/* Function Prototypes */ +enum altera_jtag_state { + ILLEGAL_JTAG_STATE = -1, + RESET = 0, + IDLE = 1, + DRSELECT = 2, + DRCAPTURE = 3, + DRSHIFT = 4, + DREXIT1 = 5, + DRPAUSE = 6, + DREXIT2 = 7, + DRUPDATE = 8, + IRSELECT = 9, + IRCAPTURE = 10, + IRSHIFT = 11, + IREXIT1 = 12, + IRPAUSE = 13, + IREXIT2 = 14, + IRUPDATE = 15 + +}; + +struct altera_jtag { + /* Global variable to store the current JTAG state */ + enum altera_jtag_state jtag_state; + + /* Store current stop-state for DR and IR scan commands */ + enum altera_jtag_state drstop_state; + enum altera_jtag_state irstop_state; + + /* Store current padding values */ + u32 dr_pre; + u32 dr_post; + u32 ir_pre; + u32 ir_post; + u32 dr_length; + u32 ir_length; + u8 *dr_pre_data; + u8 *dr_post_data; + u8 *ir_pre_data; + u8 *ir_post_data; + u8 *dr_buffer; + u8 *ir_buffer; +}; + +#define ALTERA_STACK_SIZE 128 +#define ALTERA_MESSAGE_LENGTH 1024 + +struct altera_state { + struct altera_config *config; + struct altera_jtag js; + char msg_buff[ALTERA_MESSAGE_LENGTH + 1]; + long stack[ALTERA_STACK_SIZE]; +}; + +int altera_jinit(struct altera_state *astate); +int altera_set_drstop(struct altera_jtag *js, enum altera_jtag_state state); +int altera_set_irstop(struct altera_jtag *js, enum altera_jtag_state state); +int altera_set_dr_pre(struct altera_jtag *js, u32 count, u32 start_index, + u8 *preamble_data); +int altera_set_ir_pre(struct altera_jtag *js, u32 count, u32 start_index, + u8 *preamble_data); +int altera_set_dr_post(struct altera_jtag *js, u32 count, u32 start_index, + u8 *postamble_data); +int altera_set_ir_post(struct altera_jtag *js, u32 count, u32 start_index, + u8 *postamble_data); +int altera_goto_jstate(struct altera_state *astate, + enum altera_jtag_state state); +int altera_wait_cycles(struct altera_state *astate, s32 cycles, + enum altera_jtag_state wait_state); +int altera_wait_msecs(struct altera_state *astate, s32 microseconds, + enum altera_jtag_state wait_state); +int altera_irscan(struct altera_state *astate, u32 count, + u8 *tdi_data, u32 start_index); +int altera_swap_ir(struct altera_state *astate, + u32 count, u8 *in_data, + u32 in_index, u8 *out_data, + u32 out_index); +int altera_drscan(struct altera_state *astate, u32 count, + u8 *tdi_data, u32 start_index); +int altera_swap_dr(struct altera_state *astate, u32 count, + u8 *in_data, u32 in_index, + u8 *out_data, u32 out_index); +void altera_free_buffers(struct altera_state *astate); +#endif /* ALTERA_JTAG_H */ diff --git a/drivers/misc/altera-stapl/altera-lpt.c b/drivers/misc/altera-stapl/altera-lpt.c new file mode 100644 index 000000000..2b7d9cf41 --- /dev/null +++ b/drivers/misc/altera-stapl/altera-lpt.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * altera-lpt.c + * + * altera FPGA driver + * + * Copyright (C) Altera Corporation 1998-2001 + * Copyright (C) 2010 NetUP Inc. 
+ * Copyright (C) 2010 Abylay Ospan <aospan@netup.ru> + */ + +#include <linux/io.h> +#include <linux/kernel.h> +#include "altera-exprt.h" + +static int lpt_hardware_initialized; + +static void byteblaster_write(int port, int data) +{ + outb((u8)data, (u16)(port + 0x378)); +}; + +static int byteblaster_read(int port) +{ + int data = 0; + data = inb((u16)(port + 0x378)); + return data & 0xff; +}; + +int netup_jtag_io_lpt(void *device, int tms, int tdi, int read_tdo) +{ + int data = 0; + int tdo = 0; + int initial_lpt_ctrl = 0; + + if (!lpt_hardware_initialized) { + initial_lpt_ctrl = byteblaster_read(2); + byteblaster_write(2, (initial_lpt_ctrl | 0x02) & 0xdf); + lpt_hardware_initialized = 1; + } + + data = ((tdi ? 0x40 : 0) | (tms ? 0x02 : 0)); + + byteblaster_write(0, data); + + if (read_tdo) { + tdo = byteblaster_read(1); + tdo = ((tdo & 0x80) ? 0 : 1); + } + + byteblaster_write(0, data | 0x01); + + byteblaster_write(0, data); + + return tdo; +} diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c new file mode 100644 index 000000000..5bdf57472 --- /dev/null +++ b/drivers/misc/altera-stapl/altera.c @@ -0,0 +1,2522 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * altera.c + * + * altera FPGA driver + * + * Copyright (C) Altera Corporation 1998-2001 + * Copyright (C) 2010,2011 NetUP Inc. + * Copyright (C) 2010,2011 Igor M. Liplianin <liplianin@netup.ru> + */ + +#include <asm/unaligned.h> +#include <linux/ctype.h> +#include <linux/string.h> +#include <linux/firmware.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <misc/altera.h> +#include "altera-exprt.h" +#include "altera-jtag.h" + +static int debug = 1; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "enable debugging information"); + +MODULE_DESCRIPTION("altera FPGA kernel module"); +MODULE_AUTHOR("Igor M. Liplianin <liplianin@netup.ru>"); +MODULE_LICENSE("GPL"); + +#define dprintk(args...) \ + if (debug) { \ + printk(KERN_DEBUG args); \ + } + +enum altera_fpga_opcode { + OP_NOP = 0, + OP_DUP, + OP_SWP, + OP_ADD, + OP_SUB, + OP_MULT, + OP_DIV, + OP_MOD, + OP_SHL, + OP_SHR, + OP_NOT, + OP_AND, + OP_OR, + OP_XOR, + OP_INV, + OP_GT, + OP_LT, + OP_RET, + OP_CMPS, + OP_PINT, + OP_PRNT, + OP_DSS, + OP_DSSC, + OP_ISS, + OP_ISSC, + OP_DPR = 0x1c, + OP_DPRL, + OP_DPO, + OP_DPOL, + OP_IPR, + OP_IPRL, + OP_IPO, + OP_IPOL, + OP_PCHR, + OP_EXIT, + OP_EQU, + OP_POPT, + OP_ABS = 0x2c, + OP_BCH0, + OP_PSH0 = 0x2f, + OP_PSHL = 0x40, + OP_PSHV, + OP_JMP, + OP_CALL, + OP_NEXT, + OP_PSTR, + OP_SINT = 0x47, + OP_ST, + OP_ISTP, + OP_DSTP, + OP_SWPN, + OP_DUPN, + OP_POPV, + OP_POPE, + OP_POPA, + OP_JMPZ, + OP_DS, + OP_IS, + OP_DPRA, + OP_DPOA, + OP_IPRA, + OP_IPOA, + OP_EXPT, + OP_PSHE, + OP_PSHA, + OP_DYNA, + OP_EXPV = 0x5c, + OP_COPY = 0x80, + OP_REVA, + OP_DSC, + OP_ISC, + OP_WAIT, + OP_VS, + OP_CMPA = 0xc0, + OP_VSC, +}; + +struct altera_procinfo { + char *name; + u8 attrs; + struct altera_procinfo *next; +}; + +/* This function checks if enough parameters are available on the stack. 
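+ * It returns 1 when at least "count" entries are present; otherwise it
+ * stores -EOVERFLOW in *status and returns 0, so the caller skips the
+ * current opcode.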
*/ +static int altera_check_stack(int stack_ptr, int count, int *status) +{ + if (stack_ptr < count) { + *status = -EOVERFLOW; + return 0; + } + + return 1; +} + +static void altera_export_int(char *key, s32 value) +{ + dprintk("Export: key = \"%s\", value = %d\n", key, value); +} + +#define HEX_LINE_CHARS 72 +#define HEX_LINE_BITS (HEX_LINE_CHARS * 4) + +static void altera_export_bool_array(char *key, u8 *data, s32 count) +{ + char string[HEX_LINE_CHARS + 1]; + s32 i, offset; + u32 size, line, lines, linebits, value, j, k; + + if (count > HEX_LINE_BITS) { + dprintk("Export: key = \"%s\", %d bits, value = HEX\n", + key, count); + lines = (count + (HEX_LINE_BITS - 1)) / HEX_LINE_BITS; + + for (line = 0; line < lines; ++line) { + if (line < (lines - 1)) { + linebits = HEX_LINE_BITS; + size = HEX_LINE_CHARS; + offset = count - ((line + 1) * HEX_LINE_BITS); + } else { + linebits = + count - ((lines - 1) * HEX_LINE_BITS); + size = (linebits + 3) / 4; + offset = 0L; + } + + string[size] = '\0'; + j = size - 1; + value = 0; + + for (k = 0; k < linebits; ++k) { + i = k + offset; + if (data[i >> 3] & (1 << (i & 7))) + value |= (1 << (i & 3)); + if ((i & 3) == 3) { + sprintf(&string[j], "%1x", value); + value = 0; + --j; + } + } + if ((k & 3) > 0) + sprintf(&string[j], "%1x", value); + + dprintk("%s\n", string); + } + + } else { + size = (count + 3) / 4; + string[size] = '\0'; + j = size - 1; + value = 0; + + for (i = 0; i < count; ++i) { + if (data[i >> 3] & (1 << (i & 7))) + value |= (1 << (i & 3)); + if ((i & 3) == 3) { + sprintf(&string[j], "%1x", value); + value = 0; + --j; + } + } + if ((i & 3) > 0) + sprintf(&string[j], "%1x", value); + + dprintk("Export: key = \"%s\", %d bits, value = HEX %s\n", + key, count, string); + } +} + +static int altera_execute(struct altera_state *astate, + u8 *p, + s32 program_size, + s32 *error_address, + int *exit_code, + int *format_version) +{ + struct altera_config *aconf = astate->config; + char *msg_buff = astate->msg_buff; + long *stack = astate->stack; + int status = 0; + u32 first_word = 0L; + u32 action_table = 0L; + u32 proc_table = 0L; + u32 str_table = 0L; + u32 sym_table = 0L; + u32 data_sect = 0L; + u32 code_sect = 0L; + u32 debug_sect = 0L; + u32 action_count = 0L; + u32 proc_count = 0L; + u32 sym_count = 0L; + long *vars = NULL; + s32 *var_size = NULL; + char *attrs = NULL; + u8 *proc_attributes = NULL; + u32 pc; + u32 opcode_address; + u32 args[3]; + u32 opcode; + u32 name_id; + u8 charbuf[4]; + long long_tmp; + u32 variable_id; + u8 *charptr_tmp; + u8 *charptr_tmp2; + long *longptr_tmp; + int version = 0; + int delta = 0; + int stack_ptr = 0; + u32 arg_count; + int done = 0; + int bad_opcode = 0; + u32 count; + u32 index; + u32 index2; + s32 long_count; + s32 long_idx; + s32 long_idx2; + u32 i; + u32 j; + u32 uncomp_size; + u32 offset; + u32 value; + int current_proc = 0; + int reverse; + + char *name; + + dprintk("%s\n", __func__); + + /* Read header information */ + if (program_size > 52L) { + first_word = get_unaligned_be32(&p[0]); + version = (first_word & 1L); + *format_version = version + 1; + delta = version * 8; + + action_table = get_unaligned_be32(&p[4]); + proc_table = get_unaligned_be32(&p[8]); + str_table = get_unaligned_be32(&p[4 + delta]); + sym_table = get_unaligned_be32(&p[16 + delta]); + data_sect = get_unaligned_be32(&p[20 + delta]); + code_sect = get_unaligned_be32(&p[24 + delta]); + debug_sect = get_unaligned_be32(&p[28 + delta]); + action_count = get_unaligned_be32(&p[40 + delta]); + proc_count = get_unaligned_be32(&p[44 + 
delta]); + sym_count = get_unaligned_be32(&p[48 + (2 * delta)]); + } + + if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L)) { + done = 1; + status = -EIO; + goto exit_done; + } + + if (sym_count <= 0) + goto exit_done; + + vars = kcalloc(sym_count, sizeof(long), GFP_KERNEL); + + if (vars == NULL) + status = -ENOMEM; + + if (status == 0) { + var_size = kcalloc(sym_count, sizeof(s32), GFP_KERNEL); + + if (var_size == NULL) + status = -ENOMEM; + } + + if (status == 0) { + attrs = kzalloc(sym_count, GFP_KERNEL); + + if (attrs == NULL) + status = -ENOMEM; + } + + if ((status == 0) && (version > 0)) { + proc_attributes = kzalloc(proc_count, GFP_KERNEL); + + if (proc_attributes == NULL) + status = -ENOMEM; + } + + if (status != 0) + goto exit_done; + + delta = version * 2; + + for (i = 0; i < sym_count; ++i) { + offset = (sym_table + ((11 + delta) * i)); + + value = get_unaligned_be32(&p[offset + 3 + delta]); + + attrs[i] = p[offset]; + + /* + * use bit 7 of attribute byte to indicate that + * this buffer was dynamically allocated + * and should be freed later + */ + attrs[i] &= 0x7f; + + var_size[i] = get_unaligned_be32(&p[offset + 7 + delta]); + + /* + * Attribute bits: + * bit 0: 0 = read-only, 1 = read-write + * bit 1: 0 = not compressed, 1 = compressed + * bit 2: 0 = not initialized, 1 = initialized + * bit 3: 0 = scalar, 1 = array + * bit 4: 0 = Boolean, 1 = integer + * bit 5: 0 = declared variable, + * 1 = compiler created temporary variable + */ + + if ((attrs[i] & 0x0c) == 0x04) + /* initialized scalar variable */ + vars[i] = value; + else if ((attrs[i] & 0x1e) == 0x0e) { + /* initialized compressed Boolean array */ + uncomp_size = get_unaligned_le32(&p[data_sect + value]); + + /* allocate a buffer for the uncompressed data */ + vars[i] = (long)kzalloc(uncomp_size, GFP_KERNEL); + if (vars[i] == 0L) + status = -ENOMEM; + else { + /* set flag so buffer will be freed later */ + attrs[i] |= 0x80; + + /* uncompress the data */ + if (altera_shrink(&p[data_sect + value], + var_size[i], + (u8 *)vars[i], + uncomp_size, + version) != uncomp_size) + /* decompression failed */ + status = -EIO; + else + var_size[i] = uncomp_size * 8L; + + } + } else if ((attrs[i] & 0x1e) == 0x0c) { + /* initialized Boolean array */ + vars[i] = value + data_sect + (long)p; + } else if ((attrs[i] & 0x1c) == 0x1c) { + /* initialized integer array */ + vars[i] = value + data_sect; + } else if ((attrs[i] & 0x0c) == 0x08) { + /* uninitialized array */ + + /* flag attrs so that memory is freed */ + attrs[i] |= 0x80; + + if (var_size[i] > 0) { + u32 size; + + if (attrs[i] & 0x10) + /* integer array */ + size = (var_size[i] * sizeof(s32)); + else + /* Boolean array */ + size = ((var_size[i] + 7L) / 8L); + + vars[i] = (long)kzalloc(size, GFP_KERNEL); + + if (vars[i] == 0) { + status = -ENOMEM; + } else { + /* zero out memory */ + for (j = 0; j < size; ++j) + ((u8 *)(vars[i]))[j] = 0; + + } + } else + vars[i] = 0; + + } else + vars[i] = 0; + + } + +exit_done: + if (status != 0) + done = 1; + + altera_jinit(astate); + + pc = code_sect; + msg_buff[0] = '\0'; + + /* + * For JBC version 2, we will execute the procedures corresponding to + * the selected ACTION + */ + if (version > 0) { + if (aconf->action == NULL) { + status = -EINVAL; + done = 1; + } else { + int action_found = 0; + for (i = 0; (i < action_count) && !action_found; ++i) { + name_id = get_unaligned_be32(&p[action_table + + (12 * i)]); + + name = &p[str_table + name_id]; + + if (strncasecmp(aconf->action, name, strlen(name)) == 0) { + action_found = 1; + 
current_proc = + get_unaligned_be32(&p[action_table + + (12 * i) + 8]); + } + } + + if (!action_found) { + status = -EINVAL; + done = 1; + } + } + + if (status == 0) { + int first_time = 1; + i = current_proc; + while ((i != 0) || first_time) { + first_time = 0; + /* check procedure attribute byte */ + proc_attributes[i] = + (p[proc_table + + (13 * i) + 8] & + 0x03); + + /* + * BIT0 - OPTIONAL + * BIT1 - RECOMMENDED + * BIT6 - FORCED OFF + * BIT7 - FORCED ON + */ + + i = get_unaligned_be32(&p[proc_table + + (13 * i) + 4]); + } + + /* + * Set current_proc to the first procedure + * to be executed + */ + i = current_proc; + while ((i != 0) && + ((proc_attributes[i] == 1) || + ((proc_attributes[i] & 0xc0) == 0x40))) { + i = get_unaligned_be32(&p[proc_table + + (13 * i) + 4]); + } + + if ((i != 0) || ((i == 0) && (current_proc == 0) && + ((proc_attributes[0] != 1) && + ((proc_attributes[0] & 0xc0) != 0x40)))) { + current_proc = i; + pc = code_sect + + get_unaligned_be32(&p[proc_table + + (13 * i) + 9]); + if ((pc < code_sect) || (pc >= debug_sect)) + status = -ERANGE; + } else + /* there are no procedures to execute! */ + done = 1; + + } + } + + msg_buff[0] = '\0'; + + while (!done) { + opcode = (p[pc] & 0xff); + opcode_address = pc; + ++pc; + + if (debug > 1) + printk("opcode: %02x\n", opcode); + + arg_count = (opcode >> 6) & 3; + for (i = 0; i < arg_count; ++i) { + args[i] = get_unaligned_be32(&p[pc]); + pc += 4; + } + + switch (opcode) { + case OP_NOP: + break; + case OP_DUP: + if (altera_check_stack(stack_ptr, 1, &status)) { + stack[stack_ptr] = stack[stack_ptr - 1]; + ++stack_ptr; + } + break; + case OP_SWP: + if (altera_check_stack(stack_ptr, 2, &status)) { + long_tmp = stack[stack_ptr - 2]; + stack[stack_ptr - 2] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + break; + case OP_ADD: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] += stack[stack_ptr]; + } + break; + case OP_SUB: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] -= stack[stack_ptr]; + } + break; + case OP_MULT: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] *= stack[stack_ptr]; + } + break; + case OP_DIV: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] /= stack[stack_ptr]; + } + break; + case OP_MOD: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] %= stack[stack_ptr]; + } + break; + case OP_SHL: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] <<= stack[stack_ptr]; + } + break; + case OP_SHR: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] >>= stack[stack_ptr]; + } + break; + case OP_NOT: + if (altera_check_stack(stack_ptr, 1, &status)) + stack[stack_ptr - 1] ^= (-1L); + + break; + case OP_AND: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] &= stack[stack_ptr]; + } + break; + case OP_OR: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] |= stack[stack_ptr]; + } + break; + case OP_XOR: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] ^= stack[stack_ptr]; + } + break; + case OP_INV: + if (!altera_check_stack(stack_ptr, 1, &status)) + break; + stack[stack_ptr - 1] = stack[stack_ptr - 1] ? 
0L : 1L; + break; + case OP_GT: + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + --stack_ptr; + stack[stack_ptr - 1] = + (stack[stack_ptr - 1] > stack[stack_ptr]) ? + 1L : 0L; + + break; + case OP_LT: + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + --stack_ptr; + stack[stack_ptr - 1] = + (stack[stack_ptr - 1] < stack[stack_ptr]) ? + 1L : 0L; + + break; + case OP_RET: + if ((version > 0) && (stack_ptr == 0)) { + /* + * We completed one of the main procedures + * of an ACTION. + * Find the next procedure + * to be executed and jump to it. + * If there are no more procedures, then EXIT. + */ + i = get_unaligned_be32(&p[proc_table + + (13 * current_proc) + 4]); + while ((i != 0) && + ((proc_attributes[i] == 1) || + ((proc_attributes[i] & 0xc0) == 0x40))) + i = get_unaligned_be32(&p[proc_table + + (13 * i) + 4]); + + if (i == 0) { + /* no procedures to execute! */ + done = 1; + *exit_code = 0; /* success */ + } else { + current_proc = i; + pc = code_sect + get_unaligned_be32( + &p[proc_table + + (13 * i) + 9]); + if ((pc < code_sect) || + (pc >= debug_sect)) + status = -ERANGE; + } + + } else + if (altera_check_stack(stack_ptr, 1, &status)) { + pc = stack[--stack_ptr] + code_sect; + if ((pc <= code_sect) || + (pc >= debug_sect)) + status = -ERANGE; + + } + + break; + case OP_CMPS: + /* + * Array short compare + * ...stack 0 is source 1 value + * ...stack 1 is source 2 value + * ...stack 2 is mask value + * ...stack 3 is count + */ + if (altera_check_stack(stack_ptr, 4, &status)) { + s32 a = stack[--stack_ptr]; + s32 b = stack[--stack_ptr]; + long_tmp = stack[--stack_ptr]; + count = stack[stack_ptr - 1]; + + if ((count < 1) || (count > 32)) + status = -ERANGE; + else { + long_tmp &= ((-1L) >> (32 - count)); + + stack[stack_ptr - 1] = + ((a & long_tmp) == (b & long_tmp)) + ? 
1L : 0L; + } + } + break; + case OP_PINT: + /* + * PRINT add integer + * ...stack 0 is integer value + */ + if (!altera_check_stack(stack_ptr, 1, &status)) + break; + sprintf(&msg_buff[strlen(msg_buff)], + "%ld", stack[--stack_ptr]); + break; + case OP_PRNT: + /* PRINT finish */ + if (debug) + printk(msg_buff, "\n"); + + msg_buff[0] = '\0'; + break; + case OP_DSS: + /* + * DRSCAN short + * ...stack 0 is scan data + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + long_tmp = stack[--stack_ptr]; + count = stack[--stack_ptr]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_drscan(astate, count, charbuf, 0); + break; + case OP_DSSC: + /* + * DRSCAN short with capture + * ...stack 0 is scan data + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + long_tmp = stack[--stack_ptr]; + count = stack[stack_ptr - 1]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_swap_dr(astate, count, charbuf, + 0, charbuf, 0); + stack[stack_ptr - 1] = get_unaligned_le32(&charbuf[0]); + break; + case OP_ISS: + /* + * IRSCAN short + * ...stack 0 is scan data + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + long_tmp = stack[--stack_ptr]; + count = stack[--stack_ptr]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_irscan(astate, count, charbuf, 0); + break; + case OP_ISSC: + /* + * IRSCAN short with capture + * ...stack 0 is scan data + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + long_tmp = stack[--stack_ptr]; + count = stack[stack_ptr - 1]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_swap_ir(astate, count, charbuf, + 0, charbuf, 0); + stack[stack_ptr - 1] = get_unaligned_le32(&charbuf[0]); + break; + case OP_DPR: + if (!altera_check_stack(stack_ptr, 1, &status)) + break; + count = stack[--stack_ptr]; + status = altera_set_dr_pre(&astate->js, count, 0, NULL); + break; + case OP_DPRL: + /* + * DRPRE with literal data + * ...stack 0 is count + * ...stack 1 is literal data + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + count = stack[--stack_ptr]; + long_tmp = stack[--stack_ptr]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_set_dr_pre(&astate->js, count, 0, + charbuf); + break; + case OP_DPO: + /* + * DRPOST + * ...stack 0 is count + */ + if (altera_check_stack(stack_ptr, 1, &status)) { + count = stack[--stack_ptr]; + status = altera_set_dr_post(&astate->js, count, + 0, NULL); + } + break; + case OP_DPOL: + /* + * DRPOST with literal data + * ...stack 0 is count + * ...stack 1 is literal data + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + count = stack[--stack_ptr]; + long_tmp = stack[--stack_ptr]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_set_dr_post(&astate->js, count, 0, + charbuf); + break; + case OP_IPR: + if (altera_check_stack(stack_ptr, 1, &status)) { + count = stack[--stack_ptr]; + status = altera_set_ir_pre(&astate->js, count, + 0, NULL); + } + break; + case OP_IPRL: + /* + * IRPRE with literal data + * ...stack 0 is count + * ...stack 1 is literal data + */ + if (altera_check_stack(stack_ptr, 2, &status)) { + count = stack[--stack_ptr]; + long_tmp = stack[--stack_ptr]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_set_ir_pre(&astate->js, count, + 0, charbuf); + } + break; + case OP_IPO: + /* + * IRPOST + * ...stack 0 is count + */ + if (altera_check_stack(stack_ptr, 1, &status)) { + count = 
stack[--stack_ptr]; + status = altera_set_ir_post(&astate->js, count, + 0, NULL); + } + break; + case OP_IPOL: + /* + * IRPOST with literal data + * ...stack 0 is count + * ...stack 1 is literal data + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + count = stack[--stack_ptr]; + long_tmp = stack[--stack_ptr]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_set_ir_post(&astate->js, count, 0, + charbuf); + break; + case OP_PCHR: + if (altera_check_stack(stack_ptr, 1, &status)) { + u8 ch; + count = strlen(msg_buff); + ch = (char) stack[--stack_ptr]; + if ((ch < 1) || (ch > 127)) { + /* + * character code out of range + * instead of flagging an error, + * force the value to 127 + */ + ch = 127; + } + msg_buff[count] = ch; + msg_buff[count + 1] = '\0'; + } + break; + case OP_EXIT: + if (altera_check_stack(stack_ptr, 1, &status)) + *exit_code = stack[--stack_ptr]; + + done = 1; + break; + case OP_EQU: + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + --stack_ptr; + stack[stack_ptr - 1] = + (stack[stack_ptr - 1] == stack[stack_ptr]) ? + 1L : 0L; + break; + case OP_POPT: + if (altera_check_stack(stack_ptr, 1, &status)) + --stack_ptr; + + break; + case OP_ABS: + if (!altera_check_stack(stack_ptr, 1, &status)) + break; + if (stack[stack_ptr - 1] < 0) + stack[stack_ptr - 1] = 0 - stack[stack_ptr - 1]; + + break; + case OP_BCH0: + /* + * Batch operation 0 + * SWP + * SWPN 7 + * SWP + * SWPN 6 + * DUPN 8 + * SWPN 2 + * SWP + * DUPN 6 + * DUPN 6 + */ + + /* SWP */ + if (altera_check_stack(stack_ptr, 2, &status)) { + long_tmp = stack[stack_ptr - 2]; + stack[stack_ptr - 2] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + + /* SWPN 7 */ + index = 7 + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + long_tmp = stack[stack_ptr - index]; + stack[stack_ptr - index] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + + /* SWP */ + if (altera_check_stack(stack_ptr, 2, &status)) { + long_tmp = stack[stack_ptr - 2]; + stack[stack_ptr - 2] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + + /* SWPN 6 */ + index = 6 + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + long_tmp = stack[stack_ptr - index]; + stack[stack_ptr - index] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + + /* DUPN 8 */ + index = 8 + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + stack[stack_ptr] = stack[stack_ptr - index]; + ++stack_ptr; + } + + /* SWPN 2 */ + index = 2 + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + long_tmp = stack[stack_ptr - index]; + stack[stack_ptr - index] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + + /* SWP */ + if (altera_check_stack(stack_ptr, 2, &status)) { + long_tmp = stack[stack_ptr - 2]; + stack[stack_ptr - 2] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + + /* DUPN 6 */ + index = 6 + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + stack[stack_ptr] = stack[stack_ptr - index]; + ++stack_ptr; + } + + /* DUPN 6 */ + index = 6 + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + stack[stack_ptr] = stack[stack_ptr - index]; + ++stack_ptr; + } + break; + case OP_PSH0: + stack[stack_ptr++] = 0; + break; + case OP_PSHL: + stack[stack_ptr++] = (s32) args[0]; + break; + case OP_PSHV: + stack[stack_ptr++] = vars[args[0]]; + break; + case OP_JMP: + pc = args[0] + code_sect; + if ((pc < code_sect) || (pc >= debug_sect)) + status = -ERANGE; + break; + case OP_CALL: + stack[stack_ptr++] = pc; + pc = args[0] + code_sect; 
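+ /*
+ * As with OP_JMP above, the call target must lie within the
+ * code section: code_sect <= pc < debug_sect.
+ */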
+ if ((pc < code_sect) || (pc >= debug_sect)) + status = -ERANGE; + break; + case OP_NEXT: + /* + * Process FOR / NEXT loop + * ...argument 0 is variable ID + * ...stack 0 is step value + * ...stack 1 is end value + * ...stack 2 is top address + */ + if (altera_check_stack(stack_ptr, 3, &status)) { + s32 step = stack[stack_ptr - 1]; + s32 end = stack[stack_ptr - 2]; + s32 top = stack[stack_ptr - 3]; + s32 iterator = vars[args[0]]; + int break_out = 0; + + if (step < 0) { + if (iterator <= end) + break_out = 1; + } else if (iterator >= end) + break_out = 1; + + if (break_out) { + stack_ptr -= 3; + } else { + vars[args[0]] = iterator + step; + pc = top + code_sect; + if ((pc < code_sect) || + (pc >= debug_sect)) + status = -ERANGE; + } + } + break; + case OP_PSTR: + /* + * PRINT add string + * ...argument 0 is string ID + */ + count = strlen(msg_buff); + strlcpy(&msg_buff[count], + &p[str_table + args[0]], + ALTERA_MESSAGE_LENGTH - count); + break; + case OP_SINT: + /* + * STATE intermediate state + * ...argument 0 is state code + */ + status = altera_goto_jstate(astate, args[0]); + break; + case OP_ST: + /* + * STATE final state + * ...argument 0 is state code + */ + status = altera_goto_jstate(astate, args[0]); + break; + case OP_ISTP: + /* + * IRSTOP state + * ...argument 0 is state code + */ + status = altera_set_irstop(&astate->js, args[0]); + break; + case OP_DSTP: + /* + * DRSTOP state + * ...argument 0 is state code + */ + status = altera_set_drstop(&astate->js, args[0]); + break; + + case OP_SWPN: + /* + * Exchange top with Nth stack value + * ...argument 0 is 0-based stack entry + * to swap with top element + */ + index = (args[0]) + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + long_tmp = stack[stack_ptr - index]; + stack[stack_ptr - index] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + break; + case OP_DUPN: + /* + * Duplicate Nth stack value + * ...argument 0 is 0-based stack entry to duplicate + */ + index = (args[0]) + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + stack[stack_ptr] = stack[stack_ptr - index]; + ++stack_ptr; + } + break; + case OP_POPV: + /* + * Pop stack into scalar variable + * ...argument 0 is variable ID + * ...stack 0 is value + */ + if (altera_check_stack(stack_ptr, 1, &status)) + vars[args[0]] = stack[--stack_ptr]; + + break; + case OP_POPE: + /* + * Pop stack into integer array element + * ...argument 0 is variable ID + * ...stack 0 is array index + * ...stack 1 is value + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + variable_id = args[0]; + + /* + * If variable is read-only, + * convert to writable array + */ + if ((version > 0) && + ((attrs[variable_id] & 0x9c) == 0x1c)) { + /* Allocate a writable buffer for this array */ + count = var_size[variable_id]; + long_tmp = vars[variable_id]; + longptr_tmp = kcalloc(count, sizeof(long), + GFP_KERNEL); + vars[variable_id] = (long)longptr_tmp; + + if (vars[variable_id] == 0) { + status = -ENOMEM; + break; + } + + /* copy previous contents into buffer */ + for (i = 0; i < count; ++i) { + longptr_tmp[i] = + get_unaligned_be32(&p[long_tmp]); + long_tmp += sizeof(long); + } + + /* + * set bit 7 - buffer was + * dynamically allocated + */ + attrs[variable_id] |= 0x80; + + /* clear bit 2 - variable is writable */ + attrs[variable_id] &= ~0x04; + attrs[variable_id] |= 0x01; + + } + + /* check that variable is a writable integer array */ + if ((attrs[variable_id] & 0x1c) != 0x18) + status = -ERANGE; + else { + longptr_tmp = (long *)vars[variable_id]; + + 
/* pop the array index */ + index = stack[--stack_ptr]; + + /* pop the value and store it into the array */ + longptr_tmp[index] = stack[--stack_ptr]; + } + + break; + case OP_POPA: + /* + * Pop stack into Boolean array + * ...argument 0 is variable ID + * ...stack 0 is count + * ...stack 1 is array index + * ...stack 2 is value + */ + if (!altera_check_stack(stack_ptr, 3, &status)) + break; + variable_id = args[0]; + + /* + * If variable is read-only, + * convert to writable array + */ + if ((version > 0) && + ((attrs[variable_id] & 0x9c) == 0x0c)) { + /* Allocate a writable buffer for this array */ + long_tmp = + (var_size[variable_id] + 7L) >> 3L; + charptr_tmp2 = (u8 *)vars[variable_id]; + charptr_tmp = + kzalloc(long_tmp, GFP_KERNEL); + vars[variable_id] = (long)charptr_tmp; + + if (vars[variable_id] == 0) { + status = -ENOMEM; + break; + } + + /* zero the buffer */ + for (long_idx = 0L; + long_idx < long_tmp; + ++long_idx) { + charptr_tmp[long_idx] = 0; + } + + /* copy previous contents into buffer */ + for (long_idx = 0L; + long_idx < var_size[variable_id]; + ++long_idx) { + long_idx2 = long_idx; + + if (charptr_tmp2[long_idx2 >> 3] & + (1 << (long_idx2 & 7))) { + charptr_tmp[long_idx >> 3] |= + (1 << (long_idx & 7)); + } + } + + /* + * set bit 7 - buffer was + * dynamically allocated + */ + attrs[variable_id] |= 0x80; + + /* clear bit 2 - variable is writable */ + attrs[variable_id] &= ~0x04; + attrs[variable_id] |= 0x01; + + } + + /* + * check that variable is + * a writable Boolean array + */ + if ((attrs[variable_id] & 0x1c) != 0x08) { + status = -ERANGE; + break; + } + + charptr_tmp = (u8 *)vars[variable_id]; + + /* pop the count (number of bits to copy) */ + long_count = stack[--stack_ptr]; + + /* pop the array index */ + long_idx = stack[--stack_ptr]; + + reverse = 0; + + if (version > 0) { + /* + * stack 0 = array right index + * stack 1 = array left index + */ + + if (long_idx > long_count) { + reverse = 1; + long_tmp = long_count; + long_count = 1 + long_idx - + long_count; + long_idx = long_tmp; + + /* reverse POPA is not supported */ + status = -ERANGE; + break; + } else + long_count = 1 + long_count - + long_idx; + + } + + /* pop the data */ + long_tmp = stack[--stack_ptr]; + + if (long_count < 1) { + status = -ERANGE; + break; + } + + for (i = 0; i < long_count; ++i) { + if (long_tmp & (1L << (s32) i)) + charptr_tmp[long_idx >> 3L] |= + (1L << (long_idx & 7L)); + else + charptr_tmp[long_idx >> 3L] &= + ~(1L << (long_idx & 7L)); + + ++long_idx; + } + + break; + case OP_JMPZ: + /* + * Pop stack and branch if zero + * ...argument 0 is address + * ...stack 0 is condition value + */ + if (altera_check_stack(stack_ptr, 1, &status)) { + if (stack[--stack_ptr] == 0) { + pc = args[0] + code_sect; + if ((pc < code_sect) || + (pc >= debug_sect)) + status = -ERANGE; + } + } + break; + case OP_DS: + case OP_IS: + /* + * DRSCAN + * IRSCAN + * ...argument 0 is scan data variable ID + * ...stack 0 is array index + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + long_idx = stack[--stack_ptr]; + long_count = stack[--stack_ptr]; + reverse = 0; + if (version > 0) { + /* + * stack 0 = array right index + * stack 1 = array left index + * stack 2 = count + */ + long_tmp = long_count; + long_count = stack[--stack_ptr]; + + if (long_idx > long_tmp) { + reverse = 1; + long_idx = long_tmp; + } + } + + charptr_tmp = (u8 *)vars[args[0]]; + + if (reverse) { + /* + * allocate a buffer + * and reverse the data order + */ + charptr_tmp2 = charptr_tmp; + 
charptr_tmp = kzalloc((long_count >> 3) + 1, + GFP_KERNEL); + if (charptr_tmp == NULL) { + status = -ENOMEM; + break; + } + + long_tmp = long_idx + long_count - 1; + long_idx2 = 0; + while (long_idx2 < long_count) { + if (charptr_tmp2[long_tmp >> 3] & + (1 << (long_tmp & 7))) + charptr_tmp[long_idx2 >> 3] |= + (1 << (long_idx2 & 7)); + else + charptr_tmp[long_idx2 >> 3] &= + ~(1 << (long_idx2 & 7)); + + --long_tmp; + ++long_idx2; + } + } + + if (opcode == 0x51) /* DS */ + status = altera_drscan(astate, long_count, + charptr_tmp, long_idx); + else /* IS */ + status = altera_irscan(astate, long_count, + charptr_tmp, long_idx); + + if (reverse) + kfree(charptr_tmp); + + break; + case OP_DPRA: + /* + * DRPRE with array data + * ...argument 0 is variable ID + * ...stack 0 is array index + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + index = stack[--stack_ptr]; + count = stack[--stack_ptr]; + + if (version > 0) + /* + * stack 0 = array right index + * stack 1 = array left index + */ + count = 1 + count - index; + + charptr_tmp = (u8 *)vars[args[0]]; + status = altera_set_dr_pre(&astate->js, count, index, + charptr_tmp); + break; + case OP_DPOA: + /* + * DRPOST with array data + * ...argument 0 is variable ID + * ...stack 0 is array index + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + index = stack[--stack_ptr]; + count = stack[--stack_ptr]; + + if (version > 0) + /* + * stack 0 = array right index + * stack 1 = array left index + */ + count = 1 + count - index; + + charptr_tmp = (u8 *)vars[args[0]]; + status = altera_set_dr_post(&astate->js, count, index, + charptr_tmp); + break; + case OP_IPRA: + /* + * IRPRE with array data + * ...argument 0 is variable ID + * ...stack 0 is array index + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + index = stack[--stack_ptr]; + count = stack[--stack_ptr]; + + if (version > 0) + /* + * stack 0 = array right index + * stack 1 = array left index + */ + count = 1 + count - index; + + charptr_tmp = (u8 *)vars[args[0]]; + status = altera_set_ir_pre(&astate->js, count, index, + charptr_tmp); + + break; + case OP_IPOA: + /* + * IRPOST with array data + * ...argument 0 is variable ID + * ...stack 0 is array index + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + index = stack[--stack_ptr]; + count = stack[--stack_ptr]; + + if (version > 0) + /* + * stack 0 = array right index + * stack 1 = array left index + */ + count = 1 + count - index; + + charptr_tmp = (u8 *)vars[args[0]]; + status = altera_set_ir_post(&astate->js, count, index, + charptr_tmp); + + break; + case OP_EXPT: + /* + * EXPORT + * ...argument 0 is string ID + * ...stack 0 is integer expression + */ + if (altera_check_stack(stack_ptr, 1, &status)) { + name = &p[str_table + args[0]]; + long_tmp = stack[--stack_ptr]; + altera_export_int(name, long_tmp); + } + break; + case OP_PSHE: + /* + * Push integer array element + * ...argument 0 is variable ID + * ...stack 0 is array index + */ + if (!altera_check_stack(stack_ptr, 1, &status)) + break; + variable_id = args[0]; + index = stack[stack_ptr - 1]; + + /* check variable type */ + if ((attrs[variable_id] & 0x1f) == 0x19) { + /* writable integer array */ + longptr_tmp = (long *)vars[variable_id]; + stack[stack_ptr - 1] = longptr_tmp[index]; + } else if ((attrs[variable_id] & 0x1f) == 0x1c) { + /* read-only integer array */ + long_tmp = vars[variable_id] + + (index * sizeof(long)); + 
stack[stack_ptr - 1] = + get_unaligned_be32(&p[long_tmp]); + } else + status = -ERANGE; + + break; + case OP_PSHA: + /* + * Push Boolean array + * ...argument 0 is variable ID + * ...stack 0 is count + * ...stack 1 is array index + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + variable_id = args[0]; + + /* check that variable is a Boolean array */ + if ((attrs[variable_id] & 0x18) != 0x08) { + status = -ERANGE; + break; + } + + charptr_tmp = (u8 *)vars[variable_id]; + + /* pop the count (number of bits to copy) */ + count = stack[--stack_ptr]; + + /* pop the array index */ + index = stack[stack_ptr - 1]; + + if (version > 0) + /* + * stack 0 = array right index + * stack 1 = array left index + */ + count = 1 + count - index; + + if ((count < 1) || (count > 32)) { + status = -ERANGE; + break; + } + + long_tmp = 0L; + + for (i = 0; i < count; ++i) + if (charptr_tmp[(i + index) >> 3] & + (1 << ((i + index) & 7))) + long_tmp |= (1L << i); + + stack[stack_ptr - 1] = long_tmp; + + break; + case OP_DYNA: + /* + * Dynamically change size of array + * ...argument 0 is variable ID + * ...stack 0 is new size + */ + if (!altera_check_stack(stack_ptr, 1, &status)) + break; + variable_id = args[0]; + long_tmp = stack[--stack_ptr]; + + if (long_tmp > var_size[variable_id]) { + var_size[variable_id] = long_tmp; + + if (attrs[variable_id] & 0x10) + /* allocate integer array */ + long_tmp *= sizeof(long); + else + /* allocate Boolean array */ + long_tmp = (long_tmp + 7) >> 3; + + /* + * If the buffer was previously allocated, + * free it + */ + if (attrs[variable_id] & 0x80) { + kfree((void *)vars[variable_id]); + vars[variable_id] = 0; + } + + /* + * Allocate a new buffer + * of the requested size + */ + vars[variable_id] = (long) + kzalloc(long_tmp, GFP_KERNEL); + + if (vars[variable_id] == 0) { + status = -ENOMEM; + break; + } + + /* + * Set the attribute bit to indicate that + * this buffer was dynamically allocated and + * should be freed later + */ + attrs[variable_id] |= 0x80; + + /* zero out memory */ + count = ((var_size[variable_id] + 7L) / + 8L); + charptr_tmp = (u8 *)(vars[variable_id]); + for (index = 0; index < count; ++index) + charptr_tmp[index] = 0; + + } + + break; + case OP_EXPV: + /* + * Export Boolean array + * ...argument 0 is string ID + * ...stack 0 is variable ID + * ...stack 1 is array right index + * ...stack 2 is array left index + */ + if (!altera_check_stack(stack_ptr, 3, &status)) + break; + if (version == 0) { + /* EXPV is not supported in JBC 1.0 */ + bad_opcode = 1; + break; + } + name = &p[str_table + args[0]]; + variable_id = stack[--stack_ptr]; + long_idx = stack[--stack_ptr];/* right indx */ + long_idx2 = stack[--stack_ptr];/* left indx */ + + if (long_idx > long_idx2) { + /* reverse indices not supported */ + status = -ERANGE; + break; + } + + long_count = 1 + long_idx2 - long_idx; + + charptr_tmp = (u8 *)vars[variable_id]; + charptr_tmp2 = NULL; + + if ((long_idx & 7L) != 0) { + s32 k = long_idx; + charptr_tmp2 = + kzalloc(((long_count + 7L) / 8L), + GFP_KERNEL); + if (charptr_tmp2 == NULL) { + status = -ENOMEM; + break; + } + + for (i = 0; i < long_count; ++i) { + if (charptr_tmp[k >> 3] & + (1 << (k & 7))) + charptr_tmp2[i >> 3] |= + (1 << (i & 7)); + else + charptr_tmp2[i >> 3] &= + ~(1 << (i & 7)); + + ++k; + } + charptr_tmp = charptr_tmp2; + + } else if (long_idx != 0) + charptr_tmp = &charptr_tmp[long_idx >> 3]; + + altera_export_bool_array(name, charptr_tmp, + long_count); + + /* free allocated buffer */ + if ((long_idx & 7L) != 0) + 
kfree(charptr_tmp2); + + break; + case OP_COPY: { + /* + * Array copy + * ...argument 0 is dest ID + * ...argument 1 is source ID + * ...stack 0 is count + * ...stack 1 is dest index + * ...stack 2 is source index + */ + s32 copy_count; + s32 copy_index; + s32 copy_index2; + s32 destleft; + s32 src_count; + s32 dest_count; + int src_reverse = 0; + int dest_reverse = 0; + + if (!altera_check_stack(stack_ptr, 3, &status)) + break; + + copy_count = stack[--stack_ptr]; + copy_index = stack[--stack_ptr]; + copy_index2 = stack[--stack_ptr]; + reverse = 0; + + if (version > 0) { + /* + * stack 0 = source right index + * stack 1 = source left index + * stack 2 = destination right index + * stack 3 = destination left index + */ + destleft = stack[--stack_ptr]; + + if (copy_count > copy_index) { + src_reverse = 1; + reverse = 1; + src_count = 1 + copy_count - copy_index; + /* copy_index = source start index */ + } else { + src_count = 1 + copy_index - copy_count; + /* source start index */ + copy_index = copy_count; + } + + if (copy_index2 > destleft) { + dest_reverse = 1; + reverse = !reverse; + dest_count = 1 + copy_index2 - destleft; + /* destination start index */ + copy_index2 = destleft; + } else + dest_count = 1 + destleft - copy_index2; + + copy_count = (src_count < dest_count) ? + src_count : dest_count; + + if ((src_reverse || dest_reverse) && + (src_count != dest_count)) + /* + * If either the source or destination + * is reversed, we can't tolerate + * a length mismatch, because we + * "left justify" arrays when copying. + * This won't work correctly + * with reversed arrays. + */ + status = -ERANGE; + + } + + count = copy_count; + index = copy_index; + index2 = copy_index2; + + /* + * If destination is a read-only array, + * allocate a buffer and convert it to a writable array + */ + variable_id = args[1]; + if ((version > 0) && + ((attrs[variable_id] & 0x9c) == 0x0c)) { + /* Allocate a writable buffer for this array */ + long_tmp = + (var_size[variable_id] + 7L) >> 3L; + charptr_tmp2 = (u8 *)vars[variable_id]; + charptr_tmp = + kzalloc(long_tmp, GFP_KERNEL); + vars[variable_id] = (long)charptr_tmp; + + if (vars[variable_id] == 0) { + status = -ENOMEM; + break; + } + + /* zero the buffer */ + for (long_idx = 0L; long_idx < long_tmp; + ++long_idx) + charptr_tmp[long_idx] = 0; + + /* copy previous contents into buffer */ + for (long_idx = 0L; + long_idx < var_size[variable_id]; + ++long_idx) { + long_idx2 = long_idx; + + if (charptr_tmp2[long_idx2 >> 3] & + (1 << (long_idx2 & 7))) + charptr_tmp[long_idx >> 3] |= + (1 << (long_idx & 7)); + + } + + /* + set bit 7 - buffer was dynamically allocated */ + attrs[variable_id] |= 0x80; + + /* clear bit 2 - variable is writable */ + attrs[variable_id] &= ~0x04; + attrs[variable_id] |= 0x01; + } + + charptr_tmp = (u8 *)vars[args[1]]; + charptr_tmp2 = (u8 *)vars[args[0]]; + + /* check if destination is a writable Boolean array */ + if ((attrs[args[1]] & 0x1c) != 0x08) { + status = -ERANGE; + break; + } + + if (count < 1) { + status = -ERANGE; + break; + } + + if (reverse) + index2 += (count - 1); + + for (i = 0; i < count; ++i) { + if (charptr_tmp2[index >> 3] & + (1 << (index & 7))) + charptr_tmp[index2 >> 3] |= + (1 << (index2 & 7)); + else + charptr_tmp[index2 >> 3] &= + ~(1 << (index2 & 7)); + + ++index; + if (reverse) + --index2; + else + ++index2; + } + + break; + } + case OP_DSC: + case OP_ISC: { + /* + * DRSCAN with capture + * IRSCAN with capture + * ...argument 0 is scan data variable ID + * ...argument 1 is capture variable ID + * 
...stack 0 is capture index + * ...stack 1 is scan data index + * ...stack 2 is count + */ + s32 scan_right, scan_left; + s32 capture_count = 0; + s32 scan_count = 0; + s32 capture_index; + s32 scan_index; + + if (!altera_check_stack(stack_ptr, 3, &status)) + break; + + capture_index = stack[--stack_ptr]; + scan_index = stack[--stack_ptr]; + + if (version > 0) { + /* + * stack 0 = capture right index + * stack 1 = capture left index + * stack 2 = scan right index + * stack 3 = scan left index + * stack 4 = count + */ + scan_right = stack[--stack_ptr]; + scan_left = stack[--stack_ptr]; + capture_count = 1 + scan_index - capture_index; + scan_count = 1 + scan_left - scan_right; + scan_index = scan_right; + } + + long_count = stack[--stack_ptr]; + /* + * If capture array is read-only, allocate a buffer + * and convert it to a writable array + */ + variable_id = args[1]; + if ((version > 0) && + ((attrs[variable_id] & 0x9c) == 0x0c)) { + /* Allocate a writable buffer for this array */ + long_tmp = + (var_size[variable_id] + 7L) >> 3L; + charptr_tmp2 = (u8 *)vars[variable_id]; + charptr_tmp = + kzalloc(long_tmp, GFP_KERNEL); + vars[variable_id] = (long)charptr_tmp; + + if (vars[variable_id] == 0) { + status = -ENOMEM; + break; + } + + /* zero the buffer */ + for (long_idx = 0L; long_idx < long_tmp; + ++long_idx) + charptr_tmp[long_idx] = 0; + + /* copy previous contents into buffer */ + for (long_idx = 0L; + long_idx < var_size[variable_id]; + ++long_idx) { + long_idx2 = long_idx; + + if (charptr_tmp2[long_idx2 >> 3] & + (1 << (long_idx2 & 7))) + charptr_tmp[long_idx >> 3] |= + (1 << (long_idx & 7)); + + } + + /* + * set bit 7 - buffer was + * dynamically allocated + */ + attrs[variable_id] |= 0x80; + + /* clear bit 2 - variable is writable */ + attrs[variable_id] &= ~0x04; + attrs[variable_id] |= 0x01; + + } + + charptr_tmp = (u8 *)vars[args[0]]; + charptr_tmp2 = (u8 *)vars[args[1]]; + + if ((version > 0) && + ((long_count > capture_count) || + (long_count > scan_count))) { + status = -ERANGE; + break; + } + + /* + * check that capture array + * is a writable Boolean array + */ + if ((attrs[args[1]] & 0x1c) != 0x08) { + status = -ERANGE; + break; + } + + if (status == 0) { + if (opcode == 0x82) /* DSC */ + status = altera_swap_dr(astate, + long_count, + charptr_tmp, + scan_index, + charptr_tmp2, + capture_index); + else /* ISC */ + status = altera_swap_ir(astate, + long_count, + charptr_tmp, + scan_index, + charptr_tmp2, + capture_index); + + } + + break; + } + case OP_WAIT: + /* + * WAIT + * ...argument 0 is wait state + * ...argument 1 is end state + * ...stack 0 is cycles + * ...stack 1 is microseconds + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + long_tmp = stack[--stack_ptr]; + + if (long_tmp != 0L) + status = altera_wait_cycles(astate, long_tmp, + args[0]); + + long_tmp = stack[--stack_ptr]; + + if ((status == 0) && (long_tmp != 0L)) + status = altera_wait_msecs(astate, + long_tmp, + args[0]); + + if ((status == 0) && (args[1] != args[0])) + status = altera_goto_jstate(astate, + args[1]); + + if (version > 0) { + --stack_ptr; /* throw away MAX cycles */ + --stack_ptr; /* throw away MAX microseconds */ + } + break; + case OP_CMPA: { + /* + * Array compare + * ...argument 0 is source 1 ID + * ...argument 1 is source 2 ID + * ...argument 2 is mask ID + * ...stack 0 is source 1 index + * ...stack 1 is source 2 index + * ...stack 2 is mask index + * ...stack 3 is count + */ + s32 a, b; + u8 *source1 = (u8 *)vars[args[0]]; + u8 *source2 = (u8 *)vars[args[1]]; + u8 *mask = 
(u8 *)vars[args[2]]; + u32 index1; + u32 index2; + u32 mask_index; + + if (!altera_check_stack(stack_ptr, 4, &status)) + break; + + index1 = stack[--stack_ptr]; + index2 = stack[--stack_ptr]; + mask_index = stack[--stack_ptr]; + long_count = stack[--stack_ptr]; + + if (version > 0) { + /* + * stack 0 = source 1 right index + * stack 1 = source 1 left index + * stack 2 = source 2 right index + * stack 3 = source 2 left index + * stack 4 = mask right index + * stack 5 = mask left index + */ + s32 mask_right = stack[--stack_ptr]; + s32 mask_left = stack[--stack_ptr]; + /* source 1 count */ + a = 1 + index2 - index1; + /* source 2 count */ + b = 1 + long_count - mask_index; + a = (a < b) ? a : b; + /* mask count */ + b = 1 + mask_left - mask_right; + a = (a < b) ? a : b; + /* source 2 start index */ + index2 = mask_index; + /* mask start index */ + mask_index = mask_right; + long_count = a; + } + + long_tmp = 1L; + + if (long_count < 1) + status = -ERANGE; + else { + count = long_count; + + for (i = 0; i < count; ++i) { + if (mask[mask_index >> 3] & + (1 << (mask_index & 7))) { + a = source1[index1 >> 3] & + (1 << (index1 & 7)) + ? 1 : 0; + b = source2[index2 >> 3] & + (1 << (index2 & 7)) + ? 1 : 0; + + if (a != b) /* failure */ + long_tmp = 0L; + } + ++index1; + ++index2; + ++mask_index; + } + } + + stack[stack_ptr++] = long_tmp; + + break; + } + default: + /* Unrecognized opcode -- ERROR! */ + bad_opcode = 1; + break; + } + + if (bad_opcode) + status = -ENOSYS; + + if ((stack_ptr < 0) || (stack_ptr >= ALTERA_STACK_SIZE)) + status = -EOVERFLOW; + + if (status != 0) { + done = 1; + *error_address = (s32)(opcode_address - code_sect); + } + } + + altera_free_buffers(astate); + + /* Free all dynamically allocated arrays */ + if ((attrs != NULL) && (vars != NULL)) + for (i = 0; i < sym_count; ++i) + if (attrs[i] & 0x80) + kfree((void *)vars[i]); + + kfree(vars); + kfree(var_size); + kfree(attrs); + kfree(proc_attributes); + + return status; +} + +static int altera_get_note(u8 *p, s32 program_size, s32 *offset, + char *key, char *value, int keylen, int vallen) +/* + * Gets key and value of NOTE fields in the JBC file. + * Can be called in two modes: if offset pointer is NULL, + * then the function searches for note fields which match + * the key string provided. If offset is not NULL, then + * the function finds the next note field of any key, + * starting at the offset specified by the offset pointer. 
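+ * Each note table entry is eight bytes: a big-endian
+ * offset to the key string followed by one to the value
+ * string, both relative to the note string pool.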
+ * Returns 0 for success, else appropriate error code + */ +{ + int status = -ENODATA; + u32 note_strings = 0L; + u32 note_table = 0L; + u32 note_count = 0L; + u32 first_word = 0L; + int version = 0; + int delta = 0; + char *key_ptr; + char *value_ptr; + int i; + + /* Read header information */ + if (program_size > 52L) { + first_word = get_unaligned_be32(&p[0]); + version = (first_word & 1L); + delta = version * 8; + + note_strings = get_unaligned_be32(&p[8 + delta]); + note_table = get_unaligned_be32(&p[12 + delta]); + note_count = get_unaligned_be32(&p[44 + (2 * delta)]); + } + + if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L)) + return -EIO; + + if (note_count <= 0L) + return status; + + if (offset == NULL) { + /* + * We will search for the first note with a specific key, + * and return only the value + */ + for (i = 0; (i < note_count) && + (status != 0); ++i) { + key_ptr = &p[note_strings + + get_unaligned_be32( + &p[note_table + (8 * i)])]; + if (key && !strncasecmp(key, key_ptr, strlen(key_ptr))) { + status = 0; + + value_ptr = &p[note_strings + + get_unaligned_be32( + &p[note_table + (8 * i) + 4])]; + + if (value != NULL) + strlcpy(value, value_ptr, vallen); + + } + } + } else { + /* + * We will search for the next note, regardless of the key, + * and return both the value and the key + */ + + i = *offset; + + if ((i >= 0) && (i < note_count)) { + status = 0; + + if (key != NULL) + strlcpy(key, &p[note_strings + + get_unaligned_be32( + &p[note_table + (8 * i)])], + keylen); + + if (value != NULL) + strlcpy(value, &p[note_strings + + get_unaligned_be32( + &p[note_table + (8 * i) + 4])], + vallen); + + *offset = i + 1; + } + } + + return status; +} + +static int altera_check_crc(u8 *p, s32 program_size) +{ + int status = 0; + u16 local_expected = 0, + local_actual = 0, + shift_reg = 0xffff; + int bit, feedback; + u8 databyte; + u32 i; + u32 crc_section = 0L; + u32 first_word = 0L; + int version = 0; + int delta = 0; + + if (program_size > 52L) { + first_word = get_unaligned_be32(&p[0]); + version = (first_word & 1L); + delta = version * 8; + + crc_section = get_unaligned_be32(&p[32 + delta]); + } + + if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L)) + status = -EIO; + + if (crc_section >= program_size) + status = -EIO; + + if (status == 0) { + local_expected = (u16)get_unaligned_be16(&p[crc_section]); + + for (i = 0; i < crc_section; ++i) { + databyte = p[i]; + for (bit = 0; bit < 8; bit++) { + feedback = (databyte ^ shift_reg) & 0x01; + shift_reg >>= 1; + if (feedback) + shift_reg ^= 0x8408; + + databyte >>= 1; + } + } + + local_actual = (u16)~shift_reg; + + if (local_expected != local_actual) + status = -EILSEQ; + + } + + if (debug || status) { + switch (status) { + case 0: + printk(KERN_INFO "%s: CRC matched: %04x\n", __func__, + local_actual); + break; + case -EILSEQ: + printk(KERN_ERR "%s: CRC mismatch: expected %04x, " + "actual %04x\n", __func__, local_expected, + local_actual); + break; + case -ENODATA: + printk(KERN_ERR "%s: expected CRC not found, " + "actual CRC = %04x\n", __func__, + local_actual); + break; + case -EIO: + printk(KERN_ERR "%s: error: format isn't " + "recognized.\n", __func__); + break; + default: + printk(KERN_ERR "%s: CRC function returned error " + "code %d\n", __func__, status); + break; + } + } + + return status; +} + +static int altera_get_file_info(u8 *p, + s32 program_size, + int *format_version, + int *action_count, + int *procedure_count) +{ + int status = -EIO; + u32 first_word = 0; + int version = 0; + + if 
(program_size <= 52L) + return status; + + first_word = get_unaligned_be32(&p[0]); + + if ((first_word == 0x4A414D00L) || (first_word == 0x4A414D01L)) { + status = 0; + + version = (first_word & 1L); + *format_version = version + 1; + + if (version > 0) { + *action_count = get_unaligned_be32(&p[48]); + *procedure_count = get_unaligned_be32(&p[52]); + } + } + + return status; +} + +static int altera_get_act_info(u8 *p, + s32 program_size, + int index, + char **name, + char **description, + struct altera_procinfo **proc_list) +{ + int status = -EIO; + struct altera_procinfo *procptr = NULL; + struct altera_procinfo *tmpptr = NULL; + u32 first_word = 0L; + u32 action_table = 0L; + u32 proc_table = 0L; + u32 str_table = 0L; + u32 note_strings = 0L; + u32 action_count = 0L; + u32 proc_count = 0L; + u32 act_name_id = 0L; + u32 act_desc_id = 0L; + u32 act_proc_id = 0L; + u32 act_proc_name = 0L; + u8 act_proc_attribute = 0; + + if (program_size <= 52L) + return status; + /* Read header information */ + first_word = get_unaligned_be32(&p[0]); + + if (first_word != 0x4A414D01L) + return status; + + action_table = get_unaligned_be32(&p[4]); + proc_table = get_unaligned_be32(&p[8]); + str_table = get_unaligned_be32(&p[12]); + note_strings = get_unaligned_be32(&p[16]); + action_count = get_unaligned_be32(&p[48]); + proc_count = get_unaligned_be32(&p[52]); + + if (index >= action_count) + return status; + + act_name_id = get_unaligned_be32(&p[action_table + (12 * index)]); + act_desc_id = get_unaligned_be32(&p[action_table + (12 * index) + 4]); + act_proc_id = get_unaligned_be32(&p[action_table + (12 * index) + 8]); + + *name = &p[str_table + act_name_id]; + + if (act_desc_id < (note_strings - str_table)) + *description = &p[str_table + act_desc_id]; + + do { + act_proc_name = get_unaligned_be32( + &p[proc_table + (13 * act_proc_id)]); + act_proc_attribute = + (p[proc_table + (13 * act_proc_id) + 8] & 0x03); + + procptr = + kzalloc(sizeof(struct altera_procinfo), + GFP_KERNEL); + + if (procptr == NULL) + status = -ENOMEM; + else { + procptr->name = &p[str_table + act_proc_name]; + procptr->attrs = act_proc_attribute; + procptr->next = NULL; + + /* add record to end of linked list */ + if (*proc_list == NULL) + *proc_list = procptr; + else { + tmpptr = *proc_list; + while (tmpptr->next != NULL) + tmpptr = tmpptr->next; + tmpptr->next = procptr; + } + } + + act_proc_id = get_unaligned_be32( + &p[proc_table + (13 * act_proc_id) + 4]); + } while ((act_proc_id != 0) && (act_proc_id < proc_count)); + + return status; +} + +int altera_init(struct altera_config *config, const struct firmware *fw) +{ + struct altera_state *astate = NULL; + struct altera_procinfo *proc_list = NULL; + struct altera_procinfo *procptr = NULL; + char *key = NULL; + char *value = NULL; + char *action_name = NULL; + char *description = NULL; + int exec_result = 0; + int exit_code = 0; + int format_version = 0; + int action_count = 0; + int procedure_count = 0; + int index = 0; + s32 offset = 0L; + s32 error_address = 0L; + int retval = 0; + + key = kzalloc(33, GFP_KERNEL); + if (!key) { + retval = -ENOMEM; + goto out; + } + value = kzalloc(257, GFP_KERNEL); + if (!value) { + retval = -ENOMEM; + goto free_key; + } + astate = kzalloc(sizeof(struct altera_state), GFP_KERNEL); + if (!astate) { + retval = -ENOMEM; + goto free_value; + } + + astate->config = config; + if (!astate->config->jtag_io) { + dprintk("%s: using byteblaster!\n", __func__); + astate->config->jtag_io = netup_jtag_io_lpt; + } + + altera_check_crc((u8 *)fw->data, 
fw->size); + + if (debug) { + altera_get_file_info((u8 *)fw->data, fw->size, &format_version, + &action_count, &procedure_count); + printk(KERN_INFO "%s: File format is %s ByteCode format\n", + __func__, (format_version == 2) ? "Jam STAPL" : + "pre-standardized Jam 1.1"); + while (altera_get_note((u8 *)fw->data, fw->size, + &offset, key, value, 32, 256) == 0) + printk(KERN_INFO "%s: NOTE \"%s\" = \"%s\"\n", + __func__, key, value); + } + + if (debug && (format_version == 2) && (action_count > 0)) { + printk(KERN_INFO "%s: Actions available:\n", __func__); + for (index = 0; index < action_count; ++index) { + altera_get_act_info((u8 *)fw->data, fw->size, + index, &action_name, + &description, + &proc_list); + + if (description == NULL) + printk(KERN_INFO "%s: %s\n", + __func__, + action_name); + else + printk(KERN_INFO "%s: %s \"%s\"\n", + __func__, + action_name, + description); + + procptr = proc_list; + while (procptr != NULL) { + if (procptr->attrs != 0) + printk(KERN_INFO "%s: %s (%s)\n", + __func__, + procptr->name, + (procptr->attrs == 1) ? + "optional" : "recommended"); + + proc_list = procptr->next; + kfree(procptr); + procptr = proc_list; + } + } + + printk(KERN_INFO "\n"); + } + + exec_result = altera_execute(astate, (u8 *)fw->data, fw->size, + &error_address, &exit_code, &format_version); + + if (exit_code) + exec_result = -EREMOTEIO; + + if ((format_version == 2) && (exec_result == -EINVAL)) { + if (astate->config->action == NULL) + printk(KERN_ERR "%s: error: no action specified for " + "Jam STAPL file.\nprogram terminated.\n", + __func__); + else + printk(KERN_ERR "%s: error: action \"%s\"" + " is not supported " + "for this Jam STAPL file.\n" + "Program terminated.\n", __func__, + astate->config->action); + + } else if (exec_result) + printk(KERN_ERR "%s: error %d\n", __func__, exec_result); + + kfree(astate); +free_value: + kfree(value); +free_key: + kfree(key); +out: + return retval; +} +EXPORT_SYMBOL(altera_init); diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c new file mode 100644 index 000000000..6fff44b95 --- /dev/null +++ b/drivers/misc/apds9802als.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * apds9802als.c - apds9802 ALS Driver + * + * Copyright (C) 2009 Intel Corp + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/sysfs.h> +#include <linux/pm_runtime.h> + +#define ALS_MIN_RANGE_VAL 1 +#define ALS_MAX_RANGE_VAL 2 +#define POWER_STA_ENABLE 1 +#define POWER_STA_DISABLE 0 + +#define DRIVER_NAME "apds9802als" + +struct als_data { + struct mutex mutex; +}; + +static ssize_t als_sensing_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + int val; + + val = i2c_smbus_read_byte_data(client, 0x81); + if (val < 0) + return val; + if (val & 1) + return sprintf(buf, "4095\n"); + else + return sprintf(buf, "65535\n"); +} + +static int als_wait_for_data_ready(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + int ret; + int retry = 10; + + do { + msleep(30); + ret = i2c_smbus_read_byte_data(client, 0x86); + } while (!(ret & 0x80) && retry--); + + if (retry < 0) { + dev_warn(dev, "timeout waiting for data ready\n"); + return -ETIMEDOUT; + } + + return 
0;
+}
+
+static ssize_t als_lux0_input_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct als_data *data = i2c_get_clientdata(client);
+ int ret_val;
+ int temp;
+
+ /* Protect against parallel reads */
+ pm_runtime_get_sync(dev);
+ mutex_lock(&data->mutex);
+
+ /* clear EOC interrupt status */
+ i2c_smbus_write_byte(client, 0x40);
+ /* start measurement */
+ temp = i2c_smbus_read_byte_data(client, 0x81);
+ i2c_smbus_write_byte_data(client, 0x81, temp | 0x08);
+
+ ret_val = als_wait_for_data_ready(dev);
+ if (ret_val < 0)
+ goto failed;
+
+ temp = i2c_smbus_read_byte_data(client, 0x8C); /* LSB data */
+ if (temp < 0) {
+ ret_val = temp;
+ goto failed;
+ }
+ ret_val = i2c_smbus_read_byte_data(client, 0x8D); /* MSB data */
+ if (ret_val < 0)
+ goto failed;
+
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+
+ temp = (ret_val << 8) | temp;
+ return sprintf(buf, "%d\n", temp);
+failed:
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+ return ret_val;
+}
+
+static ssize_t als_sensing_range_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct als_data *data = i2c_get_clientdata(client);
+ int ret_val;
+ unsigned long val;
+
+ ret_val = kstrtoul(buf, 10, &val);
+ if (ret_val)
+ return ret_val;
+
+ if (val < 4096)
+ val = 1;
+ else if (val < 65536)
+ val = 2;
+ else
+ return -ERANGE;
+
+ pm_runtime_get_sync(dev);
+
+ /* Make sure nobody else reads/modifies/writes 0x81 while we
+ are active */
+ mutex_lock(&data->mutex);
+
+ ret_val = i2c_smbus_read_byte_data(client, 0x81);
+ if (ret_val < 0)
+ goto fail;
+
+ /* Reset the bits before setting them */
+ ret_val = ret_val & 0xFA;
+
+ if (val == 1) /* Setting detection range up to 4k LUX */
+ ret_val = (ret_val | 0x01);
+ else /* Setting detection range up to 64k LUX */
+ ret_val = (ret_val | 0x00);
+
+ ret_val = i2c_smbus_write_byte_data(client, 0x81, ret_val);
+
+ if (ret_val >= 0) {
+ /* All OK */
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+ return count;
+ }
+fail:
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+ return ret_val;
+}
+
+static int als_set_power_state(struct i2c_client *client, bool on_off)
+{
+ int ret_val;
+ struct als_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->mutex);
+ ret_val = i2c_smbus_read_byte_data(client, 0x80);
+ if (ret_val < 0)
+ goto fail;
+ if (on_off)
+ ret_val = ret_val | 0x01;
+ else
+ ret_val = ret_val & 0xFE;
+ ret_val = i2c_smbus_write_byte_data(client, 0x80, ret_val);
+fail:
+ mutex_unlock(&data->mutex);
+ return ret_val;
+}
+
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR,
+ als_sensing_range_show, als_sensing_range_store);
+static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux0_input_data_show, NULL);
+
+static struct attribute *mid_att_als[] = {
+ &dev_attr_lux0_sensor_range.attr,
+ &dev_attr_lux0_input.attr,
+ NULL
+};
+
+static const struct attribute_group m_als_gr = {
+ .name = "apds9802als",
+ .attrs = mid_att_als
+};
+
+static int als_set_default_config(struct i2c_client *client)
+{
+ int ret_val;
+ /* Write the command and then switch on */
+ ret_val = i2c_smbus_write_byte_data(client, 0x80, 0x01);
+ if (ret_val < 0) {
+ dev_err(&client->dev, "failed default switch on write\n");
+ return ret_val;
+ }
+ /* detection range: 1~64K Lux, manual measurement */
+ ret_val = i2c_smbus_write_byte_data(client, 0x81, 0x08);
+ if (ret_val < 0)
+ dev_err(&client->dev, 
"failed default LUX on write\n"); + + /* We always get 0 for the 1st measurement after system power on, + * so make sure it is finished before user asks for data. + */ + als_wait_for_data_ready(&client->dev); + + return ret_val; +} + +static int apds9802als_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int res; + struct als_data *data; + + data = kzalloc(sizeof(struct als_data), GFP_KERNEL); + if (data == NULL) { + dev_err(&client->dev, "Memory allocation failed\n"); + return -ENOMEM; + } + i2c_set_clientdata(client, data); + res = sysfs_create_group(&client->dev.kobj, &m_als_gr); + if (res) { + dev_err(&client->dev, "device create file failed\n"); + goto als_error1; + } + dev_info(&client->dev, "ALS chip found\n"); + als_set_default_config(client); + mutex_init(&data->mutex); + + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + + return res; +als_error1: + kfree(data); + return res; +} + +static int apds9802als_remove(struct i2c_client *client) +{ + struct als_data *data = i2c_get_clientdata(client); + + pm_runtime_get_sync(&client->dev); + + als_set_power_state(client, false); + sysfs_remove_group(&client->dev.kobj, &m_als_gr); + + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + pm_runtime_put_noidle(&client->dev); + + kfree(data); + return 0; +} + +#ifdef CONFIG_PM + +static int apds9802als_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + als_set_power_state(client, false); + return 0; +} + +static int apds9802als_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + als_set_power_state(client, true); + return 0; +} + +static UNIVERSAL_DEV_PM_OPS(apds9802als_pm_ops, apds9802als_suspend, + apds9802als_resume, NULL); + +#define APDS9802ALS_PM_OPS (&apds9802als_pm_ops) + +#else /* CONFIG_PM */ +#define APDS9802ALS_PM_OPS NULL +#endif /* CONFIG_PM */ + +static const struct i2c_device_id apds9802als_id[] = { + { DRIVER_NAME, 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, apds9802als_id); + +static struct i2c_driver apds9802als_driver = { + .driver = { + .name = DRIVER_NAME, + .pm = APDS9802ALS_PM_OPS, + }, + .probe = apds9802als_probe, + .remove = apds9802als_remove, + .id_table = apds9802als_id, +}; + +module_i2c_driver(apds9802als_driver); + +MODULE_AUTHOR("Anantha Narayanan <Anantha.Narayanan@intel.com"); +MODULE_DESCRIPTION("Avago apds9802als ALS Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c new file mode 100644 index 000000000..45f5b997a --- /dev/null +++ b/drivers/misc/apds990x.c @@ -0,0 +1,1285 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This file is part of the APDS990x sensor driver. + * Chip is combined proximity and ambient light sensor. + * + * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). 
+ * + * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/regulator/consumer.h> +#include <linux/pm_runtime.h> +#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/slab.h> +#include <linux/platform_data/apds990x.h> + +/* Register map */ +#define APDS990X_ENABLE 0x00 /* Enable of states and interrupts */ +#define APDS990X_ATIME 0x01 /* ALS ADC time */ +#define APDS990X_PTIME 0x02 /* Proximity ADC time */ +#define APDS990X_WTIME 0x03 /* Wait time */ +#define APDS990X_AILTL 0x04 /* ALS interrupt low threshold low byte */ +#define APDS990X_AILTH 0x05 /* ALS interrupt low threshold hi byte */ +#define APDS990X_AIHTL 0x06 /* ALS interrupt hi threshold low byte */ +#define APDS990X_AIHTH 0x07 /* ALS interrupt hi threshold hi byte */ +#define APDS990X_PILTL 0x08 /* Proximity interrupt low threshold low byte */ +#define APDS990X_PILTH 0x09 /* Proximity interrupt low threshold hi byte */ +#define APDS990X_PIHTL 0x0a /* Proximity interrupt hi threshold low byte */ +#define APDS990X_PIHTH 0x0b /* Proximity interrupt hi threshold hi byte */ +#define APDS990X_PERS 0x0c /* Interrupt persistence filters */ +#define APDS990X_CONFIG 0x0d /* Configuration */ +#define APDS990X_PPCOUNT 0x0e /* Proximity pulse count */ +#define APDS990X_CONTROL 0x0f /* Gain control register */ +#define APDS990X_REV 0x11 /* Revision Number */ +#define APDS990X_ID 0x12 /* Device ID */ +#define APDS990X_STATUS 0x13 /* Device status */ +#define APDS990X_CDATAL 0x14 /* Clear ADC low data register */ +#define APDS990X_CDATAH 0x15 /* Clear ADC high data register */ +#define APDS990X_IRDATAL 0x16 /* IR ADC low data register */ +#define APDS990X_IRDATAH 0x17 /* IR ADC high data register */ +#define APDS990X_PDATAL 0x18 /* Proximity ADC low data register */ +#define APDS990X_PDATAH 0x19 /* Proximity ADC high data register */ + +/* Control */ +#define APDS990X_MAX_AGAIN 3 + +/* Enable register */ +#define APDS990X_EN_PIEN (0x1 << 5) +#define APDS990X_EN_AIEN (0x1 << 4) +#define APDS990X_EN_WEN (0x1 << 3) +#define APDS990X_EN_PEN (0x1 << 2) +#define APDS990X_EN_AEN (0x1 << 1) +#define APDS990X_EN_PON (0x1 << 0) +#define APDS990X_EN_DISABLE_ALL 0 + +/* Status register */ +#define APDS990X_ST_PINT (0x1 << 5) +#define APDS990X_ST_AINT (0x1 << 4) + +/* I2C access types */ +#define APDS990x_CMD_TYPE_MASK (0x03 << 5) +#define APDS990x_CMD_TYPE_RB (0x00 << 5) /* Repeated byte */ +#define APDS990x_CMD_TYPE_INC (0x01 << 5) /* Auto increment */ +#define APDS990x_CMD_TYPE_SPE (0x03 << 5) /* Special function */ + +#define APDS990x_ADDR_SHIFT 0 +#define APDS990x_CMD 0x80 + +/* Interrupt ack commands */ +#define APDS990X_INT_ACK_ALS 0x6 +#define APDS990X_INT_ACK_PS 0x5 +#define APDS990X_INT_ACK_BOTH 0x7 + +/* ptime */ +#define APDS990X_PTIME_DEFAULT 0xff /* Recommended conversion time 2.7ms*/ + +/* wtime */ +#define APDS990X_WTIME_DEFAULT 0xee /* ~50ms wait time */ + +#define APDS990X_TIME_TO_ADC 1024 /* One timetick as ADC count value */ + +/* Persistence */ +#define APDS990X_APERS_SHIFT 0 +#define APDS990X_PPERS_SHIFT 4 + +/* Supported ID:s */ +#define APDS990X_ID_0 0x0 +#define APDS990X_ID_4 0x4 +#define APDS990X_ID_29 0x29 + +/* pgain and pdiode settings */ +#define APDS_PGAIN_1X 0x0 +#define APDS_PDIODE_IR 0x2 + +#define APDS990X_LUX_OUTPUT_SCALE 10 + +/* Reverse chip factors for threshold calculation */ +struct reverse_factors { + u32 afactor; + int cf1; + int irf1; + int cf2; + 
int irf2; +}; + +struct apds990x_chip { + struct apds990x_platform_data *pdata; + struct i2c_client *client; + struct mutex mutex; /* avoid parallel access */ + struct regulator_bulk_data regs[2]; + wait_queue_head_t wait; + + int prox_en; + bool prox_continuous_mode; + bool lux_wait_fresh_res; + + /* Chip parameters */ + struct apds990x_chip_factors cf; + struct reverse_factors rcf; + u16 atime; /* als integration time */ + u16 arate; /* als reporting rate */ + u16 a_max_result; /* Max possible ADC value with current atime */ + u8 again_meas; /* Gain used in last measurement */ + u8 again_next; /* Next calculated gain */ + u8 pgain; + u8 pdiode; + u8 pdrive; + u8 lux_persistence; + u8 prox_persistence; + + u32 lux_raw; + u32 lux; + u16 lux_clear; + u16 lux_ir; + u16 lux_calib; + u32 lux_thres_hi; + u32 lux_thres_lo; + + u32 prox_thres; + u16 prox_data; + u16 prox_calib; + + char chipname[10]; + u8 revision; +}; + +#define APDS_CALIB_SCALER 8192 +#define APDS_LUX_NEUTRAL_CALIB_VALUE (1 * APDS_CALIB_SCALER) +#define APDS_PROX_NEUTRAL_CALIB_VALUE (1 * APDS_CALIB_SCALER) + +#define APDS_PROX_DEF_THRES 600 +#define APDS_PROX_HYSTERESIS 50 +#define APDS_LUX_DEF_THRES_HI 101 +#define APDS_LUX_DEF_THRES_LO 100 +#define APDS_DEFAULT_PROX_PERS 1 + +#define APDS_TIMEOUT 2000 +#define APDS_STARTUP_DELAY 25000 /* us */ +#define APDS_RANGE 65535 +#define APDS_PROX_RANGE 1023 +#define APDS_LUX_GAIN_LO_LIMIT 100 +#define APDS_LUX_GAIN_LO_LIMIT_STRICT 25 + +#define TIMESTEP 87 /* 2.7ms is about 87 / 32 */ +#define TIME_STEP_SCALER 32 + +#define APDS_LUX_AVERAGING_TIME 50 /* tolerates 50/60Hz ripple */ +#define APDS_LUX_DEFAULT_RATE 200 + +static const u8 again[] = {1, 8, 16, 120}; /* ALS gain steps */ + +/* Following two tables must match i.e 10Hz rate means 1 as persistence value */ +static const u16 arates_hz[] = {10, 5, 2, 1}; +static const u8 apersis[] = {1, 2, 4, 5}; + +/* Regulators */ +static const char reg_vcc[] = "Vdd"; +static const char reg_vled[] = "Vled"; + +static int apds990x_read_byte(struct apds990x_chip *chip, u8 reg, u8 *data) +{ + struct i2c_client *client = chip->client; + s32 ret; + + reg &= ~APDS990x_CMD_TYPE_MASK; + reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB; + + ret = i2c_smbus_read_byte_data(client, reg); + *data = ret; + return (int)ret; +} + +static int apds990x_read_word(struct apds990x_chip *chip, u8 reg, u16 *data) +{ + struct i2c_client *client = chip->client; + s32 ret; + + reg &= ~APDS990x_CMD_TYPE_MASK; + reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC; + + ret = i2c_smbus_read_word_data(client, reg); + *data = ret; + return (int)ret; +} + +static int apds990x_write_byte(struct apds990x_chip *chip, u8 reg, u8 data) +{ + struct i2c_client *client = chip->client; + s32 ret; + + reg &= ~APDS990x_CMD_TYPE_MASK; + reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB; + + ret = i2c_smbus_write_byte_data(client, reg, data); + return (int)ret; +} + +static int apds990x_write_word(struct apds990x_chip *chip, u8 reg, u16 data) +{ + struct i2c_client *client = chip->client; + s32 ret; + + reg &= ~APDS990x_CMD_TYPE_MASK; + reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC; + + ret = i2c_smbus_write_word_data(client, reg, data); + return (int)ret; +} + +static int apds990x_mode_on(struct apds990x_chip *chip) +{ + /* ALS is mandatory, proximity optional */ + u8 reg = APDS990X_EN_AIEN | APDS990X_EN_PON | APDS990X_EN_AEN | + APDS990X_EN_WEN; + + if (chip->prox_en) + reg |= APDS990X_EN_PIEN | APDS990X_EN_PEN; + + return apds990x_write_byte(chip, APDS990X_ENABLE, reg); +} + +static u16 
apds990x_lux_to_threshold(struct apds990x_chip *chip, u32 lux)
+{
+ u32 thres;
+ u32 cpl;
+ u32 ir;
+
+ if (lux == 0)
+ return 0;
+ else if (lux == APDS_RANGE)
+ return APDS_RANGE;
+
+ /*
+ * Reported LUX value is a combination of the IR and CLEAR channel
+ * values. However, interrupt threshold is only for clear channel.
+ * This function approximates the HW threshold value needed for a
+ * given LUX value in the current lighting conditions.
+ * IR level compared to visible light varies heavily depending on the
+ * source of the light.
+ *
+ * Calculate threshold value for the next measurement period.
+ * Math: threshold = lux * cpl where
+ * cpl = atime * again / (glass_attenuation * device_factor)
+ * (count-per-lux)
+ *
+ * First remove calibration. Division by four is to avoid overflow
+ */
+ lux = lux * (APDS_CALIB_SCALER / 4) / (chip->lux_calib / 4);
+
+ /* Multiplication by 64 is to increase accuracy */
+ cpl = ((u32)chip->atime * (u32)again[chip->again_next] *
+ APDS_PARAM_SCALE * 64) / (chip->cf.ga * chip->cf.df);
+
+ thres = lux * cpl / 64;
+ /*
+ * Convert IR light from the latest result to match with
+ * new gain step. This helps to adapt with the current
+ * source of light.
+ */
+ ir = (u32)chip->lux_ir * (u32)again[chip->again_next] /
+ (u32)again[chip->again_meas];
+
+ /*
+ * Compensate count with IR light impact
+ * IAC1 > IAC2 (see apds990x_get_lux for formulas)
+ */
+ if (chip->lux_clear * APDS_PARAM_SCALE >=
+ chip->rcf.afactor * chip->lux_ir)
+ thres = (chip->rcf.cf1 * thres + chip->rcf.irf1 * ir) /
+ APDS_PARAM_SCALE;
+ else
+ thres = (chip->rcf.cf2 * thres + chip->rcf.irf2 * ir) /
+ APDS_PARAM_SCALE;
+
+ if (thres >= chip->a_max_result)
+ thres = chip->a_max_result - 1;
+ return thres;
+}
+
+static inline int apds990x_set_atime(struct apds990x_chip *chip, u32 time_ms)
+{
+ u8 reg_value;
+
+ chip->atime = time_ms;
+ /* Formula is specified in the data sheet */
+ reg_value = 256 - ((time_ms * TIME_STEP_SCALER) / TIMESTEP);
+ /* Calculate max ADC value for given integration time */
+ chip->a_max_result = (u16)(256 - reg_value) * APDS990X_TIME_TO_ADC;
+ return apds990x_write_byte(chip, APDS990X_ATIME, reg_value);
+}
+
+/* Called always with mutex locked */
+static int apds990x_refresh_pthres(struct apds990x_chip *chip, int data)
+{
+ int ret, lo, hi;
+
+ /* If the chip is not in use, don't try to access it */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ if (data < chip->prox_thres) {
+ lo = 0;
+ hi = chip->prox_thres;
+ } else {
+ lo = chip->prox_thres - APDS_PROX_HYSTERESIS;
+ if (chip->prox_continuous_mode)
+ hi = chip->prox_thres;
+ else
+ hi = APDS_RANGE;
+ }
+
+ ret = apds990x_write_word(chip, APDS990X_PILTL, lo);
+ ret |= apds990x_write_word(chip, APDS990X_PIHTL, hi);
+ return ret;
+}
+
+/* Called always with mutex locked */
+static int apds990x_refresh_athres(struct apds990x_chip *chip)
+{
+ int ret;
+ /* If the chip is not in use, don't try to access it */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ ret = apds990x_write_word(chip, APDS990X_AILTL,
+ apds990x_lux_to_threshold(chip, chip->lux_thres_lo));
+ ret |= apds990x_write_word(chip, APDS990X_AIHTL,
+ apds990x_lux_to_threshold(chip, chip->lux_thres_hi));
+
+ return ret;
+}
+
+/* Called always with mutex locked */
+static void apds990x_force_a_refresh(struct apds990x_chip *chip)
+{
+ /* This will force ALS interrupt after the next measurement. 
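The
+ * default window is only one count wide
+ * (APDS_LUX_DEF_THRES_LO..APDS_LUX_DEF_THRES_HI), so
+ * virtually any result lands outside of it.
+ 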
*/
+ apds990x_write_word(chip, APDS990X_AILTL, APDS_LUX_DEF_THRES_LO);
+ apds990x_write_word(chip, APDS990X_AIHTL, APDS_LUX_DEF_THRES_HI);
+}
+
+/* Called always with mutex locked */
+static void apds990x_force_p_refresh(struct apds990x_chip *chip)
+{
+ /* This will force proximity interrupt after the next measurement. */
+ apds990x_write_word(chip, APDS990X_PILTL, APDS_PROX_DEF_THRES - 1);
+ apds990x_write_word(chip, APDS990X_PIHTL, APDS_PROX_DEF_THRES);
+}
+
+/* Called always with mutex locked */
+static int apds990x_calc_again(struct apds990x_chip *chip)
+{
+ int curr_again = chip->again_meas;
+ int next_again = chip->again_meas;
+ int ret = 0;
+
+ /* Calculate suitable als gain */
+ if (chip->lux_clear == chip->a_max_result)
+ next_again -= 2; /* ALS saturated. Decrease gain by 2 steps */
+ else if (chip->lux_clear > chip->a_max_result / 2)
+ next_again--;
+ else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
+ next_again += 2; /* Too dark. Increase gain by 2 steps */
+ else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT)
+ next_again++;
+
+ /* Limit gain to available range */
+ if (next_again < 0)
+ next_again = 0;
+ else if (next_again > APDS990X_MAX_AGAIN)
+ next_again = APDS990X_MAX_AGAIN;
+
+ /* Check whether we can trust the measured result */
+ if (chip->lux_clear == chip->a_max_result)
+ /* Result can be totally garbage due to saturation */
+ ret = -ERANGE;
+ else if (next_again != curr_again &&
+ chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
+ /*
+ * Gain is changed and measurement result is very small.
+ * Result can be totally garbage due to underflow
+ */
+ ret = -ERANGE;
+
+ chip->again_next = next_again;
+ apds990x_write_byte(chip, APDS990X_CONTROL,
+ (chip->pdrive << 6) |
+ (chip->pdiode << 4) |
+ (chip->pgain << 2) |
+ (chip->again_next << 0));
+
+ /*
+ * Error means bad result -> re-measurement is needed. The forced
+ * refresh uses fastest possible persistence setting to get a result
+ * as soon as possible. 
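+ *
+ * For example, a saturated result at gain step 2 (16x)
+ * drops the gain two steps to step 0 (1x) and returns
+ * -ERANGE so that the sample is discarded and remeasured.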
+ */
+ if (ret < 0)
+ apds990x_force_a_refresh(chip);
+ else
+ apds990x_refresh_athres(chip);
+
+ return ret;
+}
+
+/* Called always with mutex locked */
+static int apds990x_get_lux(struct apds990x_chip *chip, int clear, int ir)
+{
+ int iac, iac1, iac2; /* IR adjusted counts */
+ u32 lpc; /* Lux per count */
+
+ /* Formulas:
+ * iac1 = CF1 * CLEAR_CH - IRF1 * IR_CH
+ * iac2 = CF2 * CLEAR_CH - IRF2 * IR_CH
+ */
+ iac1 = (chip->cf.cf1 * clear - chip->cf.irf1 * ir) / APDS_PARAM_SCALE;
+ iac2 = (chip->cf.cf2 * clear - chip->cf.irf2 * ir) / APDS_PARAM_SCALE;
+
+ iac = max(iac1, iac2);
+ iac = max(iac, 0);
+
+ lpc = APDS990X_LUX_OUTPUT_SCALE * (chip->cf.df * chip->cf.ga) /
+ (u32)(again[chip->again_meas] * (u32)chip->atime);
+
+ return (iac * lpc) / APDS_PARAM_SCALE;
+}
+
+static int apds990x_ack_int(struct apds990x_chip *chip, u8 mode)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+ u8 reg = APDS990x_CMD | APDS990x_CMD_TYPE_SPE;
+
+ switch (mode & (APDS990X_ST_AINT | APDS990X_ST_PINT)) {
+ case APDS990X_ST_AINT:
+ reg |= APDS990X_INT_ACK_ALS;
+ break;
+ case APDS990X_ST_PINT:
+ reg |= APDS990X_INT_ACK_PS;
+ break;
+ default:
+ reg |= APDS990X_INT_ACK_BOTH;
+ break;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ return (int)ret;
+}
+
+static irqreturn_t apds990x_irq(int irq, void *data)
+{
+ struct apds990x_chip *chip = data;
+ u8 status;
+
+ apds990x_read_byte(chip, APDS990X_STATUS, &status);
+ apds990x_ack_int(chip, status);
+
+ mutex_lock(&chip->mutex);
+ if (!pm_runtime_suspended(&chip->client->dev)) {
+ if (status & APDS990X_ST_AINT) {
+ apds990x_read_word(chip, APDS990X_CDATAL,
+ &chip->lux_clear);
+ apds990x_read_word(chip, APDS990X_IRDATAL,
+ &chip->lux_ir);
+ /* Store used gain for calculations */
+ chip->again_meas = chip->again_next;
+
+ chip->lux_raw = apds990x_get_lux(chip,
+ chip->lux_clear,
+ chip->lux_ir);
+
+ if (apds990x_calc_again(chip) == 0) {
+ /* Result is valid */
+ chip->lux = chip->lux_raw;
+ chip->lux_wait_fresh_res = false;
+ wake_up(&chip->wait);
+ sysfs_notify(&chip->client->dev.kobj,
+ NULL, "lux0_input");
+ }
+ }
+
+ if ((status & APDS990X_ST_PINT) && chip->prox_en) {
+ u16 clr_ch;
+
+ apds990x_read_word(chip, APDS990X_CDATAL, &clr_ch);
+ /*
+ * If ALS channel is saturated at min gain,
+ * proximity gives false positive values.
+ * Just ignore them. 
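+ * Reporting zero keeps prox0_raw consistent with
+ * the "no object detected" state.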
+ */
+ if (chip->again_meas == 0 &&
+ clr_ch == chip->a_max_result)
+ chip->prox_data = 0;
+ else
+ apds990x_read_word(chip,
+ APDS990X_PDATAL,
+ &chip->prox_data);
+
+ apds990x_refresh_pthres(chip, chip->prox_data);
+ if (chip->prox_data < chip->prox_thres)
+ chip->prox_data = 0;
+ else if (!chip->prox_continuous_mode)
+ chip->prox_data = APDS_PROX_RANGE;
+ sysfs_notify(&chip->client->dev.kobj,
+ NULL, "prox0_raw");
+ }
+ }
+ mutex_unlock(&chip->mutex);
+ return IRQ_HANDLED;
+}
+
+static int apds990x_configure(struct apds990x_chip *chip)
+{
+ /* It is recommended to use disabled mode during these operations */
+ apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
+
+ /* conversion and wait times for different state machine states */
+ apds990x_write_byte(chip, APDS990X_PTIME, APDS990X_PTIME_DEFAULT);
+ apds990x_write_byte(chip, APDS990X_WTIME, APDS990X_WTIME_DEFAULT);
+ apds990x_set_atime(chip, APDS_LUX_AVERAGING_TIME);
+
+ apds990x_write_byte(chip, APDS990X_CONFIG, 0);
+
+ /* Persistence levels */
+ apds990x_write_byte(chip, APDS990X_PERS,
+ (chip->lux_persistence << APDS990X_APERS_SHIFT) |
+ (chip->prox_persistence << APDS990X_PPERS_SHIFT));
+
+ apds990x_write_byte(chip, APDS990X_PPCOUNT, chip->pdata->ppcount);
+
+ /* Start with relatively small gain */
+ chip->again_meas = 1;
+ chip->again_next = 1;
+ apds990x_write_byte(chip, APDS990X_CONTROL,
+ (chip->pdrive << 6) |
+ (chip->pdiode << 4) |
+ (chip->pgain << 2) |
+ (chip->again_next << 0));
+ return 0;
+}
+
+static int apds990x_detect(struct apds990x_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+ u8 id;
+
+ ret = apds990x_read_byte(chip, APDS990X_ID, &id);
+ if (ret < 0) {
+ dev_err(&client->dev, "ID read failed\n");
+ return ret;
+ }
+
+ ret = apds990x_read_byte(chip, APDS990X_REV, &chip->revision);
+ if (ret < 0) {
+ dev_err(&client->dev, "REV read failed\n");
+ return ret;
+ }
+
+ switch (id) {
+ case APDS990X_ID_0:
+ case APDS990X_ID_4:
+ case APDS990X_ID_29:
+ snprintf(chip->chipname, sizeof(chip->chipname), "APDS-990x");
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int apds990x_chip_on(struct apds990x_chip *chip)
+{
+ int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
+ chip->regs);
+ if (err < 0)
+ return err;
+
+ usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);
+
+ /* Refresh all configs in case regulators were off */
+ chip->prox_data = 0;
+ apds990x_configure(chip);
+ apds990x_mode_on(chip);
+ return 0;
+}
+#endif
+
+static int apds990x_chip_off(struct apds990x_chip *chip)
+{
+ apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+ return 0;
+}
+
+static ssize_t apds990x_lux_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ ssize_t ret;
+ u32 result;
+ long timeout;
+
+ if (pm_runtime_suspended(dev))
+ return -EIO;
+
+ timeout = wait_event_interruptible_timeout(chip->wait,
+ !chip->lux_wait_fresh_res,
+ msecs_to_jiffies(APDS_TIMEOUT));
+ if (!timeout)
+ return -EIO;
+
+ mutex_lock(&chip->mutex);
+ result = (chip->lux * chip->lux_calib) / APDS_CALIB_SCALER;
+ if (result > (APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE))
+ result = APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE;
+
+ ret = sprintf(buf, "%d.%d\n",
+ result / APDS990X_LUX_OUTPUT_SCALE,
+ result % APDS990X_LUX_OUTPUT_SCALE);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static DEVICE_ATTR(lux0_input, S_IRUGO, 
apds990x_lux_show, NULL); + +static ssize_t apds990x_lux_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", APDS_RANGE); +} + +static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, apds990x_lux_range_show, NULL); + +static ssize_t apds990x_lux_calib_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", APDS_CALIB_SCALER); +} + +static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO, + apds990x_lux_calib_format_show, NULL); + +static ssize_t apds990x_lux_calib_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct apds990x_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", chip->lux_calib); +} + +static ssize_t apds990x_lux_calib_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct apds990x_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + chip->lux_calib = value; + + return len; +} + +static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, apds990x_lux_calib_show, + apds990x_lux_calib_store); + +static ssize_t apds990x_rate_avail(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i; + int pos = 0; + + for (i = 0; i < ARRAY_SIZE(arates_hz); i++) + pos += sprintf(buf + pos, "%d ", arates_hz[i]); + sprintf(buf + pos - 1, "\n"); + return pos; +} + +static ssize_t apds990x_rate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct apds990x_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", chip->arate); +} + +static int apds990x_set_arate(struct apds990x_chip *chip, int rate) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(arates_hz); i++) + if (rate >= arates_hz[i]) + break; + + if (i == ARRAY_SIZE(arates_hz)) + return -EINVAL; + + /* Pick up corresponding persistence value */ + chip->lux_persistence = apersis[i]; + chip->arate = arates_hz[i]; + + /* If the chip is not in use, don't try to access it */ + if (pm_runtime_suspended(&chip->client->dev)) + return 0; + + /* Persistence levels */ + return apds990x_write_byte(chip, APDS990X_PERS, + (chip->lux_persistence << APDS990X_APERS_SHIFT) | + (chip->prox_persistence << APDS990X_PPERS_SHIFT)); +} + +static ssize_t apds990x_rate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct apds990x_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&chip->mutex); + ret = apds990x_set_arate(chip, value); + mutex_unlock(&chip->mutex); + + if (ret < 0) + return ret; + return len; +} + +static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, apds990x_rate_avail, NULL); + +static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, apds990x_rate_show, + apds990x_rate_store); + +static ssize_t apds990x_prox_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret; + struct apds990x_chip *chip = dev_get_drvdata(dev); + + if (pm_runtime_suspended(dev) || !chip->prox_en) + return -EIO; + + mutex_lock(&chip->mutex); + ret = sprintf(buf, "%d\n", chip->prox_data); + mutex_unlock(&chip->mutex); + return ret; +} + +static DEVICE_ATTR(prox0_raw, S_IRUGO, apds990x_prox_show, NULL); + +static ssize_t apds990x_prox_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", APDS_PROX_RANGE); +} + +static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, 
apds990x_prox_range_show, NULL); + +static ssize_t apds990x_prox_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct apds990x_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", chip->prox_en); +} + +static ssize_t apds990x_prox_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct apds990x_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&chip->mutex); + + if (!chip->prox_en) + chip->prox_data = 0; + + if (value) + chip->prox_en++; + else if (chip->prox_en > 0) + chip->prox_en--; + + if (!pm_runtime_suspended(dev)) + apds990x_mode_on(chip); + mutex_unlock(&chip->mutex); + return len; +} + +static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, apds990x_prox_enable_show, + apds990x_prox_enable_store); + +static const char *reporting_modes[] = {"trigger", "periodic"}; + +static ssize_t apds990x_prox_reporting_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct apds990x_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", + reporting_modes[!!chip->prox_continuous_mode]); +} + +static ssize_t apds990x_prox_reporting_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct apds990x_chip *chip = dev_get_drvdata(dev); + int ret; + + ret = sysfs_match_string(reporting_modes, buf); + if (ret < 0) + return ret; + + chip->prox_continuous_mode = ret; + return len; +} + +static DEVICE_ATTR(prox0_reporting_mode, S_IRUGO | S_IWUSR, + apds990x_prox_reporting_mode_show, + apds990x_prox_reporting_mode_store); + +static ssize_t apds990x_prox_reporting_avail_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%s %s\n", reporting_modes[0], reporting_modes[1]); +} + +static DEVICE_ATTR(prox0_reporting_mode_avail, S_IRUGO | S_IWUSR, + apds990x_prox_reporting_avail_show, NULL); + + +static ssize_t apds990x_lux_thresh_above_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct apds990x_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", chip->lux_thres_hi); +} + +static ssize_t apds990x_lux_thresh_below_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct apds990x_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", chip->lux_thres_lo); +} + +static ssize_t apds990x_set_lux_thresh(struct apds990x_chip *chip, u32 *target, + const char *buf) +{ + unsigned long thresh; + int ret; + + ret = kstrtoul(buf, 0, &thresh); + if (ret) + return ret; + + if (thresh > APDS_RANGE) + return -EINVAL; + + mutex_lock(&chip->mutex); + *target = thresh; + /* + * Don't update values in HW if we are still waiting for + * first interrupt to come after device handle open call. 
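+ * The interrupt path reprograms them through
+ * apds990x_calc_again() once that result arrives.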
+ */
+ if (!chip->lux_wait_fresh_res)
+ apds990x_refresh_athres(chip);
+ mutex_unlock(&chip->mutex);
+ return ret;
+
+}
+
+static ssize_t apds990x_lux_thresh_above_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_hi, buf);
+
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static ssize_t apds990x_lux_thresh_below_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_lo, buf);
+
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
+ apds990x_lux_thresh_above_show,
+ apds990x_lux_thresh_above_store);
+
+static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
+ apds990x_lux_thresh_below_show,
+ apds990x_lux_thresh_below_store);
+
+static ssize_t apds990x_prox_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", chip->prox_thres);
+}
+
+static ssize_t apds990x_prox_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ if ((value > APDS_RANGE) || (value == 0) ||
+ (value < APDS_PROX_HYSTERESIS))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ chip->prox_thres = value;
+
+ apds990x_force_p_refresh(chip);
+ mutex_unlock(&chip->mutex);
+ return len;
+}
+
+static DEVICE_ATTR(prox0_thresh_above_value, S_IRUGO | S_IWUSR,
+ apds990x_prox_threshold_show,
+ apds990x_prox_threshold_store);
+
+static ssize_t apds990x_power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
+}
+
+static ssize_t apds990x_power_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ if (value) {
+ pm_runtime_get_sync(dev);
+ mutex_lock(&chip->mutex);
+ chip->lux_wait_fresh_res = true;
+ apds990x_force_a_refresh(chip);
+ apds990x_force_p_refresh(chip);
+ mutex_unlock(&chip->mutex);
+ } else {
+ if (!pm_runtime_suspended(dev))
+ pm_runtime_put(dev);
+ }
+ return len;
+}
+
+static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
+ apds990x_power_state_show,
+ apds990x_power_state_store);
+
+static ssize_t apds990x_chip_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s %d\n", chip->chipname, chip->revision);
+}
+
+static DEVICE_ATTR(chip_id, S_IRUGO, apds990x_chip_id_show, NULL);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+ &dev_attr_lux0_calibscale.attr,
+ &dev_attr_lux0_calibscale_default.attr,
+ &dev_attr_lux0_input.attr,
+ &dev_attr_lux0_sensor_range.attr,
+ &dev_attr_lux0_rate.attr,
+ &dev_attr_lux0_rate_avail.attr,
+ &dev_attr_lux0_thresh_above_value.attr,
+ &dev_attr_lux0_thresh_below_value.attr,
+ &dev_attr_prox0_raw_en.attr,
+ &dev_attr_prox0_raw.attr,
+ &dev_attr_prox0_sensor_range.attr,
+ &dev_attr_prox0_thresh_above_value.attr,
+ 
&dev_attr_prox0_reporting_mode.attr, + &dev_attr_prox0_reporting_mode_avail.attr, + &dev_attr_chip_id.attr, + &dev_attr_power_state.attr, + NULL +}; + +static const struct attribute_group apds990x_attribute_group[] = { + {.attrs = sysfs_attrs_ctrl }, +}; + +static int apds990x_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct apds990x_chip *chip; + int err; + + chip = kzalloc(sizeof *chip, GFP_KERNEL); + if (!chip) + return -ENOMEM; + + i2c_set_clientdata(client, chip); + chip->client = client; + + init_waitqueue_head(&chip->wait); + mutex_init(&chip->mutex); + chip->pdata = client->dev.platform_data; + + if (chip->pdata == NULL) { + dev_err(&client->dev, "platform data is mandatory\n"); + err = -EINVAL; + goto fail1; + } + + if (chip->pdata->cf.ga == 0) { + /* set uncovered sensor default parameters */ + chip->cf.ga = 1966; /* 0.48 * APDS_PARAM_SCALE */ + chip->cf.cf1 = 4096; /* 1.00 * APDS_PARAM_SCALE */ + chip->cf.irf1 = 9134; /* 2.23 * APDS_PARAM_SCALE */ + chip->cf.cf2 = 2867; /* 0.70 * APDS_PARAM_SCALE */ + chip->cf.irf2 = 5816; /* 1.42 * APDS_PARAM_SCALE */ + chip->cf.df = 52; + } else { + chip->cf = chip->pdata->cf; + } + + /* precalculate inverse chip factors for threshold control */ + chip->rcf.afactor = + (chip->cf.irf1 - chip->cf.irf2) * APDS_PARAM_SCALE / + (chip->cf.cf1 - chip->cf.cf2); + chip->rcf.cf1 = APDS_PARAM_SCALE * APDS_PARAM_SCALE / + chip->cf.cf1; + chip->rcf.irf1 = chip->cf.irf1 * APDS_PARAM_SCALE / + chip->cf.cf1; + chip->rcf.cf2 = APDS_PARAM_SCALE * APDS_PARAM_SCALE / + chip->cf.cf2; + chip->rcf.irf2 = chip->cf.irf2 * APDS_PARAM_SCALE / + chip->cf.cf2; + + /* Set something to start with */ + chip->lux_thres_hi = APDS_LUX_DEF_THRES_HI; + chip->lux_thres_lo = APDS_LUX_DEF_THRES_LO; + chip->lux_calib = APDS_LUX_NEUTRAL_CALIB_VALUE; + + chip->prox_thres = APDS_PROX_DEF_THRES; + chip->pdrive = chip->pdata->pdrive; + chip->pdiode = APDS_PDIODE_IR; + chip->pgain = APDS_PGAIN_1X; + chip->prox_calib = APDS_PROX_NEUTRAL_CALIB_VALUE; + chip->prox_persistence = APDS_DEFAULT_PROX_PERS; + chip->prox_continuous_mode = false; + + chip->regs[0].supply = reg_vcc; + chip->regs[1].supply = reg_vled; + + err = regulator_bulk_get(&client->dev, + ARRAY_SIZE(chip->regs), chip->regs); + if (err < 0) { + dev_err(&client->dev, "Cannot get regulators\n"); + goto fail1; + } + + err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), chip->regs); + if (err < 0) { + dev_err(&client->dev, "Cannot enable regulators\n"); + goto fail2; + } + + usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY); + + err = apds990x_detect(chip); + if (err < 0) { + dev_err(&client->dev, "APDS990X not found\n"); + goto fail3; + } + + pm_runtime_set_active(&client->dev); + + apds990x_configure(chip); + apds990x_set_arate(chip, APDS_LUX_DEFAULT_RATE); + apds990x_mode_on(chip); + + pm_runtime_enable(&client->dev); + + if (chip->pdata->setup_resources) { + err = chip->pdata->setup_resources(); + if (err) { + err = -EINVAL; + goto fail3; + } + } + + err = sysfs_create_group(&chip->client->dev.kobj, + apds990x_attribute_group); + if (err < 0) { + dev_err(&chip->client->dev, "Sysfs registration failed\n"); + goto fail4; + } + + err = request_threaded_irq(client->irq, NULL, + apds990x_irq, + IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW | + IRQF_ONESHOT, + "apds990x", chip); + if (err) { + dev_err(&client->dev, "could not get IRQ %d\n", + client->irq); + goto fail5; + } + return err; +fail5: + sysfs_remove_group(&chip->client->dev.kobj, + &apds990x_attribute_group[0]); +fail4: + if (chip->pdata && 
chip->pdata->release_resources) + chip->pdata->release_resources(); +fail3: + regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs); +fail2: + regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs); +fail1: + kfree(chip); + return err; +} + +static int apds990x_remove(struct i2c_client *client) +{ + struct apds990x_chip *chip = i2c_get_clientdata(client); + + free_irq(client->irq, chip); + sysfs_remove_group(&chip->client->dev.kobj, + apds990x_attribute_group); + + if (chip->pdata && chip->pdata->release_resources) + chip->pdata->release_resources(); + + if (!pm_runtime_suspended(&client->dev)) + apds990x_chip_off(chip); + + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + + regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs); + + kfree(chip); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int apds990x_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct apds990x_chip *chip = i2c_get_clientdata(client); + + apds990x_chip_off(chip); + return 0; +} + +static int apds990x_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct apds990x_chip *chip = i2c_get_clientdata(client); + + /* + * If we were enabled at suspend time, everything is expected + * to work smoothly again after resume. Chip_on is enough. + */ + apds990x_chip_on(chip); + + return 0; +} +#endif + +#ifdef CONFIG_PM +static int apds990x_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct apds990x_chip *chip = i2c_get_clientdata(client); + + apds990x_chip_off(chip); + return 0; +} + +static int apds990x_runtime_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct apds990x_chip *chip = i2c_get_clientdata(client); + + apds990x_chip_on(chip); + return 0; +} + +#endif + +static const struct i2c_device_id apds990x_id[] = { + {"apds990x", 0 }, + {} +}; + +MODULE_DEVICE_TABLE(i2c, apds990x_id); + +static const struct dev_pm_ops apds990x_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(apds990x_suspend, apds990x_resume) + SET_RUNTIME_PM_OPS(apds990x_runtime_suspend, + apds990x_runtime_resume, + NULL) +}; + +static struct i2c_driver apds990x_driver = { + .driver = { + .name = "apds990x", + .pm = &apds990x_pm_ops, + }, + .probe = apds990x_probe, + .remove = apds990x_remove, + .id_table = apds990x_id, +}; + +module_i2c_driver(apds990x_driver); + +MODULE_DESCRIPTION("APDS990X combined ALS and proximity sensor"); +MODULE_AUTHOR("Samu Onkalo, Nokia Corporation"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c new file mode 100644 index 000000000..69f9b0336 --- /dev/null +++ b/drivers/misc/atmel-ssc.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Atmel SSC driver + * + * Copyright (C) 2007 Atmel Corporation + */ + +#include <linux/platform_device.h> +#include <linux/list.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/mutex.h> +#include <linux/atmel-ssc.h> +#include <linux/slab.h> +#include <linux/module.h> + +#include <linux/of.h> + +#include "../../sound/soc/atmel/atmel_ssc_dai.h" + +/* Serialize access to ssc_list and user count */ +static DEFINE_MUTEX(user_lock); +static LIST_HEAD(ssc_list); + +struct ssc_device *ssc_request(unsigned int ssc_num) +{ + int ssc_valid = 0; + struct ssc_device *ssc; + + mutex_lock(&user_lock); + list_for_each_entry(ssc, &ssc_list, list) { + if (ssc->pdev->dev.of_node) { + if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc") + == ssc_num) { 
+ ssc->pdev->id = ssc_num; + ssc_valid = 1; + break; + } + } else if (ssc->pdev->id == ssc_num) { + ssc_valid = 1; + break; + } + } + + if (!ssc_valid) { + mutex_unlock(&user_lock); + pr_err("ssc: ssc%d platform device is missing\n", ssc_num); + return ERR_PTR(-ENODEV); + } + + if (ssc->user) { + mutex_unlock(&user_lock); + dev_dbg(&ssc->pdev->dev, "module busy\n"); + return ERR_PTR(-EBUSY); + } + ssc->user++; + mutex_unlock(&user_lock); + + clk_prepare(ssc->clk); + + return ssc; +} +EXPORT_SYMBOL(ssc_request); + +void ssc_free(struct ssc_device *ssc) +{ + bool disable_clk = true; + + mutex_lock(&user_lock); + if (ssc->user) + ssc->user--; + else { + disable_clk = false; + dev_dbg(&ssc->pdev->dev, "device already free\n"); + } + mutex_unlock(&user_lock); + + if (disable_clk) + clk_unprepare(ssc->clk); +} +EXPORT_SYMBOL(ssc_free); + +static struct atmel_ssc_platform_data at91rm9200_config = { + .use_dma = 0, + .has_fslen_ext = 0, +}; + +static struct atmel_ssc_platform_data at91sam9rl_config = { + .use_dma = 0, + .has_fslen_ext = 1, +}; + +static struct atmel_ssc_platform_data at91sam9g45_config = { + .use_dma = 1, + .has_fslen_ext = 1, +}; + +static const struct platform_device_id atmel_ssc_devtypes[] = { + { + .name = "at91rm9200_ssc", + .driver_data = (unsigned long) &at91rm9200_config, + }, { + .name = "at91sam9rl_ssc", + .driver_data = (unsigned long) &at91sam9rl_config, + }, { + .name = "at91sam9g45_ssc", + .driver_data = (unsigned long) &at91sam9g45_config, + }, { + /* sentinel */ + } +}; + +#ifdef CONFIG_OF +static const struct of_device_id atmel_ssc_dt_ids[] = { + { + .compatible = "atmel,at91rm9200-ssc", + .data = &at91rm9200_config, + }, { + .compatible = "atmel,at91sam9rl-ssc", + .data = &at91sam9rl_config, + }, { + .compatible = "atmel,at91sam9g45-ssc", + .data = &at91sam9g45_config, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids); +#endif + +static inline const struct atmel_ssc_platform_data * + atmel_ssc_get_driver_data(struct platform_device *pdev) +{ + if (pdev->dev.of_node) { + const struct of_device_id *match; + match = of_match_node(atmel_ssc_dt_ids, pdev->dev.of_node); + if (match == NULL) + return NULL; + return match->data; + } + + return (struct atmel_ssc_platform_data *) + platform_get_device_id(pdev)->driver_data; +} + +#ifdef CONFIG_SND_ATMEL_SOC_SSC +static int ssc_sound_dai_probe(struct ssc_device *ssc) +{ + struct device_node *np = ssc->pdev->dev.of_node; + int ret; + int id; + + ssc->sound_dai = false; + + if (!of_property_read_bool(np, "#sound-dai-cells")) + return 0; + + id = of_alias_get_id(np, "ssc"); + if (id < 0) + return id; + + ret = atmel_ssc_set_audio(id); + ssc->sound_dai = !ret; + + return ret; +} + +static void ssc_sound_dai_remove(struct ssc_device *ssc) +{ + if (!ssc->sound_dai) + return; + + atmel_ssc_put_audio(of_alias_get_id(ssc->pdev->dev.of_node, "ssc")); +} +#else +static inline int ssc_sound_dai_probe(struct ssc_device *ssc) +{ + if (of_property_read_bool(ssc->pdev->dev.of_node, "#sound-dai-cells")) + return -ENOTSUPP; + + return 0; +} + +static inline void ssc_sound_dai_remove(struct ssc_device *ssc) +{ +} +#endif + +static int ssc_probe(struct platform_device *pdev) +{ + struct resource *regs; + struct ssc_device *ssc; + const struct atmel_ssc_platform_data *plat_dat; + + ssc = devm_kzalloc(&pdev->dev, sizeof(struct ssc_device), GFP_KERNEL); + if (!ssc) { + dev_dbg(&pdev->dev, "out of memory\n"); + return -ENOMEM; + } + + ssc->pdev = pdev; + + plat_dat = atmel_ssc_get_driver_data(pdev); + if (!plat_dat) + 
return -ENODEV; + ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat; + + if (pdev->dev.of_node) { + struct device_node *np = pdev->dev.of_node; + ssc->clk_from_rk_pin = + of_property_read_bool(np, "atmel,clk-from-rk-pin"); + } + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ssc->regs = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(ssc->regs)) + return PTR_ERR(ssc->regs); + + ssc->phybase = regs->start; + + ssc->clk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(ssc->clk)) { + dev_dbg(&pdev->dev, "no pclk clock defined\n"); + return -ENXIO; + } + + /* disable all interrupts */ + clk_prepare_enable(ssc->clk); + ssc_writel(ssc->regs, IDR, -1); + ssc_readl(ssc->regs, SR); + clk_disable_unprepare(ssc->clk); + + ssc->irq = platform_get_irq(pdev, 0); + if (ssc->irq < 0) { + dev_dbg(&pdev->dev, "could not get irq\n"); + return ssc->irq; + } + + mutex_lock(&user_lock); + list_add_tail(&ssc->list, &ssc_list); + mutex_unlock(&user_lock); + + platform_set_drvdata(pdev, ssc); + + dev_info(&pdev->dev, "Atmel SSC device at 0x%p (irq %d)\n", + ssc->regs, ssc->irq); + + if (ssc_sound_dai_probe(ssc)) + dev_err(&pdev->dev, "failed to auto-setup ssc for audio\n"); + + return 0; +} + +static int ssc_remove(struct platform_device *pdev) +{ + struct ssc_device *ssc = platform_get_drvdata(pdev); + + ssc_sound_dai_remove(ssc); + + mutex_lock(&user_lock); + list_del(&ssc->list); + mutex_unlock(&user_lock); + + return 0; +} + +static struct platform_driver ssc_driver = { + .driver = { + .name = "ssc", + .of_match_table = of_match_ptr(atmel_ssc_dt_ids), + }, + .id_table = atmel_ssc_devtypes, + .probe = ssc_probe, + .remove = ssc_remove, +}; +module_platform_driver(ssc_driver); + +MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>"); +MODULE_DESCRIPTION("SSC driver for Atmel AVR32 and AT91"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ssc"); diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c new file mode 100644 index 000000000..7de7840f6 --- /dev/null +++ b/drivers/misc/atmel_tclib.c @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/export.h> +#include <linux/of.h> +#include <soc/at91/atmel_tcb.h> + +/* + * This is a thin library to solve the problem of how to portably allocate + * one of the TC blocks. For simplicity, it doesn't currently expect to + * share individual timers between different drivers. + */ + +#if defined(CONFIG_AVR32) +/* AVR32 has these divide PBB */ +const u8 atmel_tc_divisors[5] = { 0, 4, 8, 16, 32, }; +EXPORT_SYMBOL(atmel_tc_divisors); + +#elif defined(CONFIG_ARCH_AT91) +/* AT91 has these divide MCK */ +const u8 atmel_tc_divisors[5] = { 2, 8, 32, 128, 0, }; +EXPORT_SYMBOL(atmel_tc_divisors); + +#endif + +static DEFINE_SPINLOCK(tc_list_lock); +static LIST_HEAD(tc_list); + +/** + * atmel_tc_alloc - allocate a specified TC block + * @block: which block to allocate + * + * Caller allocates a block. If it is available, a pointer to a + * pre-initialized struct atmel_tc is returned. The caller can access + * the registers directly through the "regs" field. 
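+ * + * Returns NULL when the requested block does not exist or has already + * been allocated; a successful call is balanced with atmel_tc_free().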
+ */ +struct atmel_tc *atmel_tc_alloc(unsigned block) +{ + struct atmel_tc *tc; + struct platform_device *pdev = NULL; + + spin_lock(&tc_list_lock); + list_for_each_entry(tc, &tc_list, node) { + if (tc->allocated) + continue; + + if ((tc->pdev->dev.of_node && tc->id == block) || + (tc->pdev->id == block)) { + pdev = tc->pdev; + tc->allocated = true; + break; + } + } + spin_unlock(&tc_list_lock); + + return pdev ? tc : NULL; +} +EXPORT_SYMBOL_GPL(atmel_tc_alloc); + +/** + * atmel_tc_free - release a specified TC block + * @tc: Timer/counter block that was returned by atmel_tc_alloc() + * + * This reverses the effect of atmel_tc_alloc(), invalidating the resource + * returned by that routine and making the TC available to other drivers. + */ +void atmel_tc_free(struct atmel_tc *tc) +{ + spin_lock(&tc_list_lock); + if (tc->allocated) + tc->allocated = false; + spin_unlock(&tc_list_lock); +} +EXPORT_SYMBOL_GPL(atmel_tc_free); + +#if defined(CONFIG_OF) +static struct atmel_tcb_config tcb_rm9200_config = { + .counter_width = 16, +}; + +static struct atmel_tcb_config tcb_sam9x5_config = { + .counter_width = 32, +}; + +static const struct of_device_id atmel_tcb_dt_ids[] = { + { + .compatible = "atmel,at91rm9200-tcb", + .data = &tcb_rm9200_config, + }, { + .compatible = "atmel,at91sam9x5-tcb", + .data = &tcb_sam9x5_config, + }, { + /* sentinel */ + } +}; + +MODULE_DEVICE_TABLE(of, atmel_tcb_dt_ids); +#endif + +static int __init tc_probe(struct platform_device *pdev) +{ + struct atmel_tc *tc; + struct clk *clk; + int irq; + unsigned int i; + + if (of_get_child_count(pdev->dev.of_node)) + return -EBUSY; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + tc = devm_kzalloc(&pdev->dev, sizeof(struct atmel_tc), GFP_KERNEL); + if (!tc) + return -ENOMEM; + + tc->pdev = pdev; + + clk = devm_clk_get(&pdev->dev, "t0_clk"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + tc->slow_clk = devm_clk_get(&pdev->dev, "slow_clk"); + if (IS_ERR(tc->slow_clk)) + return PTR_ERR(tc->slow_clk); + + tc->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(tc->regs)) + return PTR_ERR(tc->regs); + + /* Now take SoC information if available */ + if (pdev->dev.of_node) { + const struct of_device_id *match; + match = of_match_node(atmel_tcb_dt_ids, pdev->dev.of_node); + if (match) + tc->tcb_config = match->data; + + tc->id = of_alias_get_id(tc->pdev->dev.of_node, "tcb"); + } else { + tc->id = pdev->id; + } + + tc->clk[0] = clk; + tc->clk[1] = devm_clk_get(&pdev->dev, "t1_clk"); + if (IS_ERR(tc->clk[1])) + tc->clk[1] = clk; + tc->clk[2] = devm_clk_get(&pdev->dev, "t2_clk"); + if (IS_ERR(tc->clk[2])) + tc->clk[2] = clk; + + tc->irq[0] = irq; + tc->irq[1] = platform_get_irq(pdev, 1); + if (tc->irq[1] < 0) + tc->irq[1] = irq; + tc->irq[2] = platform_get_irq(pdev, 2); + if (tc->irq[2] < 0) + tc->irq[2] = irq; + + for (i = 0; i < 3; i++) + writel(ATMEL_TC_ALL_IRQ, tc->regs + ATMEL_TC_REG(i, IDR)); + + spin_lock(&tc_list_lock); + list_add_tail(&tc->node, &tc_list); + spin_unlock(&tc_list_lock); + + platform_set_drvdata(pdev, tc); + + return 0; +} + +static void tc_shutdown(struct platform_device *pdev) +{ + int i; + struct atmel_tc *tc = platform_get_drvdata(pdev); + + for (i = 0; i < 3; i++) + writel(ATMEL_TC_ALL_IRQ, tc->regs + ATMEL_TC_REG(i, IDR)); +} + +static struct platform_driver tc_driver = { + .driver = { + .name = "atmel_tcb", + .of_match_table = of_match_ptr(atmel_tcb_dt_ids), + }, + .shutdown = tc_shutdown, +}; + +static int __init tc_init(void) +{ + return platform_driver_probe(&tc_driver, 
tc_probe); +} +arch_initcall(tc_init); diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c new file mode 100644 index 000000000..0581bb9ce --- /dev/null +++ b/drivers/misc/bh1770glc.c @@ -0,0 +1,1393 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver. + * The chip is a combined proximity and ambient light sensor. + * + * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). + * + * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/platform_data/bh1770glc.h> +#include <linux/regulator/consumer.h> +#include <linux/pm_runtime.h> +#include <linux/workqueue.h> +#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/slab.h> + +#define BH1770_ALS_CONTROL 0x80 /* ALS operation mode control */ +#define BH1770_PS_CONTROL 0x81 /* PS operation mode control */ +#define BH1770_I_LED 0x82 /* active LED and LED1, LED2 current */ +#define BH1770_I_LED3 0x83 /* LED3 current setting */ +#define BH1770_ALS_PS_MEAS 0x84 /* Forced mode trigger */ +#define BH1770_PS_MEAS_RATE 0x85 /* PS meas. rate at stand alone mode */ +#define BH1770_ALS_MEAS_RATE 0x86 /* ALS meas. rate at stand alone mode */ +#define BH1770_PART_ID 0x8a /* Part number and revision ID */ +#define BH1770_MANUFACT_ID 0x8b /* Manufacturer ID */ +#define BH1770_ALS_DATA_0 0x8c /* ALS DATA low byte */ +#define BH1770_ALS_DATA_1 0x8d /* ALS DATA high byte */ +#define BH1770_ALS_PS_STATUS 0x8e /* Measurement data and int status */ +#define BH1770_PS_DATA_LED1 0x8f /* PS data from LED1 */ +#define BH1770_PS_DATA_LED2 0x90 /* PS data from LED2 */ +#define BH1770_PS_DATA_LED3 0x91 /* PS data from LED3 */ +#define BH1770_INTERRUPT 0x92 /* Interrupt setting */ +#define BH1770_PS_TH_LED1 0x93 /* PS interrupt threshold for LED1 */ +#define BH1770_PS_TH_LED2 0x94 /* PS interrupt threshold for LED2 */ +#define BH1770_PS_TH_LED3 0x95 /* PS interrupt threshold for LED3 */ +#define BH1770_ALS_TH_UP_0 0x96 /* ALS upper threshold low byte */ +#define BH1770_ALS_TH_UP_1 0x97 /* ALS upper threshold high byte */ +#define BH1770_ALS_TH_LOW_0 0x98 /* ALS lower threshold low byte */ +#define BH1770_ALS_TH_LOW_1 0x99 /* ALS lower threshold high byte */ + +/* MANUFACT_ID */ +#define BH1770_MANUFACT_ROHM 0x01 +#define BH1770_MANUFACT_OSRAM 0x03 + +/* PART_ID */ +#define BH1770_PART 0x90 +#define BH1770_PART_MASK 0xf0 +#define BH1770_REV_MASK 0x0f +#define BH1770_REV_SHIFT 0 +#define BH1770_REV_0 0x00 +#define BH1770_REV_1 0x01 + +/* Operating modes for both */ +#define BH1770_STANDBY 0x00 +#define BH1770_FORCED 0x02 +#define BH1770_STANDALONE 0x03 +#define BH1770_SWRESET (0x01 << 2) + +#define BH1770_PS_TRIG_MEAS (1 << 0) +#define BH1770_ALS_TRIG_MEAS (1 << 1) + +/* Interrupt control */ +#define BH1770_INT_OUTPUT_MODE (1 << 3) /* 0 = latched */ +#define BH1770_INT_POLARITY (1 << 2) /* 1 = active high */ +#define BH1770_INT_ALS_ENA (1 << 1) +#define BH1770_INT_PS_ENA (1 << 0) + +/* Interrupt status */ +#define BH1770_INT_LED1_DATA (1 << 0) +#define BH1770_INT_LED1_INT (1 << 1) +#define BH1770_INT_LED2_DATA (1 << 2) +#define BH1770_INT_LED2_INT (1 << 3) +#define BH1770_INT_LED3_DATA (1 << 4) +#define BH1770_INT_LED3_INT (1 << 5) +#define BH1770_INT_LEDS_INT ((1 << 1) | (1 << 3) | (1 << 5)) +#define BH1770_INT_ALS_DATA (1 << 6) +#define BH1770_INT_ALS_INT (1 << 7) + +/* Led channels */ +#define BH1770_LED1 0x00 + 
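+/* Note: only LED1 is actually used by this driver; bh1770_led_cfg() + * selects it and the threshold/data accesses below touch the LED1 + * registers only. */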
+#define BH1770_DISABLE 0 +#define BH1770_ENABLE 1 +#define BH1770_PROX_CHANNELS 1 + +#define BH1770_LUX_DEFAULT_RATE 1 /* Index to lux rate table */ +#define BH1770_PROX_DEFAULT_RATE 1 /* Direct HW value =~ 50Hz */ +#define BH1770_PROX_DEF_RATE_THRESH 6 /* Direct HW value =~ 5 Hz */ +#define BH1770_STARTUP_DELAY 50 +#define BH1770_RESET_TIME 10 +#define BH1770_TIMEOUT 2100 /* Timeout in 2.1 seconds */ + +#define BH1770_LUX_RANGE 65535 +#define BH1770_PROX_RANGE 255 +#define BH1770_COEF_SCALER 1024 +#define BH1770_CALIB_SCALER 8192 +#define BH1770_LUX_NEUTRAL_CALIB_VALUE (1 * BH1770_CALIB_SCALER) +#define BH1770_LUX_DEF_THRES 1000 +#define BH1770_PROX_DEF_THRES 70 +#define BH1770_PROX_DEF_ABS_THRES 100 +#define BH1770_DEFAULT_PERSISTENCE 10 +#define BH1770_PROX_MAX_PERSISTENCE 50 +#define BH1770_LUX_GA_SCALE 16384 +#define BH1770_LUX_CF_SCALE 2048 /* CF ChipFactor */ +#define BH1770_NEUTRAL_CF BH1770_LUX_CF_SCALE +#define BH1770_LUX_CORR_SCALE 4096 + +#define PROX_ABOVE_THRESHOLD 1 +#define PROX_BELOW_THRESHOLD 0 + +#define PROX_IGNORE_LUX_LIMIT 500 + +struct bh1770_chip { + struct bh1770_platform_data *pdata; + char chipname[10]; + u8 revision; + struct i2c_client *client; + struct regulator_bulk_data regs[2]; + struct mutex mutex; /* avoid parallel access */ + wait_queue_head_t wait; + + bool int_mode_prox; + bool int_mode_lux; + struct delayed_work prox_work; + u32 lux_cf; /* Chip specific factor */ + u32 lux_ga; + u32 lux_calib; + int lux_rate_index; + u32 lux_corr; + u16 lux_data_raw; + u16 lux_threshold_hi; + u16 lux_threshold_lo; + u16 lux_thres_hi_onchip; + u16 lux_thres_lo_onchip; + bool lux_wait_result; + + int prox_enable_count; + u16 prox_coef; + u16 prox_const; + int prox_rate; + int prox_rate_threshold; + u8 prox_persistence; + u8 prox_persistence_counter; + u8 prox_data; + u8 prox_threshold; + u8 prox_threshold_hw; + bool prox_force_update; + u8 prox_abs_thres; + u8 prox_led; +}; + +static const char reg_vcc[] = "Vcc"; +static const char reg_vleds[] = "Vleds"; + +/* + * Supported stand alone rates in ms from chip data sheet + * {10, 20, 30, 40, 70, 100, 200, 500, 1000, 2000}; + */ +static const s16 prox_rates_hz[] = {100, 50, 33, 25, 14, 10, 5, 2}; +static const s16 prox_rates_ms[] = {10, 20, 30, 40, 70, 100, 200, 500}; + +/* + * Supported stand alone rates in ms from chip data sheet + * {100, 200, 500, 1000, 2000}; + */ +static const s16 lux_rates_hz[] = {10, 5, 2, 1, 0}; + +/* + * interrupt control functions are called while keeping chip->mutex + * excluding module probe / remove + */ +static inline int bh1770_lux_interrupt_control(struct bh1770_chip *chip, + int lux) +{ + chip->int_mode_lux = lux; + /* Set interrupt modes, interrupt active low, latched */ + return i2c_smbus_write_byte_data(chip->client, + BH1770_INTERRUPT, + (lux << 1) | chip->int_mode_prox); +} + +static inline int bh1770_prox_interrupt_control(struct bh1770_chip *chip, + int ps) +{ + chip->int_mode_prox = ps; + return i2c_smbus_write_byte_data(chip->client, + BH1770_INTERRUPT, + (chip->int_mode_lux << 1) | (ps << 0)); +} + +/* chip->mutex is always kept here */ +static int bh1770_lux_rate(struct bh1770_chip *chip, int rate_index) +{ + /* sysfs may call this when the chip is powered off */ + if (pm_runtime_suspended(&chip->client->dev)) + return 0; + + /* Proper proximity response needs fastest lux rate (100ms) */ + if (chip->prox_enable_count) + rate_index = 0; + + return i2c_smbus_write_byte_data(chip->client, + BH1770_ALS_MEAS_RATE, + rate_index); +} + +static int bh1770_prox_rate(struct bh1770_chip 
*chip, int mode) +{ + int rate; + + rate = (mode == PROX_ABOVE_THRESHOLD) ? + chip->prox_rate_threshold : chip->prox_rate; + + return i2c_smbus_write_byte_data(chip->client, + BH1770_PS_MEAS_RATE, + rate); +} + +/* The infrared LED is controlled by the chip during proximity scanning */ +static inline int bh1770_led_cfg(struct bh1770_chip *chip) +{ + /* LED cfg, current for leds 1 and 2 */ + return i2c_smbus_write_byte_data(chip->client, + BH1770_I_LED, + (BH1770_LED1 << 6) | + (BH1770_LED_5mA << 3) | + chip->prox_led); +} + +/* + * The following two functions convert raw PS values from the HW to + * normalized values. The purpose is to compensate for differences between + * sensor versions and variants so that the result means about the same + * between versions. + */ +static inline u8 bh1770_psraw_to_adjusted(struct bh1770_chip *chip, u8 psraw) +{ + u16 adjusted; + adjusted = (u16)(((u32)(psraw + chip->prox_const) * chip->prox_coef) / + BH1770_COEF_SCALER); + if (adjusted > BH1770_PROX_RANGE) + adjusted = BH1770_PROX_RANGE; + return adjusted; +} + +static inline u8 bh1770_psadjusted_to_raw(struct bh1770_chip *chip, u8 ps) +{ + u16 raw; + + raw = (((u32)ps * BH1770_COEF_SCALER) / chip->prox_coef); + if (raw > chip->prox_const) + raw = raw - chip->prox_const; + else + raw = 0; + return raw; +} + +/* + * The following two functions convert raw lux values from the HW to + * normalized values. The purpose is to compensate for differences between + * sensor versions and variants so that the result means about the same + * between versions. Chip->mutex is kept when this is called. + */ +static int bh1770_prox_set_threshold(struct bh1770_chip *chip) +{ + u8 tmp = 0; + + /* sysfs may call this when the chip is powered off */ + if (pm_runtime_suspended(&chip->client->dev)) + return 0; + + tmp = bh1770_psadjusted_to_raw(chip, chip->prox_threshold); + chip->prox_threshold_hw = tmp; + + return i2c_smbus_write_byte_data(chip->client, BH1770_PS_TH_LED1, + tmp); +} + +static inline u16 bh1770_lux_raw_to_adjusted(struct bh1770_chip *chip, u16 raw) +{ + u32 lux; + lux = ((u32)raw * chip->lux_corr) / BH1770_LUX_CORR_SCALE; + return min(lux, (u32)BH1770_LUX_RANGE); +} + +static inline u16 bh1770_lux_adjusted_to_raw(struct bh1770_chip *chip, + u16 adjusted) +{ + return (u32)adjusted * BH1770_LUX_CORR_SCALE / chip->lux_corr; +} + +/* chip->mutex is kept when this is called */ +static int bh1770_lux_update_thresholds(struct bh1770_chip *chip, + u16 threshold_hi, u16 threshold_lo) +{ + u8 data[4]; + int ret; + + /* sysfs may call this when the chip is powered off */ + if (pm_runtime_suspended(&chip->client->dev)) + return 0; + + /* + * Compensate threshold values with the correction factors if not + * set to minimum or maximum. + * Min & max values disable interrupts. 
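+ * (0 and BH1770_LUX_RANGE are passed through untranslated so that they + * keep their interrupt-disabling meaning on the chip.)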
+ */ + if (threshold_hi != BH1770_LUX_RANGE && threshold_hi != 0) + threshold_hi = bh1770_lux_adjusted_to_raw(chip, threshold_hi); + + if (threshold_lo != BH1770_LUX_RANGE && threshold_lo != 0) + threshold_lo = bh1770_lux_adjusted_to_raw(chip, threshold_lo); + + if (chip->lux_thres_hi_onchip == threshold_hi && + chip->lux_thres_lo_onchip == threshold_lo) + return 0; + + chip->lux_thres_hi_onchip = threshold_hi; + chip->lux_thres_lo_onchip = threshold_lo; + + data[0] = threshold_hi; + data[1] = threshold_hi >> 8; + data[2] = threshold_lo; + data[3] = threshold_lo >> 8; + + ret = i2c_smbus_write_i2c_block_data(chip->client, + BH1770_ALS_TH_UP_0, + ARRAY_SIZE(data), + data); + return ret; +} + +static int bh1770_lux_get_result(struct bh1770_chip *chip) +{ + u16 data; + int ret; + + ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_0); + if (ret < 0) + return ret; + + data = ret & 0xff; + ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_1); + if (ret < 0) + return ret; + + chip->lux_data_raw = data | ((ret & 0xff) << 8); + + return 0; +} + +/* Calculate correction value which contains chip and device specific parts */ +static u32 bh1770_get_corr_value(struct bh1770_chip *chip) +{ + u32 tmp; + /* Impact of glass attenuation correction */ + tmp = (BH1770_LUX_CORR_SCALE * chip->lux_ga) / BH1770_LUX_GA_SCALE; + /* Impact of chip factor correction */ + tmp = (tmp * chip->lux_cf) / BH1770_LUX_CF_SCALE; + /* Impact of Device specific calibration correction */ + tmp = (tmp * chip->lux_calib) / BH1770_CALIB_SCALER; + return tmp; +} + +static int bh1770_lux_read_result(struct bh1770_chip *chip) +{ + bh1770_lux_get_result(chip); + return bh1770_lux_raw_to_adjusted(chip, chip->lux_data_raw); +} + +/* + * Chip on / off functions are called while holding the mutex, except + * during the probe and remove phases + */ +static int bh1770_chip_on(struct bh1770_chip *chip) +{ + int ret = regulator_bulk_enable(ARRAY_SIZE(chip->regs), + chip->regs); + if (ret < 0) + return ret; + + usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2); + + /* Reset the chip */ + i2c_smbus_write_byte_data(chip->client, BH1770_ALS_CONTROL, + BH1770_SWRESET); + usleep_range(BH1770_RESET_TIME, BH1770_RESET_TIME * 2); + + /* + * ALS is always started since proximity needs ALS results + * for reliability estimation. + * Let's assume dark until the first ALS measurement is ready. 
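+ * (See bh1770_prox_read_result(): proximity results are ignored while + * lux_data_raw is above PROX_IGNORE_LUX_LIMIT.)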
+ */ + chip->lux_data_raw = 0; + chip->prox_data = 0; + ret = i2c_smbus_write_byte_data(chip->client, + BH1770_ALS_CONTROL, BH1770_STANDALONE); + + /* Assume reset defaults */ + chip->lux_thres_hi_onchip = BH1770_LUX_RANGE; + chip->lux_thres_lo_onchip = 0; + + return ret; +} + +static void bh1770_chip_off(struct bh1770_chip *chip) +{ + i2c_smbus_write_byte_data(chip->client, + BH1770_INTERRUPT, BH1770_DISABLE); + i2c_smbus_write_byte_data(chip->client, + BH1770_ALS_CONTROL, BH1770_STANDBY); + i2c_smbus_write_byte_data(chip->client, + BH1770_PS_CONTROL, BH1770_STANDBY); + regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs); +} + +/* chip->mutex is kept when this is called */ +static int bh1770_prox_mode_control(struct bh1770_chip *chip) +{ + if (chip->prox_enable_count) { + chip->prox_force_update = true; /* Force immediate update */ + + bh1770_lux_rate(chip, chip->lux_rate_index); + bh1770_prox_set_threshold(chip); + bh1770_led_cfg(chip); + bh1770_prox_rate(chip, PROX_BELOW_THRESHOLD); + bh1770_prox_interrupt_control(chip, BH1770_ENABLE); + i2c_smbus_write_byte_data(chip->client, + BH1770_PS_CONTROL, BH1770_STANDALONE); + } else { + chip->prox_data = 0; + bh1770_lux_rate(chip, chip->lux_rate_index); + bh1770_prox_interrupt_control(chip, BH1770_DISABLE); + i2c_smbus_write_byte_data(chip->client, + BH1770_PS_CONTROL, BH1770_STANDBY); + } + return 0; +} + +/* chip->mutex is kept when this is called */ +static int bh1770_prox_read_result(struct bh1770_chip *chip) +{ + int ret; + bool above; + u8 mode; + + ret = i2c_smbus_read_byte_data(chip->client, BH1770_PS_DATA_LED1); + if (ret < 0) + goto out; + + if (ret > chip->prox_threshold_hw) + above = true; + else + above = false; + + /* + * When ALS levels go above the limit, the proximity result may be a + * false proximity. Thus ignore the result. With real proximity + * there is a shadow causing low ALS levels. 
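+ * (I.e. an object that is genuinely close shades the sensor and keeps + * the ALS level low, so a high PS reading combined with high lux is + * treated as noise.)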
+ */ + if (chip->lux_data_raw > PROX_IGNORE_LUX_LIMIT) + ret = 0; + + chip->prox_data = bh1770_psraw_to_adjusted(chip, ret); + + /* Strong proximity level or force mode requires immediate response */ + if (chip->prox_data >= chip->prox_abs_thres || + chip->prox_force_update) + chip->prox_persistence_counter = chip->prox_persistence; + + chip->prox_force_update = false; + + /* Persistence filtering to reduce false proximity events */ + if (likely(above)) { + if (chip->prox_persistence_counter < chip->prox_persistence) { + chip->prox_persistence_counter++; + ret = -ENODATA; + } else { + mode = PROX_ABOVE_THRESHOLD; + ret = 0; + } + } else { + chip->prox_persistence_counter = 0; + mode = PROX_BELOW_THRESHOLD; + chip->prox_data = 0; + ret = 0; + } + + /* Set proximity detection rate based on above or below value */ + if (ret == 0) { + bh1770_prox_rate(chip, mode); + sysfs_notify(&chip->client->dev.kobj, NULL, "prox0_raw"); + } +out: + return ret; +} + +static int bh1770_detect(struct bh1770_chip *chip) +{ + struct i2c_client *client = chip->client; + s32 ret; + u8 manu, part; + + ret = i2c_smbus_read_byte_data(client, BH1770_MANUFACT_ID); + if (ret < 0) + goto error; + manu = (u8)ret; + + ret = i2c_smbus_read_byte_data(client, BH1770_PART_ID); + if (ret < 0) + goto error; + part = (u8)ret; + + chip->revision = (part & BH1770_REV_MASK) >> BH1770_REV_SHIFT; + chip->prox_coef = BH1770_COEF_SCALER; + chip->prox_const = 0; + chip->lux_cf = BH1770_NEUTRAL_CF; + + if ((manu == BH1770_MANUFACT_ROHM) && + ((part & BH1770_PART_MASK) == BH1770_PART)) { + snprintf(chip->chipname, sizeof(chip->chipname), "BH1770GLC"); + return 0; + } + + if ((manu == BH1770_MANUFACT_OSRAM) && + ((part & BH1770_PART_MASK) == BH1770_PART)) { + snprintf(chip->chipname, sizeof(chip->chipname), "SFH7770"); + /* Values selected by comparing different versions */ + chip->prox_coef = 819; /* 0.8 * BH1770_COEF_SCALER */ + chip->prox_const = 40; + return 0; + } + + ret = -ENODEV; +error: + dev_dbg(&client->dev, "BH1770 or SFH7770 not found\n"); + + return ret; +} + +/* + * This work is re-scheduled at every proximity interrupt. + * If this work is running, it means that there hasn't been any + * proximity interrupt in time. The situation is handled as no-proximity. + * It would be nice to have a low-threshold interrupt, or an interrupt + * when measurement and hi-threshold are both 0, but neither of those exists. + * This is a workaround for a missing HW feature. + */ + +static void bh1770_prox_work(struct work_struct *work) +{ + struct bh1770_chip *chip = + container_of(work, struct bh1770_chip, prox_work.work); + + mutex_lock(&chip->mutex); + bh1770_prox_read_result(chip); + mutex_unlock(&chip->mutex); +} + +/* This is the threaded irq handler */ +static irqreturn_t bh1770_irq(int irq, void *data) +{ + struct bh1770_chip *chip = data; + int status; + int rate = 0; + + mutex_lock(&chip->mutex); + status = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_PS_STATUS); + + /* Acknowledge interrupt by reading this register */ + i2c_smbus_read_byte_data(chip->client, BH1770_INTERRUPT); + + /* + * Check if there is fresh data available for ALS. + * If this is the very first data, update thresholds after that. 
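+ * (The first result completes the power-up sequence started in + * bh1770_power_state_store(): waiters on chip->wait are woken up and + * the user-configured thresholds replace the temporary power-on ones.)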
+ */ + if (status & BH1770_INT_ALS_DATA) { + bh1770_lux_get_result(chip); + if (unlikely(chip->lux_wait_result)) { + chip->lux_wait_result = false; + wake_up(&chip->wait); + bh1770_lux_update_thresholds(chip, + chip->lux_threshold_hi, + chip->lux_threshold_lo); + } + } + + /* Disable interrupt logic to guarantee acknowledgement */ + i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT, + (0 << 1) | (0 << 0)); + + if ((status & BH1770_INT_ALS_INT)) + sysfs_notify(&chip->client->dev.kobj, NULL, "lux0_input"); + + if (chip->int_mode_prox && (status & BH1770_INT_LEDS_INT)) { + rate = prox_rates_ms[chip->prox_rate_threshold]; + bh1770_prox_read_result(chip); + } + + /* Re-enable interrupt logic */ + i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT, + (chip->int_mode_lux << 1) | + (chip->int_mode_prox << 0)); + mutex_unlock(&chip->mutex); + + /* + * Can't cancel work while keeping mutex since the work uses the + * same mutex. + */ + if (rate) { + /* + * Simulate missing no-proximity interrupt 50ms after the + * next expected interrupt time. + */ + cancel_delayed_work_sync(&chip->prox_work); + schedule_delayed_work(&chip->prox_work, + msecs_to_jiffies(rate + 50)); + } + return IRQ_HANDLED; +} + +static ssize_t bh1770_power_state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + ssize_t ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&chip->mutex); + if (value) { + pm_runtime_get_sync(dev); + + ret = bh1770_lux_rate(chip, chip->lux_rate_index); + if (ret < 0) { + pm_runtime_put(dev); + goto leave; + } + + ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE); + if (ret < 0) { + pm_runtime_put(dev); + goto leave; + } + + /* This causes interrupt after the next measurement cycle */ + bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES, + BH1770_LUX_DEF_THRES); + /* Inform that we are waiting for a result from ALS */ + chip->lux_wait_result = true; + bh1770_prox_mode_control(chip); + } else if (!pm_runtime_suspended(dev)) { + pm_runtime_put(dev); + } + ret = count; +leave: + mutex_unlock(&chip->mutex); + return ret; +} + +static ssize_t bh1770_power_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", !pm_runtime_suspended(dev)); +} + +static ssize_t bh1770_lux_result_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + ssize_t ret; + long timeout; + + if (pm_runtime_suspended(dev)) + return -EIO; /* Chip is not enabled at all */ + + timeout = wait_event_interruptible_timeout(chip->wait, + !chip->lux_wait_result, + msecs_to_jiffies(BH1770_TIMEOUT)); + if (!timeout) + return -EIO; + + mutex_lock(&chip->mutex); + ret = sprintf(buf, "%d\n", bh1770_lux_read_result(chip)); + mutex_unlock(&chip->mutex); + + return ret; +} + +static ssize_t bh1770_lux_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", BH1770_LUX_RANGE); +} + +static ssize_t bh1770_prox_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&chip->mutex); + /* Assume no proximity. 
Sensor will tell real state soon */ + if (!chip->prox_enable_count) + chip->prox_data = 0; + + if (value) + chip->prox_enable_count++; + else if (chip->prox_enable_count > 0) + chip->prox_enable_count--; + else + goto leave; + + /* Run control only when chip is powered on */ + if (!pm_runtime_suspended(dev)) + bh1770_prox_mode_control(chip); +leave: + mutex_unlock(&chip->mutex); + return count; +} + +static ssize_t bh1770_prox_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + ssize_t len; + + mutex_lock(&chip->mutex); + len = sprintf(buf, "%d\n", chip->prox_enable_count); + mutex_unlock(&chip->mutex); + return len; +} + +static ssize_t bh1770_prox_result_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + ssize_t ret; + + mutex_lock(&chip->mutex); + if (chip->prox_enable_count && !pm_runtime_suspended(dev)) + ret = sprintf(buf, "%d\n", chip->prox_data); + else + ret = -EIO; + mutex_unlock(&chip->mutex); + return ret; +} + +static ssize_t bh1770_prox_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", BH1770_PROX_RANGE); +} + +static ssize_t bh1770_get_prox_rate_avail(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i; + int pos = 0; + for (i = 0; i < ARRAY_SIZE(prox_rates_hz); i++) + pos += sprintf(buf + pos, "%d ", prox_rates_hz[i]); + sprintf(buf + pos - 1, "\n"); + return pos; +} + +static ssize_t bh1770_get_prox_rate_above(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate_threshold]); +} + +static ssize_t bh1770_get_prox_rate_below(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate]); +} + +static int bh1770_prox_rate_validate(int rate) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(prox_rates_hz) - 1; i++) + if (rate >= prox_rates_hz[i]) + break; + return i; +} + +static ssize_t bh1770_set_prox_rate_above(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&chip->mutex); + chip->prox_rate_threshold = bh1770_prox_rate_validate(value); + mutex_unlock(&chip->mutex); + return count; +} + +static ssize_t bh1770_set_prox_rate_below(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&chip->mutex); + chip->prox_rate = bh1770_prox_rate_validate(value); + mutex_unlock(&chip->mutex); + return count; +} + +static ssize_t bh1770_get_prox_thres(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", chip->prox_threshold); +} + +static ssize_t bh1770_set_prox_thres(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + if (value > BH1770_PROX_RANGE) + return 
-EINVAL; + + mutex_lock(&chip->mutex); + chip->prox_threshold = value; + ret = bh1770_prox_set_threshold(chip); + mutex_unlock(&chip->mutex); + if (ret < 0) + return ret; + return count; +} + +static ssize_t bh1770_prox_persistence_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", chip->prox_persistence); +} + +static ssize_t bh1770_prox_persistence_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + if (value > BH1770_PROX_MAX_PERSISTENCE) + return -EINVAL; + + chip->prox_persistence = value; + + return len; +} + +static ssize_t bh1770_prox_abs_thres_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", chip->prox_abs_thres); +} + +static ssize_t bh1770_prox_abs_thres_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + if (value > BH1770_PROX_RANGE) + return -EINVAL; + + chip->prox_abs_thres = value; + + return len; +} + +static ssize_t bh1770_chip_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%s rev %d\n", chip->chipname, chip->revision); +} + +static ssize_t bh1770_lux_calib_default_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", BH1770_CALIB_SCALER); +} + +static ssize_t bh1770_lux_calib_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + ssize_t len; + + mutex_lock(&chip->mutex); + len = sprintf(buf, "%u\n", chip->lux_calib); + mutex_unlock(&chip->mutex); + return len; +} + +static ssize_t bh1770_lux_calib_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + u32 old_calib; + u32 new_corr; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&chip->mutex); + old_calib = chip->lux_calib; + chip->lux_calib = value; + new_corr = bh1770_get_corr_value(chip); + if (new_corr == 0) { + chip->lux_calib = old_calib; + mutex_unlock(&chip->mutex); + return -EINVAL; + } + chip->lux_corr = new_corr; + /* Refresh thresholds on HW after changing correction value */ + bh1770_lux_update_thresholds(chip, chip->lux_threshold_hi, + chip->lux_threshold_lo); + + mutex_unlock(&chip->mutex); + + return len; +} + +static ssize_t bh1770_get_lux_rate_avail(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i; + int pos = 0; + for (i = 0; i < ARRAY_SIZE(lux_rates_hz); i++) + pos += sprintf(buf + pos, "%d ", lux_rates_hz[i]); + sprintf(buf + pos - 1, "\n"); + return pos; +} + +static ssize_t bh1770_get_lux_rate(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", lux_rates_hz[chip->lux_rate_index]); +} + +static ssize_t bh1770_set_lux_rate(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bh1770_chip 
*chip = dev_get_drvdata(dev); + unsigned long rate_hz; + int ret, i; + + ret = kstrtoul(buf, 0, &rate_hz); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(lux_rates_hz) - 1; i++) + if (rate_hz >= lux_rates_hz[i]) + break; + + mutex_lock(&chip->mutex); + chip->lux_rate_index = i; + ret = bh1770_lux_rate(chip, i); + mutex_unlock(&chip->mutex); + + if (ret < 0) + return ret; + + return count; +} + +static ssize_t bh1770_get_lux_thresh_above(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", chip->lux_threshold_hi); +} + +static ssize_t bh1770_get_lux_thresh_below(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", chip->lux_threshold_lo); +} + +static ssize_t bh1770_set_lux_thresh(struct bh1770_chip *chip, u16 *target, + const char *buf) +{ + unsigned long thresh; + int ret; + + ret = kstrtoul(buf, 0, &thresh); + if (ret) + return ret; + + if (thresh > BH1770_LUX_RANGE) + return -EINVAL; + + mutex_lock(&chip->mutex); + *target = thresh; + /* + * Don't update values in HW if we are still waiting for + * first interrupt to come after device handle open call. + */ + if (!chip->lux_wait_result) + ret = bh1770_lux_update_thresholds(chip, + chip->lux_threshold_hi, + chip->lux_threshold_lo); + mutex_unlock(&chip->mutex); + return ret; + +} + +static ssize_t bh1770_set_lux_thresh_above(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_hi, buf); + if (ret < 0) + return ret; + return len; +} + +static ssize_t bh1770_set_lux_thresh_below(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_lo, buf); + if (ret < 0) + return ret; + return len; +} + +static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, bh1770_prox_enable_show, + bh1770_prox_enable_store); +static DEVICE_ATTR(prox0_thresh_above1_value, S_IRUGO | S_IWUSR, + bh1770_prox_abs_thres_show, + bh1770_prox_abs_thres_store); +static DEVICE_ATTR(prox0_thresh_above0_value, S_IRUGO | S_IWUSR, + bh1770_get_prox_thres, + bh1770_set_prox_thres); +static DEVICE_ATTR(prox0_raw, S_IRUGO, bh1770_prox_result_show, NULL); +static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, bh1770_prox_range_show, NULL); +static DEVICE_ATTR(prox0_thresh_above_count, S_IRUGO | S_IWUSR, + bh1770_prox_persistence_show, + bh1770_prox_persistence_store); +static DEVICE_ATTR(prox0_rate_above, S_IRUGO | S_IWUSR, + bh1770_get_prox_rate_above, + bh1770_set_prox_rate_above); +static DEVICE_ATTR(prox0_rate_below, S_IRUGO | S_IWUSR, + bh1770_get_prox_rate_below, + bh1770_set_prox_rate_below); +static DEVICE_ATTR(prox0_rate_avail, S_IRUGO, bh1770_get_prox_rate_avail, NULL); + +static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, bh1770_lux_calib_show, + bh1770_lux_calib_store); +static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO, + bh1770_lux_calib_default_show, + NULL); +static DEVICE_ATTR(lux0_input, S_IRUGO, bh1770_lux_result_show, NULL); +static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, bh1770_lux_range_show, NULL); +static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, bh1770_get_lux_rate, + bh1770_set_lux_rate); +static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, bh1770_get_lux_rate_avail, NULL); +static 
DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR, + bh1770_get_lux_thresh_above, + bh1770_set_lux_thresh_above); +static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR, + bh1770_get_lux_thresh_below, + bh1770_set_lux_thresh_below); +static DEVICE_ATTR(chip_id, S_IRUGO, bh1770_chip_id_show, NULL); +static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, bh1770_power_state_show, + bh1770_power_state_store); + + +static struct attribute *sysfs_attrs[] = { + &dev_attr_lux0_calibscale.attr, + &dev_attr_lux0_calibscale_default.attr, + &dev_attr_lux0_input.attr, + &dev_attr_lux0_sensor_range.attr, + &dev_attr_lux0_rate.attr, + &dev_attr_lux0_rate_avail.attr, + &dev_attr_lux0_thresh_above_value.attr, + &dev_attr_lux0_thresh_below_value.attr, + &dev_attr_prox0_raw.attr, + &dev_attr_prox0_sensor_range.attr, + &dev_attr_prox0_raw_en.attr, + &dev_attr_prox0_thresh_above_count.attr, + &dev_attr_prox0_rate_above.attr, + &dev_attr_prox0_rate_below.attr, + &dev_attr_prox0_rate_avail.attr, + &dev_attr_prox0_thresh_above0_value.attr, + &dev_attr_prox0_thresh_above1_value.attr, + &dev_attr_chip_id.attr, + &dev_attr_power_state.attr, + NULL +}; + +static const struct attribute_group bh1770_attribute_group = { + .attrs = sysfs_attrs +}; + +static int bh1770_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct bh1770_chip *chip; + int err; + + chip = devm_kzalloc(&client->dev, sizeof *chip, GFP_KERNEL); + if (!chip) + return -ENOMEM; + + i2c_set_clientdata(client, chip); + chip->client = client; + + mutex_init(&chip->mutex); + init_waitqueue_head(&chip->wait); + INIT_DELAYED_WORK(&chip->prox_work, bh1770_prox_work); + + if (client->dev.platform_data == NULL) { + dev_err(&client->dev, "platform data is mandatory\n"); + return -EINVAL; + } + + chip->pdata = client->dev.platform_data; + chip->lux_calib = BH1770_LUX_NEUTRAL_CALIB_VALUE; + chip->lux_rate_index = BH1770_LUX_DEFAULT_RATE; + chip->lux_threshold_lo = BH1770_LUX_DEF_THRES; + chip->lux_threshold_hi = BH1770_LUX_DEF_THRES; + + if (chip->pdata->glass_attenuation == 0) + chip->lux_ga = BH1770_NEUTRAL_GA; + else + chip->lux_ga = chip->pdata->glass_attenuation; + + chip->prox_threshold = BH1770_PROX_DEF_THRES; + chip->prox_led = chip->pdata->led_def_curr; + chip->prox_abs_thres = BH1770_PROX_DEF_ABS_THRES; + chip->prox_persistence = BH1770_DEFAULT_PERSISTENCE; + chip->prox_rate_threshold = BH1770_PROX_DEF_RATE_THRESH; + chip->prox_rate = BH1770_PROX_DEFAULT_RATE; + chip->prox_data = 0; + + chip->regs[0].supply = reg_vcc; + chip->regs[1].supply = reg_vleds; + + err = devm_regulator_bulk_get(&client->dev, + ARRAY_SIZE(chip->regs), chip->regs); + if (err < 0) { + dev_err(&client->dev, "Cannot get regulators\n"); + return err; + } + + err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), + chip->regs); + if (err < 0) { + dev_err(&client->dev, "Cannot enable regulators\n"); + return err; + } + + usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2); + err = bh1770_detect(chip); + if (err < 0) + goto fail0; + + /* Start chip */ + bh1770_chip_on(chip); + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + + chip->lux_corr = bh1770_get_corr_value(chip); + if (chip->lux_corr == 0) { + dev_err(&client->dev, "Improper correction values\n"); + err = -EINVAL; + goto fail0; + } + + if (chip->pdata->setup_resources) { + err = chip->pdata->setup_resources(); + if (err) { + err = -EINVAL; + goto fail0; + } + } + + err = sysfs_create_group(&chip->client->dev.kobj, + &bh1770_attribute_group); + if (err < 0) { + 
dev_err(&chip->client->dev, "Sysfs registration failed\n"); + goto fail1; + } + + /* + * The chip needs a level triggered interrupt to work. However, + * level triggering doesn't always work correctly with power + * management. Select both trigger types. + */ + err = request_threaded_irq(client->irq, NULL, + bh1770_irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT | + IRQF_TRIGGER_LOW, + "bh1770", chip); + if (err) { + dev_err(&client->dev, "could not get IRQ %d\n", + client->irq); + goto fail2; + } + regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs); + return err; +fail2: + sysfs_remove_group(&chip->client->dev.kobj, + &bh1770_attribute_group); +fail1: + if (chip->pdata->release_resources) + chip->pdata->release_resources(); +fail0: + regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs); + return err; +} + +static int bh1770_remove(struct i2c_client *client) +{ + struct bh1770_chip *chip = i2c_get_clientdata(client); + + free_irq(client->irq, chip); + + sysfs_remove_group(&chip->client->dev.kobj, + &bh1770_attribute_group); + + if (chip->pdata->release_resources) + chip->pdata->release_resources(); + + cancel_delayed_work_sync(&chip->prox_work); + + if (!pm_runtime_suspended(&client->dev)) + bh1770_chip_off(chip); + + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int bh1770_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct bh1770_chip *chip = i2c_get_clientdata(client); + + bh1770_chip_off(chip); + + return 0; +} + +static int bh1770_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct bh1770_chip *chip = i2c_get_clientdata(client); + int ret = 0; + + bh1770_chip_on(chip); + + if (!pm_runtime_suspended(dev)) { + /* + * If we were enabled at suspend time, everything is expected + * to work smoothly again after resume + */ + ret = bh1770_lux_rate(chip, chip->lux_rate_index); + ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE); + + /* This causes interrupt after the next measurement cycle */ + bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES, + BH1770_LUX_DEF_THRES); + /* Inform that we are waiting for a result from ALS */ + chip->lux_wait_result = true; + bh1770_prox_mode_control(chip); + } + return ret; +} +#endif + +#ifdef CONFIG_PM +static int bh1770_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct bh1770_chip *chip = i2c_get_clientdata(client); + + bh1770_chip_off(chip); + + return 0; +} + +static int bh1770_runtime_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct bh1770_chip *chip = i2c_get_clientdata(client); + + bh1770_chip_on(chip); + + return 0; +} +#endif + +static const struct i2c_device_id bh1770_id[] = { + {"bh1770glc", 0 }, + {"sfh7770", 0 }, + {} +}; + +MODULE_DEVICE_TABLE(i2c, bh1770_id); + +static const struct dev_pm_ops bh1770_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(bh1770_suspend, bh1770_resume) + SET_RUNTIME_PM_OPS(bh1770_runtime_suspend, bh1770_runtime_resume, NULL) +}; + +static struct i2c_driver bh1770_driver = { + .driver = { + .name = "bh1770glc", + .pm = &bh1770_pm_ops, + }, + .probe = bh1770_probe, + .remove = bh1770_remove, + .id_table = bh1770_id, +}; + +module_i2c_driver(bh1770_driver); + +MODULE_DESCRIPTION("BH1770GLC / SFH7770 combined ALS and proximity sensor"); +MODULE_AUTHOR("Samu Onkalo, Nokia Corporation"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/c2port/Kconfig b/drivers/misc/c2port/Kconfig new file mode 100644 
index 000000000..e20516ffd --- /dev/null +++ b/drivers/misc/c2port/Kconfig @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# C2 port devices +# + +menuconfig C2PORT + tristate "Silicon Labs C2 port support" + help + This option enables support for the Silicon Labs C2 port, used to + program Silicon Labs microcontrollers (and other 8051-compatible + chips). + + If your board has no such microcontrollers, you don't need this + interface at all. + + To compile this driver as a module, choose M here: the module will + be called c2port_core. Note that you also need a client module + usually called c2port-*. + + If you are not sure, say N here. + +if C2PORT + +config C2PORT_DURAMAR_2150 + tristate "C2 port support for Eurotech's Duramar 2150" + depends on X86 + help + This option enables C2 support for the on-board microcontroller of + Eurotech's Duramar 2150. + + To compile this driver as a module, choose M here: the module will + be called c2port-duramar2150. + +endif # C2PORT diff --git a/drivers/misc/c2port/Makefile b/drivers/misc/c2port/Makefile new file mode 100644 index 000000000..1dfe6ab06 --- /dev/null +++ b/drivers/misc/c2port/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_C2PORT) += core.o + +obj-$(CONFIG_C2PORT_DURAMAR_2150) += c2port-duramar2150.o diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c new file mode 100644 index 000000000..7e370949e --- /dev/null +++ b/drivers/misc/c2port/c2port-duramar2150.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Silicon Labs C2 port Linux support for Eurotech Duramar 2150 + * + * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it> + * Copyright (c) 2008 Eurotech S.p.A. <info@eurotech.it> + */ + +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/c2port.h> + +#define DATA_PORT 0x325 +#define DIR_PORT 0x326 +#define C2D (1 << 0) +#define C2CK (1 << 1) + +static DEFINE_MUTEX(update_lock); + +/* + * C2 port operations + */ + +static void duramar2150_c2port_access(struct c2port_device *dev, int status) +{ + u8 v; + + mutex_lock(&update_lock); + + v = inb(DIR_PORT); + + /* 0 = input, 1 = output */ + if (status) + outb(v | (C2D | C2CK), DIR_PORT); + else + /* When access is "off" it is important that both lines are set + * as inputs or hi-impedance */ + outb(v & ~(C2D | C2CK), DIR_PORT); + + mutex_unlock(&update_lock); +} + +static void duramar2150_c2port_c2d_dir(struct c2port_device *dev, int dir) +{ + u8 v; + + mutex_lock(&update_lock); + + v = inb(DIR_PORT); + + if (dir) + outb(v & ~C2D, DIR_PORT); + else + outb(v | C2D, DIR_PORT); + + mutex_unlock(&update_lock); +} + +static int duramar2150_c2port_c2d_get(struct c2port_device *dev) +{ + return inb(DATA_PORT) & C2D; +} + +static void duramar2150_c2port_c2d_set(struct c2port_device *dev, int status) +{ + u8 v; + + mutex_lock(&update_lock); + + v = inb(DATA_PORT); + + if (status) + outb(v | C2D, DATA_PORT); + else + outb(v & ~C2D, DATA_PORT); + + mutex_unlock(&update_lock); +} + +static void duramar2150_c2port_c2ck_set(struct c2port_device *dev, int status) +{ + u8 v; + + mutex_lock(&update_lock); + + v = inb(DATA_PORT); + + if (status) + outb(v | C2CK, DATA_PORT); + else + outb(v & ~C2CK, DATA_PORT); + + mutex_unlock(&update_lock); +} + +static struct c2port_ops duramar2150_c2port_ops = { + .block_size = 512, /* bytes */ + .blocks_num = 30, /* total flash 
size: 15360 bytes */
+
+	.access		= duramar2150_c2port_access,
+	.c2d_dir	= duramar2150_c2port_c2d_dir,
+	.c2d_get	= duramar2150_c2port_c2d_get,
+	.c2d_set	= duramar2150_c2port_c2d_set,
+	.c2ck_set	= duramar2150_c2port_c2ck_set,
+};
+
+static struct c2port_device *duramar2150_c2port_dev;
+
+/*
+ * Module stuff
+ */
+
+static int __init duramar2150_c2port_init(void)
+{
+	struct resource *res;
+	int ret = 0;
+
+	res = request_region(0x325, 2, "c2port");
+	if (!res)
+		return -EBUSY;
+
+	duramar2150_c2port_dev = c2port_device_register("uc",
+					&duramar2150_c2port_ops, NULL);
+	if (IS_ERR(duramar2150_c2port_dev)) {
+		ret = PTR_ERR(duramar2150_c2port_dev);
+		goto free_region;
+	}
+
+	return 0;
+
+free_region:
+	release_region(0x325, 2);
+	return ret;
+}
+
+static void __exit duramar2150_c2port_exit(void)
+{
+	/* Setup the GPIOs as input by default (access = 0) */
+	duramar2150_c2port_access(duramar2150_c2port_dev, 0);
+
+	c2port_device_unregister(duramar2150_c2port_dev);
+
+	release_region(0x325, 2);
+}
+
+module_init(duramar2150_c2port_init);
+module_exit(duramar2150_c2port_exit);
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
+MODULE_DESCRIPTION("Silicon Labs C2 port Linux support for Duramar 2150");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
new file mode 100644
index 000000000..80d87e8a0
--- /dev/null
+++ b/drivers/misc/c2port/core.c
@@ -0,0 +1,1000 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Silicon Labs C2 port core Linux support
+ *
+ * Copyright (c) 2007 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (c) 2007 Eurotech S.p.A. <info@eurotech.it>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <linux/c2port.h>
+
+#define DRIVER_NAME		"c2port"
+#define DRIVER_VERSION		"0.51.0"
+
+static DEFINE_SPINLOCK(c2port_idr_lock);
+static DEFINE_IDR(c2port_idr);
+
+/*
+ * Local variables
+ */
+
+static struct class *c2port_class;
+
+/*
+ * C2 registers & commands defines
+ */
+
+/* C2 registers */
+#define C2PORT_DEVICEID		0x00
+#define C2PORT_REVID		0x01
+#define C2PORT_FPCTL		0x02
+#define C2PORT_FPDAT		0xB4
+
+/* C2 interface commands */
+#define C2PORT_GET_VERSION	0x01
+#define C2PORT_DEVICE_ERASE	0x03
+#define C2PORT_BLOCK_READ	0x06
+#define C2PORT_BLOCK_WRITE	0x07
+#define C2PORT_PAGE_ERASE	0x08
+
+/* C2 status return codes */
+#define C2PORT_INVALID_COMMAND	0x00
+#define C2PORT_COMMAND_FAILED	0x02
+#define C2PORT_COMMAND_OK	0x0d
+
+/*
+ * C2 port low-level signal management
+ */
+
+static void c2port_reset(struct c2port_device *dev)
+{
+	struct c2port_ops *ops = dev->ops;
+
+	/* To reset the device we have to keep the clock line low for at
+	 * least 20us.
+	 */
+	local_irq_disable();
+	ops->c2ck_set(dev, 0);
+	udelay(25);
+	ops->c2ck_set(dev, 1);
+	local_irq_enable();
+
+	udelay(1);
+}
+
+static void c2port_strobe_ck(struct c2port_device *dev)
+{
+	struct c2port_ops *ops = dev->ops;
+
+	/* During the hi-low-hi transition we disable local IRQs to avoid
+	 * interruptions, since the C2 port specification says that it must
+	 * be shorter than 5us, otherwise the microcontroller may consider
+	 * it as a reset signal!
+ */ + local_irq_disable(); + ops->c2ck_set(dev, 0); + udelay(1); + ops->c2ck_set(dev, 1); + local_irq_enable(); + + udelay(1); +} + +/* + * C2 port basic functions + */ + +static void c2port_write_ar(struct c2port_device *dev, u8 addr) +{ + struct c2port_ops *ops = dev->ops; + int i; + + /* START field */ + c2port_strobe_ck(dev); + + /* INS field (11b, LSB first) */ + ops->c2d_dir(dev, 0); + ops->c2d_set(dev, 1); + c2port_strobe_ck(dev); + ops->c2d_set(dev, 1); + c2port_strobe_ck(dev); + + /* ADDRESS field */ + for (i = 0; i < 8; i++) { + ops->c2d_set(dev, addr & 0x01); + c2port_strobe_ck(dev); + + addr >>= 1; + } + + /* STOP field */ + ops->c2d_dir(dev, 1); + c2port_strobe_ck(dev); +} + +static int c2port_read_ar(struct c2port_device *dev, u8 *addr) +{ + struct c2port_ops *ops = dev->ops; + int i; + + /* START field */ + c2port_strobe_ck(dev); + + /* INS field (10b, LSB first) */ + ops->c2d_dir(dev, 0); + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + ops->c2d_set(dev, 1); + c2port_strobe_ck(dev); + + /* ADDRESS field */ + ops->c2d_dir(dev, 1); + *addr = 0; + for (i = 0; i < 8; i++) { + *addr >>= 1; /* shift in 8-bit ADDRESS field LSB first */ + + c2port_strobe_ck(dev); + if (ops->c2d_get(dev)) + *addr |= 0x80; + } + + /* STOP field */ + c2port_strobe_ck(dev); + + return 0; +} + +static int c2port_write_dr(struct c2port_device *dev, u8 data) +{ + struct c2port_ops *ops = dev->ops; + int timeout, i; + + /* START field */ + c2port_strobe_ck(dev); + + /* INS field (01b, LSB first) */ + ops->c2d_dir(dev, 0); + ops->c2d_set(dev, 1); + c2port_strobe_ck(dev); + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + + /* LENGTH field (00b, LSB first -> 1 byte) */ + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + + /* DATA field */ + for (i = 0; i < 8; i++) { + ops->c2d_set(dev, data & 0x01); + c2port_strobe_ck(dev); + + data >>= 1; + } + + /* WAIT field */ + ops->c2d_dir(dev, 1); + timeout = 20; + do { + c2port_strobe_ck(dev); + if (ops->c2d_get(dev)) + break; + + udelay(1); + } while (--timeout > 0); + if (timeout == 0) + return -EIO; + + /* STOP field */ + c2port_strobe_ck(dev); + + return 0; +} + +static int c2port_read_dr(struct c2port_device *dev, u8 *data) +{ + struct c2port_ops *ops = dev->ops; + int timeout, i; + + /* START field */ + c2port_strobe_ck(dev); + + /* INS field (00b, LSB first) */ + ops->c2d_dir(dev, 0); + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + + /* LENGTH field (00b, LSB first -> 1 byte) */ + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + ops->c2d_set(dev, 0); + c2port_strobe_ck(dev); + + /* WAIT field */ + ops->c2d_dir(dev, 1); + timeout = 20; + do { + c2port_strobe_ck(dev); + if (ops->c2d_get(dev)) + break; + + udelay(1); + } while (--timeout > 0); + if (timeout == 0) + return -EIO; + + /* DATA field */ + *data = 0; + for (i = 0; i < 8; i++) { + *data >>= 1; /* shift in 8-bit DATA field LSB first */ + + c2port_strobe_ck(dev); + if (ops->c2d_get(dev)) + *data |= 0x80; + } + + /* STOP field */ + c2port_strobe_ck(dev); + + return 0; +} + +static int c2port_poll_in_busy(struct c2port_device *dev) +{ + u8 addr; + int ret, timeout = 20; + + do { + ret = (c2port_read_ar(dev, &addr)); + if (ret < 0) + return -EIO; + + if (!(addr & 0x02)) + break; + + udelay(1); + } while (--timeout > 0); + if (timeout == 0) + return -EIO; + + return 0; +} + +static int c2port_poll_out_ready(struct c2port_device *dev) +{ + u8 addr; + int ret, timeout = 10000; /* erase flash needs long time... 
*/ + + do { + ret = (c2port_read_ar(dev, &addr)); + if (ret < 0) + return -EIO; + + if (addr & 0x01) + break; + + udelay(1); + } while (--timeout > 0); + if (timeout == 0) + return -EIO; + + return 0; +} + +/* + * sysfs methods + */ + +static ssize_t c2port_show_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", c2dev->name); +} +static DEVICE_ATTR(name, 0444, c2port_show_name, NULL); + +static ssize_t c2port_show_flash_blocks_num(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + struct c2port_ops *ops = c2dev->ops; + + return sprintf(buf, "%d\n", ops->blocks_num); +} +static DEVICE_ATTR(flash_blocks_num, 0444, c2port_show_flash_blocks_num, NULL); + +static ssize_t c2port_show_flash_block_size(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + struct c2port_ops *ops = c2dev->ops; + + return sprintf(buf, "%d\n", ops->block_size); +} +static DEVICE_ATTR(flash_block_size, 0444, c2port_show_flash_block_size, NULL); + +static ssize_t c2port_show_flash_size(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + struct c2port_ops *ops = c2dev->ops; + + return sprintf(buf, "%d\n", ops->blocks_num * ops->block_size); +} +static DEVICE_ATTR(flash_size, 0444, c2port_show_flash_size, NULL); + +static ssize_t access_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", c2dev->access); +} + +static ssize_t access_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + struct c2port_ops *ops = c2dev->ops; + int status, ret; + + ret = sscanf(buf, "%d", &status); + if (ret != 1) + return -EINVAL; + + mutex_lock(&c2dev->mutex); + + c2dev->access = !!status; + + /* If access is "on" clock should be HIGH _before_ setting the line + * as output and data line should be set as INPUT anyway */ + if (c2dev->access) + ops->c2ck_set(c2dev, 1); + ops->access(c2dev, c2dev->access); + if (c2dev->access) + ops->c2d_dir(c2dev, 1); + + mutex_unlock(&c2dev->mutex); + + return count; +} +static DEVICE_ATTR_RW(access); + +static ssize_t c2port_store_reset(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + + /* Check the device access status */ + if (!c2dev->access) + return -EBUSY; + + mutex_lock(&c2dev->mutex); + + c2port_reset(c2dev); + c2dev->flash_access = 0; + + mutex_unlock(&c2dev->mutex); + + return count; +} +static DEVICE_ATTR(reset, 0200, NULL, c2port_store_reset); + +static ssize_t __c2port_show_dev_id(struct c2port_device *dev, char *buf) +{ + u8 data; + int ret; + + /* Select DEVICEID register for C2 data register accesses */ + c2port_write_ar(dev, C2PORT_DEVICEID); + + /* Read and return the device ID register */ + ret = c2port_read_dr(dev, &data); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", data); +} + +static ssize_t c2port_show_dev_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + ssize_t ret; + + /* Check the device access status */ + if (!c2dev->access) + return -EBUSY; + + mutex_lock(&c2dev->mutex); + ret = 
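+	/* the locked __c2port_show_dev_id() helper below performs the
+	 * actual C2 bus transfer; this sysfs wrapper only takes the
+	 * device mutex and reports errors */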
__c2port_show_dev_id(c2dev, buf); + mutex_unlock(&c2dev->mutex); + + if (ret < 0) + dev_err(dev, "cannot read from %s\n", c2dev->name); + + return ret; +} +static DEVICE_ATTR(dev_id, 0444, c2port_show_dev_id, NULL); + +static ssize_t __c2port_show_rev_id(struct c2port_device *dev, char *buf) +{ + u8 data; + int ret; + + /* Select REVID register for C2 data register accesses */ + c2port_write_ar(dev, C2PORT_REVID); + + /* Read and return the revision ID register */ + ret = c2port_read_dr(dev, &data); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", data); +} + +static ssize_t c2port_show_rev_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + ssize_t ret; + + /* Check the device access status */ + if (!c2dev->access) + return -EBUSY; + + mutex_lock(&c2dev->mutex); + ret = __c2port_show_rev_id(c2dev, buf); + mutex_unlock(&c2dev->mutex); + + if (ret < 0) + dev_err(c2dev->dev, "cannot read from %s\n", c2dev->name); + + return ret; +} +static DEVICE_ATTR(rev_id, 0444, c2port_show_rev_id, NULL); + +static ssize_t c2port_show_flash_access(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", c2dev->flash_access); +} + +static ssize_t __c2port_store_flash_access(struct c2port_device *dev, + int status) +{ + int ret; + + /* Check the device access status */ + if (!dev->access) + return -EBUSY; + + dev->flash_access = !!status; + + /* If flash_access is off we have nothing to do... */ + if (dev->flash_access == 0) + return 0; + + /* Target the C2 flash programming control register for C2 data + * register access */ + c2port_write_ar(dev, C2PORT_FPCTL); + + /* Write the first keycode to enable C2 Flash programming */ + ret = c2port_write_dr(dev, 0x02); + if (ret < 0) + return ret; + + /* Write the second keycode to enable C2 Flash programming */ + ret = c2port_write_dr(dev, 0x01); + if (ret < 0) + return ret; + + /* Delay for at least 20ms to ensure the target is ready for + * C2 flash programming */ + mdelay(25); + + return 0; +} + +static ssize_t c2port_store_flash_access(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + int status; + ssize_t ret; + + ret = sscanf(buf, "%d", &status); + if (ret != 1) + return -EINVAL; + + mutex_lock(&c2dev->mutex); + ret = __c2port_store_flash_access(c2dev, status); + mutex_unlock(&c2dev->mutex); + + if (ret < 0) { + dev_err(c2dev->dev, "cannot enable %s flash programming\n", + c2dev->name); + return ret; + } + + return count; +} +static DEVICE_ATTR(flash_access, 0644, c2port_show_flash_access, + c2port_store_flash_access); + +static ssize_t __c2port_write_flash_erase(struct c2port_device *dev) +{ + u8 status; + int ret; + + /* Target the C2 flash programming data register for C2 data register + * access. 
+ */ + c2port_write_ar(dev, C2PORT_FPDAT); + + /* Send device erase command */ + c2port_write_dr(dev, C2PORT_DEVICE_ERASE); + + /* Wait for input acknowledge */ + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Should check status before starting FLASH access sequence */ + + /* Wait for status information */ + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + /* Read flash programming interface status */ + ret = c2port_read_dr(dev, &status); + if (ret < 0) + return ret; + if (status != C2PORT_COMMAND_OK) + return -EBUSY; + + /* Send a three-byte arming sequence to enable the device erase. + * If the sequence is not received correctly, the command will be + * ignored. + * Sequence is: 0xde, 0xad, 0xa5. + */ + c2port_write_dr(dev, 0xde); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + c2port_write_dr(dev, 0xad); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + c2port_write_dr(dev, 0xa5); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + return 0; +} + +static ssize_t c2port_store_flash_erase(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct c2port_device *c2dev = dev_get_drvdata(dev); + int ret; + + /* Check the device and flash access status */ + if (!c2dev->access || !c2dev->flash_access) + return -EBUSY; + + mutex_lock(&c2dev->mutex); + ret = __c2port_write_flash_erase(c2dev); + mutex_unlock(&c2dev->mutex); + + if (ret < 0) { + dev_err(c2dev->dev, "cannot erase %s flash\n", c2dev->name); + return ret; + } + + return count; +} +static DEVICE_ATTR(flash_erase, 0200, NULL, c2port_store_flash_erase); + +static ssize_t __c2port_read_flash_data(struct c2port_device *dev, + char *buffer, loff_t offset, size_t count) +{ + struct c2port_ops *ops = dev->ops; + u8 status, nread = 128; + int i, ret; + + /* Check for flash end */ + if (offset >= ops->block_size * ops->blocks_num) + return 0; + + if (ops->block_size * ops->blocks_num - offset < nread) + nread = ops->block_size * ops->blocks_num - offset; + if (count < nread) + nread = count; + if (nread == 0) + return nread; + + /* Target the C2 flash programming data register for C2 data register + * access */ + c2port_write_ar(dev, C2PORT_FPDAT); + + /* Send flash block read command */ + c2port_write_dr(dev, C2PORT_BLOCK_READ); + + /* Wait for input acknowledge */ + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Should check status before starting FLASH access sequence */ + + /* Wait for status information */ + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + /* Read flash programming interface status */ + ret = c2port_read_dr(dev, &status); + if (ret < 0) + return ret; + if (status != C2PORT_COMMAND_OK) + return -EBUSY; + + /* Send address high byte */ + c2port_write_dr(dev, offset >> 8); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Send address low byte */ + c2port_write_dr(dev, offset & 0x00ff); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Send address block size */ + c2port_write_dr(dev, nread); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Should check status before reading FLASH block */ + + /* Wait for status information */ + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + /* Read flash programming interface status */ + ret = c2port_read_dr(dev, &status); + if (ret < 0) + return ret; + if (status != 
C2PORT_COMMAND_OK) + return -EBUSY; + + /* Read flash block */ + for (i = 0; i < nread; i++) { + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + ret = c2port_read_dr(dev, buffer+i); + if (ret < 0) + return ret; + } + + return nread; +} + +static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buffer, loff_t offset, size_t count) +{ + struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj)); + ssize_t ret; + + /* Check the device and flash access status */ + if (!c2dev->access || !c2dev->flash_access) + return -EBUSY; + + mutex_lock(&c2dev->mutex); + ret = __c2port_read_flash_data(c2dev, buffer, offset, count); + mutex_unlock(&c2dev->mutex); + + if (ret < 0) + dev_err(c2dev->dev, "cannot read %s flash\n", c2dev->name); + + return ret; +} + +static ssize_t __c2port_write_flash_data(struct c2port_device *dev, + char *buffer, loff_t offset, size_t count) +{ + struct c2port_ops *ops = dev->ops; + u8 status, nwrite = 128; + int i, ret; + + if (nwrite > count) + nwrite = count; + if (ops->block_size * ops->blocks_num - offset < nwrite) + nwrite = ops->block_size * ops->blocks_num - offset; + + /* Check for flash end */ + if (offset >= ops->block_size * ops->blocks_num) + return -EINVAL; + + /* Target the C2 flash programming data register for C2 data register + * access */ + c2port_write_ar(dev, C2PORT_FPDAT); + + /* Send flash block write command */ + c2port_write_dr(dev, C2PORT_BLOCK_WRITE); + + /* Wait for input acknowledge */ + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Should check status before starting FLASH access sequence */ + + /* Wait for status information */ + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + /* Read flash programming interface status */ + ret = c2port_read_dr(dev, &status); + if (ret < 0) + return ret; + if (status != C2PORT_COMMAND_OK) + return -EBUSY; + + /* Send address high byte */ + c2port_write_dr(dev, offset >> 8); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Send address low byte */ + c2port_write_dr(dev, offset & 0x00ff); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Send address block size */ + c2port_write_dr(dev, nwrite); + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + /* Should check status before writing FLASH block */ + + /* Wait for status information */ + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + /* Read flash programming interface status */ + ret = c2port_read_dr(dev, &status); + if (ret < 0) + return ret; + if (status != C2PORT_COMMAND_OK) + return -EBUSY; + + /* Write flash block */ + for (i = 0; i < nwrite; i++) { + ret = c2port_write_dr(dev, *(buffer+i)); + if (ret < 0) + return ret; + + ret = c2port_poll_in_busy(dev); + if (ret < 0) + return ret; + + } + + /* Wait for last flash write to complete */ + ret = c2port_poll_out_ready(dev); + if (ret < 0) + return ret; + + return nwrite; +} + +static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buffer, loff_t offset, size_t count) +{ + struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj)); + int ret; + + /* Check the device access status */ + if (!c2dev->access || !c2dev->flash_access) + return -EBUSY; + + mutex_lock(&c2dev->mutex); + ret = __c2port_write_flash_data(c2dev, buffer, offset, count); + mutex_unlock(&c2dev->mutex); + + if (ret < 0) + dev_err(c2dev->dev, "cannot write %s flash\n", 
c2dev->name); + + return ret; +} +/* size is computed at run-time */ +static BIN_ATTR(flash_data, 0644, c2port_read_flash_data, + c2port_write_flash_data, 0); + +/* + * Class attributes + */ +static struct attribute *c2port_attrs[] = { + &dev_attr_name.attr, + &dev_attr_flash_blocks_num.attr, + &dev_attr_flash_block_size.attr, + &dev_attr_flash_size.attr, + &dev_attr_access.attr, + &dev_attr_reset.attr, + &dev_attr_dev_id.attr, + &dev_attr_rev_id.attr, + &dev_attr_flash_access.attr, + &dev_attr_flash_erase.attr, + NULL, +}; + +static struct bin_attribute *c2port_bin_attrs[] = { + &bin_attr_flash_data, + NULL, +}; + +static const struct attribute_group c2port_group = { + .attrs = c2port_attrs, + .bin_attrs = c2port_bin_attrs, +}; + +static const struct attribute_group *c2port_groups[] = { + &c2port_group, + NULL, +}; + +/* + * Exported functions + */ + +struct c2port_device *c2port_device_register(char *name, + struct c2port_ops *ops, void *devdata) +{ + struct c2port_device *c2dev; + int ret; + + if (unlikely(!ops) || unlikely(!ops->access) || \ + unlikely(!ops->c2d_dir) || unlikely(!ops->c2ck_set) || \ + unlikely(!ops->c2d_get) || unlikely(!ops->c2d_set)) + return ERR_PTR(-EINVAL); + + c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL); + if (unlikely(!c2dev)) + return ERR_PTR(-ENOMEM); + + idr_preload(GFP_KERNEL); + spin_lock_irq(&c2port_idr_lock); + ret = idr_alloc(&c2port_idr, c2dev, 0, 0, GFP_NOWAIT); + spin_unlock_irq(&c2port_idr_lock); + idr_preload_end(); + + if (ret < 0) + goto error_idr_alloc; + c2dev->id = ret; + + bin_attr_flash_data.size = ops->blocks_num * ops->block_size; + + c2dev->dev = device_create(c2port_class, NULL, 0, c2dev, + "c2port%d", c2dev->id); + if (IS_ERR(c2dev->dev)) { + ret = PTR_ERR(c2dev->dev); + goto error_device_create; + } + dev_set_drvdata(c2dev->dev, c2dev); + + strncpy(c2dev->name, name, C2PORT_NAME_LEN - 1); + c2dev->ops = ops; + mutex_init(&c2dev->mutex); + + /* By default C2 port access is off */ + c2dev->access = c2dev->flash_access = 0; + ops->access(c2dev, 0); + + dev_info(c2dev->dev, "C2 port %s added\n", name); + dev_info(c2dev->dev, "%s flash has %d blocks x %d bytes " + "(%d bytes total)\n", + name, ops->blocks_num, ops->block_size, + ops->blocks_num * ops->block_size); + + return c2dev; + +error_device_create: + spin_lock_irq(&c2port_idr_lock); + idr_remove(&c2port_idr, c2dev->id); + spin_unlock_irq(&c2port_idr_lock); + +error_idr_alloc: + kfree(c2dev); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL(c2port_device_register); + +void c2port_device_unregister(struct c2port_device *c2dev) +{ + if (!c2dev) + return; + + dev_info(c2dev->dev, "C2 port %s removed\n", c2dev->name); + + spin_lock_irq(&c2port_idr_lock); + idr_remove(&c2port_idr, c2dev->id); + spin_unlock_irq(&c2port_idr_lock); + + device_destroy(c2port_class, c2dev->id); + + kfree(c2dev); +} +EXPORT_SYMBOL(c2port_device_unregister); + +/* + * Module stuff + */ + +static int __init c2port_init(void) +{ + printk(KERN_INFO "Silicon Labs C2 port support v. " DRIVER_VERSION + " - (C) 2007 Rodolfo Giometti\n"); + + c2port_class = class_create(THIS_MODULE, "c2port"); + if (IS_ERR(c2port_class)) { + printk(KERN_ERR "c2port: failed to allocate class\n"); + return PTR_ERR(c2port_class); + } + c2port_class->dev_groups = c2port_groups; + + return 0; +} + +static void __exit c2port_exit(void) +{ + class_destroy(c2port_class); +} + +module_init(c2port_init); +module_exit(c2port_exit); + +MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); +MODULE_DESCRIPTION("Silicon Labs C2 port support v. 
" DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/cardreader/Kconfig b/drivers/misc/cardreader/Kconfig
new file mode 100644
index 000000000..022322dfb
--- /dev/null
+++ b/drivers/misc/cardreader/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MISC_ALCOR_PCI
+	tristate "Alcor Micro/Alcor Link PCI-E card reader"
+	depends on PCI
+	select MFD_CORE
+	help
+	  This adds support for Alcor Micro PCI-Express card readers,
+	  including the au6601 and au6621.
+	  Alcor Micro card readers support access to many types of memory
+	  cards, such as Memory Stick, Memory Stick Pro, Secure Digital and
+	  MultiMediaCard.
+
+config MISC_RTSX_PCI
+	tristate "Realtek PCI-E card reader"
+	depends on PCI
+	select MFD_CORE
+	help
+	  This adds support for Realtek PCI-Express card readers, including
+	  the rts5209, rts5227, rts522A, rts5229, rts5249, rts524A, rts525A,
+	  rtl8411 and rts5260.
+	  Realtek card readers support access to many types of memory cards,
+	  such as Memory Stick, Memory Stick Pro, Secure Digital and
+	  MultiMediaCard.
+
+config MISC_RTSX_USB
+	tristate "Realtek USB card reader"
+	depends on USB
+	select MFD_CORE
+	help
+	  Select this option to get support for Realtek USB 2.0 card readers,
+	  including the RTS5129, RTS5139, RTS5179 and RTS5170.
+	  Realtek card readers support access to many types of memory cards,
+	  such as Memory Stick Pro, Secure Digital and MultiMediaCard.
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
new file mode 100644
index 000000000..895128475
--- /dev/null
+++ b/drivers/misc/cardreader/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MISC_ALCOR_PCI)	+= alcor_pci.o
+obj-$(CONFIG_MISC_RTSX_PCI)	+= rtsx_pci.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o
+obj-$(CONFIG_MISC_RTSX_USB)	+= rtsx_usb.o
diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
new file mode 100644
index 000000000..3f514d77a
--- /dev/null
+++ b/drivers/misc/cardreader/alcor_pci.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Oleksij Rempel <linux@rempel-privat.de>
+ *
+ * Driver for Alcor Micro AU6601 and AU6621 controllers
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include <linux/alcor_pci.h>
+
+#define DRV_NAME_ALCOR_PCI	"alcor_pci"
+
+static DEFINE_IDA(alcor_pci_idr);
+
+static struct mfd_cell alcor_pci_cells[] = {
+	[ALCOR_SD_CARD] = {
+		.name = DRV_NAME_ALCOR_PCI_SDMMC,
+	},
+	[ALCOR_MS_CARD] = {
+		.name = DRV_NAME_ALCOR_PCI_MS,
+	},
+};
+
+static const struct alcor_dev_cfg alcor_cfg = {
+	.dma = 0,
+};
+
+static const struct alcor_dev_cfg au6621_cfg = {
+	.dma = 1,
+};
+
+static const struct alcor_dev_cfg au6625_cfg = {
+	.dma = 0,
+};
+
+static const struct pci_device_id pci_ids[] = {
+	{ PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6601),
+		.driver_data = (kernel_ulong_t)&alcor_cfg },
+	{ PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6621),
+		.driver_data = (kernel_ulong_t)&au6621_cfg },
+	{ PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6625),
+		.driver_data = (kernel_ulong_t)&au6625_cfg },
+	{},
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+void alcor_write8(struct alcor_pci_priv *priv, u8 val, unsigned int addr)
+{
+	writeb(val, priv->iobase + addr);
+}
+EXPORT_SYMBOL_GPL(alcor_write8);
+
+void alcor_write16(struct
alcor_pci_priv *priv, u16 val, unsigned int addr) +{ + writew(val, priv->iobase + addr); +} +EXPORT_SYMBOL_GPL(alcor_write16); + +void alcor_write32(struct alcor_pci_priv *priv, u32 val, unsigned int addr) +{ + writel(val, priv->iobase + addr); +} +EXPORT_SYMBOL_GPL(alcor_write32); + +void alcor_write32be(struct alcor_pci_priv *priv, u32 val, unsigned int addr) +{ + iowrite32be(val, priv->iobase + addr); +} +EXPORT_SYMBOL_GPL(alcor_write32be); + +u8 alcor_read8(struct alcor_pci_priv *priv, unsigned int addr) +{ + return readb(priv->iobase + addr); +} +EXPORT_SYMBOL_GPL(alcor_read8); + +u32 alcor_read32(struct alcor_pci_priv *priv, unsigned int addr) +{ + return readl(priv->iobase + addr); +} +EXPORT_SYMBOL_GPL(alcor_read32); + +u32 alcor_read32be(struct alcor_pci_priv *priv, unsigned int addr) +{ + return ioread32be(priv->iobase + addr); +} +EXPORT_SYMBOL_GPL(alcor_read32be); + +static int alcor_pci_find_cap_offset(struct alcor_pci_priv *priv, + struct pci_dev *pci) +{ + int where; + u8 val8; + u32 val32; + + where = ALCOR_CAP_START_OFFSET; + pci_read_config_byte(pci, where, &val8); + if (!val8) + return 0; + + where = (int)val8; + while (1) { + pci_read_config_dword(pci, where, &val32); + if (val32 == 0xffffffff) { + dev_dbg(priv->dev, "find_cap_offset invalid value %x.\n", + val32); + return 0; + } + + if ((val32 & 0xff) == 0x10) { + dev_dbg(priv->dev, "pcie cap offset: %x\n", where); + return where; + } + + if ((val32 & 0xff00) == 0x00) { + dev_dbg(priv->dev, "pci_find_cap_offset invalid value %x.\n", + val32); + break; + } + where = (int)((val32 >> 8) & 0xff); + } + + return 0; +} + +static void alcor_pci_init_check_aspm(struct alcor_pci_priv *priv) +{ + struct pci_dev *pci; + int where; + u32 val32; + + priv->pdev_cap_off = alcor_pci_find_cap_offset(priv, priv->pdev); + /* + * A device might be attached to root complex directly and + * priv->parent_pdev will be NULL. In this case we don't check its + * capability and disable ASPM completely. 
+ */ + if (priv->parent_pdev) + priv->parent_cap_off = alcor_pci_find_cap_offset(priv, + priv->parent_pdev); + + if ((priv->pdev_cap_off == 0) || (priv->parent_cap_off == 0)) { + dev_dbg(priv->dev, "pci_cap_off: %x, parent_cap_off: %x\n", + priv->pdev_cap_off, priv->parent_cap_off); + return; + } + + /* link capability */ + pci = priv->pdev; + where = priv->pdev_cap_off + ALCOR_PCIE_LINK_CAP_OFFSET; + pci_read_config_dword(pci, where, &val32); + priv->pdev_aspm_cap = (u8)(val32 >> 10) & 0x03; + + pci = priv->parent_pdev; + where = priv->parent_cap_off + ALCOR_PCIE_LINK_CAP_OFFSET; + pci_read_config_dword(pci, where, &val32); + priv->parent_aspm_cap = (u8)(val32 >> 10) & 0x03; + + if (priv->pdev_aspm_cap != priv->parent_aspm_cap) { + u8 aspm_cap; + + dev_dbg(priv->dev, "pdev_aspm_cap: %x, parent_aspm_cap: %x\n", + priv->pdev_aspm_cap, priv->parent_aspm_cap); + aspm_cap = priv->pdev_aspm_cap & priv->parent_aspm_cap; + priv->pdev_aspm_cap = aspm_cap; + priv->parent_aspm_cap = aspm_cap; + } + + dev_dbg(priv->dev, "ext_config_dev_aspm: %x, pdev_aspm_cap: %x\n", + priv->ext_config_dev_aspm, priv->pdev_aspm_cap); + priv->ext_config_dev_aspm &= priv->pdev_aspm_cap; +} + +static void alcor_pci_aspm_ctrl(struct alcor_pci_priv *priv, u8 aspm_enable) +{ + struct pci_dev *pci; + u8 aspm_ctrl, i; + int where; + u32 val32; + + if ((!priv->pdev_cap_off) || (!priv->parent_cap_off)) { + dev_dbg(priv->dev, "pci_cap_off: %x, parent_cap_off: %x\n", + priv->pdev_cap_off, priv->parent_cap_off); + return; + } + + if (!priv->pdev_aspm_cap) + return; + + aspm_ctrl = 0; + if (aspm_enable) { + aspm_ctrl = priv->ext_config_dev_aspm; + + if (!aspm_ctrl) { + dev_dbg(priv->dev, "aspm_ctrl == 0\n"); + return; + } + } + + for (i = 0; i < 2; i++) { + + if (i) { + pci = priv->parent_pdev; + where = priv->parent_cap_off + + ALCOR_PCIE_LINK_CTRL_OFFSET; + } else { + pci = priv->pdev; + where = priv->pdev_cap_off + + ALCOR_PCIE_LINK_CTRL_OFFSET; + } + + pci_read_config_dword(pci, where, &val32); + val32 &= (~0x03); + val32 |= (aspm_ctrl & priv->pdev_aspm_cap); + pci_write_config_byte(pci, where, (u8)val32); + } + +} + +static inline void alcor_mask_sd_irqs(struct alcor_pci_priv *priv) +{ + alcor_write32(priv, 0, AU6601_REG_INT_ENABLE); +} + +static inline void alcor_unmask_sd_irqs(struct alcor_pci_priv *priv) +{ + alcor_write32(priv, AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK | + AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE | + AU6601_INT_OVER_CURRENT_ERR, + AU6601_REG_INT_ENABLE); +} + +static inline void alcor_mask_ms_irqs(struct alcor_pci_priv *priv) +{ + alcor_write32(priv, 0, AU6601_MS_INT_ENABLE); +} + +static inline void alcor_unmask_ms_irqs(struct alcor_pci_priv *priv) +{ + alcor_write32(priv, 0x3d00fa, AU6601_MS_INT_ENABLE); +} + +static int alcor_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct alcor_dev_cfg *cfg; + struct alcor_pci_priv *priv; + int ret, i, bar = 0; + + cfg = (void *)ent->driver_data; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = ida_alloc(&alcor_pci_idr, GFP_KERNEL); + if (ret < 0) + return ret; + priv->id = ret; + + priv->pdev = pdev; + priv->parent_pdev = pdev->bus->self; + priv->dev = &pdev->dev; + priv->cfg = cfg; + priv->irq = pdev->irq; + + ret = pci_request_regions(pdev, DRV_NAME_ALCOR_PCI); + if (ret) { + dev_err(&pdev->dev, "Cannot request region\n"); + ret = -ENOMEM; + goto error_free_ida; + } + + if (!(pci_resource_flags(pdev, bar) & 
IORESOURCE_MEM)) { + dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar); + ret = -ENODEV; + goto error_release_regions; + } + + priv->iobase = pcim_iomap(pdev, bar, 0); + if (!priv->iobase) { + ret = -ENOMEM; + goto error_release_regions; + } + + /* make sure irqs are disabled */ + alcor_write32(priv, 0, AU6601_REG_INT_ENABLE); + alcor_write32(priv, 0, AU6601_MS_INT_ENABLE); + + ret = dma_set_mask_and_coherent(priv->dev, AU6601_SDMA_MASK); + if (ret) { + dev_err(priv->dev, "Failed to set DMA mask\n"); + goto error_release_regions; + } + + pci_set_master(pdev); + pci_set_drvdata(pdev, priv); + alcor_pci_init_check_aspm(priv); + + for (i = 0; i < ARRAY_SIZE(alcor_pci_cells); i++) { + alcor_pci_cells[i].platform_data = priv; + alcor_pci_cells[i].pdata_size = sizeof(*priv); + } + ret = mfd_add_devices(&pdev->dev, priv->id, alcor_pci_cells, + ARRAY_SIZE(alcor_pci_cells), NULL, 0, NULL); + if (ret < 0) + goto error_release_regions; + + alcor_pci_aspm_ctrl(priv, 0); + + return 0; + +error_release_regions: + pci_release_regions(pdev); +error_free_ida: + ida_free(&alcor_pci_idr, priv->id); + return ret; +} + +static void alcor_pci_remove(struct pci_dev *pdev) +{ + struct alcor_pci_priv *priv; + + priv = pci_get_drvdata(pdev); + + alcor_pci_aspm_ctrl(priv, 1); + + mfd_remove_devices(&pdev->dev); + + ida_free(&alcor_pci_idr, priv->id); + + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); +} + +#ifdef CONFIG_PM_SLEEP +static int alcor_suspend(struct device *dev) +{ + struct alcor_pci_priv *priv = dev_get_drvdata(dev); + + alcor_pci_aspm_ctrl(priv, 1); + return 0; +} + +static int alcor_resume(struct device *dev) +{ + + struct alcor_pci_priv *priv = dev_get_drvdata(dev); + + alcor_pci_aspm_ctrl(priv, 0); + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(alcor_pci_pm_ops, alcor_suspend, alcor_resume); + +static struct pci_driver alcor_driver = { + .name = DRV_NAME_ALCOR_PCI, + .id_table = pci_ids, + .probe = alcor_pci_probe, + .remove = alcor_pci_remove, + .driver = { + .pm = &alcor_pci_pm_ops + }, +}; + +module_pci_driver(alcor_driver); + +MODULE_AUTHOR("Oleksij Rempel <linux@rempel-privat.de>"); +MODULE_DESCRIPTION("PCI driver for Alcor Micro AU6601 Secure Digital Host Controller Interface"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/cardreader/rtl8411.c b/drivers/misc/cardreader/rtl8411.c new file mode 100644 index 000000000..a07674ed0 --- /dev/null +++ b/drivers/misc/cardreader/rtl8411.c @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. 
+ * + * Author: + * Wei WANG <wei_wang@realsil.com.cn> + * Roger Tseng <rogerable@realtek.com> + */ + +#include <linux/module.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/rtsx_pci.h> + +#include "rtsx_pcr.h" + +static u8 rtl8411_get_ic_version(struct rtsx_pcr *pcr) +{ + u8 val; + + rtsx_pci_read_register(pcr, SYS_VER, &val); + return val & 0x0F; +} + +static int rtl8411b_is_qfn48(struct rtsx_pcr *pcr) +{ + u8 val = 0; + + rtsx_pci_read_register(pcr, RTL8411B_PACKAGE_MODE, &val); + + if (val & 0x2) + return 1; + else + return 0; +} + +static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + u32 reg1 = 0; + u8 reg3 = 0; + + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®1); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg1); + + if (!rtsx_vendor_setting_valid(reg1)) + return; + + pcr->aspm_en = rtsx_reg_to_aspm(reg1); + pcr->sd30_drive_sel_1v8 = + map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg1)); + pcr->card_drive_sel &= 0x3F; + pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg1); + + pci_read_config_byte(pdev, PCR_SETTING_REG3, ®3); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG3, reg3); + pcr->sd30_drive_sel_3v3 = rtl8411_reg_to_sd30_drive_sel_3v3(reg3); +} + +static void rtl8411b_fetch_vendor_settings(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + u32 reg = 0; + + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); + + if (!rtsx_vendor_setting_valid(reg)) + return; + + pcr->aspm_en = rtsx_reg_to_aspm(reg); + pcr->sd30_drive_sel_1v8 = + map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg)); + pcr->sd30_drive_sel_3v3 = + map_sd_drive(rtl8411b_reg_to_sd30_drive_sel_3v3(reg)); +} + +static void rtl8411_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) +{ + rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07); +} + +static int rtl8411_extra_init_hw(struct rtsx_pcr *pcr) +{ + rtsx_pci_init_cmd(pcr); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL, + 0xFF, pcr->sd30_drive_sel_3v3); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CD_PAD_CTL, + CD_DISABLE_MASK | CD_AUTO_DISABLE, CD_ENABLE); + + return rtsx_pci_send_cmd(pcr, 100); +} + +static int rtl8411b_extra_init_hw(struct rtsx_pcr *pcr) +{ + rtsx_pci_init_cmd(pcr); + + if (rtl8411b_is_qfn48(pcr)) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, + CARD_PULL_CTL3, 0xFF, 0xF5); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL, + 0xFF, pcr->sd30_drive_sel_3v3); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CD_PAD_CTL, + CD_DISABLE_MASK | CD_AUTO_DISABLE, CD_ENABLE); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, FUNC_FORCE_CTL, + 0x06, 0x00); + + return rtsx_pci_send_cmd(pcr, 100); +} + +static int rtl8411_turn_on_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x00); +} + +static int rtl8411_turn_off_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x01); +} + +static int rtl8411_enable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0xFF, 0x0D); +} + +static int rtl8411_disable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0x08, 0x00); +} + +static int rtl8411_card_power_on(struct rtsx_pcr *pcr, int card) +{ + int err; + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, + BPP_POWER_MASK, BPP_POWER_5_PERCENT_ON); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CTL, + BPP_LDO_POWB, BPP_LDO_SUSPEND); + err = rtsx_pci_send_cmd(pcr, 
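+			/* note: per the rtsx_pcr core, the timeout
+			 * argument to rtsx_pci_send_cmd() is in
+			 * milliseconds */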
100); + if (err < 0) + return err; + + /* To avoid too large in-rush current */ + udelay(150); + + err = rtsx_pci_write_register(pcr, CARD_PWR_CTL, + BPP_POWER_MASK, BPP_POWER_10_PERCENT_ON); + if (err < 0) + return err; + + udelay(150); + + err = rtsx_pci_write_register(pcr, CARD_PWR_CTL, + BPP_POWER_MASK, BPP_POWER_15_PERCENT_ON); + if (err < 0) + return err; + + udelay(150); + + err = rtsx_pci_write_register(pcr, CARD_PWR_CTL, + BPP_POWER_MASK, BPP_POWER_ON); + if (err < 0) + return err; + + return rtsx_pci_write_register(pcr, LDO_CTL, BPP_LDO_POWB, BPP_LDO_ON); +} + +static int rtl8411_card_power_off(struct rtsx_pcr *pcr, int card) +{ + int err; + + err = rtsx_pci_write_register(pcr, CARD_PWR_CTL, + BPP_POWER_MASK, BPP_POWER_OFF); + if (err < 0) + return err; + + return rtsx_pci_write_register(pcr, LDO_CTL, + BPP_LDO_POWB, BPP_LDO_SUSPEND); +} + +static int rtl8411_do_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage, + int bpp_tuned18_shift, int bpp_asic_1v8) +{ + u8 mask, val; + int err; + + mask = (BPP_REG_TUNED18 << bpp_tuned18_shift) | BPP_PAD_MASK; + if (voltage == OUTPUT_3V3) { + err = rtsx_pci_write_register(pcr, + SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3); + if (err < 0) + return err; + val = (BPP_ASIC_3V3 << bpp_tuned18_shift) | BPP_PAD_3V3; + } else if (voltage == OUTPUT_1V8) { + err = rtsx_pci_write_register(pcr, + SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8); + if (err < 0) + return err; + val = (bpp_asic_1v8 << bpp_tuned18_shift) | BPP_PAD_1V8; + } else { + return -EINVAL; + } + + return rtsx_pci_write_register(pcr, LDO_CTL, mask, val); +} + +static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + return rtl8411_do_switch_output_voltage(pcr, voltage, + BPP_TUNED18_SHIFT_8411, BPP_ASIC_1V8); +} + +static int rtl8402_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + return rtl8411_do_switch_output_voltage(pcr, voltage, + BPP_TUNED18_SHIFT_8402, BPP_ASIC_2V0); +} + +static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr) +{ + unsigned int card_exist; + + card_exist = rtsx_pci_readl(pcr, RTSX_BIPR); + card_exist &= CARD_EXIST; + if (!card_exist) { + /* Enable card CD */ + rtsx_pci_write_register(pcr, CD_PAD_CTL, + CD_DISABLE_MASK, CD_ENABLE); + /* Enable card interrupt */ + rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x00); + return 0; + } + + if (hweight32(card_exist) > 1) { + rtsx_pci_write_register(pcr, CARD_PWR_CTL, + BPP_POWER_MASK, BPP_POWER_5_PERCENT_ON); + msleep(100); + + card_exist = rtsx_pci_readl(pcr, RTSX_BIPR); + if (card_exist & MS_EXIST) + card_exist = MS_EXIST; + else if (card_exist & SD_EXIST) + card_exist = SD_EXIST; + else + card_exist = 0; + + rtsx_pci_write_register(pcr, CARD_PWR_CTL, + BPP_POWER_MASK, BPP_POWER_OFF); + + pcr_dbg(pcr, "After CD deglitch, card_exist = 0x%x\n", + card_exist); + } + + if (card_exist & MS_EXIST) { + /* Disable SD interrupt */ + rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x40); + rtsx_pci_write_register(pcr, CD_PAD_CTL, + CD_DISABLE_MASK, MS_CD_EN_ONLY); + } else if (card_exist & SD_EXIST) { + /* Disable MS interrupt */ + rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x80); + rtsx_pci_write_register(pcr, CD_PAD_CTL, + CD_DISABLE_MASK, SD_CD_EN_ONLY); + } + + return card_exist; +} + +static int rtl8411_conv_clk_and_div_n(int input, int dir) +{ + int output; + + if (dir == CLK_TO_DIV_N) + output = input * 4 / 5 - 2; + else + output = (input + 2) * 5 / 4; + + return output; +} + +static const struct pcr_ops rtl8411_pcr_ops = { + .fetch_vendor_settings = 
rtl8411_fetch_vendor_settings, + .extra_init_hw = rtl8411_extra_init_hw, + .optimize_phy = NULL, + .turn_on_led = rtl8411_turn_on_led, + .turn_off_led = rtl8411_turn_off_led, + .enable_auto_blink = rtl8411_enable_auto_blink, + .disable_auto_blink = rtl8411_disable_auto_blink, + .card_power_on = rtl8411_card_power_on, + .card_power_off = rtl8411_card_power_off, + .switch_output_voltage = rtl8411_switch_output_voltage, + .cd_deglitch = rtl8411_cd_deglitch, + .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n, + .force_power_down = rtl8411_force_power_down, +}; + +static const struct pcr_ops rtl8402_pcr_ops = { + .fetch_vendor_settings = rtl8411_fetch_vendor_settings, + .extra_init_hw = rtl8411_extra_init_hw, + .optimize_phy = NULL, + .turn_on_led = rtl8411_turn_on_led, + .turn_off_led = rtl8411_turn_off_led, + .enable_auto_blink = rtl8411_enable_auto_blink, + .disable_auto_blink = rtl8411_disable_auto_blink, + .card_power_on = rtl8411_card_power_on, + .card_power_off = rtl8411_card_power_off, + .switch_output_voltage = rtl8402_switch_output_voltage, + .cd_deglitch = rtl8411_cd_deglitch, + .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n, + .force_power_down = rtl8411_force_power_down, +}; + +static const struct pcr_ops rtl8411b_pcr_ops = { + .fetch_vendor_settings = rtl8411b_fetch_vendor_settings, + .extra_init_hw = rtl8411b_extra_init_hw, + .optimize_phy = NULL, + .turn_on_led = rtl8411_turn_on_led, + .turn_off_led = rtl8411_turn_off_led, + .enable_auto_blink = rtl8411_enable_auto_blink, + .disable_auto_blink = rtl8411_disable_auto_blink, + .card_power_on = rtl8411_card_power_on, + .card_power_off = rtl8411_card_power_off, + .switch_output_voltage = rtl8411_switch_output_voltage, + .cd_deglitch = rtl8411_cd_deglitch, + .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n, + .force_power_down = rtl8411_force_power_down, +}; + +/* SD Pull Control Enable: + * SD_DAT[3:0] ==> pull up + * SD_CD ==> pull up + * SD_WP ==> pull up + * SD_CMD ==> pull up + * SD_CLK ==> pull down + */ +static const u32 rtl8411_sd_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL1, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xA9), + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09), + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x09), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04), + 0, +}; + +/* SD Pull Control Disable: + * SD_DAT[3:0] ==> pull down + * SD_CD ==> pull up + * SD_WP ==> pull down + * SD_CMD ==> pull down + * SD_CLK ==> pull down + */ +static const u32 rtl8411_sd_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65), + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95), + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09), + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04), + 0, +}; + +/* MS Pull Control Enable: + * MS CD ==> pull up + * others ==> pull down + */ +static const u32 rtl8411_ms_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65), + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95), + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x05), + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04), + 0, +}; + +/* MS Pull Control Disable: + * MS CD ==> pull up + * others ==> pull down + */ +static const u32 rtl8411_ms_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65), + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95), + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09), + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04), + 0, +}; + +static const u32 
rtl8411b_qfn64_sd_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL1, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0x09 | 0xD0), + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09 | 0x50), + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11), + 0, +}; + +static const u32 rtl8411b_qfn48_sd_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0x69 | 0x90), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x08 | 0x11), + 0, +}; + +static const u32 rtl8411b_qfn64_sd_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65), + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0x05 | 0xD0), + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09 | 0x50), + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11), + 0, +}; + +static const u32 rtl8411b_qfn48_sd_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0x65 | 0x90), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11), + 0, +}; + +static const u32 rtl8411b_qfn64_ms_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65), + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0x05 | 0xD0), + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x05 | 0x50), + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11), + 0, +}; + +static const u32 rtl8411b_qfn48_ms_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0x65 | 0x90), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11), + 0, +}; + +static const u32 rtl8411b_qfn64_ms_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65), + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0x05 | 0xD0), + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09 | 0x50), + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11), + 0, +}; + +static const u32 rtl8411b_qfn48_ms_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0x65 | 0x90), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11), + 0, +}; + +static void rtl8411_init_common_params(struct rtsx_pcr *pcr) +{ + pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104; + pcr->num_slots = 2; + pcr->flags = 0; + pcr->card_drive_sel = RTL8411_CARD_DRIVE_DEFAULT; + pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B; + pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D; + pcr->aspm_en = ASPM_L1_EN; + pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14); + pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10); + pcr->ic_version = rtl8411_get_ic_version(pcr); +} + +void rtl8411_init_params(struct rtsx_pcr *pcr) +{ + rtl8411_init_common_params(pcr); + pcr->ops = &rtl8411_pcr_ops; + set_pull_ctrl_tables(pcr, rtl8411); +} + +void rtl8411b_init_params(struct rtsx_pcr *pcr) +{ + rtl8411_init_common_params(pcr); + pcr->ops = &rtl8411b_pcr_ops; + if (rtl8411b_is_qfn48(pcr)) + set_pull_ctrl_tables(pcr, rtl8411b_qfn48); + else + set_pull_ctrl_tables(pcr, rtl8411b_qfn64); +} + +void rtl8402_init_params(struct rtsx_pcr *pcr) +{ + rtl8411_init_common_params(pcr); + pcr->ops = &rtl8402_pcr_ops; + set_pull_ctrl_tables(pcr, rtl8411); +} diff --git a/drivers/misc/cardreader/rts5209.c b/drivers/misc/cardreader/rts5209.c new file mode 100644 index 000000000..39a6a7ecc --- /dev/null +++ b/drivers/misc/cardreader/rts5209.c @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2009-2013 Realtek 
Semiconductor Corp. All rights reserved. + * + * Author: + * Wei WANG <wei_wang@realsil.com.cn> + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/rtsx_pci.h> + +#include "rtsx_pcr.h" + +static u8 rts5209_get_ic_version(struct rtsx_pcr *pcr) +{ + u8 val; + + val = rtsx_pci_readb(pcr, 0x1C); + return val & 0x0F; +} + +static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + u32 reg; + + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); + + if (rts5209_vendor_setting1_valid(reg)) { + if (rts5209_reg_check_ms_pmos(reg)) + pcr->flags |= PCR_MS_PMOS; + pcr->aspm_en = rts5209_reg_to_aspm(reg); + } + + pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); + + if (rts5209_vendor_setting2_valid(reg)) { + pcr->sd30_drive_sel_1v8 = + rts5209_reg_to_sd30_drive_sel_1v8(reg); + pcr->sd30_drive_sel_3v3 = + rts5209_reg_to_sd30_drive_sel_3v3(reg); + pcr->card_drive_sel = rts5209_reg_to_card_drive_sel(reg); + } +} + +static void rts5209_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) +{ + rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07); +} + +static int rts5209_extra_init_hw(struct rtsx_pcr *pcr) +{ + rtsx_pci_init_cmd(pcr); + + /* Turn off LED */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_GPIO, 0xFF, 0x03); + /* Reset ASPM state to default value */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0); + /* Force CLKREQ# PIN to drive 0 to request clock */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08); + /* Configure GPIO as output */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_GPIO_DIR, 0xFF, 0x03); + /* Configure driving */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL, + 0xFF, pcr->sd30_drive_sel_3v3); + + return rtsx_pci_send_cmd(pcr, 100); +} + +static int rts5209_optimize_phy(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_phy_register(pcr, 0x00, 0xB966); +} + +static int rts5209_turn_on_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x00); +} + +static int rts5209_turn_off_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x01); +} + +static int rts5209_enable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0xFF, 0x0D); +} + +static int rts5209_disable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0x08, 0x00); +} + +static int rts5209_card_power_on(struct rtsx_pcr *pcr, int card) +{ + int err; + u8 pwr_mask, partial_pwr_on, pwr_on; + + pwr_mask = SD_POWER_MASK; + partial_pwr_on = SD_PARTIAL_POWER_ON; + pwr_on = SD_POWER_ON; + + if ((pcr->flags & PCR_MS_PMOS) && (card == RTSX_MS_CARD)) { + pwr_mask = MS_POWER_MASK; + partial_pwr_on = MS_PARTIAL_POWER_ON; + pwr_on = MS_POWER_ON; + } + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, + pwr_mask, partial_pwr_on); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, + LDO3318_PWR_MASK, 0x04); + err = rtsx_pci_send_cmd(pcr, 100); + if (err < 0) + return err; + + /* To avoid too large in-rush current */ + udelay(150); + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, pwr_mask, pwr_on); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, + LDO3318_PWR_MASK, 0x00); + return rtsx_pci_send_cmd(pcr, 100); +} + +static int rts5209_card_power_off(struct rtsx_pcr *pcr, int card) +{ + u8 pwr_mask, pwr_off; + + pwr_mask = 
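+	/* default to the SD power domain; switched to the MS domain
+	 * below when the reader has a separate MS PMOS (PCR_MS_PMOS)
+	 * and a Memory Stick card is the target */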
SD_POWER_MASK; + pwr_off = SD_POWER_OFF; + + if ((pcr->flags & PCR_MS_PMOS) && (card == RTSX_MS_CARD)) { + pwr_mask = MS_POWER_MASK; + pwr_off = MS_POWER_OFF; + } + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, + pwr_mask | PMOS_STRG_MASK, pwr_off | PMOS_STRG_400mA); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, + LDO3318_PWR_MASK, 0x06); + return rtsx_pci_send_cmd(pcr, 100); +} + +static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + int err; + + if (voltage == OUTPUT_3V3) { + err = rtsx_pci_write_register(pcr, + SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24); + if (err < 0) + return err; + } else if (voltage == OUTPUT_1V8) { + err = rtsx_pci_write_register(pcr, + SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24); + if (err < 0) + return err; + } else { + return -EINVAL; + } + + return 0; +} + +static const struct pcr_ops rts5209_pcr_ops = { + .fetch_vendor_settings = rts5209_fetch_vendor_settings, + .extra_init_hw = rts5209_extra_init_hw, + .optimize_phy = rts5209_optimize_phy, + .turn_on_led = rts5209_turn_on_led, + .turn_off_led = rts5209_turn_off_led, + .enable_auto_blink = rts5209_enable_auto_blink, + .disable_auto_blink = rts5209_disable_auto_blink, + .card_power_on = rts5209_card_power_on, + .card_power_off = rts5209_card_power_off, + .switch_output_voltage = rts5209_switch_output_voltage, + .cd_deglitch = NULL, + .conv_clk_and_div_n = NULL, + .force_power_down = rts5209_force_power_down, +}; + +/* SD Pull Control Enable: + * SD_DAT[3:0] ==> pull up + * SD_CD ==> pull up + * SD_WP ==> pull up + * SD_CMD ==> pull up + * SD_CLK ==> pull down + */ +static const u32 rts5209_sd_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL1, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9), + 0, +}; + +/* SD Pull Control Disable: + * SD_DAT[3:0] ==> pull down + * SD_CD ==> pull up + * SD_WP ==> pull down + * SD_CMD ==> pull down + * SD_CLK ==> pull down + */ +static const u32 rts5209_sd_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL1, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5), + 0, +}; + +/* MS Pull Control Enable: + * MS CD ==> pull up + * others ==> pull down + */ +static const u32 rts5209_ms_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15), + 0, +}; + +/* MS Pull Control Disable: + * MS CD ==> pull up + * others ==> pull down + */ +static const u32 rts5209_ms_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15), + 0, +}; + +void rts5209_init_params(struct rtsx_pcr *pcr) +{ + pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | + EXTRA_CAPS_SD_SDR104 | EXTRA_CAPS_MMC_8BIT; + pcr->num_slots = 2; + pcr->ops = &rts5209_pcr_ops; + + pcr->flags = 0; + pcr->card_drive_sel = RTS5209_CARD_DRIVE_DEFAULT; + pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B; + pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D; + pcr->aspm_en = ASPM_L1_EN; + pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 16); + pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5); + + pcr->ic_version = rts5209_get_ic_version(pcr); + pcr->sd_pull_ctl_enable_tbl = rts5209_sd_pull_ctl_enable_tbl; + pcr->sd_pull_ctl_disable_tbl = rts5209_sd_pull_ctl_disable_tbl; + 
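+	/* the MS socket gets its own pull-control tables; each table is a
+	 * 0-terminated list of RTSX_REG_PAIR() entries that the rtsx_pcr
+	 * core walks when card pulls are enabled or disabled */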
pcr->ms_pull_ctl_enable_tbl = rts5209_ms_pull_ctl_enable_tbl; + pcr->ms_pull_ctl_disable_tbl = rts5209_ms_pull_ctl_disable_tbl; +} diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c new file mode 100644 index 000000000..8200af22b --- /dev/null +++ b/drivers/misc/cardreader/rts5227.c @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. + * + * Author: + * Wei WANG <wei_wang@realsil.com.cn> + * Roger Tseng <rogerable@realtek.com> + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/rtsx_pci.h> + +#include "rtsx_pcr.h" + +static u8 rts5227_get_ic_version(struct rtsx_pcr *pcr) +{ + u8 val; + + rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val); + return val & 0x0F; +} + +static void rts5227_fill_driving(struct rtsx_pcr *pcr, u8 voltage) +{ + u8 driving_3v3[4][3] = { + {0x13, 0x13, 0x13}, + {0x96, 0x96, 0x96}, + {0x7F, 0x7F, 0x7F}, + {0x96, 0x96, 0x96}, + }; + u8 driving_1v8[4][3] = { + {0x99, 0x99, 0x99}, + {0xAA, 0xAA, 0xAA}, + {0xFE, 0xFE, 0xFE}, + {0xB3, 0xB3, 0xB3}, + }; + u8 (*driving)[3], drive_sel; + + if (voltage == OUTPUT_3V3) { + driving = driving_3v3; + drive_sel = pcr->sd30_drive_sel_3v3; + } else { + driving = driving_1v8; + drive_sel = pcr->sd30_drive_sel_1v8; + } + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL, + 0xFF, driving[drive_sel][0]); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL, + 0xFF, driving[drive_sel][1]); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL, + 0xFF, driving[drive_sel][2]); +} + +static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + u32 reg; + + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); + + if (!rtsx_vendor_setting_valid(reg)) + return; + + pcr->aspm_en = rtsx_reg_to_aspm(reg); + pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg); + pcr->card_drive_sel &= 0x3F; + pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg); + + pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); + if (rtsx_check_mmc_support(reg)) + pcr->extra_caps |= EXTRA_CAPS_NO_MMC; + pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg); + if (rtsx_reg_check_reverse_socket(reg)) + pcr->flags |= PCR_REVERSE_SOCKET; +} + +static void rts5227_init_from_cfg(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + int l1ss; + u32 lval; + struct rtsx_cr_option *option = &pcr->option; + + l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); + if (!l1ss) + return; + + pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); + + if (CHK_PCI_PID(pcr, 0x522A)) { + if (0 == (lval & 0x0F)) + rtsx_pci_enable_oobs_polling(pcr); + else + rtsx_pci_disable_oobs_polling(pcr); + } + + if (lval & PCI_L1SS_CTL1_ASPM_L1_1) + rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); + else + rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN); + + if (lval & PCI_L1SS_CTL1_ASPM_L1_2) + rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); + else + rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN); + + if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) + rtsx_set_dev_flag(pcr, PM_L1_1_EN); + else + rtsx_clear_dev_flag(pcr, PM_L1_1_EN); + + if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) + rtsx_set_dev_flag(pcr, PM_L1_2_EN); + else + rtsx_clear_dev_flag(pcr, PM_L1_2_EN); + + if (option->ltr_en) { + u16 val; + + pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val); + 
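+ /*
+ * Honour LTR only if firmware or the OS has already set the LTR
+ * Mechanism Enable bit in PCIe Device Control 2; otherwise leave
+ * it disabled instead of programming latencies behind its back.
+ */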
if (val & PCI_EXP_DEVCTL2_LTR_EN) { + option->ltr_enabled = true; + option->ltr_active = true; + rtsx_set_ltr_latency(pcr, option->ltr_active_latency); + } else { + option->ltr_enabled = false; + } + } + + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) + option->force_clkreq_0 = false; + else + option->force_clkreq_0 = true; + +} + +static int rts5227_extra_init_hw(struct rtsx_pcr *pcr) +{ + u16 cap; + struct rtsx_cr_option *option = &pcr->option; + + rts5227_init_from_cfg(pcr); + rtsx_pci_init_cmd(pcr); + + /* Configure GPIO as output */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02); + /* Reset ASPM state to default value */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0); + /* Switch LDO3318 source from DV33 to card_3v3 */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01); + /* LED shine disabled, set initial shine cycle period */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02); + /* Configure LTR */ + pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cap); + if (cap & PCI_EXP_DEVCTL2_LTR_EN) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LTR_CTL, 0xFF, 0xA3); + /* Configure OBFF */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG, 0x03, 0x03); + /* Configure driving */ + rts5227_fill_driving(pcr, OUTPUT_3V3); + /* Configure force_clock_req */ + if (pcr->flags & PCR_REVERSE_SOCKET) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x30); + else + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x00); + + if (option->force_clkreq_0) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); + else + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, pcr->reg_pm_ctrl3, 0x10, 0x00); + + return rtsx_pci_send_cmd(pcr, 100); +} + +static int rts5227_optimize_phy(struct rtsx_pcr *pcr) +{ + int err; + + err = rtsx_pci_write_register(pcr, PM_CTRL3, D3_DELINK_MODE_EN, 0x00); + if (err < 0) + return err; + + /* Optimize RX sensitivity */ + return rtsx_pci_write_phy_register(pcr, 0x00, 0xBA42); +} + +static int rts5227_turn_on_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02); +} + +static int rts5227_turn_off_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00); +} + +static int rts5227_enable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08); +} + +static int rts5227_disable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00); +} + +static int rts5227_card_power_on(struct rtsx_pcr *pcr, int card) +{ + int err; + + if (pcr->option.ocp_en) + rtsx_pci_enable_ocp(pcr); + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, + SD_POWER_MASK, SD_PARTIAL_POWER_ON); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, + LDO3318_PWR_MASK, 0x02); + + err = rtsx_pci_send_cmd(pcr, 100); + if (err < 0) + return err; + + /* To avoid too large in-rush current */ + msleep(20); + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, + SD_POWER_MASK, SD_POWER_ON); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, + LDO3318_PWR_MASK, 0x06); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, + SD_OUTPUT_EN, SD_OUTPUT_EN); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, + MS_OUTPUT_EN, MS_OUTPUT_EN); + return 
rtsx_pci_send_cmd(pcr, 100); +} + +static int rts5227_card_power_off(struct rtsx_pcr *pcr, int card) +{ + if (pcr->option.ocp_en) + rtsx_pci_disable_ocp(pcr); + + rtsx_pci_write_register(pcr, CARD_PWR_CTL, SD_POWER_MASK | + PMOS_STRG_MASK, SD_POWER_OFF | PMOS_STRG_400mA); + rtsx_pci_write_register(pcr, PWR_GATE_CTRL, LDO3318_PWR_MASK, 0X00); + + return 0; +} + +static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + int err; + + if (voltage == OUTPUT_3V3) { + err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24); + if (err < 0) + return err; + } else if (voltage == OUTPUT_1V8) { + err = rtsx_pci_write_phy_register(pcr, 0x11, 0x3C02); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C80 | 0x24); + if (err < 0) + return err; + } else { + return -EINVAL; + } + + /* set pad drive */ + rtsx_pci_init_cmd(pcr); + rts5227_fill_driving(pcr, voltage); + return rtsx_pci_send_cmd(pcr, 100); +} + +static const struct pcr_ops rts5227_pcr_ops = { + .fetch_vendor_settings = rts5227_fetch_vendor_settings, + .extra_init_hw = rts5227_extra_init_hw, + .optimize_phy = rts5227_optimize_phy, + .turn_on_led = rts5227_turn_on_led, + .turn_off_led = rts5227_turn_off_led, + .enable_auto_blink = rts5227_enable_auto_blink, + .disable_auto_blink = rts5227_disable_auto_blink, + .card_power_on = rts5227_card_power_on, + .card_power_off = rts5227_card_power_off, + .switch_output_voltage = rts5227_switch_output_voltage, + .cd_deglitch = NULL, + .conv_clk_and_div_n = NULL, +}; + +/* SD Pull Control Enable: + * SD_DAT[3:0] ==> pull up + * SD_CD ==> pull up + * SD_WP ==> pull up + * SD_CMD ==> pull up + * SD_CLK ==> pull down + */ +static const u32 rts5227_sd_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9), + 0, +}; + +/* SD Pull Control Disable: + * SD_DAT[3:0] ==> pull down + * SD_CD ==> pull up + * SD_WP ==> pull down + * SD_CMD ==> pull down + * SD_CLK ==> pull down + */ +static const u32 rts5227_sd_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5), + 0, +}; + +/* MS Pull Control Enable: + * MS CD ==> pull up + * others ==> pull down + */ +static const u32 rts5227_ms_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15), + 0, +}; + +/* MS Pull Control Disable: + * MS CD ==> pull up + * others ==> pull down + */ +static const u32 rts5227_ms_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15), + 0, +}; + +void rts5227_init_params(struct rtsx_pcr *pcr) +{ + pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104; + pcr->num_slots = 2; + pcr->ops = &rts5227_pcr_ops; + + pcr->flags = 0; + pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT; + pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B; + pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B; + pcr->aspm_en = ASPM_L1_EN; + pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15); + pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 7, 7); + + pcr->ic_version = rts5227_get_ic_version(pcr); + pcr->sd_pull_ctl_enable_tbl = rts5227_sd_pull_ctl_enable_tbl; + pcr->sd_pull_ctl_disable_tbl = rts5227_sd_pull_ctl_disable_tbl; + pcr->ms_pull_ctl_enable_tbl = rts5227_ms_pull_ctl_enable_tbl; + pcr->ms_pull_ctl_disable_tbl = rts5227_ms_pull_ctl_disable_tbl; + + pcr->reg_pm_ctrl3 = PM_CTRL3; +} + +static int rts522a_optimize_phy(struct rtsx_pcr *pcr) +{ + int err; + + err = rtsx_pci_write_register(pcr, RTS522A_PM_CTRL3, D3_DELINK_MODE_EN, + 0x00); + 
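+ /* The extra PHY tuning writes below are only needed on IC_VER_A parts. */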
if (err < 0)
+ return err;
+
+ if (is_version(pcr, 0x522A, IC_VER_A)) {
+ err = rtsx_pci_write_phy_register(pcr, PHY_RCR2,
+ PHY_RCR2_INIT_27S);
+ if (err)
+ return err;
+
+ rtsx_pci_write_phy_register(pcr, PHY_RCR1, PHY_RCR1_INIT_27S);
+ rtsx_pci_write_phy_register(pcr, PHY_FLD0, PHY_FLD0_INIT_27S);
+ rtsx_pci_write_phy_register(pcr, PHY_FLD3, PHY_FLD3_INIT_27S);
+ rtsx_pci_write_phy_register(pcr, PHY_FLD4, PHY_FLD4_INIT_27S);
+ }
+
+ return 0;
+}
+
+static int rts522a_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ rts5227_extra_init_hw(pcr);
+
+ /* Power down OCP to save power while no card is present */
+ if (!pcr->card_exist)
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+
+ rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, FUNC_FORCE_UPME_XMT_DBG,
+ FUNC_FORCE_UPME_XMT_DBG);
+ rtsx_pci_write_register(pcr, PCLK_CTL, 0x04, 0x04);
+ rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
+ rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 0xFF, 0x11);
+
+ return 0;
+}
+
+static int rts522a_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+
+ if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x57E4);
+ if (err < 0)
+ return err;
+ } else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_phy_register(pcr, 0x11, 0x3C02);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x54A4);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rtsx_pci_init_cmd(pcr);
+ rts5227_fill_driving(pcr, voltage);
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static void rts522a_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* l1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+/* The rts522a operations are mainly derived from rts5227; only
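+ * extra_init_hw, optimize_phy, switch_output_voltage and the added
+ * set_l1off_cfg_sub_d0 hook are overridden, all other callbacks are
+ * reused as-is.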
+ */ +static const struct pcr_ops rts522a_pcr_ops = { + .fetch_vendor_settings = rts5227_fetch_vendor_settings, + .extra_init_hw = rts522a_extra_init_hw, + .optimize_phy = rts522a_optimize_phy, + .turn_on_led = rts5227_turn_on_led, + .turn_off_led = rts5227_turn_off_led, + .enable_auto_blink = rts5227_enable_auto_blink, + .disable_auto_blink = rts5227_disable_auto_blink, + .card_power_on = rts5227_card_power_on, + .card_power_off = rts5227_card_power_off, + .switch_output_voltage = rts522a_switch_output_voltage, + .cd_deglitch = NULL, + .conv_clk_and_div_n = NULL, + .set_l1off_cfg_sub_d0 = rts522a_set_l1off_cfg_sub_d0, +}; + +void rts522a_init_params(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + rts5227_init_params(pcr); + pcr->ops = &rts522a_pcr_ops; + pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11); + pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3; + + option->dev_flags = LTR_L1SS_PWR_GATE_EN; + option->ltr_en = true; + + /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */ + option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF; + option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF; + option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF; + option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF; + option->ltr_l1off_sspwrgate = 0x7F; + option->ltr_l1off_snooze_sspwrgate = 0x78; + + pcr->option.ocp_en = 1; + if (pcr->option.ocp_en) + pcr->hw_param.interrupt_en |= SD_OC_INT_EN; + pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M; + pcr->option.sd_800mA_ocp_thd = RTS522A_OCP_THD_800; + +} diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c new file mode 100644 index 000000000..781a86def --- /dev/null +++ b/drivers/misc/cardreader/rts5228.c @@ -0,0 +1,746 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved. 
+ * + * Author: + * Ricky WU <ricky_wu@realtek.com> + * Rui FENG <rui_feng@realsil.com.cn> + * Wei WANG <wei_wang@realsil.com.cn> + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/rtsx_pci.h> + +#include "rts5228.h" +#include "rtsx_pcr.h" + +static u8 rts5228_get_ic_version(struct rtsx_pcr *pcr) +{ + u8 val; + + rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val); + return val & IC_VERSION_MASK; +} + +static void rts5228_fill_driving(struct rtsx_pcr *pcr, u8 voltage) +{ + u8 driving_3v3[4][3] = { + {0x13, 0x13, 0x13}, + {0x96, 0x96, 0x96}, + {0x7F, 0x7F, 0x7F}, + {0x96, 0x96, 0x96}, + }; + u8 driving_1v8[4][3] = { + {0x99, 0x99, 0x99}, + {0xB5, 0xB5, 0xB5}, + {0xE6, 0x7E, 0xFE}, + {0x6B, 0x6B, 0x6B}, + }; + u8 (*driving)[3], drive_sel; + + if (voltage == OUTPUT_3V3) { + driving = driving_3v3; + drive_sel = pcr->sd30_drive_sel_3v3; + } else { + driving = driving_1v8; + drive_sel = pcr->sd30_drive_sel_1v8; + } + + rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL, + 0xFF, driving[drive_sel][0]); + + rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL, + 0xFF, driving[drive_sel][1]); + + rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL, + 0xFF, driving[drive_sel][2]); +} + +static void rtsx5228_fetch_vendor_settings(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + u32 reg; + + /* 0x724~0x727 */ + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); + + if (!rtsx_vendor_setting_valid(reg)) { + pcr_dbg(pcr, "skip fetch vendor setting\n"); + return; + } + pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg); + pcr->aspm_en = rtsx_reg_to_aspm(reg); + + /* 0x814~0x817 */ + pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); + + pcr->rtd3_en = rtsx_reg_to_rtd3(reg); + if (rtsx_check_mmc_support(reg)) + pcr->extra_caps |= EXTRA_CAPS_NO_MMC; + pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg); + if (rtsx_reg_check_reverse_socket(reg)) + pcr->flags |= PCR_REVERSE_SOCKET; +} + +static int rts5228_optimize_phy(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_phy_register(pcr, 0x07, 0x8F40); +} + +static void rts5228_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) +{ + /* Set relink_time to 0 */ + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, + RELINK_TIME_MASK, 0); + + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, + D3_DELINK_MODE_EN, D3_DELINK_MODE_EN); + + rtsx_pci_write_register(pcr, FPDCTL, + SSC_POWER_DOWN, SSC_POWER_DOWN); +} + +static int rts5228_enable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, + LED_SHINE_MASK, LED_SHINE_EN); +} + +static int rts5228_disable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, + LED_SHINE_MASK, LED_SHINE_DISABLE); +} + +static int rts5228_turn_on_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, + 0x02, 0x02); +} + +static int rts5228_turn_off_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, + 0x02, 0x00); +} + +/* SD Pull Control Enable: + * SD_DAT[3:0] ==> pull up + * SD_CD ==> pull up + * SD_WP ==> pull up + * SD_CMD ==> pull up + * SD_CLK ==> pull down + */ +static const u32 rts5228_sd_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9), + 0, +}; + +/* SD 
Pull Control Disable: + * SD_DAT[3:0] ==> pull down + * SD_CD ==> pull up + * SD_WP ==> pull down + * SD_CMD ==> pull down + * SD_CLK ==> pull down + */ +static const u32 rts5228_sd_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5), + 0, +}; + +static int rts5228_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr) +{ + rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK + | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST); + rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF, + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0); + + return 0; +} + +static int rts5228_card_power_on(struct rtsx_pcr *pcr, int card) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) + rtsx_pci_enable_ocp(pcr); + + rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0, + CFG_SD_POW_AUTO_PD, CFG_SD_POW_AUTO_PD); + + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, + RTS5228_LDO1_TUNE_MASK, RTS5228_LDO1_33); + + rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL, + RTS5228_LDO1_POWERON_MASK, RTS5228_LDO1_SOFTSTART); + mdelay(2); + rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL, + RTS5228_LDO1_POWERON_MASK, RTS5228_LDO1_FULLON); + + + rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL, + RTS5228_LDO3318_POWERON, RTS5228_LDO3318_POWERON); + + msleep(20); + + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN); + + /* Initialize SD_CFG1 register */ + rtsx_pci_write_register(pcr, SD_CFG1, 0xFF, + SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT); + + rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL, + 0xFF, SD20_RX_POS_EDGE); + rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0); + rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR, + SD_STOP | SD_CLR_ERR); + + /* Reset SD_CFG3 register */ + rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0); + rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG, + SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 | + SD30_CLK_STOP_CFG0, 0); + + if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 || + pcr->extra_caps & EXTRA_CAPS_SD_SDR104) + rts5228_sd_set_sample_push_timing_sd30(pcr); + + return 0; +} + +static int rts5228_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + int err; + u16 val = 0; + + rtsx_pci_write_register(pcr, RTS5228_CARD_PWR_CTL, + RTS5228_PUPDC, RTS5228_PUPDC); + + switch (voltage) { + case OUTPUT_3V3: + rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val); + val |= PHY_TUNE_SDBUS_33; + err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val); + if (err < 0) + return err; + + rtsx_pci_write_register(pcr, RTS5228_DV3318_CFG, + RTS5228_DV3318_TUNE_MASK, RTS5228_DV3318_33); + rtsx_pci_write_register(pcr, SD_PAD_CTL, + SD_IO_USING_1V8, 0); + break; + case OUTPUT_1V8: + rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val); + val &= ~PHY_TUNE_SDBUS_33; + err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val); + if (err < 0) + return err; + + rtsx_pci_write_register(pcr, RTS5228_DV3318_CFG, + RTS5228_DV3318_TUNE_MASK, RTS5228_DV3318_18); + rtsx_pci_write_register(pcr, SD_PAD_CTL, + SD_IO_USING_1V8, SD_IO_USING_1V8); + break; + default: + return -EINVAL; + } + + /* set pad drive */ + rts5228_fill_driving(pcr, voltage); + + return 0; +} + +static void rts5228_stop_cmd(struct rtsx_pcr *pcr) +{ + rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD); + rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA); + rtsx_pci_write_register(pcr, 
RTS5260_DMA_RST_CTL_0, + RTS5260_DMA_RST | RTS5260_ADMA3_RST, + RTS5260_DMA_RST | RTS5260_ADMA3_RST); + rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH); +} + +static void rts5228_card_before_power_off(struct rtsx_pcr *pcr) +{ + rts5228_stop_cmd(pcr); + rts5228_switch_output_voltage(pcr, OUTPUT_3V3); +} + +static void rts5228_enable_ocp(struct rtsx_pcr *pcr) +{ + u8 val = 0; + + val = SD_OCP_INT_EN | SD_DETECT_EN; + rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val); + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0, + RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, + RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN); +} + +static void rts5228_disable_ocp(struct rtsx_pcr *pcr) +{ + u8 mask = 0; + + mask = SD_OCP_INT_EN | SD_DETECT_EN; + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0, + RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, 0); +} + +static int rts5228_card_power_off(struct rtsx_pcr *pcr, int card) +{ + int err = 0; + + rts5228_card_before_power_off(pcr); + err = rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL, + RTS5228_LDO_POWERON_MASK, 0); + rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0, CFG_SD_POW_AUTO_PD, 0); + + if (pcr->option.ocp_en) + rtsx_pci_disable_ocp(pcr); + + return err; +} + +static void rts5228_init_ocp(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) { + u8 mask, val; + + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0, + RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, + RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN); + + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0, + RTS5228_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd); + + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0, + RTS5228_LDO1_OCP_LMT_THD_MASK, + RTS5228_LDO1_LMT_THD_1500); + + rtsx_pci_read_register(pcr, RTS5228_LDO1_CFG0, &val); + + mask = SD_OCP_GLITCH_MASK; + val = pcr->hw_param.ocp_glitch; + rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val); + + rts5228_enable_ocp(pcr); + + } else { + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0, + RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, 0); + } +} + +static void rts5228_clear_ocpstat(struct rtsx_pcr *pcr) +{ + u8 mask = 0; + u8 val = 0; + + mask = SD_OCP_INT_CLR | SD_OC_CLR; + val = SD_OCP_INT_CLR | SD_OC_CLR; + + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val); + + udelay(1000); + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + +} + +static void rts5228_process_ocp(struct rtsx_pcr *pcr) +{ + if (!pcr->option.ocp_en) + return; + + rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat); + + if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) { + rts5228_clear_ocpstat(pcr); + rts5228_card_power_off(pcr, RTSX_SD_CARD); + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); + pcr->ocp_stat = 0; + } + +} + +static void rts5228_init_from_cfg(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + int l1ss; + u32 lval; + struct rtsx_cr_option *option = &pcr->option; + + l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); + if (!l1ss) + return; + + pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); + + if (0 == (lval & 0x0F)) + rtsx_pci_enable_oobs_polling(pcr); + else + rtsx_pci_disable_oobs_polling(pcr); + + if (lval & PCI_L1SS_CTL1_ASPM_L1_1) + rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); + else + rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN); + + if (lval & PCI_L1SS_CTL1_ASPM_L1_2) + rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); + else + rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN); + + if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) + rtsx_set_dev_flag(pcr, PM_L1_1_EN); 
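+ /*
+ * As with the ASPM bits above, mirror each PCI-PM L1 substate
+ * enable bit into a device flag so later code can test it with
+ * rtsx_check_dev_flag().
+ */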
+ else + rtsx_clear_dev_flag(pcr, PM_L1_1_EN); + + if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) + rtsx_set_dev_flag(pcr, PM_L1_2_EN); + else + rtsx_clear_dev_flag(pcr, PM_L1_2_EN); + + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0); + if (option->ltr_en) { + u16 val; + + pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val); + if (val & PCI_EXP_DEVCTL2_LTR_EN) { + option->ltr_enabled = true; + option->ltr_active = true; + rtsx_set_ltr_latency(pcr, option->ltr_active_latency); + } else { + option->ltr_enabled = false; + } + } + + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) + option->force_clkreq_0 = false; + else + option->force_clkreq_0 = true; +} + +static int rts5228_extra_init_hw(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1, + CD_RESUME_EN_MASK, CD_RESUME_EN_MASK); + + rts5228_init_from_cfg(pcr); + + rtsx_pci_write_register(pcr, L1SUB_CONFIG1, + AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE); + rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0); + + rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, + FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG); + + rtsx_pci_write_register(pcr, PCLK_CTL, + PCLK_MODE_SEL, PCLK_MODE_SEL); + + rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0); + rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, CLK_PM_EN, CLK_PM_EN); + + /* LED shine disabled, set initial shine cycle period */ + rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02); + + /* Configure driving */ + rts5228_fill_driving(pcr, OUTPUT_3V3); + + if (pcr->flags & PCR_REVERSE_SOCKET) + rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30); + else + rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00); + + /* + * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced + * to drive low, and we forcibly request clock. 
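+ * Otherwise CLKREQ# is left de-asserted (high) so the link is free
+ * to enter the L1 substates enabled above.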
+ */ + if (option->force_clkreq_0) + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); + else + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + + rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB); + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00); + rtsx_pci_write_register(pcr, RTS5228_REG_PME_FORCE_CTL, + FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL); + + return 0; +} + +static void rts5228_enable_aspm(struct rtsx_pcr *pcr, bool enable) +{ + u8 mask, val; + + if (pcr->aspm_enabled == enable) + return; + + mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + val |= (pcr->aspm_en & 0x02); + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val); + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en); + pcr->aspm_enabled = enable; +} + +static void rts5228_disable_aspm(struct rtsx_pcr *pcr, bool enable) +{ + u8 mask, val; + + if (pcr->aspm_enabled == enable) + return; + + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, 0); + mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val); + rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); + mdelay(10); + pcr->aspm_enabled = enable; +} + +static void rts5228_set_aspm(struct rtsx_pcr *pcr, bool enable) +{ + if (enable) + rts5228_enable_aspm(pcr, true); + else + rts5228_disable_aspm(pcr, false); +} + +static void rts5228_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active) +{ + struct rtsx_cr_option *option = &pcr->option; + int aspm_L1_1, aspm_L1_2; + u8 val = 0; + + aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN); + aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN); + + if (active) { + /* run, latency: 60us */ + if (aspm_L1_1) + val = option->ltr_l1off_snooze_sspwrgate; + } else { + /* l1off, latency: 300us */ + if (aspm_L1_2) + val = option->ltr_l1off_sspwrgate; + } + + rtsx_set_l1off_sub(pcr, val); +} + +static const struct pcr_ops rts5228_pcr_ops = { + .fetch_vendor_settings = rtsx5228_fetch_vendor_settings, + .turn_on_led = rts5228_turn_on_led, + .turn_off_led = rts5228_turn_off_led, + .extra_init_hw = rts5228_extra_init_hw, + .enable_auto_blink = rts5228_enable_auto_blink, + .disable_auto_blink = rts5228_disable_auto_blink, + .card_power_on = rts5228_card_power_on, + .card_power_off = rts5228_card_power_off, + .switch_output_voltage = rts5228_switch_output_voltage, + .force_power_down = rts5228_force_power_down, + .stop_cmd = rts5228_stop_cmd, + .set_aspm = rts5228_set_aspm, + .set_l1off_cfg_sub_d0 = rts5228_set_l1off_cfg_sub_d0, + .enable_ocp = rts5228_enable_ocp, + .disable_ocp = rts5228_disable_ocp, + .init_ocp = rts5228_init_ocp, + .process_ocp = rts5228_process_ocp, + .clear_ocpstat = rts5228_clear_ocpstat, + .optimize_phy = rts5228_optimize_phy, +}; + + +static inline u8 double_ssc_depth(u8 depth) +{ + return ((depth > 1) ? 
(depth - 1) : depth); +} + +int rts5228_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk) +{ + int err, clk; + u16 n; + u8 clk_divider, mcu_cnt, div; + static const u8 depth[] = { + [RTSX_SSC_DEPTH_4M] = RTS5228_SSC_DEPTH_4M, + [RTSX_SSC_DEPTH_2M] = RTS5228_SSC_DEPTH_2M, + [RTSX_SSC_DEPTH_1M] = RTS5228_SSC_DEPTH_1M, + [RTSX_SSC_DEPTH_500K] = RTS5228_SSC_DEPTH_512K, + }; + + if (initial_mode) { + /* We use 250k(around) here, in initial stage */ + clk_divider = SD_CLK_DIVIDE_128; + card_clock = 30000000; + } else { + clk_divider = SD_CLK_DIVIDE_0; + } + err = rtsx_pci_write_register(pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, clk_divider); + if (err < 0) + return err; + + card_clock /= 1000000; + pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock); + + clk = card_clock; + if (!initial_mode && double_clk) + clk = card_clock * 2; + pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n", + clk, pcr->cur_clock); + + if (clk == pcr->cur_clock) + return 0; + + if (pcr->ops->conv_clk_and_div_n) + n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N); + else + n = clk - 4; + if ((clk <= 4) || (n > 396)) + return -EINVAL; + + mcu_cnt = 125/clk + 3; + if (mcu_cnt > 15) + mcu_cnt = 15; + + div = CLK_DIV_1; + while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) { + if (pcr->ops->conv_clk_and_div_n) { + int dbl_clk = pcr->ops->conv_clk_and_div_n(n, + DIV_N_TO_CLK) * 2; + n = pcr->ops->conv_clk_and_div_n(dbl_clk, + CLK_TO_DIV_N); + } else { + n = (n + 4) * 2 - 4; + } + div++; + } + + n = (n / 2) - 1; + pcr_dbg(pcr, "n = %d, div = %d\n", n, div); + + ssc_depth = depth[ssc_depth]; + if (double_clk) + ssc_depth = double_ssc_depth(ssc_depth); + + if (ssc_depth) { + if (div == CLK_DIV_2) { + if (ssc_depth > 1) + ssc_depth -= 1; + else + ssc_depth = RTS5228_SSC_DEPTH_8M; + } else if (div == CLK_DIV_4) { + if (ssc_depth > 2) + ssc_depth -= 2; + else + ssc_depth = RTS5228_SSC_DEPTH_8M; + } else if (div == CLK_DIV_8) { + if (ssc_depth > 3) + ssc_depth -= 3; + else + ssc_depth = RTS5228_SSC_DEPTH_8M; + } + } else { + ssc_depth = 0; + } + pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth); + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, + CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, + 0xFF, (div << 4) | mcu_cnt); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, + SSC_DEPTH_MASK, ssc_depth); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB); + if (vpclk) { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL, + PHASE_NOT_RESET, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, PHASE_NOT_RESET); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL, + PHASE_NOT_RESET, PHASE_NOT_RESET); + } + + err = rtsx_pci_send_cmd(pcr, 2000); + if (err < 0) + return err; + + /* Wait SSC clock stable */ + udelay(SSC_CLOCK_STABLE_WAIT); + err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0); + if (err < 0) + return err; + + pcr->cur_clock = clk; + return 0; + +} + +void rts5228_init_params(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + struct rtsx_hw_param *hw_param = &pcr->hw_param; + + pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104; + pcr->num_slots = 1; + pcr->ops = &rts5228_pcr_ops; + + pcr->flags = 0; + 
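+ /*
+ * Worked example for rts5228_pci_switch_clock() above, assuming the
+ * stock MIN_DIV_N_PCR value from rtsx_pcr.h so the divider loop is
+ * not taken: card_clock = 100 MHz, initial_mode = false and
+ * double_clk = false give clk = 100, n = clk - 4 = 96,
+ * mcu_cnt = 125/100 + 3 = 4, div = CLK_DIV_1, and SSC_DIV_N_0 is
+ * programmed with n = (96 / 2) - 1 = 47.
+ */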
pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(28, 27, 11);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5228_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5228_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5228_sd_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS5228_AUTOLOAD_CFG3;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = 0x7F;
+ option->ltr_l1off_snooze_sspwrgate = 0x78;
+
+ option->ocp_en = 1;
+ hw_param->interrupt_en |= SD_OC_INT_EN;
+ hw_param->ocp_glitch = SD_OCP_GLITCH_800U;
+ option->sd_800mA_ocp_thd = RTS5228_LDO1_OCP_THD_930;
+}
diff --git a/drivers/misc/cardreader/rts5228.h b/drivers/misc/cardreader/rts5228.h
new file mode 100644
index 000000000..6a872246a
--- /dev/null
+++ b/drivers/misc/cardreader/rts5228.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Ricky WU <ricky_wu@realtek.com>
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+#ifndef RTS5228_H
+#define RTS5228_H
+
+
+#define RTS5228_AUTOLOAD_CFG0 0xFF7B
+#define RTS5228_AUTOLOAD_CFG1 0xFF7C
+#define RTS5228_AUTOLOAD_CFG2 0xFF7D
+#define RTS5228_AUTOLOAD_CFG3 0xFF7E
+#define RTS5228_AUTOLOAD_CFG4 0xFF7F
+
+#define RTS5228_REG_VREF 0xFE97
+#define RTS5228_PWD_SUSPND_EN (1 << 4)
+
+#define RTS5228_PAD_H3L1 0xFF79
+#define PAD_GPIO_H3L1 (1 << 3)
+
+/* SSC_CTL2 0xFC12 */
+#define RTS5228_SSC_DEPTH_MASK 0x07
+#define RTS5228_SSC_DEPTH_DISABLE 0x00
+#define RTS5228_SSC_DEPTH_8M 0x01
+#define RTS5228_SSC_DEPTH_4M 0x02
+#define RTS5228_SSC_DEPTH_2M 0x03
+#define RTS5228_SSC_DEPTH_1M 0x04
+#define RTS5228_SSC_DEPTH_512K 0x05
+#define RTS5228_SSC_DEPTH_256K 0x06
+#define RTS5228_SSC_DEPTH_128K 0x07
+
+/* DMACTL 0xFE2C */
+#define RTS5228_DMA_PACK_SIZE_MASK 0xF0
+
+#define RTS5228_REG_LDO12_CFG 0xFF6E
+#define RTS5228_LDO12_VO_TUNE_MASK (0x07<<1)
+#define RTS5228_LDO12_100 (0x00<<1)
+#define RTS5228_LDO12_105 (0x01<<1)
+#define RTS5228_LDO12_110 (0x02<<1)
+#define RTS5228_LDO12_115 (0x03<<1)
+#define RTS5228_LDO12_120 (0x04<<1)
+#define RTS5228_LDO12_125 (0x05<<1)
+#define RTS5228_LDO12_130 (0x06<<1)
+#define RTS5228_LDO12_135 (0x07<<1)
+#define RTS5228_REG_PWD_LDO12 (0x01<<0)
+
+#define RTS5228_REG_LDO12_L12 0xFF6F
+#define RTS5228_LDO12_L12_MASK (0x07<<4)
+#define RTS5228_LDO12_L12_120 (0x04<<4)
+
+/* LDO control register */
+#define RTS5228_CARD_PWR_CTL 0xFD50
+#define RTS5228_PUPDC (0x01<<5)
+
+#define RTS5228_LDO1233318_POW_CTL 0xFF70
+#define RTS5228_LDO3318_POWERON (0x01<<3)
+#define RTS5228_LDO1_POWEROFF (0x00<<0)
+#define RTS5228_LDO1_SOFTSTART (0x01<<0)
+#define RTS5228_LDO1_FULLON (0x03<<0)
+#define RTS5228_LDO1_POWERON_MASK (0x03<<0)
+#define RTS5228_LDO_POWERON_MASK (0x0F<<0)
+
+#define RTS5228_DV3318_CFG 0xFF71
+#define RTS5228_DV3318_TUNE_MASK (0x07<<4)
+#define RTS5228_DV3318_17 (0x00<<4)
+#define RTS5228_DV3318_1V75 (0x01<<4)
+#define 
RTS5228_DV3318_18 (0x02<<4) +#define RTS5228_DV3318_1V85 (0x03<<4) +#define RTS5228_DV3318_19 (0x04<<4) +#define RTS5228_DV3318_33 (0x07<<4) +#define RTS5228_DV3318_SR_MASK (0x03<<2) +#define RTS5228_DV3318_SR_0 (0x00<<2) +#define RTS5228_DV3318_SR_250 (0x01<<2) +#define RTS5228_DV3318_SR_500 (0x02<<2) +#define RTS5228_DV3318_SR_1000 (0x03<<2) + +#define RTS5228_LDO1_CFG0 0xFF72 +#define RTS5228_LDO1_OCP_THD_MASK (0x07<<5) +#define RTS5228_LDO1_OCP_EN (0x01<<4) +#define RTS5228_LDO1_OCP_LMT_THD_MASK (0x03<<2) +#define RTS5228_LDO1_OCP_LMT_EN (0x01<<1) + +#define RTS5228_LDO1_OCP_THD_730 (0x00<<5) +#define RTS5228_LDO1_OCP_THD_780 (0x01<<5) +#define RTS5228_LDO1_OCP_THD_860 (0x02<<5) +#define RTS5228_LDO1_OCP_THD_930 (0x03<<5) +#define RTS5228_LDO1_OCP_THD_1000 (0x04<<5) +#define RTS5228_LDO1_OCP_THD_1070 (0x05<<5) +#define RTS5228_LDO1_OCP_THD_1140 (0x06<<5) +#define RTS5228_LDO1_OCP_THD_1220 (0x07<<5) + +#define RTS5228_LDO1_LMT_THD_450 (0x00<<2) +#define RTS5228_LDO1_LMT_THD_1000 (0x01<<2) +#define RTS5228_LDO1_LMT_THD_1500 (0x02<<2) +#define RTS5228_LDO1_LMT_THD_2000 (0x03<<2) + +#define RTS5228_LDO1_CFG1 0xFF73 +#define RTS5228_LDO1_SR_TIME_MASK (0x03<<6) +#define RTS5228_LDO1_SR_0_0 (0x00<<6) +#define RTS5228_LDO1_SR_0_25 (0x01<<6) +#define RTS5228_LDO1_SR_0_5 (0x02<<6) +#define RTS5228_LDO1_SR_1_0 (0x03<<6) +#define RTS5228_LDO1_TUNE_MASK (0x07<<1) +#define RTS5228_LDO1_18 (0x05<<1) +#define RTS5228_LDO1_33 (0x07<<1) +#define RTS5228_LDO1_PWD_MASK (0x01<<0) + +#define RTS5228_AUXCLK_GAT_CTL 0xFF74 + +#define RTS5228_REG_RREF_CTL_0 0xFF75 +#define RTS5228_FORCE_RREF_EXTL (0x01<<7) +#define RTS5228_REG_BG33_MASK (0x07<<0) +#define RTS5228_RREF_12_1V (0x04<<0) +#define RTS5228_RREF_12_3V (0x05<<0) + +#define RTS5228_REG_RREF_CTL_1 0xFF76 + +#define RTS5228_REG_RREF_CTL_2 0xFF77 +#define RTS5228_TEST_INTL_RREF (0x01<<7) +#define RTS5228_DGLCH_TIME_MASK (0x03<<5) +#define RTS5228_DGLCH_TIME_50 (0x00<<5) +#define RTS5228_DGLCH_TIME_75 (0x01<<5) +#define RTS5228_DGLCH_TIME_100 (0x02<<5) +#define RTS5228_DGLCH_TIME_125 (0x03<<5) +#define RTS5228_REG_REXT_TUNE_MASK (0x1F<<0) + +#define RTS5228_REG_PME_FORCE_CTL 0xFF78 +#define FORCE_PM_CONTROL 0x20 +#define FORCE_PM_VALUE 0x10 + + +/* Single LUN, support SD */ +#define DEFAULT_SINGLE 0 +#define SD_LUN 1 + + +/* For Change_FPGA_SSCClock Function */ +#define MULTIPLY_BY_1 0x00 +#define MULTIPLY_BY_2 0x01 +#define MULTIPLY_BY_3 0x02 +#define MULTIPLY_BY_4 0x03 +#define MULTIPLY_BY_5 0x04 +#define MULTIPLY_BY_6 0x05 +#define MULTIPLY_BY_7 0x06 +#define MULTIPLY_BY_8 0x07 +#define MULTIPLY_BY_9 0x08 +#define MULTIPLY_BY_10 0x09 + +#define DIVIDE_BY_2 0x01 +#define DIVIDE_BY_3 0x02 +#define DIVIDE_BY_4 0x03 +#define DIVIDE_BY_5 0x04 +#define DIVIDE_BY_6 0x05 +#define DIVIDE_BY_7 0x06 +#define DIVIDE_BY_8 0x07 +#define DIVIDE_BY_9 0x08 +#define DIVIDE_BY_10 0x09 + +int rts5228_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); + +#endif /* RTS5228_H */ diff --git a/drivers/misc/cardreader/rts5229.c b/drivers/misc/cardreader/rts5229.c new file mode 100644 index 000000000..89e6f124c --- /dev/null +++ b/drivers/misc/cardreader/rts5229.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. 
+ * + * Author: + * Wei WANG <wei_wang@realsil.com.cn> + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/rtsx_pci.h> + +#include "rtsx_pcr.h" + +static u8 rts5229_get_ic_version(struct rtsx_pcr *pcr) +{ + u8 val; + + rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val); + return val & 0x0F; +} + +static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + u32 reg; + + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); + + if (!rtsx_vendor_setting_valid(reg)) + return; + + pcr->aspm_en = rtsx_reg_to_aspm(reg); + pcr->sd30_drive_sel_1v8 = + map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg)); + pcr->card_drive_sel &= 0x3F; + pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg); + + pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); + pcr->sd30_drive_sel_3v3 = + map_sd_drive(rtsx_reg_to_sd30_drive_sel_3v3(reg)); +} + +static void rts5229_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) +{ + rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03); +} + +static int rts5229_extra_init_hw(struct rtsx_pcr *pcr) +{ + rtsx_pci_init_cmd(pcr); + + /* Configure GPIO as output */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02); + /* Reset ASPM state to default value */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0); + /* Force CLKREQ# PIN to drive 0 to request clock */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08); + /* Switch LDO3318 source from DV33 to card_3v3 */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01); + /* LED shine disabled, set initial shine cycle period */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02); + /* Configure driving */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL, + 0xFF, pcr->sd30_drive_sel_3v3); + + return rtsx_pci_send_cmd(pcr, 100); +} + +static int rts5229_optimize_phy(struct rtsx_pcr *pcr) +{ + /* Optimize RX sensitivity */ + return rtsx_pci_write_phy_register(pcr, 0x00, 0xBA42); +} + +static int rts5229_turn_on_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02); +} + +static int rts5229_turn_off_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00); +} + +static int rts5229_enable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08); +} + +static int rts5229_disable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00); +} + +static int rts5229_card_power_on(struct rtsx_pcr *pcr, int card) +{ + int err; + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, + SD_POWER_MASK, SD_PARTIAL_POWER_ON); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, + LDO3318_PWR_MASK, 0x02); + err = rtsx_pci_send_cmd(pcr, 100); + if (err < 0) + return err; + + /* To avoid too large in-rush current */ + udelay(150); + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, + SD_POWER_MASK, SD_POWER_ON); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, + LDO3318_PWR_MASK, 0x06); + return rtsx_pci_send_cmd(pcr, 100); +} + +static int rts5229_card_power_off(struct rtsx_pcr *pcr, int card) +{ + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, + SD_POWER_MASK | PMOS_STRG_MASK, + SD_POWER_OFF | PMOS_STRG_400mA); + 
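+ /* In the same batch, gate LDO3318 off once the SD rail is cut. */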
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, + LDO3318_PWR_MASK, 0x00); + return rtsx_pci_send_cmd(pcr, 100); +} + +static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + int err; + + if (voltage == OUTPUT_3V3) { + err = rtsx_pci_write_register(pcr, + SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24); + if (err < 0) + return err; + } else if (voltage == OUTPUT_1V8) { + err = rtsx_pci_write_register(pcr, + SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24); + if (err < 0) + return err; + } else { + return -EINVAL; + } + + return 0; +} + +static const struct pcr_ops rts5229_pcr_ops = { + .fetch_vendor_settings = rts5229_fetch_vendor_settings, + .extra_init_hw = rts5229_extra_init_hw, + .optimize_phy = rts5229_optimize_phy, + .turn_on_led = rts5229_turn_on_led, + .turn_off_led = rts5229_turn_off_led, + .enable_auto_blink = rts5229_enable_auto_blink, + .disable_auto_blink = rts5229_disable_auto_blink, + .card_power_on = rts5229_card_power_on, + .card_power_off = rts5229_card_power_off, + .switch_output_voltage = rts5229_switch_output_voltage, + .cd_deglitch = NULL, + .conv_clk_and_div_n = NULL, + .force_power_down = rts5229_force_power_down, +}; + +/* SD Pull Control Enable: + * SD_DAT[3:0] ==> pull up + * SD_CD ==> pull up + * SD_WP ==> pull up + * SD_CMD ==> pull up + * SD_CLK ==> pull down + */ +static const u32 rts5229_sd_pull_ctl_enable_tbl1[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9), + 0, +}; + +/* For RTS5229 version C */ +static const u32 rts5229_sd_pull_ctl_enable_tbl2[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD9), + 0, +}; + +/* SD Pull Control Disable: + * SD_DAT[3:0] ==> pull down + * SD_CD ==> pull up + * SD_WP ==> pull down + * SD_CMD ==> pull down + * SD_CLK ==> pull down + */ +static const u32 rts5229_sd_pull_ctl_disable_tbl1[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5), + 0, +}; + +/* For RTS5229 version C */ +static const u32 rts5229_sd_pull_ctl_disable_tbl2[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE5), + 0, +}; + +/* MS Pull Control Enable: + * MS CD ==> pull up + * others ==> pull down + */ +static const u32 rts5229_ms_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15), + 0, +}; + +/* MS Pull Control Disable: + * MS CD ==> pull up + * others ==> pull down + */ +static const u32 rts5229_ms_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15), + 0, +}; + +void rts5229_init_params(struct rtsx_pcr *pcr) +{ + pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104; + pcr->num_slots = 2; + pcr->ops = &rts5229_pcr_ops; + + pcr->flags = 0; + pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT; + pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B; + pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D; + pcr->aspm_en = ASPM_L1_EN; + pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15); + pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 6, 6); + + pcr->ic_version = rts5229_get_ic_version(pcr); + if (pcr->ic_version == IC_VER_C) { + pcr->sd_pull_ctl_enable_tbl = rts5229_sd_pull_ctl_enable_tbl2; + pcr->sd_pull_ctl_disable_tbl = rts5229_sd_pull_ctl_disable_tbl2; + } else { + pcr->sd_pull_ctl_enable_tbl = rts5229_sd_pull_ctl_enable_tbl1; + 
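+ /*
+ * Only the CARD_PULL_CTL3 values differ between the version-C
+ * (tbl2) and the default (tbl1) tables above.
+ */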
pcr->sd_pull_ctl_disable_tbl = rts5229_sd_pull_ctl_disable_tbl1; + } + pcr->ms_pull_ctl_enable_tbl = rts5229_ms_pull_ctl_enable_tbl; + pcr->ms_pull_ctl_disable_tbl = rts5229_ms_pull_ctl_disable_tbl; +} diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c new file mode 100644 index 000000000..b85279f1f --- /dev/null +++ b/drivers/misc/cardreader/rts5249.c @@ -0,0 +1,845 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. + * + * Author: + * Wei WANG <wei_wang@realsil.com.cn> + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/rtsx_pci.h> + +#include "rtsx_pcr.h" + +static u8 rts5249_get_ic_version(struct rtsx_pcr *pcr) +{ + u8 val; + + rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val); + return val & 0x0F; +} + +static void rts5249_fill_driving(struct rtsx_pcr *pcr, u8 voltage) +{ + u8 driving_3v3[4][3] = { + {0x11, 0x11, 0x18}, + {0x55, 0x55, 0x5C}, + {0xFF, 0xFF, 0xFF}, + {0x96, 0x96, 0x96}, + }; + u8 driving_1v8[4][3] = { + {0xC4, 0xC4, 0xC4}, + {0x3C, 0x3C, 0x3C}, + {0xFE, 0xFE, 0xFE}, + {0xB3, 0xB3, 0xB3}, + }; + u8 (*driving)[3], drive_sel; + + if (voltage == OUTPUT_3V3) { + driving = driving_3v3; + drive_sel = pcr->sd30_drive_sel_3v3; + } else { + driving = driving_1v8; + drive_sel = pcr->sd30_drive_sel_1v8; + } + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL, + 0xFF, driving[drive_sel][0]); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL, + 0xFF, driving[drive_sel][1]); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL, + 0xFF, driving[drive_sel][2]); +} + +static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + u32 reg; + + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); + + if (!rtsx_vendor_setting_valid(reg)) { + pcr_dbg(pcr, "skip fetch vendor setting\n"); + return; + } + + pcr->aspm_en = rtsx_reg_to_aspm(reg); + pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg); + pcr->card_drive_sel &= 0x3F; + pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg); + + pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); + if (rtsx_check_mmc_support(reg)) + pcr->extra_caps |= EXTRA_CAPS_NO_MMC; + pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg); + if (rtsx_reg_check_reverse_socket(reg)) + pcr->flags |= PCR_REVERSE_SOCKET; +} + +static void rts5249_init_from_cfg(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + int l1ss; + struct rtsx_cr_option *option = &(pcr->option); + u32 lval; + + l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); + if (!l1ss) + return; + + pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); + + if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) { + if (0 == (lval & 0x0F)) + rtsx_pci_enable_oobs_polling(pcr); + else + rtsx_pci_disable_oobs_polling(pcr); + } + + + if (lval & PCI_L1SS_CTL1_ASPM_L1_1) + rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); + + if (lval & PCI_L1SS_CTL1_ASPM_L1_2) + rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); + + if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) + rtsx_set_dev_flag(pcr, PM_L1_1_EN); + + if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) + rtsx_set_dev_flag(pcr, PM_L1_2_EN); + + if (option->ltr_en) { + u16 val; + + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val); + if (val & PCI_EXP_DEVCTL2_LTR_EN) { + option->ltr_enabled = true; + 
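+ /* LTR is usable; start out reporting the active-state latency. */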
option->ltr_active = true; + rtsx_set_ltr_latency(pcr, option->ltr_active_latency); + } else { + option->ltr_enabled = false; + } + } +} + +static int rts5249_init_from_hw(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &(pcr->option); + + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) + option->force_clkreq_0 = false; + else + option->force_clkreq_0 = true; + + return 0; +} + +static void rts52xa_save_content_from_efuse(struct rtsx_pcr *pcr) +{ + u8 cnt, sv; + u16 j = 0; + u8 tmp; + u8 val; + int i; + + rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, + REG_EFUSE_BYPASS | REG_EFUSE_POR, REG_EFUSE_POR); + udelay(1); + + pcr_dbg(pcr, "Enable efuse por!"); + pcr_dbg(pcr, "save efuse to autoload"); + + rtsx_pci_write_register(pcr, RTS525A_EFUSE_ADD, REG_EFUSE_ADD_MASK, 0x00); + rtsx_pci_write_register(pcr, RTS525A_EFUSE_CTL, + REG_EFUSE_ENABLE | REG_EFUSE_MODE, REG_EFUSE_ENABLE); + /* Wait transfer end */ + for (j = 0; j < 1024; j++) { + rtsx_pci_read_register(pcr, RTS525A_EFUSE_CTL, &tmp); + if ((tmp & 0x80) == 0) + break; + } + rtsx_pci_read_register(pcr, RTS525A_EFUSE_DATA, &val); + cnt = val & 0x0F; + sv = val & 0x10; + + if (sv) { + for (i = 0; i < 4; i++) { + rtsx_pci_write_register(pcr, RTS525A_EFUSE_ADD, + REG_EFUSE_ADD_MASK, 0x04 + i); + rtsx_pci_write_register(pcr, RTS525A_EFUSE_CTL, + REG_EFUSE_ENABLE | REG_EFUSE_MODE, REG_EFUSE_ENABLE); + /* Wait transfer end */ + for (j = 0; j < 1024; j++) { + rtsx_pci_read_register(pcr, RTS525A_EFUSE_CTL, &tmp); + if ((tmp & 0x80) == 0) + break; + } + rtsx_pci_read_register(pcr, RTS525A_EFUSE_DATA, &val); + rtsx_pci_write_register(pcr, 0xFF04 + i, 0xFF, val); + } + } else { + rtsx_pci_write_register(pcr, 0xFF04, 0xFF, (u8)PCI_VID(pcr)); + rtsx_pci_write_register(pcr, 0xFF05, 0xFF, (u8)(PCI_VID(pcr) >> 8)); + rtsx_pci_write_register(pcr, 0xFF06, 0xFF, (u8)PCI_PID(pcr)); + rtsx_pci_write_register(pcr, 0xFF07, 0xFF, (u8)(PCI_PID(pcr) >> 8)); + } + + for (i = 0; i < cnt * 4; i++) { + if (sv) + rtsx_pci_write_register(pcr, RTS525A_EFUSE_ADD, + REG_EFUSE_ADD_MASK, 0x08 + i); + else + rtsx_pci_write_register(pcr, RTS525A_EFUSE_ADD, + REG_EFUSE_ADD_MASK, 0x04 + i); + rtsx_pci_write_register(pcr, RTS525A_EFUSE_CTL, + REG_EFUSE_ENABLE | REG_EFUSE_MODE, REG_EFUSE_ENABLE); + /* Wait transfer end */ + for (j = 0; j < 1024; j++) { + rtsx_pci_read_register(pcr, RTS525A_EFUSE_CTL, &tmp); + if ((tmp & 0x80) == 0) + break; + } + rtsx_pci_read_register(pcr, RTS525A_EFUSE_DATA, &val); + rtsx_pci_write_register(pcr, 0xFF08 + i, 0xFF, val); + } + rtsx_pci_write_register(pcr, 0xFF00, 0xFF, (cnt & 0x7F) | 0x80); + rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, + REG_EFUSE_BYPASS | REG_EFUSE_POR, REG_EFUSE_BYPASS); + pcr_dbg(pcr, "Disable efuse por!"); +} + +static void rts52xa_save_content_to_autoload_space(struct rtsx_pcr *pcr) +{ + u8 val; + + rtsx_pci_read_register(pcr, RESET_LOAD_REG, &val); + if (val & 0x02) { + rtsx_pci_read_register(pcr, RTS525A_BIOS_CFG, &val); + if (val & RTS525A_LOAD_BIOS_FLAG) { + rtsx_pci_write_register(pcr, RTS525A_BIOS_CFG, + RTS525A_LOAD_BIOS_FLAG, RTS525A_CLEAR_BIOS_FLAG); + + rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, + REG_EFUSE_POWER_MASK, REG_EFUSE_POWERON); + + pcr_dbg(pcr, "Power ON efuse!"); + mdelay(1); + rts52xa_save_content_from_efuse(pcr); + } else { + rtsx_pci_read_register(pcr, RTS524A_PME_FORCE_CTL, &val); + if (!(val & 0x08)) + rts52xa_save_content_from_efuse(pcr); + } + } else { + pcr_dbg(pcr, "Load from autoload"); + rtsx_pci_write_register(pcr, 0xFF00, 
0xFF, 0x80);
+ rtsx_pci_write_register(pcr, 0xFF04, 0xFF, (u8)PCI_VID(pcr));
+ rtsx_pci_write_register(pcr, 0xFF05, 0xFF, (u8)(PCI_VID(pcr) >> 8));
+ rtsx_pci_write_register(pcr, 0xFF06, 0xFF, (u8)PCI_PID(pcr));
+ rtsx_pci_write_register(pcr, 0xFF07, 0xFF, (u8)(PCI_PID(pcr) >> 8));
+ }
+}
+
+static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ rts5249_init_from_cfg(pcr);
+ rts5249_init_from_hw(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+
+ if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A))
+ rts52xa_save_content_to_autoload_space(pcr);
+
+ /* Reset L1SUB Config */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG3, 0xFF, 0x00);
+ /* Configure GPIO as output */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
+ /* Reset ASPM state to default value */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
+ /* Switch LDO3318 source from DV33 to card_3v3 */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
+ /* LED shine disabled, set initial shine cycle period */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
+ /* Configure driving */
+ rts5249_fill_driving(pcr, OUTPUT_3V3);
+ if (pcr->flags & PCR_REVERSE_SOCKET)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
+
+ rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+ if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
+ rtsx_pci_write_register(pcr, REG_VREF, PWD_SUSPND_EN, PWD_SUSPND_EN);
+ rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x00);
+ rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x20);
+ } else {
+ rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x30);
+ rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x00);
+ }
+
+ /*
+ * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+ * to drive low, and we forcibly request clock. 
+ */ + if (option->force_clkreq_0) + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); + else + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00); + if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) { + rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, + REG_EFUSE_POWER_MASK, REG_EFUSE_POWEROFF); + pcr_dbg(pcr, "Power OFF efuse!"); + } + + return 0; +} + +static int rts5249_optimize_phy(struct rtsx_pcr *pcr) +{ + int err; + + err = rtsx_pci_write_register(pcr, PM_CTRL3, D3_DELINK_MODE_EN, 0x00); + if (err < 0) + return err; + + err = rtsx_pci_write_phy_register(pcr, PHY_REV, + PHY_REV_RESV | PHY_REV_RXIDLE_LATCHED | + PHY_REV_P1_EN | PHY_REV_RXIDLE_EN | + PHY_REV_CLKREQ_TX_EN | PHY_REV_RX_PWST | + PHY_REV_CLKREQ_DT_1_0 | PHY_REV_STOP_CLKRD | + PHY_REV_STOP_CLKWR); + if (err < 0) + return err; + + msleep(1); + + err = rtsx_pci_write_phy_register(pcr, PHY_BPCR, + PHY_BPCR_IBRXSEL | PHY_BPCR_IBTXSEL | + PHY_BPCR_IB_FILTER | PHY_BPCR_CMIRROR_EN); + if (err < 0) + return err; + + err = rtsx_pci_write_phy_register(pcr, PHY_PCR, + PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 | + PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 | + PHY_PCR_RSSI_EN | PHY_PCR_RX10K); + if (err < 0) + return err; + + err = rtsx_pci_write_phy_register(pcr, PHY_RCR2, + PHY_RCR2_EMPHASE_EN | PHY_RCR2_NADJR | + PHY_RCR2_CDR_SR_2 | PHY_RCR2_FREQSEL_12 | + PHY_RCR2_CDR_SC_12P | PHY_RCR2_CALIB_LATE); + if (err < 0) + return err; + + err = rtsx_pci_write_phy_register(pcr, PHY_FLD4, + PHY_FLD4_FLDEN_SEL | PHY_FLD4_REQ_REF | + PHY_FLD4_RXAMP_OFF | PHY_FLD4_REQ_ADDA | + PHY_FLD4_BER_COUNT | PHY_FLD4_BER_TIMER | + PHY_FLD4_BER_CHK_EN); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, PHY_RDR, + PHY_RDR_RXDSEL_1_9 | PHY_SSC_AUTO_PWD); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, PHY_RCR1, + PHY_RCR1_ADP_TIME_4 | PHY_RCR1_VCO_COARSE); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, PHY_FLD3, + PHY_FLD3_TIMER_4 | PHY_FLD3_TIMER_6 | + PHY_FLD3_RXDELINK); + if (err < 0) + return err; + + return rtsx_pci_write_phy_register(pcr, PHY_TUNE, + PHY_TUNE_TUNEREF_1_0 | PHY_TUNE_VBGSEL_1252 | + PHY_TUNE_SDBUS_33 | PHY_TUNE_TUNED18 | + PHY_TUNE_TUNED12 | PHY_TUNE_TUNEA12); +} + +static int rtsx_base_turn_on_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02); +} + +static int rtsx_base_turn_off_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00); +} + +static int rtsx_base_enable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08); +} + +static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00); +} + +static int rtsx_base_card_power_on(struct rtsx_pcr *pcr, int card) +{ + int err; + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) + rtsx_pci_enable_ocp(pcr); + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, + SD_POWER_MASK, SD_VCC_PARTIAL_POWER_ON); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, + LDO3318_PWR_MASK, 0x02); + err = rtsx_pci_send_cmd(pcr, 100); + if (err < 0) + return err; + + msleep(5); + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, + SD_POWER_MASK, SD_VCC_POWER_ON); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, + LDO3318_PWR_MASK, 0x06); 
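+ /* Editorial note (not in the original commit): VCC is raised in two + * steps, SD_VCC_PARTIAL_POWER_ON above and then SD_VCC_POWER_ON here + * after a 5 ms settle, with the LDO3318 gate following the same + * 0x02 -> 0x06 sequence; this staged ramp presumably limits inrush + * current on the card power rail. + */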
+ return rtsx_pci_send_cmd(pcr, 100); +} + +static int rtsx_base_card_power_off(struct rtsx_pcr *pcr, int card) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) + rtsx_pci_disable_ocp(pcr); + + rtsx_pci_write_register(pcr, CARD_PWR_CTL, SD_POWER_MASK, SD_POWER_OFF); + + rtsx_pci_write_register(pcr, PWR_GATE_CTRL, LDO3318_PWR_MASK, 0x00); + return 0; +} + +static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + int err; + u16 append; + + switch (voltage) { + case OUTPUT_3V3: + err = rtsx_pci_update_phy(pcr, PHY_TUNE, PHY_TUNE_VOLTAGE_MASK, + PHY_TUNE_VOLTAGE_3V3); + if (err < 0) + return err; + break; + case OUTPUT_1V8: + append = PHY_TUNE_D18_1V8; + if (CHK_PCI_PID(pcr, 0x5249)) { + err = rtsx_pci_update_phy(pcr, PHY_BACR, + PHY_BACR_BASIC_MASK, 0); + if (err < 0) + return err; + append = PHY_TUNE_D18_1V7; + } + + err = rtsx_pci_update_phy(pcr, PHY_TUNE, PHY_TUNE_VOLTAGE_MASK, + append); + if (err < 0) + return err; + break; + default: + pcr_dbg(pcr, "unknown output voltage %d\n", voltage); + return -EINVAL; + } + + /* set pad drive */ + rtsx_pci_init_cmd(pcr); + rts5249_fill_driving(pcr, voltage); + return rtsx_pci_send_cmd(pcr, 100); +} + +static const struct pcr_ops rts5249_pcr_ops = { + .fetch_vendor_settings = rtsx_base_fetch_vendor_settings, + .extra_init_hw = rts5249_extra_init_hw, + .optimize_phy = rts5249_optimize_phy, + .turn_on_led = rtsx_base_turn_on_led, + .turn_off_led = rtsx_base_turn_off_led, + .enable_auto_blink = rtsx_base_enable_auto_blink, + .disable_auto_blink = rtsx_base_disable_auto_blink, + .card_power_on = rtsx_base_card_power_on, + .card_power_off = rtsx_base_card_power_off, + .switch_output_voltage = rtsx_base_switch_output_voltage, +}; + +/* SD Pull Control Enable: + * SD_DAT[3:0] ==> pull up + * SD_CD ==> pull up + * SD_WP ==> pull up + * SD_CMD ==> pull up + * SD_CLK ==> pull down + */ +static const u32 rts5249_sd_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66), + RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9), + RTSX_REG_PAIR(CARD_PULL_CTL4, 0xAA), + 0, +}; + +/* SD Pull Control Disable: + * SD_DAT[3:0] ==> pull down + * SD_CD ==> pull up + * SD_WP ==> pull down + * SD_CMD ==> pull down + * SD_CLK ==> pull down + */ +static const u32 rts5249_sd_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66), + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5), + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55), + 0, +}; + +/* MS Pull Control Enable: + * MS CD ==> pull up + * others ==> pull down + */ +static const u32 rts5249_ms_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15), + 0, +}; + +/* MS Pull Control Disable: + * MS CD ==> pull up + * others ==> pull down + */ +static const u32 rts5249_ms_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15), + 0, +}; + +void rts5249_init_params(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &(pcr->option); + + pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104; + pcr->num_slots = 2; + pcr->ops = &rts5249_pcr_ops; + + pcr->flags = 0; + pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT; + pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B; + pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B; + pcr->aspm_en = ASPM_L1_EN; + pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16); + pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5); + + 
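+ /* Editorial note (not in the original commit): SET_CLOCK_PHASE() packs + * three initial sampling-point selections, one per bus speed mode, into + * a single word consumed by the rtsx_pci core; the exact per-mode + * ordering is defined in rtsx_pci.h, not in this file. + */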
pcr->ic_version = rts5249_get_ic_version(pcr); + pcr->sd_pull_ctl_enable_tbl = rts5249_sd_pull_ctl_enable_tbl; + pcr->sd_pull_ctl_disable_tbl = rts5249_sd_pull_ctl_disable_tbl; + pcr->ms_pull_ctl_enable_tbl = rts5249_ms_pull_ctl_enable_tbl; + pcr->ms_pull_ctl_disable_tbl = rts5249_ms_pull_ctl_disable_tbl; + + pcr->reg_pm_ctrl3 = PM_CTRL3; + + option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN + | LTR_L1SS_PWR_GATE_EN); + option->ltr_en = true; + + /* Init latency of active, idle, L1OFF to 60us, 300us, 3ms */ + option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF; + option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF; + option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF; + option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF; + option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5249_DEF; + option->ltr_l1off_snooze_sspwrgate = + LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF; +} + +static int rts524a_write_phy(struct rtsx_pcr *pcr, u8 addr, u16 val) +{ + addr = addr & 0x80 ? (addr & 0x7F) | 0x40 : addr; + + return __rtsx_pci_write_phy_register(pcr, addr, val); +} + +static int rts524a_read_phy(struct rtsx_pcr *pcr, u8 addr, u16 *val) +{ + addr = addr & 0x80 ? (addr & 0x7F) | 0x40 : addr; + + return __rtsx_pci_read_phy_register(pcr, addr, val); +} + +static int rts524a_optimize_phy(struct rtsx_pcr *pcr) +{ + int err; + + err = rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, + D3_DELINK_MODE_EN, 0x00); + if (err < 0) + return err; + + rtsx_pci_write_phy_register(pcr, PHY_PCR, + PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 | + PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 | PHY_PCR_RSSI_EN); + rtsx_pci_write_phy_register(pcr, PHY_SSCCR3, + PHY_SSCCR3_STEP_IN | PHY_SSCCR3_CHECK_DELAY); + + if (is_version(pcr, 0x524A, IC_VER_A)) { + rtsx_pci_write_phy_register(pcr, PHY_SSCCR3, + PHY_SSCCR3_STEP_IN | PHY_SSCCR3_CHECK_DELAY); + rtsx_pci_write_phy_register(pcr, PHY_SSCCR2, + PHY_SSCCR2_PLL_NCODE | PHY_SSCCR2_TIME0 | + PHY_SSCCR2_TIME2_WIDTH); + rtsx_pci_write_phy_register(pcr, PHY_ANA1A, + PHY_ANA1A_TXR_LOOPBACK | PHY_ANA1A_RXT_BIST | + PHY_ANA1A_TXR_BIST | PHY_ANA1A_REV); + rtsx_pci_write_phy_register(pcr, PHY_ANA1D, + PHY_ANA1D_DEBUG_ADDR); + rtsx_pci_write_phy_register(pcr, PHY_DIG1E, + PHY_DIG1E_REV | PHY_DIG1E_D0_X_D1 | + PHY_DIG1E_RX_ON_HOST | PHY_DIG1E_RCLK_REF_HOST | + PHY_DIG1E_RCLK_TX_EN_KEEP | + PHY_DIG1E_RCLK_TX_TERM_KEEP | + PHY_DIG1E_RCLK_RX_EIDLE_ON | PHY_DIG1E_TX_TERM_KEEP | + PHY_DIG1E_RX_TERM_KEEP | PHY_DIG1E_TX_EN_KEEP | + PHY_DIG1E_RX_EN_KEEP); + } + + rtsx_pci_write_phy_register(pcr, PHY_ANA08, + PHY_ANA08_RX_EQ_DCGAIN | PHY_ANA08_SEL_RX_EN | + PHY_ANA08_RX_EQ_VAL | PHY_ANA08_SCP | PHY_ANA08_SEL_IPI); + + return 0; +} + +static int rts524a_extra_init_hw(struct rtsx_pcr *pcr) +{ + rts5249_extra_init_hw(pcr); + + rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, + FORCE_ASPM_L1_EN, FORCE_ASPM_L1_EN); + rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0); + rtsx_pci_write_register(pcr, LDO_VCC_CFG1, LDO_VCC_LMT_EN, + LDO_VCC_LMT_EN); + rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL); + if (is_version(pcr, 0x524A, IC_VER_A)) { + rtsx_pci_write_register(pcr, LDO_DV18_CFG, + LDO_DV18_SR_MASK, LDO_DV18_SR_DF); + rtsx_pci_write_register(pcr, LDO_VCC_CFG1, + LDO_VCC_REF_TUNE_MASK, LDO_VCC_REF_1V2); + rtsx_pci_write_register(pcr, LDO_VIO_CFG, + LDO_VIO_REF_TUNE_MASK, LDO_VIO_REF_1V2); + rtsx_pci_write_register(pcr, LDO_VIO_CFG, + LDO_VIO_SR_MASK, LDO_VIO_SR_DF); + rtsx_pci_write_register(pcr, LDO_DV12S_CFG, + LDO_REF12_TUNE_MASK, LDO_REF12_TUNE_DF); + rtsx_pci_write_register(pcr, 
SD40_LDO_CTL1, + SD40_VIO_TUNE_MASK, SD40_VIO_TUNE_1V7); + } + + return 0; +} + +static void rts5250_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active) +{ + struct rtsx_cr_option *option = &(pcr->option); + + u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR); + int card_exist = (interrupt & SD_EXIST) | (interrupt & MS_EXIST); + int aspm_L1_1, aspm_L1_2; + u8 val = 0; + + aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN); + aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN); + + if (active) { + /* Run, latency: 60us */ + if (aspm_L1_1) + val = option->ltr_l1off_snooze_sspwrgate; + } else { + /* L1off, latency: 300us */ + if (aspm_L1_2) + val = option->ltr_l1off_sspwrgate; + } + + if (aspm_L1_1 || aspm_L1_2) { + if (rtsx_check_dev_flag(pcr, + LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) { + if (card_exist) + val &= ~L1OFF_MBIAS2_EN_5250; + else + val |= L1OFF_MBIAS2_EN_5250; + } + } + rtsx_set_l1off_sub(pcr, val); +} + +static const struct pcr_ops rts524a_pcr_ops = { + .write_phy = rts524a_write_phy, + .read_phy = rts524a_read_phy, + .fetch_vendor_settings = rtsx_base_fetch_vendor_settings, + .extra_init_hw = rts524a_extra_init_hw, + .optimize_phy = rts524a_optimize_phy, + .turn_on_led = rtsx_base_turn_on_led, + .turn_off_led = rtsx_base_turn_off_led, + .enable_auto_blink = rtsx_base_enable_auto_blink, + .disable_auto_blink = rtsx_base_disable_auto_blink, + .card_power_on = rtsx_base_card_power_on, + .card_power_off = rtsx_base_card_power_off, + .switch_output_voltage = rtsx_base_switch_output_voltage, + .set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0, +}; + +void rts524a_init_params(struct rtsx_pcr *pcr) +{ + rts5249_init_params(pcr); + pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11); + pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF; + pcr->option.ltr_l1off_snooze_sspwrgate = + LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF; + + pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3; + pcr->ops = &rts524a_pcr_ops; + + pcr->option.ocp_en = 1; + if (pcr->option.ocp_en) + pcr->hw_param.interrupt_en |= SD_OC_INT_EN; + pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M; + pcr->option.sd_800mA_ocp_thd = RTS524A_OCP_THD_800; + +} + +static int rts525a_card_power_on(struct rtsx_pcr *pcr, int card) +{ + rtsx_pci_write_register(pcr, LDO_VCC_CFG1, + LDO_VCC_TUNE_MASK, LDO_VCC_3V3); + return rtsx_base_card_power_on(pcr, card); +} + +static int rts525a_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + switch (voltage) { + case OUTPUT_3V3: + rtsx_pci_write_register(pcr, LDO_CONFIG2, + LDO_D3318_MASK, LDO_D3318_33V); + rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, 0); + break; + case OUTPUT_1V8: + rtsx_pci_write_register(pcr, LDO_CONFIG2, + LDO_D3318_MASK, LDO_D3318_18V); + rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, + SD_IO_USING_1V8); + break; + default: + return -EINVAL; + } + + rtsx_pci_init_cmd(pcr); + rts5249_fill_driving(pcr, voltage); + return rtsx_pci_send_cmd(pcr, 100); +} + +static int rts525a_optimize_phy(struct rtsx_pcr *pcr) +{ + int err; + + err = rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, + D3_DELINK_MODE_EN, 0x00); + if (err < 0) + return err; + + rtsx_pci_write_phy_register(pcr, _PHY_FLD0, + _PHY_FLD0_CLK_REQ_20C | _PHY_FLD0_RX_IDLE_EN | + _PHY_FLD0_BIT_ERR_RSTN | _PHY_FLD0_BER_COUNT | + _PHY_FLD0_BER_TIMER | _PHY_FLD0_CHECK_EN); + + rtsx_pci_write_phy_register(pcr, _PHY_ANA03, + _PHY_ANA03_TIMER_MAX | _PHY_ANA03_OOBS_DEB_EN | + _PHY_CMU_DEBUG_EN); + + if (is_version(pcr, 0x525A, IC_VER_A)) + rtsx_pci_write_phy_register(pcr, _PHY_REV0, + _PHY_REV0_FILTER_OUT | 
_PHY_REV0_CDR_BYPASS_PFD | + _PHY_REV0_CDR_RX_IDLE_BYPASS); + + return 0; +} + +static int rts525a_extra_init_hw(struct rtsx_pcr *pcr) +{ + rts5249_extra_init_hw(pcr); + + rtsx_pci_write_register(pcr, RTS5250_CLK_CFG3, RTS525A_CFG_MEM_PD, RTS525A_CFG_MEM_PD); + + rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL); + if (is_version(pcr, 0x525A, IC_VER_A)) { + rtsx_pci_write_register(pcr, L1SUB_CONFIG2, + L1SUB_AUTO_CFG, L1SUB_AUTO_CFG); + rtsx_pci_write_register(pcr, RREF_CFG, + RREF_VBGSEL_MASK, RREF_VBGSEL_1V25); + rtsx_pci_write_register(pcr, LDO_VIO_CFG, + LDO_VIO_TUNE_MASK, LDO_VIO_1V7); + rtsx_pci_write_register(pcr, LDO_DV12S_CFG, + LDO_D12_TUNE_MASK, LDO_D12_TUNE_DF); + rtsx_pci_write_register(pcr, LDO_AV12S_CFG, + LDO_AV12S_TUNE_MASK, LDO_AV12S_TUNE_DF); + rtsx_pci_write_register(pcr, LDO_VCC_CFG0, + LDO_VCC_LMTVTH_MASK, LDO_VCC_LMTVTH_2A); + rtsx_pci_write_register(pcr, OOBS_CONFIG, + OOBS_AUTOK_DIS | OOBS_VAL_MASK, 0x89); + } + + return 0; +} + +static const struct pcr_ops rts525a_pcr_ops = { + .fetch_vendor_settings = rtsx_base_fetch_vendor_settings, + .extra_init_hw = rts525a_extra_init_hw, + .optimize_phy = rts525a_optimize_phy, + .turn_on_led = rtsx_base_turn_on_led, + .turn_off_led = rtsx_base_turn_off_led, + .enable_auto_blink = rtsx_base_enable_auto_blink, + .disable_auto_blink = rtsx_base_disable_auto_blink, + .card_power_on = rts525a_card_power_on, + .card_power_off = rtsx_base_card_power_off, + .switch_output_voltage = rts525a_switch_output_voltage, + .set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0, +}; + +void rts525a_init_params(struct rtsx_pcr *pcr) +{ + rts5249_init_params(pcr); + pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11); + pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF; + pcr->option.ltr_l1off_snooze_sspwrgate = + LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF; + + pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3; + pcr->ops = &rts525a_pcr_ops; + + pcr->option.ocp_en = 1; + if (pcr->option.ocp_en) + pcr->hw_param.interrupt_en |= SD_OC_INT_EN; + pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M; + pcr->option.sd_800mA_ocp_thd = RTS525A_OCP_THD_800; +} diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c new file mode 100644 index 000000000..080a7d67a --- /dev/null +++ b/drivers/misc/cardreader/rts5260.c @@ -0,0 +1,663 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2016-2017 Realtek Semiconductor Corp. All rights reserved. 
+ * + * Author: + * Steven FENG <steven_feng@realsil.com.cn> + * Rui FENG <rui_feng@realsil.com.cn> + * Wei WANG <wei_wang@realsil.com.cn> + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/rtsx_pci.h> + +#include "rts5260.h" +#include "rtsx_pcr.h" + +static u8 rts5260_get_ic_version(struct rtsx_pcr *pcr) +{ + u8 val; + + rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val); + return val & IC_VERSION_MASK; +} + +static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage) +{ + u8 driving_3v3[4][3] = { + {0x11, 0x11, 0x11}, + {0x22, 0x22, 0x22}, + {0x55, 0x55, 0x55}, + {0x33, 0x33, 0x33}, + }; + u8 driving_1v8[4][3] = { + {0x35, 0x33, 0x33}, + {0x8A, 0x88, 0x88}, + {0xBD, 0xBB, 0xBB}, + {0x9B, 0x99, 0x99}, + }; + u8 (*driving)[3], drive_sel; + + if (voltage == OUTPUT_3V3) { + driving = driving_3v3; + drive_sel = pcr->sd30_drive_sel_3v3; + } else { + driving = driving_1v8; + drive_sel = pcr->sd30_drive_sel_1v8; + } + + rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL, + 0xFF, driving[drive_sel][0]); + + rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL, + 0xFF, driving[drive_sel][1]); + + rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL, + 0xFF, driving[drive_sel][2]); +} + +static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + u32 reg; + + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); + + if (!rtsx_vendor_setting_valid(reg)) { + pcr_dbg(pcr, "skip fetch vendor setting\n"); + return; + } + + pcr->aspm_en = rtsx_reg_to_aspm(reg); + pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg); + pcr->card_drive_sel &= 0x3F; + pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg); + + pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); + if (rtsx_check_mmc_support(reg)) + pcr->extra_caps |= EXTRA_CAPS_NO_MMC; + pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg); + if (rtsx_reg_check_reverse_socket(reg)) + pcr->flags |= PCR_REVERSE_SOCKET; +} + +static int rtsx_base_enable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, + LED_SHINE_MASK, LED_SHINE_EN); +} + +static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, + LED_SHINE_MASK, LED_SHINE_DISABLE); +} + +static int rts5260_turn_on_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0, + RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_ON); +} + +static int rts5260_turn_off_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0, + RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_OFF); +} + +/* SD Pull Control Enable: + * SD_DAT[3:0] ==> pull up + * SD_CD ==> pull up + * SD_WP ==> pull up + * SD_CMD ==> pull up + * SD_CLK ==> pull down + */ +static const u32 rts5260_sd_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66), + RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9), + RTSX_REG_PAIR(CARD_PULL_CTL4, 0xAA), + 0, +}; + +/* SD Pull Control Disable: + * SD_DAT[3:0] ==> pull down + * SD_CD ==> pull up + * SD_WP ==> pull down + * SD_CMD ==> pull down + * SD_CLK ==> pull down + */ +static const u32 rts5260_sd_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66), + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5), + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55), + 0, +}; + +/* MS Pull Control Enable: + * MS CD ==> pull up + * 
others ==> pull down + */ +static const u32 rts5260_ms_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15), + 0, +}; + +/* MS Pull Control Disable: + * MS CD ==> pull up + * others ==> pull down + */ +static const u32 rts5260_ms_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15), + 0, +}; + +static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr) +{ + rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK + | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST); + rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF, + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0); + + return 0; +} + +static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) + rtsx_pci_enable_ocp(pcr); + + + rtsx_pci_write_register(pcr, LDO_CONFIG2, DV331812_VDD1, DV331812_VDD1); + rtsx_pci_write_register(pcr, LDO_VCC_CFG0, + RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33); + + rtsx_pci_write_register(pcr, LDO_VCC_CFG1, LDO_POW_SDVDD1_MASK, + LDO_POW_SDVDD1_ON); + + rtsx_pci_write_register(pcr, LDO_CONFIG2, + DV331812_POWERON, DV331812_POWERON); + msleep(20); + + if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 || + pcr->extra_caps & EXTRA_CAPS_SD_SDR104) + sd_set_sample_push_timing_sd30(pcr); + + /* Initialize SD_CFG1 register */ + rtsx_pci_write_register(pcr, SD_CFG1, 0xFF, + SD_CLK_DIVIDE_128 | SD_20_MODE); + + rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL, + 0xFF, SD20_RX_POS_EDGE); + rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0); + rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR, + SD_STOP | SD_CLR_ERR); + + /* Reset SD_CFG3 register */ + rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0); + rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG, + SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 | + SD30_CLK_STOP_CFG0, 0); + + rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0); + + return 0; +} + +static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + switch (voltage) { + case OUTPUT_3V3: + rtsx_pci_write_register(pcr, LDO_CONFIG2, + DV331812_VDD1, DV331812_VDD1); + rtsx_pci_write_register(pcr, LDO_DV18_CFG, + DV331812_MASK, DV331812_33); + rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, 0); + break; + case OUTPUT_1V8: + rtsx_pci_write_register(pcr, LDO_CONFIG2, + DV331812_VDD1, DV331812_VDD1); + rtsx_pci_write_register(pcr, LDO_DV18_CFG, + DV331812_MASK, DV331812_17); + rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, + SD_IO_USING_1V8); + break; + default: + return -EINVAL; + } + + /* set pad drive */ + rts5260_fill_driving(pcr, voltage); + + return 0; +} + +static void rts5260_stop_cmd(struct rtsx_pcr *pcr) +{ + rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD); + rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA); + rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0, + RTS5260_DMA_RST | RTS5260_ADMA3_RST, + RTS5260_DMA_RST | RTS5260_ADMA3_RST); + rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH); +} + +static void rts5260_card_before_power_off(struct rtsx_pcr *pcr) +{ + rts5260_stop_cmd(pcr); + rts5260_switch_output_voltage(pcr, OUTPUT_3V3); + +} + +static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card) +{ + int err = 0; + + 
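+ /* Editorial note (not in the original commit): the two + * rtsx_pci_write_register() calls below both assign their status to + * err, so a failure of the first (SDVDD1 power-off) is silently + * overwritten by the result of the second (DV331812 power-off). + */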
rts5260_card_before_power_off(pcr); + err = rtsx_pci_write_register(pcr, LDO_VCC_CFG1, + LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_OFF); + err = rtsx_pci_write_register(pcr, LDO_CONFIG2, + DV331812_POWERON, DV331812_POWEROFF); + if (pcr->option.ocp_en) + rtsx_pci_disable_ocp(pcr); + + return err; +} + +static void rts5260_init_ocp(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) { + u8 mask, val; + + + rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL, + RTS5260_DVCC_OCP_THD_MASK, + option->sd_800mA_ocp_thd); + + rtsx_pci_write_register(pcr, RTS5260_DV331812_CFG, + RTS5260_DV331812_OCP_THD_MASK, + RTS5260_DV331812_OCP_THD_270); + + mask = SD_OCP_GLITCH_MASK; + val = pcr->hw_param.ocp_glitch; + rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val); + rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL, + RTS5260_DVCC_OCP_EN | + RTS5260_DVCC_OCP_CL_EN, + RTS5260_DVCC_OCP_EN | + RTS5260_DVCC_OCP_CL_EN); + + rtsx_pci_enable_ocp(pcr); + } else { + rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL, + RTS5260_DVCC_OCP_EN | + RTS5260_DVCC_OCP_CL_EN, 0); + } +} + +static void rts5260_enable_ocp(struct rtsx_pcr *pcr) +{ + u8 val = 0; + + val = SD_OCP_INT_EN | SD_DETECT_EN; + rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val); + +} + +static void rts5260_disable_ocp(struct rtsx_pcr *pcr) +{ + u8 mask = 0; + + mask = SD_OCP_INT_EN | SD_DETECT_EN; + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + +} + + +static int rts5260_get_ocpstat(struct rtsx_pcr *pcr, u8 *val) +{ + return rtsx_pci_read_register(pcr, REG_OCPSTAT, val); +} + +static int rts5260_get_ocpstat2(struct rtsx_pcr *pcr, u8 *val) +{ + return rtsx_pci_read_register(pcr, REG_DV3318_OCPSTAT, val); +} + +static void rts5260_clear_ocpstat(struct rtsx_pcr *pcr) +{ + u8 mask = 0; + u8 val = 0; + + mask = SD_OCP_INT_CLR | SD_OC_CLR; + val = SD_OCP_INT_CLR | SD_OC_CLR; + + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val); + rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL, + DV3318_OCP_INT_CLR | DV3318_OCP_CLR, + DV3318_OCP_INT_CLR | DV3318_OCP_CLR); + udelay(10); + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL, + DV3318_OCP_INT_CLR | DV3318_OCP_CLR, 0); +} + +static void rts5260_process_ocp(struct rtsx_pcr *pcr) +{ + if (!pcr->option.ocp_en) + return; + + rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat); + rts5260_get_ocpstat2(pcr, &pcr->ocp_stat2); + + if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) || + (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) { + rtsx_pci_card_power_off(pcr, RTSX_SD_CARD); + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); + rtsx_pci_clear_ocpstat(pcr); + pcr->ocp_stat = 0; + pcr->ocp_stat2 = 0; + } + +} + +static int rts5260_init_hw(struct rtsx_pcr *pcr) +{ + int err; + + rtsx_pci_init_cmd(pcr); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG1, + AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE); + /* Reset L1SUB Config */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG3, 0xFF, 0x00); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CLK_FORCE_CTL, + CLK_PM_EN, CLK_PM_EN); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWD_SUSPEND_EN, 0xFF, 0xFF); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, + PWR_GATE_EN, PWR_GATE_EN); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, REG_VREF, + PWD_SUSPND_EN, PWD_SUSPND_EN); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RBCTL, + U_AUTO_DMA_EN_MASK, U_AUTO_DMA_DISABLE); + + if (pcr->flags & PCR_REVERSE_SOCKET) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0); + else + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, 
PETXCFG, 0xB0, 0x80); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG, + OBFF_EN_MASK, OBFF_DISABLE); + + err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF); + if (err < 0) + return err; + + rtsx_pci_init_ocp(pcr); + + return 0; +} + +static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr) +{ + int lss_l1_1, lss_l1_2; + + lss_l1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN) + | rtsx_check_dev_flag(pcr, PM_L1_1_EN); + lss_l1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN) + | rtsx_check_dev_flag(pcr, PM_L1_2_EN); + + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0); + if (lss_l1_2) { + pcr_dbg(pcr, "Set parameters for L1.2."); + rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL, + 0xFF, PCIE_L1_2_EN); + rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL, + RTS5260_DVCC_OCP_EN | + RTS5260_DVCC_OCP_CL_EN, + RTS5260_DVCC_OCP_EN | + RTS5260_DVCC_OCP_CL_EN); + + rtsx_pci_write_register(pcr, PWR_FE_CTL, + 0xFF, PCIE_L1_2_PD_FE_EN); + } else if (lss_l1_1) { + pcr_dbg(pcr, "Set parameters for L1.1."); + rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL, + 0xFF, PCIE_L1_1_EN); + rtsx_pci_write_register(pcr, PWR_FE_CTL, + 0xFF, PCIE_L1_1_PD_FE_EN); + } else { + pcr_dbg(pcr, "Set parameters for L1."); + rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL, + 0xFF, PCIE_L1_0_EN); + rtsx_pci_write_register(pcr, PWR_FE_CTL, + 0xFF, PCIE_L1_0_PD_FE_EN); + } + + rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_DPHY_RET_VALUE, + 0xFF, CFG_L1_0_RET_VALUE_DEFAULT); + rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_MAC_RET_VALUE, + 0xFF, CFG_L1_0_RET_VALUE_DEFAULT); + rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD30_RET_VALUE, + 0xFF, CFG_L1_0_RET_VALUE_DEFAULT); + rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD40_RET_VALUE, + 0xFF, CFG_L1_0_RET_VALUE_DEFAULT); + rtsx_pci_write_register(pcr, CFG_L1_0_SYS_RET_VALUE, + 0xFF, CFG_L1_0_RET_VALUE_DEFAULT); + /*Option cut APHY*/ + rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_0, + 0xFF, CFG_PCIE_APHY_OFF_0_DEFAULT); + rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_1, + 0xFF, CFG_PCIE_APHY_OFF_1_DEFAULT); + rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_2, + 0xFF, CFG_PCIE_APHY_OFF_2_DEFAULT); + rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_3, + 0xFF, CFG_PCIE_APHY_OFF_3_DEFAULT); + /*CDR DEC*/ + rtsx_pci_write_register(pcr, PWC_CDR, 0xFF, PWC_CDR_DEFAULT); + /*PWMPFM*/ + rtsx_pci_write_register(pcr, CFG_LP_FPWM_VALUE, + 0xFF, CFG_LP_FPWM_VALUE_DEFAULT); + /*No Power Saving WA*/ + rtsx_pci_write_register(pcr, CFG_L1_0_CRC_MISC_RET_VALUE, + 0xFF, CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT); +} + +static void rts5260_init_from_cfg(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + int l1ss; + struct rtsx_cr_option *option = &pcr->option; + u32 lval; + + l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); + if (!l1ss) + return; + + pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); + + if (lval & PCI_L1SS_CTL1_ASPM_L1_1) + rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); + + if (lval & PCI_L1SS_CTL1_ASPM_L1_2) + rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); + + if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) + rtsx_set_dev_flag(pcr, PM_L1_1_EN); + + if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) + rtsx_set_dev_flag(pcr, PM_L1_2_EN); + + rts5260_pwr_saving_setting(pcr); + + if (option->ltr_en) { + u16 val; + + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val); + if (val & PCI_EXP_DEVCTL2_LTR_EN) { + option->ltr_enabled = true; + option->ltr_active = true; + rtsx_set_ltr_latency(pcr, option->ltr_active_latency); + } else { + option->ltr_enabled = false; + } + } + + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | 
PM_L1_1_EN | PM_L1_2_EN)) + option->force_clkreq_0 = false; + else + option->force_clkreq_0 = true; +} + +static int rts5260_extra_init_hw(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + /* Set mcu_cnt to 7 to ensure data can be sampled properly */ + rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07); + rtsx_pci_write_register(pcr, SSC_DIV_N_0, 0xFF, 0x5D); + + rts5260_init_from_cfg(pcr); + + /* force no MDIO*/ + rtsx_pci_write_register(pcr, RTS5260_AUTOLOAD_CFG4, + 0xFF, RTS5260_MIMO_DISABLE); + /*Modify SDVCC Tune Default Parameters!*/ + rtsx_pci_write_register(pcr, LDO_VCC_CFG0, + RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33); + + rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL); + + rts5260_init_hw(pcr); + + /* + * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced + * to drive low, and we forcibly request clock. + */ + if (option->force_clkreq_0) + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); + else + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00); + + return 0; +} + +static void rts5260_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active) +{ + struct rtsx_cr_option *option = &pcr->option; + u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR); + int card_exist = (interrupt & SD_EXIST) | (interrupt & MS_EXIST); + int aspm_L1_1, aspm_L1_2; + u8 val = 0; + + aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN); + aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN); + + if (active) { + /* run, latency: 60us */ + if (aspm_L1_1) + val = option->ltr_l1off_snooze_sspwrgate; + } else { + /* l1off, latency: 300us */ + if (aspm_L1_2) + val = option->ltr_l1off_sspwrgate; + } + + if (aspm_L1_1 || aspm_L1_2) { + if (rtsx_check_dev_flag(pcr, + LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) { + if (card_exist) + val &= ~L1OFF_MBIAS2_EN_5250; + else + val |= L1OFF_MBIAS2_EN_5250; + } + } + rtsx_set_l1off_sub(pcr, val); +} + +static const struct pcr_ops rts5260_pcr_ops = { + .fetch_vendor_settings = rtsx_base_fetch_vendor_settings, + .turn_on_led = rts5260_turn_on_led, + .turn_off_led = rts5260_turn_off_led, + .extra_init_hw = rts5260_extra_init_hw, + .enable_auto_blink = rtsx_base_enable_auto_blink, + .disable_auto_blink = rtsx_base_disable_auto_blink, + .card_power_on = rts5260_card_power_on, + .card_power_off = rts5260_card_power_off, + .switch_output_voltage = rts5260_switch_output_voltage, + .stop_cmd = rts5260_stop_cmd, + .set_l1off_cfg_sub_d0 = rts5260_set_l1off_cfg_sub_d0, + .enable_ocp = rts5260_enable_ocp, + .disable_ocp = rts5260_disable_ocp, + .init_ocp = rts5260_init_ocp, + .process_ocp = rts5260_process_ocp, + .get_ocpstat = rts5260_get_ocpstat, + .clear_ocpstat = rts5260_clear_ocpstat, +}; + +void rts5260_init_params(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + struct rtsx_hw_param *hw_param = &pcr->hw_param; + + pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104; + pcr->num_slots = 2; + + pcr->flags = 0; + pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT; + pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B; + pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B; + pcr->aspm_en = ASPM_L1_EN; + pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11); + pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5); + + pcr->ic_version = rts5260_get_ic_version(pcr); + pcr->sd_pull_ctl_enable_tbl = rts5260_sd_pull_ctl_enable_tbl; + pcr->sd_pull_ctl_disable_tbl = rts5260_sd_pull_ctl_disable_tbl; + 
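+ /* Editorial note (not in the original commit): each pull-control table + * wired up here is a list of RTSX_REG_PAIR(reg, value) entries + * terminated by a 0 sentinel; the rtsx_pci core walks the selected + * table to set the card-pin pull-ups and pull-downs on card insertion + * and removal. + */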
pcr->ms_pull_ctl_enable_tbl = rts5260_ms_pull_ctl_enable_tbl; + pcr->ms_pull_ctl_disable_tbl = rts5260_ms_pull_ctl_disable_tbl; + + pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3; + + pcr->ops = &rts5260_pcr_ops; + + option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN + | LTR_L1SS_PWR_GATE_EN); + option->ltr_en = true; + + /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */ + option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF; + option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF; + option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF; + option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF; + option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF; + option->ltr_l1off_snooze_sspwrgate = + LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF; + + option->ocp_en = 1; + if (option->ocp_en) + hw_param->interrupt_en |= SD_OC_INT_EN; + hw_param->ocp_glitch = SD_OCP_GLITCH_100U | SDVIO_OCP_GLITCH_800U; + option->sd_400mA_ocp_thd = RTS5260_DVCC_OCP_THD_550; + option->sd_800mA_ocp_thd = RTS5260_DVCC_OCP_THD_970; +} diff --git a/drivers/misc/cardreader/rts5260.h b/drivers/misc/cardreader/rts5260.h new file mode 100644 index 000000000..53a1411c8 --- /dev/null +++ b/drivers/misc/cardreader/rts5260.h @@ -0,0 +1,45 @@ +#ifndef __RTS5260_H__ +#define __RTS5260_H__ + +#define RTS5260_DVCC_CTRL 0xFF73 +#define RTS5260_DVCC_OCP_EN (0x01 << 7) +#define RTS5260_DVCC_OCP_THD_MASK (0x07 << 4) +#define RTS5260_DVCC_POWERON (0x01 << 3) +#define RTS5260_DVCC_OCP_CL_EN (0x01 << 2) + +#define RTS5260_DVIO_CTRL 0xFF75 +#define RTS5260_DVIO_OCP_EN (0x01 << 7) +#define RTS5260_DVIO_OCP_THD_MASK (0x07 << 4) +#define RTS5260_DVIO_POWERON (0x01 << 3) +#define RTS5260_DVIO_OCP_CL_EN (0x01 << 2) + +#define RTS5260_DV331812_CFG 0xFF71 +#define RTS5260_DV331812_OCP_EN (0x01 << 7) +#define RTS5260_DV331812_OCP_THD_MASK (0x07 << 4) +#define RTS5260_DV331812_POWERON (0x01 << 3) +#define RTS5260_DV331812_SEL (0x01 << 2) +#define RTS5260_DV331812_VDD1 (0x01 << 2) +#define RTS5260_DV331812_VDD2 (0x00 << 2) + +#define RTS5260_DV331812_OCP_THD_120 (0x00 << 4) +#define RTS5260_DV331812_OCP_THD_140 (0x01 << 4) +#define RTS5260_DV331812_OCP_THD_160 (0x02 << 4) +#define RTS5260_DV331812_OCP_THD_180 (0x03 << 4) +#define RTS5260_DV331812_OCP_THD_210 (0x04 << 4) +#define RTS5260_DV331812_OCP_THD_240 (0x05 << 4) +#define RTS5260_DV331812_OCP_THD_270 (0x06 << 4) +#define RTS5260_DV331812_OCP_THD_300 (0x07 << 4) + +#define RTS5260_DVIO_OCP_THD_250 (0x00 << 4) +#define RTS5260_DVIO_OCP_THD_300 (0x01 << 4) +#define RTS5260_DVIO_OCP_THD_350 (0x02 << 4) +#define RTS5260_DVIO_OCP_THD_400 (0x03 << 4) +#define RTS5260_DVIO_OCP_THD_450 (0x04 << 4) +#define RTS5260_DVIO_OCP_THD_500 (0x05 << 4) +#define RTS5260_DVIO_OCP_THD_550 (0x06 << 4) +#define RTS5260_DVIO_OCP_THD_600 (0x07 << 4) + +#define RTS5260_DVCC_OCP_THD_550 (0x00 << 4) +#define RTS5260_DVCC_OCP_THD_970 (0x05 << 4) + +#endif diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c new file mode 100644 index 000000000..471961487 --- /dev/null +++ b/drivers/misc/cardreader/rts5261.c @@ -0,0 +1,776 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved. 
+ * + * Author: + * Rui FENG <rui_feng@realsil.com.cn> + * Wei WANG <wei_wang@realsil.com.cn> + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/rtsx_pci.h> + +#include "rts5261.h" +#include "rtsx_pcr.h" + +static u8 rts5261_get_ic_version(struct rtsx_pcr *pcr) +{ + u8 val; + + rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val); + return val & IC_VERSION_MASK; +} + +static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage) +{ + u8 driving_3v3[4][3] = { + {0x13, 0x13, 0x13}, + {0x96, 0x96, 0x96}, + {0x7F, 0x7F, 0x7F}, + {0x96, 0x96, 0x96}, + }; + u8 driving_1v8[4][3] = { + {0x99, 0x99, 0x99}, + {0x3A, 0x3A, 0x3A}, + {0xE6, 0xE6, 0xE6}, + {0xB3, 0xB3, 0xB3}, + }; + u8 (*driving)[3], drive_sel; + + if (voltage == OUTPUT_3V3) { + driving = driving_3v3; + drive_sel = pcr->sd30_drive_sel_3v3; + } else { + driving = driving_1v8; + drive_sel = pcr->sd30_drive_sel_1v8; + } + + rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL, + 0xFF, driving[drive_sel][0]); + + rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL, + 0xFF, driving[drive_sel][1]); + + rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL, + 0xFF, driving[drive_sel][2]); +} + +static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + u32 reg; + + /* 0x814~0x817 */ + pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); + + if (!rts5261_vendor_setting_valid(reg)) { + pcr_dbg(pcr, "skip fetch vendor setting\n"); + return; + } + + pcr->card_drive_sel &= 0x3F; + pcr->card_drive_sel |= rts5261_reg_to_card_drive_sel(reg); + + if (rts5261_reg_check_reverse_socket(reg)) + pcr->flags |= PCR_REVERSE_SOCKET; + + /* 0x724~0x727 */ + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); + + pcr->aspm_en = rts5261_reg_to_aspm(reg); + pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(reg); + pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(reg); +} + +static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) +{ + /* Set relink_time to 0 */ + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, + RELINK_TIME_MASK, 0); + + if (pm_state == HOST_ENTER_S3) + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, + D3_DELINK_MODE_EN, D3_DELINK_MODE_EN); + + rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL, + SSC_POWER_DOWN, SSC_POWER_DOWN); +} + +static int rts5261_enable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, + LED_SHINE_MASK, LED_SHINE_EN); +} + +static int rts5261_disable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, + LED_SHINE_MASK, LED_SHINE_DISABLE); +} + +static int rts5261_turn_on_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, + 0x02, 0x02); +} + +static int rts5261_turn_off_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, + 0x02, 0x00); +} + +/* SD Pull Control Enable: + * SD_DAT[3:0] ==> pull up + * SD_CD ==> pull up + * SD_WP ==> pull up + * SD_CMD ==> pull up + * SD_CLK ==> pull down + */ +static const u32 rts5261_sd_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9), + 0, +}; + +/* SD Pull Control Disable: + * SD_DAT[3:0] ==> pull down + * SD_CD ==> pull up + * SD_WP ==> pull down + * SD_CMD ==> 
pull down + * SD_CLK ==> pull down + */ +static const u32 rts5261_sd_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5), + 0, +}; + +static int rts5261_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr) +{ + rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK + | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST); + rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF, + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0); + + return 0; +} + +static int rts5261_card_power_on(struct rtsx_pcr *pcr, int card) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) + rtsx_pci_enable_ocp(pcr); + + + rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG1, + RTS5261_LDO1_TUNE_MASK, RTS5261_LDO1_33); + rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL, + RTS5261_LDO1_POWERON, RTS5261_LDO1_POWERON); + + rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL, + RTS5261_LDO3318_POWERON, RTS5261_LDO3318_POWERON); + + msleep(20); + + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN); + + /* Initialize SD_CFG1 register */ + rtsx_pci_write_register(pcr, SD_CFG1, 0xFF, + SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT); + + rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL, + 0xFF, SD20_RX_POS_EDGE); + rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0); + rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR, + SD_STOP | SD_CLR_ERR); + + /* Reset SD_CFG3 register */ + rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0); + rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG, + SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 | + SD30_CLK_STOP_CFG0, 0); + + if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 || + pcr->extra_caps & EXTRA_CAPS_SD_SDR104) + rts5261_sd_set_sample_push_timing_sd30(pcr); + + return 0; +} + +static int rts5261_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + int err; + u16 val = 0; + + rtsx_pci_write_register(pcr, RTS5261_CARD_PWR_CTL, + RTS5261_PUPDC, RTS5261_PUPDC); + + switch (voltage) { + case OUTPUT_3V3: + rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val); + val |= PHY_TUNE_SDBUS_33; + err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val); + if (err < 0) + return err; + + rtsx_pci_write_register(pcr, RTS5261_DV3318_CFG, + RTS5261_DV3318_TUNE_MASK, RTS5261_DV3318_33); + rtsx_pci_write_register(pcr, SD_PAD_CTL, + SD_IO_USING_1V8, 0); + break; + case OUTPUT_1V8: + rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val); + val &= ~PHY_TUNE_SDBUS_33; + err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val); + if (err < 0) + return err; + + rtsx_pci_write_register(pcr, RTS5261_DV3318_CFG, + RTS5261_DV3318_TUNE_MASK, RTS5261_DV3318_18); + rtsx_pci_write_register(pcr, SD_PAD_CTL, + SD_IO_USING_1V8, SD_IO_USING_1V8); + break; + default: + return -EINVAL; + } + + /* set pad drive */ + rts5261_fill_driving(pcr, voltage); + + return 0; +} + +static void rts5261_stop_cmd(struct rtsx_pcr *pcr) +{ + rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD); + rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA); + rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0, + RTS5260_DMA_RST | RTS5260_ADMA3_RST, + RTS5260_DMA_RST | RTS5260_ADMA3_RST); + rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH); +} + +static void rts5261_card_before_power_off(struct rtsx_pcr *pcr) +{ + rts5261_stop_cmd(pcr); + rts5261_switch_output_voltage(pcr, OUTPUT_3V3); + +} + +static void 
rts5261_enable_ocp(struct rtsx_pcr *pcr) +{ + u8 val = 0; + + val = SD_OCP_INT_EN | SD_DETECT_EN; + rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val); + +} + +static void rts5261_disable_ocp(struct rtsx_pcr *pcr) +{ + u8 mask = 0; + + mask = SD_OCP_INT_EN | SD_DETECT_EN; + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0, + RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN, 0); + +} + +static int rts5261_card_power_off(struct rtsx_pcr *pcr, int card) +{ + int err = 0; + + rts5261_card_before_power_off(pcr); + err = rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL, + RTS5261_LDO_POWERON_MASK, 0); + + if (pcr->option.ocp_en) + rtsx_pci_disable_ocp(pcr); + + return err; +} + +static void rts5261_init_ocp(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) { + u8 mask, val; + + rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0, + RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN, + RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN); + + rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0, + RTS5261_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd); + + rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0, + RTS5261_LDO1_OCP_LMT_THD_MASK, + RTS5261_LDO1_LMT_THD_2000); + + mask = SD_OCP_GLITCH_MASK; + val = pcr->hw_param.ocp_glitch; + rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val); + + rts5261_enable_ocp(pcr); + } else { + rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0, + RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN, 0); + } +} + +static void rts5261_clear_ocpstat(struct rtsx_pcr *pcr) +{ + u8 mask = 0; + u8 val = 0; + + mask = SD_OCP_INT_CLR | SD_OC_CLR; + val = SD_OCP_INT_CLR | SD_OC_CLR; + + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val); + + udelay(10); + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + +} + +static void rts5261_process_ocp(struct rtsx_pcr *pcr) +{ + if (!pcr->option.ocp_en) + return; + + rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat); + + if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) { + rts5261_card_power_off(pcr, RTSX_SD_CARD); + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); + rts5261_clear_ocpstat(pcr); + pcr->ocp_stat = 0; + } + +} + +static int rts5261_init_from_hw(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + int retval; + u32 lval, i; + u8 valid, efuse_valid, tmp; + + rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL, + REG_EFUSE_POR | REG_EFUSE_POWER_MASK, + REG_EFUSE_POR | REG_EFUSE_POWERON); + udelay(1); + rtsx_pci_write_register(pcr, RTS5261_EFUSE_ADDR, + RTS5261_EFUSE_ADDR_MASK, 0x00); + rtsx_pci_write_register(pcr, RTS5261_EFUSE_CTL, + RTS5261_EFUSE_ENABLE | RTS5261_EFUSE_MODE_MASK, + RTS5261_EFUSE_ENABLE); + + /* Wait transfer end */ + for (i = 0; i < MAX_RW_REG_CNT; i++) { + rtsx_pci_read_register(pcr, RTS5261_EFUSE_CTL, &tmp); + if ((tmp & 0x80) == 0) + break; + } + rtsx_pci_read_register(pcr, RTS5261_EFUSE_READ_DATA, &tmp); + efuse_valid = ((tmp & 0x0C) >> 2); + pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid); + + if (efuse_valid == 0) { + retval = pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval); + if (retval != 0) + pcr_dbg(pcr, "read 0x814 DW fail\n"); + pcr_dbg(pcr, "DW from 0x814: 0x%x\n", lval); + /* 0x816 */ + valid = (u8)((lval >> 16) & 0x03); + pcr_dbg(pcr, "0x816: %d\n", valid); + } + rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL, + REG_EFUSE_POR, 0); + pcr_dbg(pcr, "Disable efuse por!\n"); + + pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval); + lval = lval & 0x00FFFFFF; + retval = 
pci_write_config_dword(pdev, PCR_SETTING_REG2, lval); + if (retval != 0) + pcr_dbg(pcr, "write config fail\n"); + + return retval; +} + +static void rts5261_init_from_cfg(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + int l1ss; + u32 lval; + struct rtsx_cr_option *option = &pcr->option; + + l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); + if (!l1ss) + return; + + pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); + + if (lval & PCI_L1SS_CTL1_ASPM_L1_1) + rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); + else + rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN); + + if (lval & PCI_L1SS_CTL1_ASPM_L1_2) + rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); + else + rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN); + + if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) + rtsx_set_dev_flag(pcr, PM_L1_1_EN); + else + rtsx_clear_dev_flag(pcr, PM_L1_1_EN); + + if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) + rtsx_set_dev_flag(pcr, PM_L1_2_EN); + else + rtsx_clear_dev_flag(pcr, PM_L1_2_EN); + + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0); + if (option->ltr_en) { + u16 val; + + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val); + if (val & PCI_EXP_DEVCTL2_LTR_EN) { + option->ltr_enabled = true; + option->ltr_active = true; + rtsx_set_ltr_latency(pcr, option->ltr_active_latency); + } else { + option->ltr_enabled = false; + } + } + + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) + option->force_clkreq_0 = false; + else + option->force_clkreq_0 = true; +} + +static int rts5261_extra_init_hw(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1, + CD_RESUME_EN_MASK, CD_RESUME_EN_MASK); + + rts5261_init_from_cfg(pcr); + rts5261_init_from_hw(pcr); + + /* power off efuse */ + rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL, + REG_EFUSE_POWER_MASK, REG_EFUSE_POWEROFF); + rtsx_pci_write_register(pcr, L1SUB_CONFIG1, + AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE); + rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0); + + rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG4, + RTS5261_AUX_CLK_16M_EN, 0); + + /* Release PRSNT# */ + rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG4, + RTS5261_FORCE_PRSNT_LOW, 0); + rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, + FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG); + + rtsx_pci_write_register(pcr, PCLK_CTL, + PCLK_MODE_SEL, PCLK_MODE_SEL); + + rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0); + rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, CLK_PM_EN, CLK_PM_EN); + + /* LED shine disabled, set initial shine cycle period */ + rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02); + + /* Configure driving */ + rts5261_fill_driving(pcr, OUTPUT_3V3); + + /* + * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced + * to drive low, and we forcibly request clock. 
+ */ + if (option->force_clkreq_0) + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); + else + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00); + rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL, + FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL); + + /* Clear Enter RTD3_cold Information*/ + rtsx_pci_write_register(pcr, RTS5261_FW_CTL, + RTS5261_INFORM_RTD3_COLD, 0); + + return 0; +} + +static void rts5261_enable_aspm(struct rtsx_pcr *pcr, bool enable) +{ + if (pcr->aspm_enabled == enable) + return; + + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en); + pcr->aspm_enabled = enable; + +} + +static void rts5261_disable_aspm(struct rtsx_pcr *pcr, bool enable) +{ + if (pcr->aspm_enabled == enable) + return; + + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, 0); + rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); + udelay(10); + pcr->aspm_enabled = enable; +} + +static void rts5261_set_aspm(struct rtsx_pcr *pcr, bool enable) +{ + if (enable) + rts5261_enable_aspm(pcr, true); + else + rts5261_disable_aspm(pcr, false); +} + +static void rts5261_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active) +{ + struct rtsx_cr_option *option = &pcr->option; + int aspm_L1_1, aspm_L1_2; + u8 val = 0; + + aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN); + aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN); + + if (active) { + /* run, latency: 60us */ + if (aspm_L1_1) + val = option->ltr_l1off_snooze_sspwrgate; + } else { + /* l1off, latency: 300us */ + if (aspm_L1_2) + val = option->ltr_l1off_sspwrgate; + } + + rtsx_set_l1off_sub(pcr, val); +} + +static const struct pcr_ops rts5261_pcr_ops = { + .fetch_vendor_settings = rtsx5261_fetch_vendor_settings, + .turn_on_led = rts5261_turn_on_led, + .turn_off_led = rts5261_turn_off_led, + .extra_init_hw = rts5261_extra_init_hw, + .enable_auto_blink = rts5261_enable_auto_blink, + .disable_auto_blink = rts5261_disable_auto_blink, + .card_power_on = rts5261_card_power_on, + .card_power_off = rts5261_card_power_off, + .switch_output_voltage = rts5261_switch_output_voltage, + .force_power_down = rts5261_force_power_down, + .stop_cmd = rts5261_stop_cmd, + .set_aspm = rts5261_set_aspm, + .set_l1off_cfg_sub_d0 = rts5261_set_l1off_cfg_sub_d0, + .enable_ocp = rts5261_enable_ocp, + .disable_ocp = rts5261_disable_ocp, + .init_ocp = rts5261_init_ocp, + .process_ocp = rts5261_process_ocp, + .clear_ocpstat = rts5261_clear_ocpstat, +}; + +static inline u8 double_ssc_depth(u8 depth) +{ + return ((depth > 1) ? 
(depth - 1) : depth); +} + +int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk) +{ + int err, clk; + u16 n; + u8 clk_divider, mcu_cnt, div; + static const u8 depth[] = { + [RTSX_SSC_DEPTH_4M] = RTS5261_SSC_DEPTH_4M, + [RTSX_SSC_DEPTH_2M] = RTS5261_SSC_DEPTH_2M, + [RTSX_SSC_DEPTH_1M] = RTS5261_SSC_DEPTH_1M, + [RTSX_SSC_DEPTH_500K] = RTS5261_SSC_DEPTH_512K, + }; + + if (initial_mode) { + /* Use a card clock of around 250 kHz in the initial stage */ + if (is_version(pcr, PID_5261, IC_VER_D)) { + clk_divider = SD_CLK_DIVIDE_256; + card_clock = 60000000; + } else { + clk_divider = SD_CLK_DIVIDE_128; + card_clock = 30000000; + } + } else { + clk_divider = SD_CLK_DIVIDE_0; + } + err = rtsx_pci_write_register(pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, clk_divider); + if (err < 0) + return err; + + card_clock /= 1000000; + pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock); + + clk = card_clock; + if (!initial_mode && double_clk) + clk = card_clock * 2; + pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n", + clk, pcr->cur_clock); + + if (clk == pcr->cur_clock) + return 0; + + if (pcr->ops->conv_clk_and_div_n) + n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N); + else + n = clk - 4; + if ((clk <= 4) || (n > 396)) + return -EINVAL; + + mcu_cnt = 125/clk + 3; + if (mcu_cnt > 15) + mcu_cnt = 15; + + div = CLK_DIV_1; + while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) { + if (pcr->ops->conv_clk_and_div_n) { + int dbl_clk = pcr->ops->conv_clk_and_div_n(n, + DIV_N_TO_CLK) * 2; + n = pcr->ops->conv_clk_and_div_n(dbl_clk, + CLK_TO_DIV_N); + } else { + n = (n + 4) * 2 - 4; + } + div++; + } + + n = (n / 2); + pcr_dbg(pcr, "n = %d, div = %d\n", n, div); + + ssc_depth = depth[ssc_depth]; + if (double_clk) + ssc_depth = double_ssc_depth(ssc_depth); + + if (ssc_depth) { + if (div == CLK_DIV_2) { + if (ssc_depth > 1) + ssc_depth -= 1; + else + ssc_depth = RTS5261_SSC_DEPTH_8M; + } else if (div == CLK_DIV_4) { + if (ssc_depth > 2) + ssc_depth -= 2; + else + ssc_depth = RTS5261_SSC_DEPTH_8M; + } else if (div == CLK_DIV_8) { + if (ssc_depth > 3) + ssc_depth -= 3; + else + ssc_depth = RTS5261_SSC_DEPTH_8M; + } + } else { + ssc_depth = 0; + } + pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth); + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, + CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, + 0xFF, (div << 4) | mcu_cnt); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, + SSC_DEPTH_MASK, ssc_depth); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB); + if (vpclk) { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL, + PHASE_NOT_RESET, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, PHASE_NOT_RESET); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL, + PHASE_NOT_RESET, PHASE_NOT_RESET); + } + + err = rtsx_pci_send_cmd(pcr, 2000); + if (err < 0) + return err; + + /* Wait for the SSC clock to stabilize */ + udelay(SSC_CLOCK_STABLE_WAIT); + err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0); + if (err < 0) + return err; + + pcr->cur_clock = clk; + return 0; + +} + +void rts5261_init_params(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + struct rtsx_hw_param *hw_param = &pcr->hw_param; + + pcr->extra_caps = 
EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104; + pcr->num_slots = 1; + pcr->ops = &rts5261_pcr_ops; + + pcr->flags = 0; + pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT; + pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B; + pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B; + pcr->aspm_en = ASPM_L1_EN; + pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 11); + pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5); + + pcr->ic_version = rts5261_get_ic_version(pcr); + pcr->sd_pull_ctl_enable_tbl = rts5261_sd_pull_ctl_enable_tbl; + pcr->sd_pull_ctl_disable_tbl = rts5261_sd_pull_ctl_disable_tbl; + + pcr->reg_pm_ctrl3 = RTS5261_AUTOLOAD_CFG3; + + option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN + | LTR_L1SS_PWR_GATE_EN); + option->ltr_en = true; + + /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */ + option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF; + option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF; + option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF; + option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF; + option->ltr_l1off_sspwrgate = 0x7F; + option->ltr_l1off_snooze_sspwrgate = 0x78; + + option->ocp_en = 1; + hw_param->interrupt_en |= SD_OC_INT_EN; + hw_param->ocp_glitch = SD_OCP_GLITCH_800U; + option->sd_800mA_ocp_thd = RTS5261_LDO1_OCP_THD_1040; +} diff --git a/drivers/misc/cardreader/rts5261.h b/drivers/misc/cardreader/rts5261.h new file mode 100644 index 000000000..ebfdd236a --- /dev/null +++ b/drivers/misc/cardreader/rts5261.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved. + * + * Author: + * Rui FENG <rui_feng@realsil.com.cn> + * Wei WANG <wei_wang@realsil.com.cn> + */ +#ifndef RTS5261_H +#define RTS5261_H + +/*New add*/ +#define rts5261_vendor_setting_valid(reg) ((reg) & 0x010000) +#define rts5261_reg_to_aspm(reg) (((reg) >> 28) ^ 0x03) +#define rts5261_reg_check_reverse_socket(reg) ((reg) & 0x04) +#define rts5261_reg_to_card_drive_sel(reg) ((((reg) >> 6) & 0x01) << 6) +#define rts5261_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 22) ^ 0x03) +#define rts5261_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 16) ^ 0x03) + + +#define RTS5261_AUTOLOAD_CFG0 0xFF7B +#define RTS5261_AUTOLOAD_CFG1 0xFF7C +#define RTS5261_AUTOLOAD_CFG2 0xFF7D +#define RTS5261_AUTOLOAD_CFG3 0xFF7E +#define RTS5261_AUTOLOAD_CFG4 0xFF7F +#define RTS5261_FORCE_PRSNT_LOW (1 << 6) +#define RTS5261_AUX_CLK_16M_EN (1 << 5) + +#define RTS5261_REG_VREF 0xFE97 +#define RTS5261_PWD_SUSPND_EN (1 << 4) + +#define RTS5261_PAD_H3L1 0xFF79 +#define PAD_GPIO_H3L1 (1 << 3) + +/* SSC_CTL2 0xFC12 */ +#define RTS5261_SSC_DEPTH_MASK 0x07 +#define RTS5261_SSC_DEPTH_DISALBE 0x00 +#define RTS5261_SSC_DEPTH_8M 0x01 +#define RTS5261_SSC_DEPTH_4M 0x02 +#define RTS5261_SSC_DEPTH_2M 0x03 +#define RTS5261_SSC_DEPTH_1M 0x04 +#define RTS5261_SSC_DEPTH_512K 0x05 +#define RTS5261_SSC_DEPTH_256K 0x06 +#define RTS5261_SSC_DEPTH_128K 0x07 + +/* efuse control register*/ +#define RTS5261_EFUSE_CTL 0xFC30 +#define RTS5261_EFUSE_ENABLE 0x80 +/* EFUSE_MODE: 0=READ 1=PROGRAM */ +#define RTS5261_EFUSE_MODE_MASK 0x40 +#define RTS5261_EFUSE_PROGRAM 0x40 + +#define RTS5261_EFUSE_ADDR 0xFC31 +#define RTS5261_EFUSE_ADDR_MASK 0x3F + +#define RTS5261_EFUSE_WRITE_DATA 0xFC32 +#define RTS5261_EFUSE_READ_DATA 0xFC34 + +/* DMACTL 0xFE2C */ +#define RTS5261_DMA_PACK_SIZE_MASK 0xF0 + +/* FW config info register */ +#define RTS5261_FW_CFG_INFO0 0xFF50 +#define RTS5261_FW_EXPRESS_TEST_MASK (0x01<<0) +#define RTS5261_FW_EA_MODE_MASK (0x01<<5) + +/* 
FW config register */ +#define RTS5261_FW_CFG0 0xFF54 +#define RTS5261_FW_ENTER_EXPRESS (0x01<<0) + +#define RTS5261_FW_CFG1 0xFF55 +#define RTS5261_SYS_CLK_SEL_MCU_CLK (0x01<<7) +#define RTS5261_CRC_CLK_SEL_MCU_CLK (0x01<<6) +#define RTS5261_FAKE_MCU_CLOCK_GATING (0x01<<5) +/*MCU_bus_mode_sel: 0=real 8051 1=fake mcu*/ +#define RTS5261_MCU_BUS_SEL_MASK (0x01<<4) +/*MCU_clock_sel:VerA 00=aux16M 01=aux400K 1x=REFCLK100M*/ +/*MCU_clock_sel:VerB 00=aux400K 01=aux16M 10=REFCLK100M*/ +#define RTS5261_MCU_CLOCK_SEL_MASK (0x03<<2) +#define RTS5261_MCU_CLOCK_SEL_16M (0x01<<2) +#define RTS5261_MCU_CLOCK_GATING (0x01<<1) +#define RTS5261_DRIVER_ENABLE_FW (0x01<<0) + +/* FW status register */ +#define RTS5261_FW_STATUS 0xFF56 +#define RTS5261_EXPRESS_LINK_FAIL_MASK (0x01<<7) + +/* FW control register */ +#define RTS5261_FW_CTL 0xFF5F +#define RTS5261_INFORM_RTD3_COLD (0x01<<5) + +#define RTS5261_REG_FPDCTL 0xFF60 + +#define RTS5261_REG_LDO12_CFG 0xFF6E +#define RTS5261_LDO12_VO_TUNE_MASK (0x07<<1) +#define RTS5261_LDO12_115 (0x03<<1) +#define RTS5261_LDO12_120 (0x04<<1) +#define RTS5261_LDO12_125 (0x05<<1) +#define RTS5261_LDO12_130 (0x06<<1) +#define RTS5261_LDO12_135 (0x07<<1) + +/* LDO control register */ +#define RTS5261_CARD_PWR_CTL 0xFD50 +#define RTS5261_SD_CLK_ISO (0x01<<7) +#define RTS5261_PAD_SD_DAT_FW_CTRL (0x01<<6) +#define RTS5261_PUPDC (0x01<<5) +#define RTS5261_SD_CMD_ISO (0x01<<4) +#define RTS5261_SD_DAT_ISO_MASK (0x0F<<0) + +#define RTS5261_LDO1233318_POW_CTL 0xFF70 +#define RTS5261_LDO3318_POWERON (0x01<<3) +#define RTS5261_LDO3_POWERON (0x01<<2) +#define RTS5261_LDO2_POWERON (0x01<<1) +#define RTS5261_LDO1_POWERON (0x01<<0) +#define RTS5261_LDO_POWERON_MASK (0x0F<<0) + +#define RTS5261_DV3318_CFG 0xFF71 +#define RTS5261_DV3318_TUNE_MASK (0x07<<4) +#define RTS5261_DV3318_18 (0x02<<4) +#define RTS5261_DV3318_19 (0x04<<4) +#define RTS5261_DV3318_33 (0x07<<4) + +#define RTS5261_LDO1_CFG0 0xFF72 +#define RTS5261_LDO1_OCP_THD_MASK (0x07<<5) +#define RTS5261_LDO1_OCP_EN (0x01<<4) +#define RTS5261_LDO1_OCP_LMT_THD_MASK (0x03<<2) +#define RTS5261_LDO1_OCP_LMT_EN (0x01<<1) + +/* CRD6603-433 190319 request changed */ +#define RTS5261_LDO1_OCP_THD_740 (0x00<<5) +#define RTS5261_LDO1_OCP_THD_800 (0x01<<5) +#define RTS5261_LDO1_OCP_THD_860 (0x02<<5) +#define RTS5261_LDO1_OCP_THD_920 (0x03<<5) +#define RTS5261_LDO1_OCP_THD_980 (0x04<<5) +#define RTS5261_LDO1_OCP_THD_1040 (0x05<<5) +#define RTS5261_LDO1_OCP_THD_1100 (0x06<<5) +#define RTS5261_LDO1_OCP_THD_1160 (0x07<<5) + +#define RTS5261_LDO1_LMT_THD_450 (0x00<<2) +#define RTS5261_LDO1_LMT_THD_1000 (0x01<<2) +#define RTS5261_LDO1_LMT_THD_1500 (0x02<<2) +#define RTS5261_LDO1_LMT_THD_2000 (0x03<<2) + +#define RTS5261_LDO1_CFG1 0xFF73 +#define RTS5261_LDO1_TUNE_MASK (0x07<<1) +#define RTS5261_LDO1_18 (0x05<<1) +#define RTS5261_LDO1_33 (0x07<<1) +#define RTS5261_LDO1_PWD_MASK (0x01<<0) + +#define RTS5261_LDO2_CFG0 0xFF74 +#define RTS5261_LDO2_OCP_THD_MASK (0x07<<5) +#define RTS5261_LDO2_OCP_EN (0x01<<4) +#define RTS5261_LDO2_OCP_LMT_THD_MASK (0x03<<2) +#define RTS5261_LDO2_OCP_LMT_EN (0x01<<1) + +#define RTS5261_LDO2_OCP_THD_620 (0x00<<5) +#define RTS5261_LDO2_OCP_THD_650 (0x01<<5) +#define RTS5261_LDO2_OCP_THD_680 (0x02<<5) +#define RTS5261_LDO2_OCP_THD_720 (0x03<<5) +#define RTS5261_LDO2_OCP_THD_750 (0x04<<5) +#define RTS5261_LDO2_OCP_THD_780 (0x05<<5) +#define RTS5261_LDO2_OCP_THD_810 (0x06<<5) +#define RTS5261_LDO2_OCP_THD_840 (0x07<<5) + +#define RTS5261_LDO2_CFG1 0xFF75 +#define RTS5261_LDO2_TUNE_MASK (0x07<<1) +#define RTS5261_LDO2_18 (0x05<<1) 
+#define RTS5261_LDO2_33 (0x07<<1) +#define RTS5261_LDO2_PWD_MASK (0x01<<0) + +#define RTS5261_LDO3_CFG0 0xFF76 +#define RTS5261_LDO3_OCP_THD_MASK (0x07<<5) +#define RTS5261_LDO3_OCP_EN (0x01<<4) +#define RTS5261_LDO3_OCP_LMT_THD_MASK (0x03<<2) +#define RTS5261_LDO3_OCP_LMT_EN (0x01<<1) + +#define RTS5261_LDO3_OCP_THD_620 (0x00<<5) +#define RTS5261_LDO3_OCP_THD_650 (0x01<<5) +#define RTS5261_LDO3_OCP_THD_680 (0x02<<5) +#define RTS5261_LDO3_OCP_THD_720 (0x03<<5) +#define RTS5261_LDO3_OCP_THD_750 (0x04<<5) +#define RTS5261_LDO3_OCP_THD_780 (0x05<<5) +#define RTS5261_LDO3_OCP_THD_810 (0x06<<5) +#define RTS5261_LDO3_OCP_THD_840 (0x07<<5) + +#define RTS5261_LDO3_CFG1 0xFF77 +#define RTS5261_LDO3_TUNE_MASK (0x07<<1) +#define RTS5261_LDO3_18 (0x05<<1) +#define RTS5261_LDO3_33 (0x07<<1) +#define RTS5261_LDO3_PWD_MASK (0x01<<0) + +#define RTS5261_REG_PME_FORCE_CTL 0xFF78 +#define FORCE_PM_CONTROL 0x20 +#define FORCE_PM_VALUE 0x10 +#define REG_EFUSE_BYPASS 0x08 +#define REG_EFUSE_POR 0x04 +#define REG_EFUSE_POWER_MASK 0x03 +#define REG_EFUSE_POWERON 0x03 +#define REG_EFUSE_POWEROFF 0x00 + + +/* Single LUN, support SD/SD EXPRESS */ +#define DEFAULT_SINGLE 0 +#define SD_LUN 1 +#define SD_EXPRESS_LUN 2 + +/* For Change_FPGA_SSCClock Function */ +#define MULTIPLY_BY_1 0x00 +#define MULTIPLY_BY_2 0x01 +#define MULTIPLY_BY_3 0x02 +#define MULTIPLY_BY_4 0x03 +#define MULTIPLY_BY_5 0x04 +#define MULTIPLY_BY_6 0x05 +#define MULTIPLY_BY_7 0x06 +#define MULTIPLY_BY_8 0x07 +#define MULTIPLY_BY_9 0x08 +#define MULTIPLY_BY_10 0x09 + +#define DIVIDE_BY_2 0x01 +#define DIVIDE_BY_3 0x02 +#define DIVIDE_BY_4 0x03 +#define DIVIDE_BY_5 0x04 +#define DIVIDE_BY_6 0x05 +#define DIVIDE_BY_7 0x06 +#define DIVIDE_BY_8 0x07 +#define DIVIDE_BY_9 0x08 +#define DIVIDE_BY_10 0x09 + +int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); + +#endif /* RTS5261_H */ diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c new file mode 100644 index 000000000..358b000b3 --- /dev/null +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -0,0 +1,1743 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. 
+ * + * Author: + * Wei WANG <wei_wang@realsil.com.cn> + */ + +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/highmem.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/idr.h> +#include <linux/platform_device.h> +#include <linux/mfd/core.h> +#include <linux/rtsx_pci.h> +#include <linux/mmc/card.h> +#include <asm/unaligned.h> + +#include "rtsx_pcr.h" +#include "rts5261.h" +#include "rts5228.h" + +static bool msi_en = true; +module_param(msi_en, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(msi_en, "Enable MSI"); + +static DEFINE_IDR(rtsx_pci_idr); +static DEFINE_SPINLOCK(rtsx_pci_lock); + +static struct mfd_cell rtsx_pcr_cells[] = { + [RTSX_SD_CARD] = { + .name = DRV_NAME_RTSX_PCI_SDMMC, + }, +}; + +static const struct pci_device_id rtsx_pci_ids[] = { + { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, rtsx_pci_ids); + +static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr) +{ + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, 0); +} + +static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency) +{ + rtsx_pci_write_register(pcr, MSGTXDATA0, + MASK_8_BIT_DEF, (u8) (latency & 0xFF)); + rtsx_pci_write_register(pcr, MSGTXDATA1, + MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF)); + rtsx_pci_write_register(pcr, MSGTXDATA2, + MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF)); + rtsx_pci_write_register(pcr, MSGTXDATA3, + MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF)); + rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK | + LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW); + + return 0; +} + +int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency) +{ + return rtsx_comm_set_ltr_latency(pcr, latency); +} + +static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable) +{ + if (pcr->aspm_enabled == enable) + return; + + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, + enable ? 
pcr->aspm_en : 0); + + pcr->aspm_enabled = enable; +} + +static void rtsx_disable_aspm(struct rtsx_pcr *pcr) +{ + if (pcr->ops->set_aspm) + pcr->ops->set_aspm(pcr, false); + else + rtsx_comm_set_aspm(pcr, false); +} + +int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val) +{ + rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val); + + return 0; +} + +static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active) +{ + if (pcr->ops->set_l1off_cfg_sub_d0) + pcr->ops->set_l1off_cfg_sub_d0(pcr, active); +} + +static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + rtsx_disable_aspm(pcr); + + /* Fixes DMA transfer timout issue after disabling ASPM on RTS5260 */ + msleep(1); + + if (option->ltr_enabled) + rtsx_set_ltr_latency(pcr, option->ltr_active_latency); + + if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN)) + rtsx_set_l1off_sub_cfg_d0(pcr, 1); +} + +static void rtsx_pm_full_on(struct rtsx_pcr *pcr) +{ + rtsx_comm_pm_full_on(pcr); +} + +void rtsx_pci_start_run(struct rtsx_pcr *pcr) +{ + /* If pci device removed, don't queue idle work any more */ + if (pcr->remove_pci) + return; + + if (pcr->state != PDEV_STAT_RUN) { + pcr->state = PDEV_STAT_RUN; + if (pcr->ops->enable_auto_blink) + pcr->ops->enable_auto_blink(pcr); + rtsx_pm_full_on(pcr); + } + + mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200)); +} +EXPORT_SYMBOL_GPL(rtsx_pci_start_run); + +int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data) +{ + int i; + u32 val = HAIMR_WRITE_START; + + val |= (u32)(addr & 0x3FFF) << 16; + val |= (u32)mask << 8; + val |= (u32)data; + + rtsx_pci_writel(pcr, RTSX_HAIMR, val); + + for (i = 0; i < MAX_RW_REG_CNT; i++) { + val = rtsx_pci_readl(pcr, RTSX_HAIMR); + if ((val & HAIMR_TRANS_END) == 0) { + if (data != (u8)val) + return -EIO; + return 0; + } + } + + return -ETIMEDOUT; +} +EXPORT_SYMBOL_GPL(rtsx_pci_write_register); + +int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data) +{ + u32 val = HAIMR_READ_START; + int i; + + val |= (u32)(addr & 0x3FFF) << 16; + rtsx_pci_writel(pcr, RTSX_HAIMR, val); + + for (i = 0; i < MAX_RW_REG_CNT; i++) { + val = rtsx_pci_readl(pcr, RTSX_HAIMR); + if ((val & HAIMR_TRANS_END) == 0) + break; + } + + if (i >= MAX_RW_REG_CNT) + return -ETIMEDOUT; + + if (data) + *data = (u8)(val & 0xFF); + + return 0; +} +EXPORT_SYMBOL_GPL(rtsx_pci_read_register); + +int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val) +{ + int err, i, finished = 0; + u8 tmp; + + rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val); + rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8)); + rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr); + rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81); + + for (i = 0; i < 100000; i++) { + err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp); + if (err < 0) + return err; + + if (!(tmp & 0x80)) { + finished = 1; + break; + } + } + + if (!finished) + return -ETIMEDOUT; + + return 0; +} + +int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val) +{ + if (pcr->ops->write_phy) + return pcr->ops->write_phy(pcr, addr, val); + + return __rtsx_pci_write_phy_register(pcr, addr, val); +} +EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register); + +int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val) +{ + int err, i, finished = 0; + u16 data; + u8 tmp, val1, val2; + + rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr); + rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80); + + for (i = 0; i < 100000; i++) { + 
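/*
+		 * Poll PHYRWCTL: the controller clears bit 7 when the
+		 * PHY transfer finishes; if it never does within 100000
+		 * reads, the caller gets -ETIMEDOUT below.
+		 */
+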
err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp); + if (err < 0) + return err; + + if (!(tmp & 0x80)) { + finished = 1; + break; + } + } + + if (!finished) + return -ETIMEDOUT; + + rtsx_pci_read_register(pcr, PHYDATA0, &val1); + rtsx_pci_read_register(pcr, PHYDATA1, &val2); + data = val1 | (val2 << 8); + + if (val) + *val = data; + + return 0; +} + +int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val) +{ + if (pcr->ops->read_phy) + return pcr->ops->read_phy(pcr, addr, val); + + return __rtsx_pci_read_phy_register(pcr, addr, val); +} +EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register); + +void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr) +{ + if (pcr->ops->stop_cmd) + return pcr->ops->stop_cmd(pcr); + + rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD); + rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA); + + rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80); + rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80); +} +EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd); + +void rtsx_pci_add_cmd(struct rtsx_pcr *pcr, + u8 cmd_type, u16 reg_addr, u8 mask, u8 data) +{ + unsigned long flags; + u32 val = 0; + u32 *ptr = (u32 *)(pcr->host_cmds_ptr); + + val |= (u32)(cmd_type & 0x03) << 30; + val |= (u32)(reg_addr & 0x3FFF) << 16; + val |= (u32)mask << 8; + val |= (u32)data; + + spin_lock_irqsave(&pcr->lock, flags); + ptr += pcr->ci; + if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) { + put_unaligned_le32(val, ptr); + ptr++; + pcr->ci++; + } + spin_unlock_irqrestore(&pcr->lock, flags); +} +EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd); + +void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr) +{ + u32 val = 1 << 31; + + rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr); + + val |= (u32)(pcr->ci * 4) & 0x00FFFFFF; + /* Hardware Auto Response */ + val |= 0x40000000; + rtsx_pci_writel(pcr, RTSX_HCBCTLR, val); +} +EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait); + +int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout) +{ + struct completion trans_done; + u32 val = 1 << 31; + long timeleft; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&pcr->lock, flags); + + /* set up data structures for the wakeup system */ + pcr->done = &trans_done; + pcr->trans_result = TRANS_NOT_READY; + init_completion(&trans_done); + + rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr); + + val |= (u32)(pcr->ci * 4) & 0x00FFFFFF; + /* Hardware Auto Response */ + val |= 0x40000000; + rtsx_pci_writel(pcr, RTSX_HCBCTLR, val); + + spin_unlock_irqrestore(&pcr->lock, flags); + + /* Wait for TRANS_OK_INT */ + timeleft = wait_for_completion_interruptible_timeout( + &trans_done, msecs_to_jiffies(timeout)); + if (timeleft <= 0) { + pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__); + err = -ETIMEDOUT; + goto finish_send_cmd; + } + + spin_lock_irqsave(&pcr->lock, flags); + if (pcr->trans_result == TRANS_RESULT_FAIL) + err = -EINVAL; + else if (pcr->trans_result == TRANS_RESULT_OK) + err = 0; + else if (pcr->trans_result == TRANS_NO_DEVICE) + err = -ENODEV; + spin_unlock_irqrestore(&pcr->lock, flags); + +finish_send_cmd: + spin_lock_irqsave(&pcr->lock, flags); + pcr->done = NULL; + spin_unlock_irqrestore(&pcr->lock, flags); + + if ((err < 0) && (err != -ENODEV)) + rtsx_pci_stop_cmd(pcr); + + if (pcr->finish_me) + complete(pcr->finish_me); + + return err; +} +EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd); + +static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr, + dma_addr_t addr, unsigned int len, int end) +{ + u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi; + u64 val; + u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA; + + pcr_dbg(pcr, "DMA addr: 0x%x, Len: 
0x%x\n", (unsigned int)addr, len); + + if (end) + option |= RTSX_SG_END; + + if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) { + if (len > 0xFFFF) + val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16) + | (((u64)len >> 16) << 6) | option; + else + val = ((u64)addr << 32) | ((u64)len << 16) | option; + } else { + val = ((u64)addr << 32) | ((u64)len << 12) | option; + } + put_unaligned_le64(val, ptr); + pcr->sgi++; +} + +int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int num_sg, bool read, int timeout) +{ + int err = 0, count; + + pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg); + count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read); + if (count < 1) + return -EINVAL; + pcr_dbg(pcr, "DMA mapping count: %d\n", count); + + err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout); + + rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read); + + return err; +} +EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data); + +int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int num_sg, bool read) +{ + enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + + if (pcr->remove_pci) + return -EINVAL; + + if ((sglist == NULL) || (num_sg <= 0)) + return -EINVAL; + + return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir); +} +EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg); + +void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int num_sg, bool read) +{ + enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + + dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir); +} +EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg); + +int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int count, bool read, int timeout) +{ + struct completion trans_done; + struct scatterlist *sg; + dma_addr_t addr; + long timeleft; + unsigned long flags; + unsigned int len; + int i, err = 0; + u32 val; + u8 dir = read ? 
DEVICE_TO_HOST : HOST_TO_DEVICE; + + if (pcr->remove_pci) + return -ENODEV; + + if ((sglist == NULL) || (count < 1)) + return -EINVAL; + + val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE; + pcr->sgi = 0; + for_each_sg(sglist, sg, count, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1); + } + + spin_lock_irqsave(&pcr->lock, flags); + + pcr->done = &trans_done; + pcr->trans_result = TRANS_NOT_READY; + init_completion(&trans_done); + rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr); + rtsx_pci_writel(pcr, RTSX_HDBCTLR, val); + + spin_unlock_irqrestore(&pcr->lock, flags); + + timeleft = wait_for_completion_interruptible_timeout( + &trans_done, msecs_to_jiffies(timeout)); + if (timeleft <= 0) { + pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__); + err = -ETIMEDOUT; + goto out; + } + + spin_lock_irqsave(&pcr->lock, flags); + if (pcr->trans_result == TRANS_RESULT_FAIL) { + err = -EILSEQ; + if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION) + pcr->dma_error_count++; + } + + else if (pcr->trans_result == TRANS_NO_DEVICE) + err = -ENODEV; + spin_unlock_irqrestore(&pcr->lock, flags); + +out: + spin_lock_irqsave(&pcr->lock, flags); + pcr->done = NULL; + spin_unlock_irqrestore(&pcr->lock, flags); + + if ((err < 0) && (err != -ENODEV)) + rtsx_pci_stop_cmd(pcr); + + if (pcr->finish_me) + complete(pcr->finish_me); + + return err; +} +EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer); + +int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len) +{ + int err; + int i, j; + u16 reg; + u8 *ptr; + + if (buf_len > 512) + buf_len = 512; + + ptr = buf; + reg = PPBUF_BASE2; + for (i = 0; i < buf_len / 256; i++) { + rtsx_pci_init_cmd(pcr); + + for (j = 0; j < 256; j++) + rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0); + + err = rtsx_pci_send_cmd(pcr, 250); + if (err < 0) + return err; + + memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256); + ptr += 256; + } + + if (buf_len % 256) { + rtsx_pci_init_cmd(pcr); + + for (j = 0; j < buf_len % 256; j++) + rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0); + + err = rtsx_pci_send_cmd(pcr, 250); + if (err < 0) + return err; + } + + memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256); + + return 0; +} +EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf); + +int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len) +{ + int err; + int i, j; + u16 reg; + u8 *ptr; + + if (buf_len > 512) + buf_len = 512; + + ptr = buf; + reg = PPBUF_BASE2; + for (i = 0; i < buf_len / 256; i++) { + rtsx_pci_init_cmd(pcr); + + for (j = 0; j < 256; j++) { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, + reg++, 0xFF, *ptr); + ptr++; + } + + err = rtsx_pci_send_cmd(pcr, 250); + if (err < 0) + return err; + } + + if (buf_len % 256) { + rtsx_pci_init_cmd(pcr); + + for (j = 0; j < buf_len % 256; j++) { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, + reg++, 0xFF, *ptr); + ptr++; + } + + err = rtsx_pci_send_cmd(pcr, 250); + if (err < 0) + return err; + } + + return 0; +} +EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf); + +static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl) +{ + rtsx_pci_init_cmd(pcr); + + while (*tbl & 0xFFFF0000) { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, + (u16)(*tbl >> 16), 0xFF, (u8)(*tbl)); + tbl++; + } + + return rtsx_pci_send_cmd(pcr, 100); +} + +int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card) +{ + const u32 *tbl; + + if (card == RTSX_SD_CARD) + tbl = pcr->sd_pull_ctl_enable_tbl; + else if (card == RTSX_MS_CARD) + tbl = pcr->ms_pull_ctl_enable_tbl; + else + return -EINVAL; + + return 
rtsx_pci_set_pull_ctl(pcr, tbl); +} +EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable); + +int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card) +{ + const u32 *tbl; + + if (card == RTSX_SD_CARD) + tbl = pcr->sd_pull_ctl_disable_tbl; + else if (card == RTSX_MS_CARD) + tbl = pcr->ms_pull_ctl_disable_tbl; + else + return -EINVAL; + + return rtsx_pci_set_pull_ctl(pcr, tbl); +} +EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable); + +static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr) +{ + struct rtsx_hw_param *hw_param = &pcr->hw_param; + + pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN + | hw_param->interrupt_en; + + if (pcr->num_slots > 1) + pcr->bier |= MS_INT_EN; + + /* Enable Bus Interrupt */ + rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier); + + pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier); +} + +static inline u8 double_ssc_depth(u8 depth) +{ + return ((depth > 1) ? (depth - 1) : depth); +} + +static u8 revise_ssc_depth(u8 ssc_depth, u8 div) +{ + if (div > CLK_DIV_1) { + if (ssc_depth > (div - 1)) + ssc_depth -= (div - 1); + else + ssc_depth = SSC_DEPTH_4M; + } + + return ssc_depth; +} + +int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk) +{ + int err, clk; + u8 n, clk_divider, mcu_cnt, div; + static const u8 depth[] = { + [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M, + [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M, + [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M, + [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K, + [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K, + }; + + if (PCI_PID(pcr) == PID_5261) + return rts5261_pci_switch_clock(pcr, card_clock, + ssc_depth, initial_mode, double_clk, vpclk); + if (PCI_PID(pcr) == PID_5228) + return rts5228_pci_switch_clock(pcr, card_clock, + ssc_depth, initial_mode, double_clk, vpclk); + + if (initial_mode) { + /* We use 250k(around) here, in initial stage */ + clk_divider = SD_CLK_DIVIDE_128; + card_clock = 30000000; + } else { + clk_divider = SD_CLK_DIVIDE_0; + } + err = rtsx_pci_write_register(pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, clk_divider); + if (err < 0) + return err; + + /* Reduce card clock by 20MHz each time a DMA transfer error occurs */ + if (card_clock == UHS_SDR104_MAX_DTR && + pcr->dma_error_count && + PCI_PID(pcr) == RTS5227_DEVICE_ID) + card_clock = UHS_SDR104_MAX_DTR - + (pcr->dma_error_count * 20000000); + + card_clock /= 1000000; + pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock); + + clk = card_clock; + if (!initial_mode && double_clk) + clk = card_clock * 2; + pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n", + clk, pcr->cur_clock); + + if (clk == pcr->cur_clock) + return 0; + + if (pcr->ops->conv_clk_and_div_n) + n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N); + else + n = (u8)(clk - 2); + if ((clk <= 2) || (n > MAX_DIV_N_PCR)) + return -EINVAL; + + mcu_cnt = (u8)(125/clk + 3); + if (mcu_cnt > 15) + mcu_cnt = 15; + + /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */ + div = CLK_DIV_1; + while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) { + if (pcr->ops->conv_clk_and_div_n) { + int dbl_clk = pcr->ops->conv_clk_and_div_n(n, + DIV_N_TO_CLK) * 2; + n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk, + CLK_TO_DIV_N); + } else { + n = (n + 2) * 2 - 2; + } + div++; + } + pcr_dbg(pcr, "n = %d, div = %d\n", n, div); + + ssc_depth = depth[ssc_depth]; + if (double_clk) + ssc_depth = double_ssc_depth(ssc_depth); + + ssc_depth = revise_ssc_depth(ssc_depth, div); + pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth); + + 
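/*
+	 * Worked example, assuming the default n = clk - 2 mapping (no
+	 * conv_clk_and_div_n hook): a 50MHz card clock gives n = 48,
+	 * below MIN_DIV_N_PCR (80), so the loop doubles once to
+	 * n = (48 + 2) * 2 - 2 = 98 with div = CLK_DIV_2 (a 100MHz SSC
+	 * clock halved back down on the card side), and
+	 * mcu_cnt = 125/50 + 3 = 5.
+	 */
+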
rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, + CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, + 0xFF, (div << 4) | mcu_cnt); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, + SSC_DEPTH_MASK, ssc_depth); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB); + if (vpclk) { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, PHASE_NOT_RESET); + } + + err = rtsx_pci_send_cmd(pcr, 2000); + if (err < 0) + return err; + + /* Wait SSC clock stable */ + udelay(SSC_CLOCK_STABLE_WAIT); + err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0); + if (err < 0) + return err; + + pcr->cur_clock = clk; + return 0; +} +EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock); + +int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card) +{ + if (pcr->ops->card_power_on) + return pcr->ops->card_power_on(pcr, card); + + return 0; +} +EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on); + +int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card) +{ + if (pcr->ops->card_power_off) + return pcr->ops->card_power_off(pcr, card); + + return 0; +} +EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off); + +int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card) +{ + static const unsigned int cd_mask[] = { + [RTSX_SD_CARD] = SD_EXIST, + [RTSX_MS_CARD] = MS_EXIST + }; + + if (!(pcr->flags & PCR_MS_PMOS)) { + /* When using single PMOS, accessing card is not permitted + * if the existing card is not the designated one. + */ + if (pcr->card_exist & (~cd_mask[card])) + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check); + +int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + if (pcr->ops->switch_output_voltage) + return pcr->ops->switch_output_voltage(pcr, voltage); + + return 0; +} +EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage); + +unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr) +{ + unsigned int val; + + val = rtsx_pci_readl(pcr, RTSX_BIPR); + if (pcr->ops->cd_deglitch) + val = pcr->ops->cd_deglitch(pcr); + + return val; +} +EXPORT_SYMBOL_GPL(rtsx_pci_card_exist); + +void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr) +{ + struct completion finish; + + pcr->finish_me = &finish; + init_completion(&finish); + + if (pcr->done) + complete(pcr->done); + + if (!pcr->remove_pci) + rtsx_pci_stop_cmd(pcr); + + wait_for_completion_interruptible_timeout(&finish, + msecs_to_jiffies(2)); + pcr->finish_me = NULL; +} +EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer); + +static void rtsx_pci_card_detect(struct work_struct *work) +{ + struct delayed_work *dwork; + struct rtsx_pcr *pcr; + unsigned long flags; + unsigned int card_detect = 0, card_inserted, card_removed; + u32 irq_status; + + dwork = to_delayed_work(work); + pcr = container_of(dwork, struct rtsx_pcr, carddet_work); + + pcr_dbg(pcr, "--> %s\n", __func__); + + mutex_lock(&pcr->pcr_mutex); + spin_lock_irqsave(&pcr->lock, flags); + + irq_status = rtsx_pci_readl(pcr, RTSX_BIPR); + pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status); + + irq_status &= CARD_EXIST; + card_inserted = pcr->card_inserted & irq_status; + card_removed = pcr->card_removed; + pcr->card_inserted = 0; + pcr->card_removed = 0; + + spin_unlock_irqrestore(&pcr->lock, flags); + + if (card_inserted || card_removed) { + pcr_dbg(pcr, "card_inserted: 0x%x, 
card_removed: 0x%x\n", + card_inserted, card_removed); + + if (pcr->ops->cd_deglitch) + card_inserted = pcr->ops->cd_deglitch(pcr); + + card_detect = card_inserted | card_removed; + + pcr->card_exist |= card_inserted; + pcr->card_exist &= ~card_removed; + } + + mutex_unlock(&pcr->pcr_mutex); + + if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event) + pcr->slots[RTSX_SD_CARD].card_event( + pcr->slots[RTSX_SD_CARD].p_dev); + if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event) + pcr->slots[RTSX_MS_CARD].card_event( + pcr->slots[RTSX_MS_CARD].p_dev); +} + +static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr) +{ + if (pcr->ops->process_ocp) { + pcr->ops->process_ocp(pcr); + } else { + if (!pcr->option.ocp_en) + return; + rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat); + if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) { + rtsx_pci_card_power_off(pcr, RTSX_SD_CARD); + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); + rtsx_pci_clear_ocpstat(pcr); + pcr->ocp_stat = 0; + } + } +} + +static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr) +{ + if (pcr->option.ocp_en) + rtsx_pci_process_ocp(pcr); + + return 0; +} + +static irqreturn_t rtsx_pci_isr(int irq, void *dev_id) +{ + struct rtsx_pcr *pcr = dev_id; + u32 int_reg; + + if (!pcr) + return IRQ_NONE; + + spin_lock(&pcr->lock); + + int_reg = rtsx_pci_readl(pcr, RTSX_BIPR); + /* Clear interrupt flag */ + rtsx_pci_writel(pcr, RTSX_BIPR, int_reg); + if ((int_reg & pcr->bier) == 0) { + spin_unlock(&pcr->lock); + return IRQ_NONE; + } + if (int_reg == 0xFFFFFFFF) { + spin_unlock(&pcr->lock); + return IRQ_HANDLED; + } + + int_reg &= (pcr->bier | 0x7FFFFF); + + if (int_reg & SD_OC_INT) + rtsx_pci_process_ocp_interrupt(pcr); + + if (int_reg & SD_INT) { + if (int_reg & SD_EXIST) { + pcr->card_inserted |= SD_EXIST; + } else { + pcr->card_removed |= SD_EXIST; + pcr->card_inserted &= ~SD_EXIST; + } + pcr->dma_error_count = 0; + } + + if (int_reg & MS_INT) { + if (int_reg & MS_EXIST) { + pcr->card_inserted |= MS_EXIST; + } else { + pcr->card_removed |= MS_EXIST; + pcr->card_inserted &= ~MS_EXIST; + } + } + + if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) { + if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) { + pcr->trans_result = TRANS_RESULT_FAIL; + if (pcr->done) + complete(pcr->done); + } else if (int_reg & TRANS_OK_INT) { + pcr->trans_result = TRANS_RESULT_OK; + if (pcr->done) + complete(pcr->done); + } + } + + if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT)) + schedule_delayed_work(&pcr->carddet_work, + msecs_to_jiffies(200)); + + spin_unlock(&pcr->lock); + return IRQ_HANDLED; +} + +static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr) +{ + pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n", + __func__, pcr->msi_en, pcr->pci->irq); + + if (request_irq(pcr->pci->irq, rtsx_pci_isr, + pcr->msi_en ? 
0 : IRQF_SHARED, + DRV_NAME_RTSX_PCI, pcr)) { + dev_err(&(pcr->pci->dev), + "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n", + pcr->pci->irq); + return -1; + } + + pcr->irq = pcr->pci->irq; + pci_intx(pcr->pci, !pcr->msi_en); + + return 0; +} + +static void rtsx_enable_aspm(struct rtsx_pcr *pcr) +{ + if (pcr->ops->set_aspm) + pcr->ops->set_aspm(pcr, true); + else + rtsx_comm_set_aspm(pcr, true); +} + +static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (option->ltr_enabled) { + u32 latency = option->ltr_l1off_latency; + + if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN)) + mdelay(option->l1_snooze_delay); + + rtsx_set_ltr_latency(pcr, latency); + } + + if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN)) + rtsx_set_l1off_sub_cfg_d0(pcr, 0); + + rtsx_enable_aspm(pcr); +} + +static void rtsx_pm_power_saving(struct rtsx_pcr *pcr) +{ + rtsx_comm_pm_power_saving(pcr); +} + +static void rtsx_pci_idle_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work); + + pcr_dbg(pcr, "--> %s\n", __func__); + + mutex_lock(&pcr->pcr_mutex); + + pcr->state = PDEV_STAT_IDLE; + + if (pcr->ops->disable_auto_blink) + pcr->ops->disable_auto_blink(pcr); + if (pcr->ops->turn_off_led) + pcr->ops->turn_off_led(pcr); + + rtsx_pm_power_saving(pcr); + + mutex_unlock(&pcr->pcr_mutex); +} + +static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) +{ + /* Set relink_time to 0 */ + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, + RELINK_TIME_MASK, 0); + + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, + D3_DELINK_MODE_EN, D3_DELINK_MODE_EN); + + rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN); +} + +static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state) +{ + if (pcr->ops->turn_off_led) + pcr->ops->turn_off_led(pcr); + + rtsx_pci_writel(pcr, RTSX_BIER, 0); + pcr->bier = 0; + + rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08); + rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state); + + if (pcr->ops->force_power_down) + pcr->ops->force_power_down(pcr, pm_state); + else + rtsx_base_force_power_down(pcr, pm_state); +} + +void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr) +{ + u8 val = SD_OCP_INT_EN | SD_DETECT_EN; + + if (pcr->ops->enable_ocp) { + pcr->ops->enable_ocp(pcr); + } else { + rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0); + rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val); + } + +} + +void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr) +{ + u8 mask = SD_OCP_INT_EN | SD_DETECT_EN; + + if (pcr->ops->disable_ocp) { + pcr->ops->disable_ocp(pcr); + } else { + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, + OC_POWER_DOWN); + } +} + +void rtsx_pci_init_ocp(struct rtsx_pcr *pcr) +{ + if (pcr->ops->init_ocp) { + pcr->ops->init_ocp(pcr); + } else { + struct rtsx_cr_option *option = &(pcr->option); + + if (option->ocp_en) { + u8 val = option->sd_800mA_ocp_thd; + + rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0); + rtsx_pci_write_register(pcr, REG_OCPPARA1, + SD_OCP_TIME_MASK, SD_OCP_TIME_800); + rtsx_pci_write_register(pcr, REG_OCPPARA2, + SD_OCP_THD_MASK, val); + rtsx_pci_write_register(pcr, REG_OCPGLITCH, + SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch); + 
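/*
+			 * All OCP parameters (power-down gate, sample
+			 * time, current threshold, glitch filter) are
+			 * programmed above before the block itself is
+			 * switched on.
+			 */
+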
rtsx_pci_enable_ocp(pcr); + } + } +} + +int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val) +{ + if (pcr->ops->get_ocpstat) + return pcr->ops->get_ocpstat(pcr, val); + else + return rtsx_pci_read_register(pcr, REG_OCPSTAT, val); +} + +void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr) +{ + if (pcr->ops->clear_ocpstat) { + pcr->ops->clear_ocpstat(pcr); + } else { + u8 mask = SD_OCP_INT_CLR | SD_OC_CLR; + u8 val = SD_OCP_INT_CLR | SD_OC_CLR; + + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val); + udelay(100); + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + } +} + +void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr) +{ + u16 val; + + if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) { + rtsx_pci_read_phy_register(pcr, 0x01, &val); + val |= 1<<9; + rtsx_pci_write_phy_register(pcr, 0x01, val); + } + rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32); + rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05); + rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83); + rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE); + +} + +void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr) +{ + u16 val; + + if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) { + rtsx_pci_read_phy_register(pcr, 0x01, &val); + val &= ~(1<<9); + rtsx_pci_write_phy_register(pcr, 0x01, val); + } + rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03); + rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00); + +} + +int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr) +{ + rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN | + MS_CLK_EN | SD40_CLK_EN, 0); + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); + rtsx_pci_card_power_off(pcr, RTSX_SD_CARD); + + msleep(50); + + rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD); + + return 0; +} + +int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr) +{ + rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN | + MS_CLK_EN | SD40_CLK_EN, 0); + + rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD); + + rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0); + rtsx_pci_card_power_off(pcr, RTSX_MS_CARD); + + return 0; +} + +static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + int err; + + if (PCI_PID(pcr) == PID_5228) + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK, + RTS5228_LDO1_SR_0_5); + + rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr); + + rtsx_pci_enable_bus_int(pcr); + + /* Power on SSC */ + if (PCI_PID(pcr) == PID_5261) { + /* Gating real mcu clock */ + err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1, + RTS5261_MCU_CLOCK_GATING, 0); + err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL, + SSC_POWER_DOWN, 0); + } else { + err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0); + } + if (err < 0) + return err; + + /* Wait SSC power stable */ + udelay(200); + + rtsx_pci_disable_aspm(pcr); + if (pcr->ops->optimize_phy) { + err = pcr->ops->optimize_phy(pcr); + if (err < 0) + return err; + } + + rtsx_pci_init_cmd(pcr); + + /* Set mcu_cnt to 7 to ensure data can be sampled properly */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00); + /* Disable card clock */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0); + /* Reset delink mode */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0); + /* Card driving select */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL, + 0xFF, pcr->card_drive_sel); 
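+	/*
+	 * Everything in this function is queued with rtsx_pci_add_cmd();
+	 * a sketch of the pattern:
+	 *
+	 *	rtsx_pci_init_cmd(pcr);
+	 *	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, mask, val);
+	 *	err = rtsx_pci_send_cmd(pcr, 100);
+	 *
+	 * so the writes land in the host command buffer and are flushed
+	 * to the controller in one batch by the rtsx_pci_send_cmd()
+	 * call below, not one HAIMR access per register.
+	 */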
+ /* Enable SSC Clock */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, + 0xFF, SSC_8X_EN | SSC_SEL_4M); + if (PCI_PID(pcr) == PID_5261) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, + RTS5261_SSC_DEPTH_2M); + else if (PCI_PID(pcr) == PID_5228) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, + RTS5228_SSC_DEPTH_2M); + else + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12); + + /* Disable cd_pwr_save */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10); + /* Clear Link Ready Interrupt */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0, + LINK_RDY_INT, LINK_RDY_INT); + /* Enlarge the estimation window of PERST# glitch + * to reduce the chance of invalid card interrupt + */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80); + /* Update RC oscillator to 400k + * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1 + * 1: 2M 0: 400k + */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00); + /* Set interrupt write clear + * bit 1: U_elbi_if_rd_clr_en + * 1: Enable ELBI interrupt[31:22] & [7:0] flag read clear + * 0: ELBI interrupt flag[31:22] & [7:0] only can be write clear + */ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0); + + err = rtsx_pci_send_cmd(pcr, 100); + if (err < 0) + return err; + + switch (PCI_PID(pcr)) { + case PID_5250: + case PID_524A: + case PID_525A: + case PID_5260: + case PID_5261: + case PID_5228: + rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1); + break; + default: + break; + } + + /*init ocp*/ + rtsx_pci_init_ocp(pcr); + + /* Enable clk_request_n to enable clock power management */ + pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); + /* Enter L1 when host tx idle */ + pci_write_config_byte(pdev, 0x70F, 0x5B); + + if (pcr->ops->extra_init_hw) { + err = pcr->ops->extra_init_hw(pcr); + if (err < 0) + return err; + } + + /* No CD interrupt if probing driver with card inserted. + * So we need to initialize pcr->card_exist here. 
+ */ + if (pcr->ops->cd_deglitch) + pcr->card_exist = pcr->ops->cd_deglitch(pcr); + else + pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST; + + return 0; +} + +static int rtsx_pci_init_chip(struct rtsx_pcr *pcr) +{ + int err; + + spin_lock_init(&pcr->lock); + mutex_init(&pcr->pcr_mutex); + + switch (PCI_PID(pcr)) { + default: + case 0x5209: + rts5209_init_params(pcr); + break; + + case 0x5229: + rts5229_init_params(pcr); + break; + + case 0x5289: + rtl8411_init_params(pcr); + break; + + case 0x5227: + rts5227_init_params(pcr); + break; + + case 0x522A: + rts522a_init_params(pcr); + break; + + case 0x5249: + rts5249_init_params(pcr); + break; + + case 0x524A: + rts524a_init_params(pcr); + break; + + case 0x525A: + rts525a_init_params(pcr); + break; + + case 0x5287: + rtl8411b_init_params(pcr); + break; + + case 0x5286: + rtl8402_init_params(pcr); + break; + + case 0x5260: + rts5260_init_params(pcr); + break; + + case 0x5261: + rts5261_init_params(pcr); + break; + + case 0x5228: + rts5228_init_params(pcr); + break; + } + + pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n", + PCI_PID(pcr), pcr->ic_version); + + pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot), + GFP_KERNEL); + if (!pcr->slots) + return -ENOMEM; + + if (pcr->ops->fetch_vendor_settings) + pcr->ops->fetch_vendor_settings(pcr); + + pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en); + pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n", + pcr->sd30_drive_sel_1v8); + pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n", + pcr->sd30_drive_sel_3v3); + pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n", + pcr->card_drive_sel); + pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags); + + pcr->state = PDEV_STAT_IDLE; + err = rtsx_pci_init_hw(pcr); + if (err < 0) { + kfree(pcr->slots); + return err; + } + + return 0; +} + +static int rtsx_pci_probe(struct pci_dev *pcidev, + const struct pci_device_id *id) +{ + struct rtsx_pcr *pcr; + struct pcr_handle *handle; + u32 base, len; + int ret, i, bar = 0; + + dev_dbg(&(pcidev->dev), + ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n", + pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, + (int)pcidev->revision); + + ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); + if (ret < 0) + return ret; + + ret = pci_enable_device(pcidev); + if (ret) + return ret; + + ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI); + if (ret) + goto disable; + + pcr = kzalloc(sizeof(*pcr), GFP_KERNEL); + if (!pcr) { + ret = -ENOMEM; + goto release_pci; + } + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) { + ret = -ENOMEM; + goto free_pcr; + } + handle->pcr = pcr; + + idr_preload(GFP_KERNEL); + spin_lock(&rtsx_pci_lock); + ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT); + if (ret >= 0) + pcr->id = ret; + spin_unlock(&rtsx_pci_lock); + idr_preload_end(); + if (ret < 0) + goto free_handle; + + pcr->pci = pcidev; + dev_set_drvdata(&pcidev->dev, handle); + + if (CHK_PCI_PID(pcr, 0x525A)) + bar = 1; + len = pci_resource_len(pcidev, bar); + base = pci_resource_start(pcidev, bar); + pcr->remap_addr = ioremap(base, len); + if (!pcr->remap_addr) { + ret = -ENOMEM; + goto free_idr; + } + + pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev), + RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr), + GFP_KERNEL); + if (pcr->rtsx_resv_buf == NULL) { + ret = -ENXIO; + goto unmap; + } + pcr->host_cmds_ptr = pcr->rtsx_resv_buf; + pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr; + pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN; + pcr->host_sg_tbl_addr = 
pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN; + + pcr->card_inserted = 0; + pcr->card_removed = 0; + INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect); + INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work); + + pcr->msi_en = msi_en; + if (pcr->msi_en) { + ret = pci_enable_msi(pcidev); + if (ret) + pcr->msi_en = false; + } + + ret = rtsx_pci_acquire_irq(pcr); + if (ret < 0) + goto disable_msi; + + pci_set_master(pcidev); + synchronize_irq(pcr->irq); + + ret = rtsx_pci_init_chip(pcr); + if (ret < 0) + goto disable_irq; + + for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) { + rtsx_pcr_cells[i].platform_data = handle; + rtsx_pcr_cells[i].pdata_size = sizeof(*handle); + } + ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells, + ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL); + if (ret < 0) + goto free_slots; + + schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200)); + + return 0; + +free_slots: + kfree(pcr->slots); +disable_irq: + free_irq(pcr->irq, (void *)pcr); +disable_msi: + if (pcr->msi_en) + pci_disable_msi(pcr->pci); + dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN, + pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr); +unmap: + iounmap(pcr->remap_addr); +free_idr: + spin_lock(&rtsx_pci_lock); + idr_remove(&rtsx_pci_idr, pcr->id); + spin_unlock(&rtsx_pci_lock); +free_handle: + kfree(handle); +free_pcr: + kfree(pcr); +release_pci: + pci_release_regions(pcidev); +disable: + pci_disable_device(pcidev); + + return ret; +} + +static void rtsx_pci_remove(struct pci_dev *pcidev) +{ + struct pcr_handle *handle = pci_get_drvdata(pcidev); + struct rtsx_pcr *pcr = handle->pcr; + + pcr->remove_pci = true; + + /* Disable interrupts at the pcr level */ + spin_lock_irq(&pcr->lock); + rtsx_pci_writel(pcr, RTSX_BIER, 0); + pcr->bier = 0; + spin_unlock_irq(&pcr->lock); + + cancel_delayed_work_sync(&pcr->carddet_work); + cancel_delayed_work_sync(&pcr->idle_work); + + mfd_remove_devices(&pcidev->dev); + + dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN, + pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr); + free_irq(pcr->irq, (void *)pcr); + if (pcr->msi_en) + pci_disable_msi(pcr->pci); + iounmap(pcr->remap_addr); + + pci_release_regions(pcidev); + pci_disable_device(pcidev); + + spin_lock(&rtsx_pci_lock); + idr_remove(&rtsx_pci_idr, pcr->id); + spin_unlock(&rtsx_pci_lock); + + kfree(pcr->slots); + kfree(pcr); + kfree(handle); + + dev_dbg(&(pcidev->dev), + ": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n", + pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device); +} + +static int __maybe_unused rtsx_pci_suspend(struct device *dev_d) +{ + struct pci_dev *pcidev = to_pci_dev(dev_d); + struct pcr_handle *handle; + struct rtsx_pcr *pcr; + + dev_dbg(&(pcidev->dev), "--> %s\n", __func__); + + handle = pci_get_drvdata(pcidev); + pcr = handle->pcr; + + cancel_delayed_work(&pcr->carddet_work); + cancel_delayed_work(&pcr->idle_work); + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_power_off(pcr, HOST_ENTER_S3); + + device_wakeup_disable(dev_d); + + mutex_unlock(&pcr->pcr_mutex); + return 0; +} + +static int __maybe_unused rtsx_pci_resume(struct device *dev_d) +{ + struct pci_dev *pcidev = to_pci_dev(dev_d); + struct pcr_handle *handle; + struct rtsx_pcr *pcr; + int ret = 0; + + dev_dbg(&(pcidev->dev), "--> %s\n", __func__); + + handle = pci_get_drvdata(pcidev); + pcr = handle->pcr; + + mutex_lock(&pcr->pcr_mutex); + + ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00); + if (ret) + goto out; + + ret = rtsx_pci_init_hw(pcr); + if (ret) + goto out; + + 
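/*
+	 * The controller was fully re-initialized above; re-arm the idle
+	 * work just as probe does, so the device falls back into power
+	 * saving if no request arrives within 200ms.
+	 */
+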
schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200)); + +out: + mutex_unlock(&pcr->pcr_mutex); + return ret; +} + +#ifdef CONFIG_PM + +static void rtsx_pci_shutdown(struct pci_dev *pcidev) +{ + struct pcr_handle *handle; + struct rtsx_pcr *pcr; + + dev_dbg(&(pcidev->dev), "--> %s\n", __func__); + + handle = pci_get_drvdata(pcidev); + pcr = handle->pcr; + rtsx_pci_power_off(pcr, HOST_ENTER_S1); + + pci_disable_device(pcidev); + free_irq(pcr->irq, (void *)pcr); + if (pcr->msi_en) + pci_disable_msi(pcr->pci); +} + +#else /* CONFIG_PM */ + +#define rtsx_pci_shutdown NULL + +#endif /* CONFIG_PM */ + +static SIMPLE_DEV_PM_OPS(rtsx_pci_pm_ops, rtsx_pci_suspend, rtsx_pci_resume); + +static struct pci_driver rtsx_pci_driver = { + .name = DRV_NAME_RTSX_PCI, + .id_table = rtsx_pci_ids, + .probe = rtsx_pci_probe, + .remove = rtsx_pci_remove, + .driver.pm = &rtsx_pci_pm_ops, + .shutdown = rtsx_pci_shutdown, +}; +module_pci_driver(rtsx_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>"); +MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver"); diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h new file mode 100644 index 000000000..fe5f4ca0f --- /dev/null +++ b/drivers/misc/cardreader/rtsx_pcr.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. + * + * Author: + * Wei WANG <wei_wang@realsil.com.cn> + */ + +#ifndef __RTSX_PCR_H +#define __RTSX_PCR_H + +#include <linux/rtsx_pci.h> + +#define MIN_DIV_N_PCR 80 +#define MAX_DIV_N_PCR 208 + +#define RTS522A_PM_CTRL3 0xFF7E + +#define RTS524A_PME_FORCE_CTL 0xFF78 +#define REG_EFUSE_BYPASS 0x08 +#define REG_EFUSE_POR 0x04 +#define REG_EFUSE_POWER_MASK 0x03 +#define REG_EFUSE_POWERON 0x03 +#define REG_EFUSE_POWEROFF 0x00 +#define RTS5250_CLK_CFG3 0xFF79 +#define RTS525A_CFG_MEM_PD 0xF0 +#define RTS524A_PM_CTRL3 0xFF7E +#define RTS525A_BIOS_CFG 0xFF2D +#define RTS525A_LOAD_BIOS_FLAG 0x01 +#define RTS525A_CLEAR_BIOS_FLAG 0x00 + +#define RTS525A_EFUSE_CTL 0xFC32 +#define REG_EFUSE_ENABLE 0x80 +#define REG_EFUSE_MODE 0x40 +#define RTS525A_EFUSE_ADD 0xFC33 +#define REG_EFUSE_ADD_MASK 0x3F +#define RTS525A_EFUSE_DATA 0xFC35 + +#define LTR_ACTIVE_LATENCY_DEF 0x883C +#define LTR_IDLE_LATENCY_DEF 0x892C +#define LTR_L1OFF_LATENCY_DEF 0x9003 +#define L1_SNOOZE_DELAY_DEF 1 +#define LTR_L1OFF_SSPWRGATE_5249_DEF 0xAF +#define LTR_L1OFF_SSPWRGATE_5250_DEF 0xFF +#define LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF 0xAC +#define LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF 0xF8 +#define CMD_TIMEOUT_DEF 100 +#define MASK_8_BIT_DEF 0xFF + +#define SSC_CLOCK_STABLE_WAIT 130 + +#define RTS524A_OCP_THD_800 0x04 +#define RTS525A_OCP_THD_800 0x05 +#define RTS522A_OCP_THD_800 0x06 + + +int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val); +int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val); + +void rts5209_init_params(struct rtsx_pcr *pcr); +void rts5229_init_params(struct rtsx_pcr *pcr); +void rtl8411_init_params(struct rtsx_pcr *pcr); +void rtl8402_init_params(struct rtsx_pcr *pcr); +void rts5227_init_params(struct rtsx_pcr *pcr); +void rts522a_init_params(struct rtsx_pcr *pcr); +void rts5249_init_params(struct rtsx_pcr *pcr); +void rts524a_init_params(struct rtsx_pcr *pcr); +void rts525a_init_params(struct rtsx_pcr *pcr); +void rtl8411b_init_params(struct rtsx_pcr *pcr); +void rts5260_init_params(struct rtsx_pcr *pcr); +void 
rts5261_init_params(struct rtsx_pcr *pcr); +void rts5228_init_params(struct rtsx_pcr *pcr); + +static inline u8 map_sd_drive(int idx) +{ + u8 sd_drive[4] = { + 0x01, /* Type D */ + 0x02, /* Type C */ + 0x05, /* Type A */ + 0x03 /* Type B */ + }; + + return sd_drive[idx]; +} + +#define rtsx_vendor_setting_valid(reg) (!((reg) & 0x1000000)) +#define rts5209_vendor_setting1_valid(reg) (!((reg) & 0x80)) +#define rts5209_vendor_setting2_valid(reg) ((reg) & 0x80) + +#define rtsx_check_mmc_support(reg) ((reg) & 0x10) +#define rtsx_reg_to_rtd3(reg) ((reg) & 0x02) +#define rtsx_reg_to_aspm(reg) (((reg) >> 28) & 0x03) +#define rtsx_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 26) & 0x03) +#define rtsx_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x03) +#define rtsx_reg_to_card_drive_sel(reg) ((((reg) >> 25) & 0x01) << 6) +#define rtsx_reg_check_reverse_socket(reg) ((reg) & 0x4000) +#define rts5209_reg_to_aspm(reg) (((reg) >> 5) & 0x03) +#define rts5209_reg_check_ms_pmos(reg) (!((reg) & 0x08)) +#define rts5209_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 3) & 0x07) +#define rts5209_reg_to_sd30_drive_sel_3v3(reg) ((reg) & 0x07) +#define rts5209_reg_to_card_drive_sel(reg) ((reg) >> 8) +#define rtl8411_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x07) +#define rtl8411b_reg_to_sd30_drive_sel_3v3(reg) ((reg) & 0x03) + +#define set_pull_ctrl_tables(pcr, __device) \ +do { \ + pcr->sd_pull_ctl_enable_tbl = __device##_sd_pull_ctl_enable_tbl; \ + pcr->sd_pull_ctl_disable_tbl = __device##_sd_pull_ctl_disable_tbl; \ + pcr->ms_pull_ctl_enable_tbl = __device##_ms_pull_ctl_enable_tbl; \ + pcr->ms_pull_ctl_disable_tbl = __device##_ms_pull_ctl_disable_tbl; \ +} while (0) + +/* generic operations */ +int rtsx_gops_pm_reset(struct rtsx_pcr *pcr); +int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency); +int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val); +void rtsx_pci_init_ocp(struct rtsx_pcr *pcr); +void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr); +void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr); +int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val); +void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr); +void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr); +void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr); +int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr); +int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr); + +#endif diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c new file mode 100644 index 000000000..f150d8769 --- /dev/null +++ b/drivers/misc/cardreader/rtsx_usb.c @@ -0,0 +1,800 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Driver for Realtek USB card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. 
+ * + * Author: + * Roger Tseng <rogerable@realtek.com> + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/usb.h> +#include <linux/platform_device.h> +#include <linux/mfd/core.h> +#include <linux/rtsx_usb.h> + +static int polling_pipe = 1; +module_param(polling_pipe, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(polling_pipe, "polling pipe (0: ctl, 1: bulk)"); + +static const struct mfd_cell rtsx_usb_cells[] = { + [RTSX_USB_SD_CARD] = { + .name = "rtsx_usb_sdmmc", + .pdata_size = 0, + }, + [RTSX_USB_MS_CARD] = { + .name = "rtsx_usb_ms", + .pdata_size = 0, + }, +}; + +static void rtsx_usb_sg_timed_out(struct timer_list *t) +{ + struct rtsx_ucr *ucr = from_timer(ucr, t, sg_timer); + + dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__); + usb_sg_cancel(&ucr->current_sg); +} + +static int rtsx_usb_bulk_transfer_sglist(struct rtsx_ucr *ucr, + unsigned int pipe, struct scatterlist *sg, int num_sg, + unsigned int length, unsigned int *act_len, int timeout) +{ + int ret; + + dev_dbg(&ucr->pusb_intf->dev, "%s: xfer %u bytes, %d entries\n", + __func__, length, num_sg); + ret = usb_sg_init(&ucr->current_sg, ucr->pusb_dev, pipe, 0, + sg, num_sg, length, GFP_NOIO); + if (ret) + return ret; + + ucr->sg_timer.expires = jiffies + msecs_to_jiffies(timeout); + add_timer(&ucr->sg_timer); + usb_sg_wait(&ucr->current_sg); + if (!del_timer_sync(&ucr->sg_timer)) + ret = -ETIMEDOUT; + else + ret = ucr->current_sg.status; + + if (act_len) + *act_len = ucr->current_sg.bytes; + + return ret; +} + +int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe, + void *buf, unsigned int len, int num_sg, + unsigned int *act_len, int timeout) +{ + if (timeout < 600) + timeout = 600; + + if (num_sg) + return rtsx_usb_bulk_transfer_sglist(ucr, pipe, + (struct scatterlist *)buf, num_sg, len, act_len, + timeout); + else + return usb_bulk_msg(ucr->pusb_dev, pipe, buf, len, act_len, + timeout); +} +EXPORT_SYMBOL_GPL(rtsx_usb_transfer_data); + +static inline void rtsx_usb_seq_cmd_hdr(struct rtsx_ucr *ucr, + u16 addr, u16 len, u8 seq_type) +{ + rtsx_usb_cmd_hdr_tag(ucr); + + ucr->cmd_buf[PACKET_TYPE] = seq_type; + ucr->cmd_buf[5] = (u8)(len >> 8); + ucr->cmd_buf[6] = (u8)len; + ucr->cmd_buf[8] = (u8)(addr >> 8); + ucr->cmd_buf[9] = (u8)addr; + + if (seq_type == SEQ_WRITE) + ucr->cmd_buf[STAGE_FLAG] = 0; + else + ucr->cmd_buf[STAGE_FLAG] = STAGE_R; +} + +static int rtsx_usb_seq_write_register(struct rtsx_ucr *ucr, + u16 addr, u16 len, u8 *data) +{ + u16 cmd_len = ALIGN(SEQ_WRITE_DATA_OFFSET + len, 4); + + if (!data) + return -EINVAL; + + if (cmd_len > IOBUF_SIZE) + return -EINVAL; + + rtsx_usb_seq_cmd_hdr(ucr, addr, len, SEQ_WRITE); + memcpy(ucr->cmd_buf + SEQ_WRITE_DATA_OFFSET, data, len); + + return rtsx_usb_transfer_data(ucr, + usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT), + ucr->cmd_buf, cmd_len, 0, NULL, 100); +} + +static int rtsx_usb_seq_read_register(struct rtsx_ucr *ucr, + u16 addr, u16 len, u8 *data) +{ + int i, ret; + u16 rsp_len = round_down(len, 4); + u16 res_len = len - rsp_len; + + if (!data) + return -EINVAL; + + /* 4-byte aligned part */ + if (rsp_len) { + rtsx_usb_seq_cmd_hdr(ucr, addr, len, SEQ_READ); + ret = rtsx_usb_transfer_data(ucr, + usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT), + ucr->cmd_buf, 12, 0, NULL, 100); + if (ret) + return ret; + + ret = rtsx_usb_transfer_data(ucr, + usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN), + data, rsp_len, 0, NULL, 100); + if (ret) + return ret; + } + + /* unaligned part */ + for (i = 0; i < res_len; i++) { + 
ret = rtsx_usb_read_register(ucr, addr + rsp_len + i, + data + rsp_len + i); + if (ret) + return ret; + } + + return 0; +} + +int rtsx_usb_read_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len) +{ + return rtsx_usb_seq_read_register(ucr, PPBUF_BASE2, (u16)buf_len, buf); +} +EXPORT_SYMBOL_GPL(rtsx_usb_read_ppbuf); + +int rtsx_usb_write_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len) +{ + return rtsx_usb_seq_write_register(ucr, PPBUF_BASE2, (u16)buf_len, buf); +} +EXPORT_SYMBOL_GPL(rtsx_usb_write_ppbuf); + +int rtsx_usb_ep0_write_register(struct rtsx_ucr *ucr, u16 addr, + u8 mask, u8 data) +{ + u16 value, index; + + addr |= EP0_WRITE_REG_CMD << EP0_OP_SHIFT; + value = swab16(addr); + index = mask | data << 8; + + return usb_control_msg(ucr->pusb_dev, + usb_sndctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + value, index, NULL, 0, 100); +} +EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register); + +int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data) +{ + u16 value; + u8 *buf; + int ret; + + if (!data) + return -EINVAL; + + buf = kzalloc(sizeof(u8), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT; + value = swab16(addr); + + ret = usb_control_msg(ucr->pusb_dev, + usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + value, 0, buf, 1, 100); + *data = *buf; + + kfree(buf); + return ret; +} +EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register); + +void rtsx_usb_add_cmd(struct rtsx_ucr *ucr, u8 cmd_type, u16 reg_addr, + u8 mask, u8 data) +{ + int i; + + if (ucr->cmd_idx < (IOBUF_SIZE - CMD_OFFSET) / 4) { + i = CMD_OFFSET + ucr->cmd_idx * 4; + + ucr->cmd_buf[i++] = ((cmd_type & 0x03) << 6) | + (u8)((reg_addr >> 8) & 0x3F); + ucr->cmd_buf[i++] = (u8)reg_addr; + ucr->cmd_buf[i++] = mask; + ucr->cmd_buf[i++] = data; + + ucr->cmd_idx++; + } +} +EXPORT_SYMBOL_GPL(rtsx_usb_add_cmd); + +int rtsx_usb_send_cmd(struct rtsx_ucr *ucr, u8 flag, int timeout) +{ + int ret; + + ucr->cmd_buf[CNT_H] = (u8)(ucr->cmd_idx >> 8); + ucr->cmd_buf[CNT_L] = (u8)(ucr->cmd_idx); + ucr->cmd_buf[STAGE_FLAG] = flag; + + ret = rtsx_usb_transfer_data(ucr, + usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT), + ucr->cmd_buf, ucr->cmd_idx * 4 + CMD_OFFSET, + 0, NULL, timeout); + if (ret) { + rtsx_usb_clear_fsm_err(ucr); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(rtsx_usb_send_cmd); + +int rtsx_usb_get_rsp(struct rtsx_ucr *ucr, int rsp_len, int timeout) +{ + if (rsp_len <= 0) + return -EINVAL; + + rsp_len = ALIGN(rsp_len, 4); + + return rtsx_usb_transfer_data(ucr, + usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN), + ucr->rsp_buf, rsp_len, 0, NULL, timeout); +} +EXPORT_SYMBOL_GPL(rtsx_usb_get_rsp); + +static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status) +{ + int ret; + + rtsx_usb_init_cmd(ucr); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_EXIST, 0x00, 0x00); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, OCPSTAT, 0x00, 0x00); + ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100); + if (ret) + return ret; + + ret = rtsx_usb_get_rsp(ucr, 2, 100); + if (ret) + return ret; + + *status = ((ucr->rsp_buf[0] >> 2) & 0x0f) | + ((ucr->rsp_buf[1] & 0x03) << 4); + + return 0; +} + +int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) +{ + int ret; + u16 *buf; + + if (!status) + return -EINVAL; + + if (polling_pipe == 0) { + buf = kzalloc(sizeof(u16), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = usb_control_msg(ucr->pusb_dev, + usb_rcvctrlpipe(ucr->pusb_dev, 0), + 
RTSX_USB_REQ_POLL, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, 0, buf, 2, 100); + *status = *buf; + + kfree(buf); + } else { + ret = rtsx_usb_get_status_with_bulk(ucr, status); + } + + /* usb_control_msg may return positive when success */ + if (ret < 0) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(rtsx_usb_get_card_status); + +static int rtsx_usb_write_phy_register(struct rtsx_ucr *ucr, u8 addr, u8 val) +{ + dev_dbg(&ucr->pusb_intf->dev, "Write 0x%x to phy register 0x%x\n", + val, addr); + + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VSTAIN, 0xFF, val); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VCONTROL, 0xFF, addr & 0x0F); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VCONTROL, + 0xFF, (addr >> 4) & 0x0F); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01); + + return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +int rtsx_usb_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, u8 data) +{ + rtsx_usb_init_cmd(ucr); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, addr, mask, data); + return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} +EXPORT_SYMBOL_GPL(rtsx_usb_write_register); + +int rtsx_usb_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data) +{ + int ret; + + if (data != NULL) + *data = 0; + + rtsx_usb_init_cmd(ucr); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, addr, 0, 0); + ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100); + if (ret) + return ret; + + ret = rtsx_usb_get_rsp(ucr, 1, 100); + if (ret) + return ret; + + if (data != NULL) + *data = ucr->rsp_buf[0]; + + return 0; +} +EXPORT_SYMBOL_GPL(rtsx_usb_read_register); + +static inline u8 double_ssc_depth(u8 depth) +{ + return (depth > 1) ? 
(depth - 1) : depth; +} + +static u8 revise_ssc_depth(u8 ssc_depth, u8 div) +{ + if (div > CLK_DIV_1) { + if (ssc_depth > div - 1) + ssc_depth -= (div - 1); + else + ssc_depth = SSC_DEPTH_2M; + } + + return ssc_depth; +} + +int rtsx_usb_switch_clock(struct rtsx_ucr *ucr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk) +{ + int ret; + u8 n, clk_divider, mcu_cnt, div; + + if (!card_clock) { + ucr->cur_clk = 0; + return 0; + } + + if (initial_mode) { + /* We use 250k(around) here, in initial stage */ + clk_divider = SD_CLK_DIVIDE_128; + card_clock = 30000000; + } else { + clk_divider = SD_CLK_DIVIDE_0; + } + + ret = rtsx_usb_write_register(ucr, SD_CFG1, + SD_CLK_DIVIDE_MASK, clk_divider); + if (ret < 0) + return ret; + + card_clock /= 1000000; + dev_dbg(&ucr->pusb_intf->dev, + "Switch card clock to %dMHz\n", card_clock); + + if (!initial_mode && double_clk) + card_clock *= 2; + dev_dbg(&ucr->pusb_intf->dev, + "Internal SSC clock: %dMHz (cur_clk = %d)\n", + card_clock, ucr->cur_clk); + + if (card_clock == ucr->cur_clk) + return 0; + + /* Converting clock value into internal settings: n and div */ + n = card_clock - 2; + if ((card_clock <= 2) || (n > MAX_DIV_N)) + return -EINVAL; + + mcu_cnt = 60/card_clock + 3; + if (mcu_cnt > 15) + mcu_cnt = 15; + + /* Make sure that the SSC clock div_n is not less than MIN_DIV_N */ + + div = CLK_DIV_1; + while (n < MIN_DIV_N && div < CLK_DIV_4) { + n = (n + 2) * 2 - 2; + div++; + } + dev_dbg(&ucr->pusb_intf->dev, "n = %d, div = %d\n", n, div); + + if (double_clk) + ssc_depth = double_ssc_depth(ssc_depth); + + ssc_depth = revise_ssc_depth(ssc_depth, div); + dev_dbg(&ucr->pusb_intf->dev, "ssc_depth = %d\n", ssc_depth); + + rtsx_usb_init_cmd(ucr); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, CLK_CHANGE); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, + 0x3F, (div << 4) | mcu_cnt); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL2, + SSC_DEPTH_MASK, ssc_depth); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB); + if (vpclk) { + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, 0); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, PHASE_NOT_RESET); + } + + ret = rtsx_usb_send_cmd(ucr, MODE_C, 2000); + if (ret < 0) + return ret; + + ret = rtsx_usb_write_register(ucr, SSC_CTL1, 0xff, + SSC_RSTB | SSC_8X_EN | SSC_SEL_4M); + if (ret < 0) + return ret; + + /* Wait SSC clock stable */ + usleep_range(100, 1000); + + ret = rtsx_usb_write_register(ucr, CLK_DIV, CLK_CHANGE, 0); + if (ret < 0) + return ret; + + ucr->cur_clk = card_clock; + + return 0; +} +EXPORT_SYMBOL_GPL(rtsx_usb_switch_clock); + +int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card) +{ + int ret; + u16 val; + u16 cd_mask[] = { + [RTSX_USB_SD_CARD] = (CD_MASK & ~SD_CD), + [RTSX_USB_MS_CARD] = (CD_MASK & ~MS_CD) + }; + + ret = rtsx_usb_get_card_status(ucr, &val); + /* + * If get status fails, return 0 (ok) for the exclusive check + * and let the flow fail at somewhere else. 
+ */ + if (ret) + return 0; + + if (val & cd_mask[card]) + return -EIO; + + return 0; +} +EXPORT_SYMBOL_GPL(rtsx_usb_card_exclusive_check); + +static int rtsx_usb_reset_chip(struct rtsx_ucr *ucr) +{ + int ret; + u8 val; + + rtsx_usb_init_cmd(ucr); + + if (CHECK_PKG(ucr, LQFP48)) { + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL, + LDO3318_PWR_MASK, LDO_SUSPEND); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL, + FORCE_LDO_POWERB, FORCE_LDO_POWERB); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, + 0x30, 0x10); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, + 0x03, 0x01); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, + 0x0C, 0x04); + } + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SYS_DUMMY0, NYET_MSAK, NYET_EN); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CD_DEGLITCH_WIDTH, 0xFF, 0x08); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + CD_DEGLITCH_EN, XD_CD_DEGLITCH_EN, 0x0); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD30_DRIVE_SEL, + SD30_DRIVE_MASK, DRIVER_TYPE_D); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + CARD_DRIVE_SEL, SD20_DRIVE_MASK, 0x0); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, LDO_POWER_CFG, 0xE0, 0x0); + + if (ucr->is_rts5179) + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + CARD_PULL_CTL5, 0x03, 0x01); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DMA1_CTL, + EXTEND_DMA1_ASYNC_SIGNAL, EXTEND_DMA1_ASYNC_SIGNAL); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_INT_PEND, + XD_INT | MS_INT | SD_INT, + XD_INT | MS_INT | SD_INT); + + ret = rtsx_usb_send_cmd(ucr, MODE_C, 100); + if (ret) + return ret; + + /* config non-crystal mode */ + rtsx_usb_read_register(ucr, CFG_MODE, &val); + if ((val & XTAL_FREE) || ((val & CLK_MODE_MASK) == CLK_MODE_NON_XTAL)) { + ret = rtsx_usb_write_phy_register(ucr, 0xC2, 0x7C); + if (ret) + return ret; + } + + return 0; +} + +static int rtsx_usb_init_chip(struct rtsx_ucr *ucr) +{ + int ret; + u8 val; + + rtsx_usb_clear_fsm_err(ucr); + + /* power on SSC */ + ret = rtsx_usb_write_register(ucr, + FPDCTL, SSC_POWER_MASK, SSC_POWER_ON); + if (ret) + return ret; + + usleep_range(100, 1000); + ret = rtsx_usb_write_register(ucr, CLK_DIV, CLK_CHANGE, 0x00); + if (ret) + return ret; + + /* determine IC version */ + ret = rtsx_usb_read_register(ucr, HW_VERSION, &val); + if (ret) + return ret; + + ucr->ic_version = val & HW_VER_MASK; + + /* determine package */ + ret = rtsx_usb_read_register(ucr, CARD_SHARE_MODE, &val); + if (ret) + return ret; + + if (val & CARD_SHARE_LQFP_SEL) { + ucr->package = LQFP48; + dev_dbg(&ucr->pusb_intf->dev, "Package: LQFP48\n"); + } else { + ucr->package = QFN24; + dev_dbg(&ucr->pusb_intf->dev, "Package: QFN24\n"); + } + + /* determine IC variations */ + rtsx_usb_read_register(ucr, CFG_MODE_1, &val); + if (val & RTS5179) { + ucr->is_rts5179 = true; + dev_dbg(&ucr->pusb_intf->dev, "Device is rts5179\n"); + } else { + ucr->is_rts5179 = false; + } + + return rtsx_usb_reset_chip(ucr); +} + +static int rtsx_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *usb_dev = interface_to_usbdev(intf); + struct rtsx_ucr *ucr; + int ret; + + dev_dbg(&intf->dev, + ": Realtek USB Card Reader found at bus %03d address %03d\n", + usb_dev->bus->busnum, usb_dev->devnum); + + ucr = devm_kzalloc(&intf->dev, sizeof(*ucr), GFP_KERNEL); + if (!ucr) + return -ENOMEM; + + ucr->pusb_dev = usb_dev; + + ucr->cmd_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL); + if (!ucr->cmd_buf) + return -ENOMEM; + + ucr->rsp_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL); + if (!ucr->rsp_buf) { + ret = -ENOMEM; + goto out_free_cmd_buf; + } + + 
usb_set_intfdata(intf, ucr); + + ucr->vendor_id = id->idVendor; + ucr->product_id = id->idProduct; + + mutex_init(&ucr->dev_mutex); + + ucr->pusb_intf = intf; + + /* initialize */ + ret = rtsx_usb_init_chip(ucr); + if (ret) + goto out_init_fail; + + /* initialize USB SG transfer timer */ + timer_setup(&ucr->sg_timer, rtsx_usb_sg_timed_out, 0); + + ret = mfd_add_hotplug_devices(&intf->dev, rtsx_usb_cells, + ARRAY_SIZE(rtsx_usb_cells)); + if (ret) + goto out_init_fail; + +#ifdef CONFIG_PM + intf->needs_remote_wakeup = 1; + usb_enable_autosuspend(usb_dev); +#endif + + return 0; + +out_init_fail: + usb_set_intfdata(ucr->pusb_intf, NULL); + kfree(ucr->rsp_buf); + ucr->rsp_buf = NULL; +out_free_cmd_buf: + kfree(ucr->cmd_buf); + ucr->cmd_buf = NULL; + return ret; +} + +static void rtsx_usb_disconnect(struct usb_interface *intf) +{ + struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf); + + dev_dbg(&intf->dev, "%s called\n", __func__); + + mfd_remove_devices(&intf->dev); + + usb_set_intfdata(ucr->pusb_intf, NULL); + + kfree(ucr->cmd_buf); + ucr->cmd_buf = NULL; + + kfree(ucr->rsp_buf); + ucr->rsp_buf = NULL; +} + +#ifdef CONFIG_PM +static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct rtsx_ucr *ucr = + (struct rtsx_ucr *)usb_get_intfdata(intf); + u16 val = 0; + + dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n", + __func__, message.event); + + if (PMSG_IS_AUTO(message)) { + if (mutex_trylock(&ucr->dev_mutex)) { + rtsx_usb_get_card_status(ucr, &val); + mutex_unlock(&ucr->dev_mutex); + + /* Defer the autosuspend if card exists */ + if (val & (SD_CD | MS_CD)) + return -EAGAIN; + } else { + /* There is an ongoing operation*/ + return -EAGAIN; + } + } + + return 0; +} + +static int rtsx_usb_resume_child(struct device *dev, void *data) +{ + pm_request_resume(dev); + return 0; +} + +static int rtsx_usb_resume(struct usb_interface *intf) +{ + device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child); + return 0; +} + +static int rtsx_usb_reset_resume(struct usb_interface *intf) +{ + struct rtsx_ucr *ucr = + (struct rtsx_ucr *)usb_get_intfdata(intf); + + rtsx_usb_reset_chip(ucr); + device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child); + return 0; +} + +#else /* CONFIG_PM */ + +#define rtsx_usb_suspend NULL +#define rtsx_usb_resume NULL +#define rtsx_usb_reset_resume NULL + +#endif /* CONFIG_PM */ + + +static int rtsx_usb_pre_reset(struct usb_interface *intf) +{ + struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf); + + mutex_lock(&ucr->dev_mutex); + return 0; +} + +static int rtsx_usb_post_reset(struct usb_interface *intf) +{ + struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf); + + mutex_unlock(&ucr->dev_mutex); + return 0; +} + +static const struct usb_device_id rtsx_usb_usb_ids[] = { + { USB_DEVICE(0x0BDA, 0x0129) }, + { USB_DEVICE(0x0BDA, 0x0139) }, + { USB_DEVICE(0x0BDA, 0x0140) }, + { } +}; +MODULE_DEVICE_TABLE(usb, rtsx_usb_usb_ids); + +static struct usb_driver rtsx_usb_driver = { + .name = "rtsx_usb", + .probe = rtsx_usb_probe, + .disconnect = rtsx_usb_disconnect, + .suspend = rtsx_usb_suspend, + .resume = rtsx_usb_resume, + .reset_resume = rtsx_usb_reset_resume, + .pre_reset = rtsx_usb_pre_reset, + .post_reset = rtsx_usb_post_reset, + .id_table = rtsx_usb_usb_ids, + .supports_autosuspend = 1, + .soft_unbind = 1, +}; + +module_usb_driver(rtsx_usb_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Roger Tseng <rogerable@realtek.com>"); +MODULE_DESCRIPTION("Realtek USB Card Reader Driver"); diff --git 
a/drivers/misc/cb710/Kconfig b/drivers/misc/cb710/Kconfig
new file mode 100644
index 000000000..a696d7509
--- /dev/null
+++ b/drivers/misc/cb710/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CB710_CORE
+	tristate "ENE CB710/720 Flash memory card reader support"
+	depends on PCI
+	help
+	  This option enables support for PCI ENE CB710/720 Flash memory card
+	  reader found in some laptops (i.e. some versions of HP Compaq nx9500).
+
+	  You will also have to select some flash card format drivers (MMC/SD,
+	  MemoryStick).
+
+	  This driver can also be built as a module. If so, the module
+	  will be called cb710.
+
+config CB710_DEBUG
+	bool "Enable driver debugging"
+	depends on CB710_CORE != n
+	help
+	  This is an option for use by developers; most people should
+	  say N here. This adds a lot of debugging output to dmesg.
+
+config CB710_DEBUG_ASSUMPTIONS
+	bool
+	depends on CB710_CORE != n
+	default y
diff --git a/drivers/misc/cb710/Makefile b/drivers/misc/cb710/Makefile
new file mode 100644
index 000000000..8a38c66eb
--- /dev/null
+++ b/drivers/misc/cb710/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-$(CONFIG_CB710_DEBUG)	:= -DDEBUG
+
+obj-$(CONFIG_CB710_CORE)	+= cb710.o
+
+cb710-y				:= core.o sgbuf2.o
+cb710-$(CONFIG_CB710_DEBUG)	+= debug.o
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
new file mode 100644
index 000000000..55b7ee0e8
--- /dev/null
+++ b/drivers/misc/cb710/core.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * cb710/core.c
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/cb710.h>
+#include <linux/gfp.h>
+
+static DEFINE_IDA(cb710_ida);
+
+void cb710_pci_update_config_reg(struct pci_dev *pdev,
+	int reg, uint32_t mask, uint32_t xor)
+{
+	u32 rval;
+
+	pci_read_config_dword(pdev, reg, &rval);
+	rval = (rval & mask) ^ xor;
+	pci_write_config_dword(pdev, reg, rval);
+}
+EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg);
+
+/* Some magic writes based on Windows driver init code */
+static int cb710_pci_configure(struct pci_dev *pdev)
+{
+	unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+	struct pci_dev *pdev0;
+	u32 val;
+
+	cb710_pci_update_config_reg(pdev, 0x48,
+		~0x000000FF, 0x0000003F);
+
+	pci_read_config_dword(pdev, 0x48, &val);
+	if (val & 0x80000000)
+		return 0;
+
+	pdev0 = pci_get_slot(pdev->bus, devfn);
+	if (!pdev0)
+		return -ENODEV;
+
+	if (pdev0->vendor == PCI_VENDOR_ID_ENE
+	    && pdev0->device == PCI_DEVICE_ID_ENE_720) {
+		cb710_pci_update_config_reg(pdev0, 0x8C,
+			~0x00F00000, 0x00100000);
+		cb710_pci_update_config_reg(pdev0, 0xB0,
+			~0x08000000, 0x08000000);
+	}
+
+	cb710_pci_update_config_reg(pdev0, 0x8C,
+		~0x00000F00, 0x00000200);
+	cb710_pci_update_config_reg(pdev0, 0x90,
+		~0x00060000, 0x00040000);
+
+	pci_dev_put(pdev0);
+
+	return 0;
+}
+
+static irqreturn_t cb710_irq_handler(int irq, void *data)
+{
+	struct cb710_chip *chip = data;
+	struct cb710_slot *slot = &chip->slot[0];
+	irqreturn_t handled = IRQ_NONE;
+	unsigned nr;
+
+	spin_lock(&chip->irq_lock); /* incl.
smp_rmb() */ + + for (nr = chip->slots; nr; ++slot, --nr) { + cb710_irq_handler_t handler_func = slot->irq_handler; + if (handler_func && handler_func(slot)) + handled = IRQ_HANDLED; + } + + spin_unlock(&chip->irq_lock); + + return handled; +} + +static void cb710_release_slot(struct device *dev) +{ +#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS + struct cb710_slot *slot = cb710_pdev_to_slot(to_platform_device(dev)); + struct cb710_chip *chip = cb710_slot_to_chip(slot); + + /* slot struct can be freed now */ + atomic_dec(&chip->slot_refs_count); +#endif +} + +static int cb710_register_slot(struct cb710_chip *chip, + unsigned slot_mask, unsigned io_offset, const char *name) +{ + int nr = chip->slots; + struct cb710_slot *slot = &chip->slot[nr]; + int err; + + dev_dbg(cb710_chip_dev(chip), + "register: %s.%d; slot %d; mask %d; IO offset: 0x%02X\n", + name, chip->platform_id, nr, slot_mask, io_offset); + + /* slot->irq_handler == NULL here; this needs to be + * seen before platform_device_register() */ + ++chip->slots; + smp_wmb(); + + slot->iobase = chip->iobase + io_offset; + slot->pdev.name = name; + slot->pdev.id = chip->platform_id; + slot->pdev.dev.parent = &chip->pdev->dev; + slot->pdev.dev.release = cb710_release_slot; + + err = platform_device_register(&slot->pdev); + +#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS + atomic_inc(&chip->slot_refs_count); +#endif + + if (err) { + /* device_initialize() called from platform_device_register() + * wants this on error path */ + platform_device_put(&slot->pdev); + + /* slot->irq_handler == NULL here anyway, so no lock needed */ + --chip->slots; + return err; + } + + chip->slot_mask |= slot_mask; + + return 0; +} + +static void cb710_unregister_slot(struct cb710_chip *chip, + unsigned slot_mask) +{ + int nr = chip->slots - 1; + + if (!(chip->slot_mask & slot_mask)) + return; + + platform_device_unregister(&chip->slot[nr].pdev); + + /* complementary to spin_unlock() in cb710_set_irq_handler() */ + smp_rmb(); + BUG_ON(chip->slot[nr].irq_handler != NULL); + + /* slot->irq_handler == NULL here, so no lock needed */ + --chip->slots; + chip->slot_mask &= ~slot_mask; +} + +void cb710_set_irq_handler(struct cb710_slot *slot, + cb710_irq_handler_t handler) +{ + struct cb710_chip *chip = cb710_slot_to_chip(slot); + unsigned long flags; + + spin_lock_irqsave(&chip->irq_lock, flags); + slot->irq_handler = handler; + spin_unlock_irqrestore(&chip->irq_lock, flags); +} +EXPORT_SYMBOL_GPL(cb710_set_irq_handler); + +static int __maybe_unused cb710_suspend(struct device *dev_d) +{ + struct pci_dev *pdev = to_pci_dev(dev_d); + struct cb710_chip *chip = pci_get_drvdata(pdev); + + devm_free_irq(&pdev->dev, pdev->irq, chip); + return 0; +} + +static int __maybe_unused cb710_resume(struct device *dev_d) +{ + struct pci_dev *pdev = to_pci_dev(dev_d); + struct cb710_chip *chip = pci_get_drvdata(pdev); + + return devm_request_irq(&pdev->dev, pdev->irq, + cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip); +} + +static int cb710_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct cb710_chip *chip; + u32 val; + int err; + int n = 0; + + err = cb710_pci_configure(pdev); + if (err) + return err; + + /* this is actually magic... 
 */
+	pci_read_config_dword(pdev, 0x48, &val);
+	if (!(val & 0x80000000)) {
+		pci_write_config_dword(pdev, 0x48, val|0x71000000);
+		pci_read_config_dword(pdev, 0x48, &val);
+	}
+
+	dev_dbg(&pdev->dev, "PCI config[0x48] = 0x%08X\n", val);
+	if (!(val & 0x70000000))
+		return -ENODEV;
+	val = (val >> 28) & 7;
+	if (val & CB710_SLOT_MMC)
+		++n;
+	if (val & CB710_SLOT_MS)
+		++n;
+	if (val & CB710_SLOT_SM)
+		++n;
+
+	chip = devm_kzalloc(&pdev->dev, struct_size(chip, slot, n),
+		GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	err = pcim_enable_device(pdev);
+	if (err)
+		return err;
+
+	err = pcim_iomap_regions(pdev, 0x0001, KBUILD_MODNAME);
+	if (err)
+		return err;
+
+	spin_lock_init(&chip->irq_lock);
+	chip->pdev = pdev;
+	chip->iobase = pcim_iomap_table(pdev)[0];
+
+	pci_set_drvdata(pdev, chip);
+
+	err = devm_request_irq(&pdev->dev, pdev->irq,
+		cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
+	if (err)
+		return err;
+
+	err = ida_alloc(&cb710_ida, GFP_KERNEL);
+	if (err < 0)
+		return err;
+	chip->platform_id = err;
+
+	dev_info(&pdev->dev, "id %d, IO 0x%p, IRQ %d\n",
+		chip->platform_id, chip->iobase, pdev->irq);
+
+	if (val & CB710_SLOT_MMC) { /* MMC/SD slot */
+		err = cb710_register_slot(chip,
+			CB710_SLOT_MMC, 0x00, "cb710-mmc");
+		if (err)
+			return err;
+	}
+
+	if (val & CB710_SLOT_MS) { /* MemoryStick slot */
+		err = cb710_register_slot(chip,
+			CB710_SLOT_MS, 0x40, "cb710-ms");
+		if (err)
+			goto unreg_mmc;
+	}
+
+	if (val & CB710_SLOT_SM) { /* SmartMedia slot */
+		err = cb710_register_slot(chip,
+			CB710_SLOT_SM, 0x60, "cb710-sm");
+		if (err)
+			goto unreg_ms;
+	}
+
+	return 0;
+unreg_ms:
+	cb710_unregister_slot(chip, CB710_SLOT_MS);
+unreg_mmc:
+	cb710_unregister_slot(chip, CB710_SLOT_MMC);
+
+#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
+	BUG_ON(atomic_read(&chip->slot_refs_count) != 0);
+#endif
+	return err;
+}
+
+static void cb710_remove_one(struct pci_dev *pdev)
+{
+	struct cb710_chip *chip = pci_get_drvdata(pdev);
+
+	cb710_unregister_slot(chip, CB710_SLOT_SM);
+	cb710_unregister_slot(chip, CB710_SLOT_MS);
+	cb710_unregister_slot(chip, CB710_SLOT_MMC);
+#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
+	BUG_ON(atomic_read(&chip->slot_refs_count) != 0);
+#endif
+
+	ida_free(&cb710_ida, chip->platform_id);
+}
+
+static const struct pci_device_id cb710_pci_tbl[] = {
+	{ PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_CB710_FLASH,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{ 0, }
+};
+
+static SIMPLE_DEV_PM_OPS(cb710_pm_ops, cb710_suspend, cb710_resume);
+
+static struct pci_driver cb710_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = cb710_pci_tbl,
+	.probe = cb710_probe,
+	.remove = cb710_remove_one,
+	.driver.pm = &cb710_pm_ops,
+};
+
+static int __init cb710_init_module(void)
+{
+	return pci_register_driver(&cb710_driver);
+}
+
+static void __exit cb710_cleanup_module(void)
+{
+	pci_unregister_driver(&cb710_driver);
+	ida_destroy(&cb710_ida);
+}
+
+module_init(cb710_init_module);
+module_exit(cb710_cleanup_module);
+
+MODULE_AUTHOR("Michał Mirosław <mirq-linux@rere.qmqm.pl>");
+MODULE_DESCRIPTION("ENE CB710 memory card reader driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cb710_pci_tbl);
diff --git a/drivers/misc/cb710/debug.c b/drivers/misc/cb710/debug.c
new file mode 100644
index 000000000..20d672edf
--- /dev/null
+++ b/drivers/misc/cb710/debug.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * cb710/debug.c
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ */
+#include <linux/cb710.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define CB710_REG_COUNT 0x80
+
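+/*
+ * Layout of the read whitelist below: each u16 entry of allow[] covers
+ * one 16-byte register block (8 blocks * 16 bytes = CB710_REG_COUNT),
+ * and bit i of an entry marks byte i of that block as safe to read.
+ * Worked example: allow[0] = 0xFFF0, so a 32-bit dump of the first MMC
+ * block computes mask = 0xF in allow_reg_read() and finds
+ * ((0xFFF0 >> 0) & 0xF) != 0xF; bytes 0x00-0x03 are therefore skipped
+ * and printed as "xxxxxxxx" placeholders by the dump helpers.
+ */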
+static const u16 allow[CB710_REG_COUNT/16] = {
+	0xFFF0, 0xFFFF, 0xFFFF, 0xFFFF,
+	0xFFF0, 0xFFFF, 0xFFFF, 0xFFFF,
+};
+static const char *const prefix[ARRAY_SIZE(allow)] = {
+	"MMC", "MMC", "MMC", "MMC",
+	"MS?", "MS?", "SM?", "SM?"
+};
+
+static inline int allow_reg_read(unsigned block, unsigned offset, unsigned bits)
+{
+	unsigned mask = (1 << bits/8) - 1;
+	offset *= bits/8;
+	return ((allow[block] >> offset) & mask) == mask;
+}
+
+#define CB710_READ_REGS_TEMPLATE(t) \
+static void cb710_read_regs_##t(void __iomem *iobase, \
+	u##t *reg, unsigned select) \
+{ \
+	unsigned i, j; \
+ \
+	for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) { \
+		if (!(select & (1 << i))) \
+			continue; \
+ \
+		for (j = 0; j < 0x10/(t/8); ++j) { \
+			if (!allow_reg_read(i, j, t)) \
+				continue; \
+			reg[j] = ioread##t(iobase \
+				+ (i << 4) + (j * (t/8))); \
+		} \
+	} \
+}
+
+static const char cb710_regf_8[] = "%02X";
+static const char cb710_regf_16[] = "%04X";
+static const char cb710_regf_32[] = "%08X";
+static const char cb710_xes[] = "xxxxxxxx";
+
+#define CB710_DUMP_REGS_TEMPLATE(t) \
+static void cb710_dump_regs_##t(struct device *dev, \
+	const u##t *reg, unsigned select) \
+{ \
+	const char *const xp = &cb710_xes[8 - t/4]; \
+	const char *const format = cb710_regf_##t; \
+ \
+	char msg[100], *p; \
+	unsigned i, j; \
+ \
+	for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) { \
+		if (!(select & (1 << i))) \
+			continue; \
+		p = msg; \
+		for (j = 0; j < 0x10/(t/8); ++j) { \
+			*p++ = ' '; \
+			if (j == 8/(t/8)) \
+				*p++ = ' '; \
+			if (allow_reg_read(i, j, t)) \
+				p += sprintf(p, format, reg[j]); \
+			else \
+				p += sprintf(p, "%s", xp); \
+		} \
+		dev_dbg(dev, "%s 0x%02X %s\n", prefix[i], i << 4, msg); \
+	} \
+}
+
+#define CB710_READ_AND_DUMP_REGS_TEMPLATE(t) \
+static void cb710_read_and_dump_regs_##t(struct cb710_chip *chip, \
+	unsigned select) \
+{ \
+	u##t regs[CB710_REG_COUNT/sizeof(u##t)]; \
+ \
+	memset(&regs, 0, sizeof(regs)); \
+	cb710_read_regs_##t(chip->iobase, regs, select); \
+	cb710_dump_regs_##t(cb710_chip_dev(chip), regs, select); \
+}
+
+#define CB710_REG_ACCESS_TEMPLATES(t) \
+	CB710_READ_REGS_TEMPLATE(t) \
+	CB710_DUMP_REGS_TEMPLATE(t) \
+	CB710_READ_AND_DUMP_REGS_TEMPLATE(t)
+
+CB710_REG_ACCESS_TEMPLATES(8)
+CB710_REG_ACCESS_TEMPLATES(16)
+CB710_REG_ACCESS_TEMPLATES(32)
+
+void cb710_dump_regs(struct cb710_chip *chip, unsigned select)
+{
+	if (!(select & CB710_DUMP_REGS_MASK))
+		select = CB710_DUMP_REGS_ALL;
+	if (!(select & CB710_DUMP_ACCESS_MASK))
+		select |= CB710_DUMP_ACCESS_8;
+
+	if (select & CB710_DUMP_ACCESS_32)
+		cb710_read_and_dump_regs_32(chip, select);
+	if (select & CB710_DUMP_ACCESS_16)
+		cb710_read_and_dump_regs_16(chip, select);
+	if (select & CB710_DUMP_ACCESS_8)
+		cb710_read_and_dump_regs_8(chip, select);
+}
+EXPORT_SYMBOL_GPL(cb710_dump_regs);
+
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
new file mode 100644
index 000000000..a798fad5f
--- /dev/null
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * cb710/sgbuf2.c
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cb710.h>
+
+static bool sg_dwiter_next(struct sg_mapping_iter *miter)
+{
+	if (sg_miter_next(miter)) {
+		miter->consumed = 0;
+		return true;
+	} else
+		return false;
+}
+
+static bool sg_dwiter_is_at_end(struct sg_mapping_iter *miter)
+{
+	return miter->length == miter->consumed && !sg_dwiter_next(miter);
+}
+
+static uint32_t
sg_dwiter_read_buffer(struct sg_mapping_iter *miter) +{ + size_t len, left = 4; + uint32_t data; + void *addr = &data; + + do { + len = min(miter->length - miter->consumed, left); + memcpy(addr, miter->addr + miter->consumed, len); + miter->consumed += len; + left -= len; + if (!left) + return data; + addr += len; + } while (sg_dwiter_next(miter)); + + memset(addr, 0, left); + return data; +} + +static inline bool needs_unaligned_copy(const void *ptr) +{ +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + return false; +#else + return ((uintptr_t)ptr & 3) != 0; +#endif +} + +static bool sg_dwiter_get_next_block(struct sg_mapping_iter *miter, uint32_t **ptr) +{ + size_t len; + + if (sg_dwiter_is_at_end(miter)) + return true; + + len = miter->length - miter->consumed; + + if (likely(len >= 4 && !needs_unaligned_copy( + miter->addr + miter->consumed))) { + *ptr = miter->addr + miter->consumed; + miter->consumed += 4; + return true; + } + + return false; +} + +/** + * cb710_sg_dwiter_read_next_block() - get next 32-bit word from sg buffer + * @miter: sg mapping iterator used for reading + * + * Description: + * Returns 32-bit word starting at byte pointed to by @miter@ + * handling any alignment issues. Bytes past the buffer's end + * are not accessed (read) but are returned as zeroes. @miter@ + * is advanced by 4 bytes or to the end of buffer whichever is + * closer. + * + * Context: + * Same requirements as in sg_miter_next(). + * + * Returns: + * 32-bit word just read. + */ +uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter) +{ + uint32_t *ptr = NULL; + + if (likely(sg_dwiter_get_next_block(miter, &ptr))) + return ptr ? *ptr : 0; + + return sg_dwiter_read_buffer(miter); +} +EXPORT_SYMBOL_GPL(cb710_sg_dwiter_read_next_block); + +static void sg_dwiter_write_slow(struct sg_mapping_iter *miter, uint32_t data) +{ + size_t len, left = 4; + void *addr = &data; + + do { + len = min(miter->length - miter->consumed, left); + memcpy(miter->addr, addr, len); + miter->consumed += len; + left -= len; + if (!left) + return; + addr += len; + } while (sg_dwiter_next(miter)); +} + +/** + * cb710_sg_dwiter_write_next_block() - write next 32-bit word to sg buffer + * @miter: sg mapping iterator used for writing + * @data: data to write to sg buffer + * + * Description: + * Writes 32-bit word starting at byte pointed to by @miter@ + * handling any alignment issues. Bytes which would be written + * past the buffer's end are silently discarded. @miter@ is + * advanced by 4 bytes or to the end of buffer whichever is closer. + * + * Context: + * Same requirements as in sg_miter_next(). + */ +void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data) +{ + uint32_t *ptr = NULL; + + if (likely(sg_dwiter_get_next_block(miter, &ptr))) { + if (ptr) + *ptr = data; + else + return; + } else + sg_dwiter_write_slow(miter, data); +} +EXPORT_SYMBOL_GPL(cb710_sg_dwiter_write_next_block); + diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c new file mode 100644 index 000000000..18fc1aaa5 --- /dev/null +++ b/drivers/misc/cs5535-mfgpt.c @@ -0,0 +1,380 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for the CS5535/CS5536 Multi-Function General Purpose Timers (MFGPT) + * + * Copyright (C) 2006, Advanced Micro Devices, Inc. + * Copyright (C) 2007 Andres Salomon <dilinger@debian.org> + * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk> + * + * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book. 
+ */ + +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/cs5535.h> +#include <linux/slab.h> + +#define DRV_NAME "cs5535-mfgpt" + +static int mfgpt_reset_timers; +module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644); +MODULE_PARM_DESC(mfgptfix, "Try to reset the MFGPT timers during init; " + "required by some broken BIOSes (ie, TinyBIOS < 0.99) or kexec " + "(1 = reset the MFGPT using an undocumented bit, " + "2 = perform a soft reset by unconfiguring all timers); " + "use what works best for you."); + +struct cs5535_mfgpt_timer { + struct cs5535_mfgpt_chip *chip; + int nr; +}; + +static struct cs5535_mfgpt_chip { + DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS); + resource_size_t base; + + struct platform_device *pdev; + spinlock_t lock; + int initialized; +} cs5535_mfgpt_chip; + +int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp, + int event, int enable) +{ + uint32_t msr, mask, value, dummy; + int shift = (cmp == MFGPT_CMP1) ? 0 : 8; + + if (!timer) { + WARN_ON(1); + return -EIO; + } + + /* + * The register maps for these are described in sections 6.17.1.x of + * the AMD Geode CS5536 Companion Device Data Book. + */ + switch (event) { + case MFGPT_EVENT_RESET: + /* + * XXX: According to the docs, we cannot reset timers above + * 6; that is, resets for 7 and 8 will be ignored. Is this + * a problem? -dilinger + */ + msr = MSR_MFGPT_NR; + mask = 1 << (timer->nr + 24); + break; + + case MFGPT_EVENT_NMI: + msr = MSR_MFGPT_NR; + mask = 1 << (timer->nr + shift); + break; + + case MFGPT_EVENT_IRQ: + msr = MSR_MFGPT_IRQ; + mask = 1 << (timer->nr + shift); + break; + + default: + return -EIO; + } + + rdmsr(msr, value, dummy); + + if (enable) + value |= mask; + else + value &= ~mask; + + wrmsr(msr, value, dummy); + return 0; +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_toggle_event); + +int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, int *irq, + int enable) +{ + uint32_t zsel, lpc, dummy; + int shift; + + if (!timer) { + WARN_ON(1); + return -EIO; + } + + /* + * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA + * is using the same CMP of the timer's Siamese twin, the IRQ is set to + * 2, and we mustn't use nor change it. + * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the + * IRQ of the 1st. This can only happen if forcing an IRQ, calling this + * with *irq==0 is safe. Currently there _are_ no 2 drivers. + */ + rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); + shift = ((cmp == MFGPT_CMP1 ? 
0 : 4) + timer->nr % 4) * 4; + if (((zsel >> shift) & 0xF) == 2) + return -EIO; + + /* Choose IRQ: if none supplied, keep IRQ already set or use default */ + if (!*irq) + *irq = (zsel >> shift) & 0xF; + if (!*irq) + *irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ; + + /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */ + if (*irq < 1 || *irq == 2 || *irq > 15) + return -EIO; + rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy); + if (lpc & (1 << *irq)) + return -EIO; + + /* All chosen and checked - go for it */ + if (cs5535_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable)) + return -EIO; + if (enable) { + zsel = (zsel & ~(0xF << shift)) | (*irq << shift); + wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); + } + + return 0; +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_set_irq); + +struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain) +{ + struct cs5535_mfgpt_chip *mfgpt = &cs5535_mfgpt_chip; + struct cs5535_mfgpt_timer *timer = NULL; + unsigned long flags; + int max; + + if (!mfgpt->initialized) + goto done; + + /* only allocate timers from the working domain if requested */ + if (domain == MFGPT_DOMAIN_WORKING) + max = 6; + else + max = MFGPT_MAX_TIMERS; + + if (timer_nr >= max) { + /* programmer error. silly programmers! */ + WARN_ON(1); + goto done; + } + + spin_lock_irqsave(&mfgpt->lock, flags); + if (timer_nr < 0) { + unsigned long t; + + /* try to find any available timer */ + t = find_first_bit(mfgpt->avail, max); + /* set timer_nr to -1 if no timers available */ + timer_nr = t < max ? (int) t : -1; + } else { + /* check if the requested timer's available */ + if (!test_bit(timer_nr, mfgpt->avail)) + timer_nr = -1; + } + + if (timer_nr >= 0) + /* if timer_nr is not -1, it's an available timer */ + __clear_bit(timer_nr, mfgpt->avail); + spin_unlock_irqrestore(&mfgpt->lock, flags); + + if (timer_nr < 0) + goto done; + + timer = kmalloc(sizeof(*timer), GFP_KERNEL); + if (!timer) { + /* aw hell */ + spin_lock_irqsave(&mfgpt->lock, flags); + __set_bit(timer_nr, mfgpt->avail); + spin_unlock_irqrestore(&mfgpt->lock, flags); + goto done; + } + timer->chip = mfgpt; + timer->nr = timer_nr; + dev_info(&mfgpt->pdev->dev, "registered timer %d\n", timer_nr); + +done: + return timer; +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_alloc_timer); + +/* + * XXX: This frees the timer memory, but never resets the actual hardware + * timer. The old geode_mfgpt code did this; it would be good to figure + * out a way to actually release the hardware timer. See comments below. + */ +void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer) +{ + unsigned long flags; + uint16_t val; + + /* timer can be made available again only if never set up */ + val = cs5535_mfgpt_read(timer, MFGPT_REG_SETUP); + if (!(val & MFGPT_SETUP_SETUP)) { + spin_lock_irqsave(&timer->chip->lock, flags); + __set_bit(timer->nr, timer->chip->avail); + spin_unlock_irqrestore(&timer->chip->lock, flags); + } + + kfree(timer); +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_free_timer); + +uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, uint16_t reg) +{ + return inw(timer->chip->base + reg + (timer->nr * 8)); +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_read); + +void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg, + uint16_t value) +{ + outw(value, timer->chip->base + reg + (timer->nr * 8)); +} +EXPORT_SYMBOL_GPL(cs5535_mfgpt_write); + +/* + * This is a sledgehammer that resets all MFGPT timers. This is required by + * some broken BIOSes which leave the system in an unstable state + * (TinyBIOS 0.98, for example; fixed in 0.99). 
It's uncertain as to + * whether or not this secret MSR can be used to release individual timers. + * Jordan tells me that he and Mitch once played w/ it, but it's unclear + * what the results of that were (and they experienced some instability). + */ +static void reset_all_timers(void) +{ + uint32_t val, dummy; + + /* The following undocumented bit resets the MFGPT timers */ + val = 0xFF; dummy = 0; + wrmsr(MSR_MFGPT_SETUP, val, dummy); +} + +/* + * This is another sledgehammer to reset all MFGPT timers. + * Instead of using the undocumented bit method it clears + * IRQ, NMI and RESET settings. + */ +static void soft_reset(void) +{ + int i; + struct cs5535_mfgpt_timer t; + + for (i = 0; i < MFGPT_MAX_TIMERS; i++) { + t.nr = i; + + cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_RESET, 0); + cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_RESET, 0); + cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_NMI, 0); + cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_NMI, 0); + cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_IRQ, 0); + cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_IRQ, 0); + } +} + +/* + * Check whether any MFGPTs are available for the kernel to use. In most + * cases, firmware that uses AMD's VSA code will claim all timers during + * bootup; we certainly don't want to take them if they're already in use. + * In other cases (such as with VSAless OpenFirmware), the system firmware + * leaves timers available for us to use. + */ +static int scan_timers(struct cs5535_mfgpt_chip *mfgpt) +{ + struct cs5535_mfgpt_timer timer = { .chip = mfgpt }; + unsigned long flags; + int timers = 0; + uint16_t val; + int i; + + /* bios workaround */ + if (mfgpt_reset_timers == 1) + reset_all_timers(); + else if (mfgpt_reset_timers == 2) + soft_reset(); + + /* just to be safe, protect this section w/ lock */ + spin_lock_irqsave(&mfgpt->lock, flags); + for (i = 0; i < MFGPT_MAX_TIMERS; i++) { + timer.nr = i; + val = cs5535_mfgpt_read(&timer, MFGPT_REG_SETUP); + if (!(val & MFGPT_SETUP_SETUP) || mfgpt_reset_timers == 2) { + __set_bit(i, mfgpt->avail); + timers++; + } + } + spin_unlock_irqrestore(&mfgpt->lock, flags); + + return timers; +} + +static int cs5535_mfgpt_probe(struct platform_device *pdev) +{ + struct resource *res; + int err = -EIO, t; + + if (mfgpt_reset_timers < 0 || mfgpt_reset_timers > 2) { + dev_err(&pdev->dev, "Bad mfgpt_reset_timers value: %i\n", + mfgpt_reset_timers); + goto done; + } + + /* There are two ways to get the MFGPT base address; one is by + * fetching it from MSR_LBAR_MFGPT, the other is by reading the + * PCI BAR info. The latter method is easier (especially across + * different architectures), so we'll stick with that for now. If + * it turns out to be unreliable in the face of crappy BIOSes, we + * can always go back to using MSRs.. 
*/ + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) { + dev_err(&pdev->dev, "can't fetch device resource info\n"); + goto done; + } + + if (!request_region(res->start, resource_size(res), pdev->name)) { + dev_err(&pdev->dev, "can't request region\n"); + goto done; + } + + /* set up the driver-specific struct */ + cs5535_mfgpt_chip.base = res->start; + cs5535_mfgpt_chip.pdev = pdev; + spin_lock_init(&cs5535_mfgpt_chip.lock); + + dev_info(&pdev->dev, "reserved resource region %pR\n", res); + + /* detect the available timers */ + t = scan_timers(&cs5535_mfgpt_chip); + dev_info(&pdev->dev, "%d MFGPT timers available\n", t); + cs5535_mfgpt_chip.initialized = 1; + return 0; + +done: + return err; +} + +static struct platform_driver cs5535_mfgpt_driver = { + .driver = { + .name = DRV_NAME, + }, + .probe = cs5535_mfgpt_probe, +}; + + +static int __init cs5535_mfgpt_init(void) +{ + return platform_driver_register(&cs5535_mfgpt_driver); +} + +module_init(cs5535_mfgpt_init); + +MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); +MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig new file mode 100644 index 000000000..51aecafdc --- /dev/null +++ b/drivers/misc/cxl/Kconfig @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# IBM Coherent Accelerator (CXL) compatible devices +# + +config CXL_BASE + bool + select PPC_COPRO_BASE + +config CXL + tristate "Support for IBM Coherent Accelerators (CXL)" + depends on PPC_POWERNV && PCI_MSI && EEH + select CXL_BASE + default m + help + Select this option to enable driver support for IBM Coherent + Accelerators (CXL). CXL is otherwise known as Coherent Accelerator + Processor Interface (CAPI). CAPI allows accelerators in FPGAs to be + coherently attached to a CPU via an MMU. This driver enables + userspace programs to access these accelerators via /dev/cxl/afuM.N + devices. + + CAPI adapters are found in POWER8 based systems. + + If unsure, say N. diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile new file mode 100644 index 000000000..5eea61b95 --- /dev/null +++ b/drivers/misc/cxl/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 +ccflags-y := $(call cc-disable-warning, unused-const-variable) +ccflags-$(CONFIG_PPC_WERROR) += -Werror + +cxl-y += main.o file.o irq.o fault.o native.o +cxl-y += context.o sysfs.o pci.o trace.o +cxl-y += vphb.o api.o cxllib.o +cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o +cxl-$(CONFIG_DEBUG_FS) += debugfs.o +obj-$(CONFIG_CXL) += cxl.o +obj-$(CONFIG_CXL_BASE) += base.o + +# For tracepoints to include our trace.h from tracepoint infrastructure: +CFLAGS_trace.o := -I$(src) diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c new file mode 100644 index 000000000..b493de962 --- /dev/null +++ b/drivers/misc/cxl/api.c @@ -0,0 +1,531 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2014 IBM Corp. + */ + +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/file.h> +#include <misc/cxl.h> +#include <linux/module.h> +#include <linux/mount.h> +#include <linux/pseudo_fs.h> +#include <linux/sched/mm.h> +#include <linux/mmu_context.h> + +#include "cxl.h" + +/* + * Since we want to track memory mappings to be able to force-unmap + * when the AFU is no longer reachable, we need an inode. 
For devices + * opened through the cxl user API, this is not a problem, but a + * userland process can also get a cxl fd through the cxl_get_fd() + * API, which is used by the cxlflash driver. + * + * Therefore we implement our own simple pseudo-filesystem and inode + * allocator. We don't use the anonymous inode, as we need the + * meta-data associated with it (address_space) and it is shared by + * other drivers/processes, so it could lead to cxl unmapping VMAs + * from random processes. + */ + +#define CXL_PSEUDO_FS_MAGIC 0x1697697f + +static int cxl_fs_cnt; +static struct vfsmount *cxl_vfs_mount; + +static int cxl_fs_init_fs_context(struct fs_context *fc) +{ + return init_pseudo(fc, CXL_PSEUDO_FS_MAGIC) ? 0 : -ENOMEM; +} + +static struct file_system_type cxl_fs_type = { + .name = "cxl", + .owner = THIS_MODULE, + .init_fs_context = cxl_fs_init_fs_context, + .kill_sb = kill_anon_super, +}; + + +void cxl_release_mapping(struct cxl_context *ctx) +{ + if (ctx->kernelapi && ctx->mapping) + simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt); +} + +static struct file *cxl_getfile(const char *name, + const struct file_operations *fops, + void *priv, int flags) +{ + struct file *file; + struct inode *inode; + int rc; + + /* strongly inspired by anon_inode_getfile() */ + + if (fops->owner && !try_module_get(fops->owner)) + return ERR_PTR(-ENOENT); + + rc = simple_pin_fs(&cxl_fs_type, &cxl_vfs_mount, &cxl_fs_cnt); + if (rc < 0) { + pr_err("Cannot mount cxl pseudo filesystem: %d\n", rc); + file = ERR_PTR(rc); + goto err_module; + } + + inode = alloc_anon_inode(cxl_vfs_mount->mnt_sb); + if (IS_ERR(inode)) { + file = ERR_CAST(inode); + goto err_fs; + } + + file = alloc_file_pseudo(inode, cxl_vfs_mount, name, + flags & (O_ACCMODE | O_NONBLOCK), fops); + if (IS_ERR(file)) + goto err_inode; + + file->private_data = priv; + + return file; + +err_inode: + iput(inode); +err_fs: + simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt); +err_module: + module_put(fops->owner); + return file; +} + +struct cxl_context *cxl_dev_context_init(struct pci_dev *dev) +{ + struct cxl_afu *afu; + struct cxl_context *ctx; + int rc; + + afu = cxl_pci_to_afu(dev); + if (IS_ERR(afu)) + return ERR_CAST(afu); + + ctx = cxl_context_alloc(); + if (!ctx) + return ERR_PTR(-ENOMEM); + + ctx->kernelapi = true; + + /* Make it a slave context. We can promote it later? 
*/ + rc = cxl_context_init(ctx, afu, false); + if (rc) + goto err_ctx; + + return ctx; + +err_ctx: + kfree(ctx); + return ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(cxl_dev_context_init); + +struct cxl_context *cxl_get_context(struct pci_dev *dev) +{ + return dev->dev.archdata.cxl_ctx; +} +EXPORT_SYMBOL_GPL(cxl_get_context); + +int cxl_release_context(struct cxl_context *ctx) +{ + if (ctx->status >= STARTED) + return -EBUSY; + + cxl_context_free(ctx); + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_release_context); + +static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num) +{ + __u16 range; + int r; + + for (r = 0; r < CXL_IRQ_RANGES; r++) { + range = ctx->irqs.range[r]; + if (num < range) { + return ctx->irqs.offset[r] + num; + } + num -= range; + } + return 0; +} + + +int cxl_set_priv(struct cxl_context *ctx, void *priv) +{ + if (!ctx) + return -EINVAL; + + ctx->priv = priv; + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_set_priv); + +void *cxl_get_priv(struct cxl_context *ctx) +{ + if (!ctx) + return ERR_PTR(-EINVAL); + + return ctx->priv; +} +EXPORT_SYMBOL_GPL(cxl_get_priv); + +int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num) +{ + int res; + irq_hw_number_t hwirq; + + if (num == 0) + num = ctx->afu->pp_irqs; + res = afu_allocate_irqs(ctx, num); + if (res) + return res; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) { + /* In a guest, the PSL interrupt is not multiplexed. It was + * allocated above, and we need to set its handler + */ + hwirq = cxl_find_afu_irq(ctx, 0); + if (hwirq) + cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl"); + } + + if (ctx->status == STARTED) { + if (cxl_ops->update_ivtes) + cxl_ops->update_ivtes(ctx); + else WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n"); + } + + return res; +} +EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs); + +void cxl_free_afu_irqs(struct cxl_context *ctx) +{ + irq_hw_number_t hwirq; + unsigned int virq; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) { + hwirq = cxl_find_afu_irq(ctx, 0); + if (hwirq) { + virq = irq_find_mapping(NULL, hwirq); + if (virq) + cxl_unmap_irq(virq, ctx); + } + } + afu_irq_name_free(ctx); + cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter); +} +EXPORT_SYMBOL_GPL(cxl_free_afu_irqs); + +int cxl_map_afu_irq(struct cxl_context *ctx, int num, + irq_handler_t handler, void *cookie, char *name) +{ + irq_hw_number_t hwirq; + + /* + * Find interrupt we are to register. + */ + hwirq = cxl_find_afu_irq(ctx, num); + if (!hwirq) + return -ENOENT; + + return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name); +} +EXPORT_SYMBOL_GPL(cxl_map_afu_irq); + +void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie) +{ + irq_hw_number_t hwirq; + unsigned int virq; + + hwirq = cxl_find_afu_irq(ctx, num); + if (!hwirq) + return; + + virq = irq_find_mapping(NULL, hwirq); + if (virq) + cxl_unmap_irq(virq, cookie); +} +EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq); + +/* + * Start a context + * Code here similar to afu_ioctl_start_work(). + */ +int cxl_start_context(struct cxl_context *ctx, u64 wed, + struct task_struct *task) +{ + int rc = 0; + bool kernel = true; + + pr_devel("%s: pe: %i\n", __func__, ctx->pe); + + mutex_lock(&ctx->status_mutex); + if (ctx->status == STARTED) + goto out; /* already started */ + + /* + * Increment the mapped context count for adapter. This also checks + * if adapter_context_lock is taken. 
+	 */
+	rc = cxl_adapter_context_get(ctx->afu->adapter);
+	if (rc)
+		goto out;
+
+	if (task) {
+		ctx->pid = get_task_pid(task, PIDTYPE_PID);
+		kernel = false;
+
+		/* acquire a reference to the task's mm */
+		ctx->mm = get_task_mm(current);
+
+		/* ensure this mm_struct can't be freed */
+		cxl_context_mm_count_get(ctx);
+
+		if (ctx->mm) {
+			/* decrement the use count from above */
+			mmput(ctx->mm);
+			/* make TLBIs for this context global */
+			mm_context_add_copro(ctx->mm);
+		}
+	}
+
+	/*
+	 * Increment driver use count. Enables global TLBIs for hash
+	 * and callbacks to handle the segment table
+	 */
+	cxl_ctx_get();
+
+	/* See the comment in afu_ioctl_start_work() */
+	smp_mb();
+
+	if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
+		put_pid(ctx->pid);
+		ctx->pid = NULL;
+		cxl_adapter_context_put(ctx->afu->adapter);
+		cxl_ctx_put();
+		if (task) {
+			cxl_context_mm_count_put(ctx);
+			if (ctx->mm)
+				mm_context_remove_copro(ctx->mm);
+		}
+		goto out;
+	}
+
+	ctx->status = STARTED;
+out:
+	mutex_unlock(&ctx->status_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(cxl_start_context);
+
+int cxl_process_element(struct cxl_context *ctx)
+{
+	return ctx->external_pe;
+}
+EXPORT_SYMBOL_GPL(cxl_process_element);
+
+/* Stop a context. Returns 0 on success, otherwise -Errno */
+int cxl_stop_context(struct cxl_context *ctx)
+{
+	return __detach_context(ctx);
+}
+EXPORT_SYMBOL_GPL(cxl_stop_context);
+
+void cxl_set_master(struct cxl_context *ctx)
+{
+	ctx->master = true;
+}
+EXPORT_SYMBOL_GPL(cxl_set_master);
+
+/* wrappers around afu_* file ops which are EXPORTED */
+int cxl_fd_open(struct inode *inode, struct file *file)
+{
+	return afu_open(inode, file);
+}
+EXPORT_SYMBOL_GPL(cxl_fd_open);
+int cxl_fd_release(struct inode *inode, struct file *file)
+{
+	return afu_release(inode, file);
+}
+EXPORT_SYMBOL_GPL(cxl_fd_release);
+long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	return afu_ioctl(file, cmd, arg);
+}
+EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
+int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
+{
+	return afu_mmap(file, vm);
+}
+EXPORT_SYMBOL_GPL(cxl_fd_mmap);
+__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
+{
+	return afu_poll(file, poll);
+}
+EXPORT_SYMBOL_GPL(cxl_fd_poll);
+ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
+			loff_t *off)
+{
+	return afu_read(file, buf, count, off);
+}
+EXPORT_SYMBOL_GPL(cxl_fd_read);
+
+#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME
+
+/* Get a struct file and fd for a context and attach the ops */
+struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
+			int *fd)
+{
+	struct file *file;
+	int rc, flags, fdtmp;
+	char *name = NULL;
+
+	/* only allow one per context */
+	if (ctx->mapping)
+		return ERR_PTR(-EEXIST);
+
+	flags = O_RDWR | O_CLOEXEC;
+
+	/* This code is similar to anon_inode_getfd() */
+	rc = get_unused_fd_flags(flags);
+	if (rc < 0)
+		return ERR_PTR(rc);
+	fdtmp = rc;
+
+	/*
+	 * Patch the file ops. Needs to be careful that this is reentrant safe.
+ */ + if (fops) { + PATCH_FOPS(open); + PATCH_FOPS(poll); + PATCH_FOPS(read); + PATCH_FOPS(release); + PATCH_FOPS(unlocked_ioctl); + PATCH_FOPS(compat_ioctl); + PATCH_FOPS(mmap); + } else /* use default ops */ + fops = (struct file_operations *)&afu_fops; + + name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe); + file = cxl_getfile(name, fops, ctx, flags); + kfree(name); + if (IS_ERR(file)) + goto err_fd; + + cxl_context_set_mapping(ctx, file->f_mapping); + *fd = fdtmp; + return file; + +err_fd: + put_unused_fd(fdtmp); + return NULL; +} +EXPORT_SYMBOL_GPL(cxl_get_fd); + +struct cxl_context *cxl_fops_get_context(struct file *file) +{ + return file->private_data; +} +EXPORT_SYMBOL_GPL(cxl_fops_get_context); + +void cxl_set_driver_ops(struct cxl_context *ctx, + struct cxl_afu_driver_ops *ops) +{ + WARN_ON(!ops->fetch_event || !ops->event_delivered); + atomic_set(&ctx->afu_driver_events, 0); + ctx->afu_driver_ops = ops; +} +EXPORT_SYMBOL_GPL(cxl_set_driver_ops); + +void cxl_context_events_pending(struct cxl_context *ctx, + unsigned int new_events) +{ + atomic_add(new_events, &ctx->afu_driver_events); + wake_up_all(&ctx->wq); +} +EXPORT_SYMBOL_GPL(cxl_context_events_pending); + +int cxl_start_work(struct cxl_context *ctx, + struct cxl_ioctl_start_work *work) +{ + int rc; + + /* code taken from afu_ioctl_start_work */ + if (!(work->flags & CXL_START_WORK_NUM_IRQS)) + work->num_interrupts = ctx->afu->pp_irqs; + else if ((work->num_interrupts < ctx->afu->pp_irqs) || + (work->num_interrupts > ctx->afu->irqs_max)) { + return -EINVAL; + } + + rc = afu_register_irqs(ctx, work->num_interrupts); + if (rc) + return rc; + + rc = cxl_start_context(ctx, work->work_element_descriptor, current); + if (rc < 0) { + afu_release_irqs(ctx, ctx); + return rc; + } + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_start_work); + +void __iomem *cxl_psa_map(struct cxl_context *ctx) +{ + if (ctx->status != STARTED) + return NULL; + + pr_devel("%s: psn_phys%llx size:%llx\n", + __func__, ctx->psn_phys, ctx->psn_size); + return ioremap(ctx->psn_phys, ctx->psn_size); +} +EXPORT_SYMBOL_GPL(cxl_psa_map); + +void cxl_psa_unmap(void __iomem *addr) +{ + iounmap(addr); +} +EXPORT_SYMBOL_GPL(cxl_psa_unmap); + +int cxl_afu_reset(struct cxl_context *ctx) +{ + struct cxl_afu *afu = ctx->afu; + int rc; + + rc = cxl_ops->afu_reset(afu); + if (rc) + return rc; + + return cxl_ops->afu_check_and_enable(afu); +} +EXPORT_SYMBOL_GPL(cxl_afu_reset); + +void cxl_perst_reloads_same_image(struct cxl_afu *afu, + bool perst_reloads_same_image) +{ + afu->adapter->perst_same_image = perst_reloads_same_image; +} +EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image); + +ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count) +{ + struct cxl_afu *afu = cxl_pci_to_afu(dev); + if (IS_ERR(afu)) + return -ENODEV; + + return cxl_ops->read_adapter_vpd(afu->adapter, buf, count); +} +EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd); diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c new file mode 100644 index 000000000..cc0caf919 --- /dev/null +++ b/drivers/misc/cxl/base.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2014 IBM Corp. 
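+ */

/*
 * Example (illustrative, not part of this patch): a minimal sketch of a
 * kernel AFU driver driving the API exported above. "my_afu_irq",
 * "my_afu_attach" and the WED value are hypothetical, and error handling
 * is abbreviated. Teardown is the reverse: cxl_stop_context(), then
 * unmap and free the IRQs, then cxl_release_context().
 */
static irqreturn_t my_afu_irq(int irq, void *data)
{
	struct cxl_context *ctx = data;

	pr_devel("AFU interrupt, pe=%i\n", cxl_process_element(ctx));
	return IRQ_HANDLED;
}

static int my_afu_attach(struct pci_dev *pdev, u64 wed)
{
	struct cxl_context *ctx;
	int rc;

	ctx = cxl_dev_context_init(pdev);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* num == 0 means "the AFU's default per-process count" (pp_irqs) */
	rc = cxl_allocate_afu_irqs(ctx, 0);
	if (rc)
		goto err_release;

	/*
	 * AFU interrupts are numbered from 1 (0 is the PSL interrupt);
	 * cxl_map_afu_irq() returns the mapped Linux virq on success.
	 */
	rc = cxl_map_afu_irq(ctx, 1, my_afu_irq, ctx, "my_afu");
	if (rc <= 0) {
		rc = rc ? rc : -ENOMEM;
		goto err_free_irqs;
	}

	/* a NULL task attaches this as a kernel context */
	rc = cxl_start_context(ctx, wed, NULL);
	if (rc)
		goto err_unmap;
	return 0;

err_unmap:
	cxl_unmap_afu_irq(ctx, 1, ctx);
err_free_irqs:
	cxl_free_afu_irqs(ctx);
err_release:
	cxl_release_context(ctx);
	return rc;
}

+/*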
+ */ + +#include <linux/module.h> +#include <linux/rcupdate.h> +#include <asm/errno.h> +#include <misc/cxl-base.h> +#include <linux/of_platform.h> +#include "cxl.h" + +/* protected by rcu */ +static struct cxl_calls *cxl_calls; + +atomic_t cxl_use_count = ATOMIC_INIT(0); +EXPORT_SYMBOL(cxl_use_count); + +#ifdef CONFIG_CXL_MODULE + +static inline struct cxl_calls *cxl_calls_get(void) +{ + struct cxl_calls *calls = NULL; + + rcu_read_lock(); + calls = rcu_dereference(cxl_calls); + if (calls && !try_module_get(calls->owner)) + calls = NULL; + rcu_read_unlock(); + + return calls; +} + +static inline void cxl_calls_put(struct cxl_calls *calls) +{ + BUG_ON(calls != cxl_calls); + + /* we don't need to rcu this, as we hold a reference to the module */ + module_put(cxl_calls->owner); +} + +#else /* !defined CONFIG_CXL_MODULE */ + +static inline struct cxl_calls *cxl_calls_get(void) +{ + return cxl_calls; +} + +static inline void cxl_calls_put(struct cxl_calls *calls) { } + +#endif /* CONFIG_CXL_MODULE */ + +/* AFU refcount management */ +struct cxl_afu *cxl_afu_get(struct cxl_afu *afu) +{ + return (get_device(&afu->dev) == NULL) ? NULL : afu; +} +EXPORT_SYMBOL_GPL(cxl_afu_get); + +void cxl_afu_put(struct cxl_afu *afu) +{ + put_device(&afu->dev); +} +EXPORT_SYMBOL_GPL(cxl_afu_put); + +void cxl_slbia(struct mm_struct *mm) +{ + struct cxl_calls *calls; + + calls = cxl_calls_get(); + if (!calls) + return; + + if (cxl_ctx_in_use()) + calls->cxl_slbia(mm); + + cxl_calls_put(calls); +} + +int register_cxl_calls(struct cxl_calls *calls) +{ + if (cxl_calls) + return -EBUSY; + + rcu_assign_pointer(cxl_calls, calls); + return 0; +} +EXPORT_SYMBOL_GPL(register_cxl_calls); + +void unregister_cxl_calls(struct cxl_calls *calls) +{ + BUG_ON(cxl_calls->owner != calls->owner); + RCU_INIT_POINTER(cxl_calls, NULL); + synchronize_rcu(); +} +EXPORT_SYMBOL_GPL(unregister_cxl_calls); + +int cxl_update_properties(struct device_node *dn, + struct property *new_prop) +{ + return of_update_property(dn, new_prop); +} +EXPORT_SYMBOL_GPL(cxl_update_properties); + +static int __init cxl_base_init(void) +{ + struct device_node *np; + struct platform_device *dev; + int count = 0; + + /* + * Scan for compatible devices in guest only + */ + if (cpu_has_feature(CPU_FTR_HVMODE)) + return 0; + + for_each_compatible_node(np, NULL, "ibm,coherent-platform-facility") { + dev = of_platform_device_create(np, NULL, NULL); + if (dev) + count++; + } + pr_devel("Found %d cxl device(s)\n", count); + return 0; +} +device_initcall(cxl_base_init); diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c new file mode 100644 index 000000000..fb2eff69e --- /dev/null +++ b/drivers/misc/cxl/context.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2014 IBM Corp. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/bitmap.h> +#include <linux/sched.h> +#include <linux/pid.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/debugfs.h> +#include <linux/slab.h> +#include <linux/idr.h> +#include <linux/sched/mm.h> +#include <linux/mmu_context.h> +#include <asm/cputable.h> +#include <asm/current.h> +#include <asm/copro.h> + +#include "cxl.h" + +/* + * Allocates space for a CXL context. + */ +struct cxl_context *cxl_context_alloc(void) +{ + return kzalloc(sizeof(struct cxl_context), GFP_KERNEL); +} + +/* + * Initialises a CXL context. 
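+ */

/*
 * Example (illustrative, not part of this patch): the provider side of
 * the cxl_calls hooks that the base code above dereferences. The names
 * here are hypothetical; the real registration is done by the main cxl
 * module.
 */
static void my_slbia(struct mm_struct *mm)
{
	/* invalidate coprocessor SLB entries derived from this mm */
}

static struct cxl_calls my_cxl_calls = {
	.cxl_slbia	= my_slbia,
	.owner		= THIS_MODULE,
};

static int __init my_cxl_init(void)
{
	/* publishes the pointer; cxl_calls_get() then pins the module */
	return register_cxl_calls(&my_cxl_calls);
}

static void __exit my_cxl_exit(void)
{
	/* clears the pointer and waits out RCU readers */
	unregister_cxl_calls(&my_cxl_calls);
}

+/*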
+ */
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
+{
+ int i;
+
+ ctx->afu = afu;
+ ctx->master = master;
+ ctx->pid = NULL; /* Set in start work ioctl */
+ mutex_init(&ctx->mapping_lock);
+ ctx->mapping = NULL;
+ ctx->tidr = 0;
+ ctx->assign_tidr = false;
+
+ if (cxl_is_power8()) {
+ spin_lock_init(&ctx->sste_lock);
+
+ /*
+ * Allocate the segment table before we put it in the IDR so that we
+ * can always access it when dereferenced from IDR. For the same
+ * reason, the segment table is only destroyed after the context is
+ * removed from the IDR. Access to this in the IOCTL is protected by
+ * Linux filesystem semantics (can't IOCTL until open is complete).
+ */
+ i = cxl_alloc_sst(ctx);
+ if (i)
+ return i;
+ }
+
+ INIT_WORK(&ctx->fault_work, cxl_handle_fault);
+
+ init_waitqueue_head(&ctx->wq);
+ spin_lock_init(&ctx->lock);
+
+ ctx->irq_bitmap = NULL;
+ ctx->pending_irq = false;
+ ctx->pending_fault = false;
+ ctx->pending_afu_err = false;
+
+ INIT_LIST_HEAD(&ctx->irq_names);
+
+ /*
+ * When we have to destroy all contexts in cxl_context_detach_all() we
+ * end up with afu_release_irqs() called from inside an
+ * idr_for_each_entry(). Hence we need to make sure that anything
+ * dereferenced from this IDR is ok before we allocate the IDR here.
+ * This clears out the IRQ ranges to ensure this.
+ */
+ for (i = 0; i < CXL_IRQ_RANGES; i++)
+ ctx->irqs.range[i] = 0;
+
+ mutex_init(&ctx->status_mutex);
+
+ ctx->status = OPENED;
+
+ /*
+ * Allocating the IDR! We had better make sure everything that can be
+ * dereferenced from it is set up first.
+ */
+ mutex_lock(&afu->contexts_lock);
+ idr_preload(GFP_KERNEL);
+ i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
+ ctx->afu->num_procs, GFP_NOWAIT);
+ idr_preload_end();
+ mutex_unlock(&afu->contexts_lock);
+ if (i < 0)
+ return i;
+
+ ctx->pe = i;
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ ctx->elem = &ctx->afu->native->spa[i];
+ ctx->external_pe = ctx->pe;
+ } else {
+ ctx->external_pe = -1; /* assigned when attaching */
+ }
+ ctx->pe_inserted = false;
+
+ /*
+ * take a ref on the afu so that it stays alive at least until
+ * this context is reclaimed inside reclaim_ctx.
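+ *
+ * The pairing is:
+ *	cxl_context_init() takes the reference (cxl_afu_get), and
+ *	reclaim_ctx(), run via call_rcu() from cxl_context_free(),
+ *	drops it again (cxl_afu_put).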
+ */ + cxl_afu_get(afu); + return 0; +} + +void cxl_context_set_mapping(struct cxl_context *ctx, + struct address_space *mapping) +{ + mutex_lock(&ctx->mapping_lock); + ctx->mapping = mapping; + mutex_unlock(&ctx->mapping_lock); +} + +static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct cxl_context *ctx = vma->vm_file->private_data; + u64 area, offset; + vm_fault_t ret; + + offset = vmf->pgoff << PAGE_SHIFT; + + pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n", + __func__, ctx->pe, vmf->address, offset); + + if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { + area = ctx->afu->psn_phys; + if (offset >= ctx->afu->adapter->ps_size) + return VM_FAULT_SIGBUS; + } else { + area = ctx->psn_phys; + if (offset >= ctx->psn_size) + return VM_FAULT_SIGBUS; + } + + mutex_lock(&ctx->status_mutex); + + if (ctx->status != STARTED) { + mutex_unlock(&ctx->status_mutex); + pr_devel("%s: Context not started, failing problem state access\n", __func__); + if (ctx->mmio_err_ff) { + if (!ctx->ff_page) { + ctx->ff_page = alloc_page(GFP_USER); + if (!ctx->ff_page) + return VM_FAULT_OOM; + memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE); + } + get_page(ctx->ff_page); + vmf->page = ctx->ff_page; + vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); + return 0; + } + return VM_FAULT_SIGBUS; + } + + ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT); + + mutex_unlock(&ctx->status_mutex); + + return ret; +} + +static const struct vm_operations_struct cxl_mmap_vmops = { + .fault = cxl_mmap_fault, +}; + +/* + * Map a per-context mmio space into the given vma. + */ +int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) +{ + u64 start = vma->vm_pgoff << PAGE_SHIFT; + u64 len = vma->vm_end - vma->vm_start; + + if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { + if (start + len > ctx->afu->adapter->ps_size) + return -EINVAL; + + if (cxl_is_power9()) { + /* + * Make sure there is a valid problem state + * area space for this AFU. + */ + if (ctx->master && !ctx->afu->psa) { + pr_devel("AFU doesn't support mmio space\n"); + return -EINVAL; + } + + /* Can't mmap until the AFU is enabled */ + if (!ctx->afu->enabled) + return -EBUSY; + } + } else { + if (start + len > ctx->psn_size) + return -EINVAL; + + /* Make sure there is a valid per process space for this AFU */ + if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { + pr_devel("AFU doesn't support mmio space\n"); + return -EINVAL; + } + + /* Can't mmap until the AFU is enabled */ + if (!ctx->afu->enabled) + return -EBUSY; + } + + pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__, + ctx->psn_phys, ctx->pe , ctx->master); + + vma->vm_flags |= VM_IO | VM_PFNMAP; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_ops = &cxl_mmap_vmops; + return 0; +} + +/* + * Detach a context from the hardware. This disables interrupts and doesn't + * return until all outstanding interrupts for this context have completed. The + * hardware should no longer access *ctx after this has returned. + */ +int __detach_context(struct cxl_context *ctx) +{ + enum cxl_context_status status; + + mutex_lock(&ctx->status_mutex); + status = ctx->status; + ctx->status = CLOSED; + mutex_unlock(&ctx->status_mutex); + if (status != STARTED) + return -EBUSY; + + /* Only warn if we detached while the link was OK. + * If detach fails when hw is down, we don't care. 
+ */ + WARN_ON(cxl_ops->detach_process(ctx) && + cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)); + flush_work(&ctx->fault_work); /* Only needed for dedicated process */ + + /* + * Wait until no further interrupts are presented by the PSL + * for this context. + */ + if (cxl_ops->irq_wait) + cxl_ops->irq_wait(ctx); + + /* release the reference to the group leader and mm handling pid */ + put_pid(ctx->pid); + + cxl_ctx_put(); + + /* Decrease the attached context count on the adapter */ + cxl_adapter_context_put(ctx->afu->adapter); + + /* Decrease the mm count on the context */ + cxl_context_mm_count_put(ctx); + if (ctx->mm) + mm_context_remove_copro(ctx->mm); + ctx->mm = NULL; + + return 0; +} + +/* + * Detach the given context from the AFU. This doesn't actually + * free the context but it should stop the context running in hardware + * (ie. prevent this context from generating any further interrupts + * so that it can be freed). + */ +void cxl_context_detach(struct cxl_context *ctx) +{ + int rc; + + rc = __detach_context(ctx); + if (rc) + return; + + afu_release_irqs(ctx, ctx); + wake_up_all(&ctx->wq); +} + +/* + * Detach all contexts on the given AFU. + */ +void cxl_context_detach_all(struct cxl_afu *afu) +{ + struct cxl_context *ctx; + int tmp; + + mutex_lock(&afu->contexts_lock); + idr_for_each_entry(&afu->contexts_idr, ctx, tmp) { + /* + * Anything done in here needs to be setup before the IDR is + * created and torn down after the IDR removed + */ + cxl_context_detach(ctx); + + /* + * We are force detaching - remove any active PSA mappings so + * userspace cannot interfere with the card if it comes back. + * Easiest way to exercise this is to unbind and rebind the + * driver via sysfs while it is in use. + */ + mutex_lock(&ctx->mapping_lock); + if (ctx->mapping) + unmap_mapping_range(ctx->mapping, 0, 0, 1); + mutex_unlock(&ctx->mapping_lock); + } + mutex_unlock(&afu->contexts_lock); +} + +static void reclaim_ctx(struct rcu_head *rcu) +{ + struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu); + + if (cxl_is_power8()) + free_page((u64)ctx->sstp); + if (ctx->ff_page) + __free_page(ctx->ff_page); + ctx->sstp = NULL; + + kfree(ctx->irq_bitmap); + + /* Drop ref to the afu device taken during cxl_context_init */ + cxl_afu_put(ctx->afu); + + kfree(ctx); +} + +void cxl_context_free(struct cxl_context *ctx) +{ + if (ctx->kernelapi && ctx->mapping) + cxl_release_mapping(ctx); + mutex_lock(&ctx->afu->contexts_lock); + idr_remove(&ctx->afu->contexts_idr, ctx->pe); + mutex_unlock(&ctx->afu->contexts_lock); + call_rcu(&ctx->rcu, reclaim_ctx); +} + +void cxl_context_mm_count_get(struct cxl_context *ctx) +{ + if (ctx->mm) + mmgrab(ctx->mm); +} + +void cxl_context_mm_count_put(struct cxl_context *ctx) +{ + if (ctx->mm) + mmdrop(ctx->mm); +} diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h new file mode 100644 index 000000000..5dc0f6093 --- /dev/null +++ b/drivers/misc/cxl/cxl.h @@ -0,0 +1,1132 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2014 IBM Corp. 
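+ */

/*
 * For orientation, a rough sketch (not part of this patch) of the
 * userspace counterpart of the attach/mmap path implemented above.
 * The device path, WED and problem-state size are AFU-specific; the
 * ioctl number and structure come from uapi/misc/cxl.h.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <misc/cxl.h>

static volatile uint64_t *attach_and_map(const char *path, uint64_t wed,
					 size_t ps_size)
{
	struct cxl_ioctl_start_work work = {
		.work_element_descriptor = wed,
	};
	void *ps;
	int fd;

	fd = open(path, O_RDWR);	/* e.g. "/dev/cxl/afu0.0s" */
	if (fd < 0)
		return NULL;

	/* attach; faults on the mapping raise SIGBUS until started */
	if (ioctl(fd, CXL_IOCTL_START_WORK, &work) < 0) {
		close(fd);
		return NULL;
	}

	/* served by cxl_context_iomap()/cxl_mmap_fault() above */
	ps = mmap(NULL, ps_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ps == MAP_FAILED) {
		close(fd);
		return NULL;
	}
	return ps;
}

+/*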
+ */
+
+#ifndef _CXL_H_
+#define _CXL_H_
+
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/pid.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <asm/cputable.h>
+#include <asm/mmu.h>
+#include <asm/reg.h>
+#include <misc/cxl-base.h>
+
+#include <misc/cxl.h>
+#include <uapi/misc/cxl.h>
+
+extern uint cxl_verbose;
+
+#define CXL_TIMEOUT 5
+
+/*
+ * Bump version each time a user API change is made, whether it is
+ * backwards compatible or not.
+ */
+#define CXL_API_VERSION 3
+#define CXL_API_VERSION_COMPATIBLE 1
+
+/*
+ * Opaque types to avoid accidentally passing registers for the wrong MMIO
+ *
+ * At the end of the day, I'm not married to using typedef here, but it might
+ * (and has!) help avoid bugs like mixing up CXL_PSL_CtxTime and
+ * CXL_PSL_CtxTime_An, or calling cxl_p1n_write instead of cxl_p1_write.
+ *
+ * I'm quite happy if these are changed back to #defines before upstreaming;
+ * it should be little more than a regexp search+replace operation in this
+ * file.
+ */
+typedef struct {
+ const int x;
+} cxl_p1_reg_t;
+typedef struct {
+ const int x;
+} cxl_p1n_reg_t;
+typedef struct {
+ const int x;
+} cxl_p2n_reg_t;
+#define cxl_reg_off(reg) \
+ (reg.x)
+
+/* Memory maps. Ref CXL Appendix A */
+
+/* PSL Privilege 1 Memory Map */
+/* Configuration and Control area - CAIA 1&2 */
+static const cxl_p1_reg_t CXL_PSL_CtxTime = {0x0000};
+static const cxl_p1_reg_t CXL_PSL_ErrIVTE = {0x0008};
+static const cxl_p1_reg_t CXL_PSL_KEY1 = {0x0010};
+static const cxl_p1_reg_t CXL_PSL_KEY2 = {0x0018};
+static const cxl_p1_reg_t CXL_PSL_Control = {0x0020};
+/* Downloading */
+static const cxl_p1_reg_t CXL_PSL_DLCNTL = {0x0060};
+static const cxl_p1_reg_t CXL_PSL_DLADDR = {0x0068};
+
+/* PSL Lookaside Buffer Management Area - CAIA 1 */
+static const cxl_p1_reg_t CXL_PSL_LBISEL = {0x0080};
+static const cxl_p1_reg_t CXL_PSL_SLBIE = {0x0088};
+static const cxl_p1_reg_t CXL_PSL_SLBIA = {0x0090};
+static const cxl_p1_reg_t CXL_PSL_TLBIE = {0x00A0};
+static const cxl_p1_reg_t CXL_PSL_TLBIA = {0x00A8};
+static const cxl_p1_reg_t CXL_PSL_AFUSEL = {0x00B0};
+
+/* 0x00C0:7EFF Implementation dependent area */
+/* PSL registers - CAIA 1 */
+static const cxl_p1_reg_t CXL_PSL_FIR1 = {0x0100};
+static const cxl_p1_reg_t CXL_PSL_FIR2 = {0x0108};
+static const cxl_p1_reg_t CXL_PSL_Timebase = {0x0110};
+static const cxl_p1_reg_t CXL_PSL_VERSION = {0x0118};
+static const cxl_p1_reg_t CXL_PSL_RESLCKTO = {0x0128};
+static const cxl_p1_reg_t CXL_PSL_TB_CTLSTAT = {0x0140};
+static const cxl_p1_reg_t CXL_PSL_FIR_CNTL = {0x0148};
+static const cxl_p1_reg_t CXL_PSL_DSNDCTL = {0x0150};
+static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158};
+static const cxl_p1_reg_t CXL_PSL_TRACE = {0x0170};
+/* PSL registers - CAIA 2 */
+static const cxl_p1_reg_t CXL_PSL9_CONTROL = {0x0020};
+static const cxl_p1_reg_t CXL_XSL9_INV = {0x0110};
+static const cxl_p1_reg_t CXL_XSL9_DBG = {0x0130};
+static const cxl_p1_reg_t CXL_XSL9_DEF = {0x0140};
+static const cxl_p1_reg_t CXL_XSL9_DSNCTL = {0x0168};
+static const cxl_p1_reg_t CXL_PSL9_FIR1 = {0x0300};
+static const cxl_p1_reg_t CXL_PSL9_FIR_MASK = {0x0308};
+static const cxl_p1_reg_t CXL_PSL9_Timebase = {0x0310};
+static const cxl_p1_reg_t CXL_PSL9_DEBUG = {0x0320};
+static const cxl_p1_reg_t CXL_PSL9_FIR_CNTL = {0x0348};
+static const cxl_p1_reg_t CXL_PSL9_DSNDCTL = {0x0350};
+static const cxl_p1_reg_t CXL_PSL9_TB_CTLSTAT = {0x0340};
+static const 
cxl_p1_reg_t CXL_PSL9_TRACECFG = {0x0368}; +static const cxl_p1_reg_t CXL_PSL9_APCDEDALLOC = {0x0378}; +static const cxl_p1_reg_t CXL_PSL9_APCDEDTYPE = {0x0380}; +static const cxl_p1_reg_t CXL_PSL9_TNR_ADDR = {0x0388}; +static const cxl_p1_reg_t CXL_PSL9_CTCCFG = {0x0390}; +static const cxl_p1_reg_t CXL_PSL9_GP_CT = {0x0398}; +static const cxl_p1_reg_t CXL_XSL9_IERAT = {0x0588}; +static const cxl_p1_reg_t CXL_XSL9_ILPP = {0x0590}; + +/* 0x7F00:7FFF Reserved PCIe MSI-X Pending Bit Array area */ +/* 0x8000:FFFF Reserved PCIe MSI-X Table Area */ + +/* PSL Slice Privilege 1 Memory Map */ +/* Configuration Area - CAIA 1&2 */ +static const cxl_p1n_reg_t CXL_PSL_SR_An = {0x00}; +static const cxl_p1n_reg_t CXL_PSL_LPID_An = {0x08}; +static const cxl_p1n_reg_t CXL_PSL_AMBAR_An = {0x10}; +static const cxl_p1n_reg_t CXL_PSL_SPOffset_An = {0x18}; +static const cxl_p1n_reg_t CXL_PSL_ID_An = {0x20}; +static const cxl_p1n_reg_t CXL_PSL_SERR_An = {0x28}; +/* Memory Management and Lookaside Buffer Management - CAIA 1*/ +static const cxl_p1n_reg_t CXL_PSL_SDR_An = {0x30}; +/* Memory Management and Lookaside Buffer Management - CAIA 1&2 */ +static const cxl_p1n_reg_t CXL_PSL_AMOR_An = {0x38}; +/* Pointer Area - CAIA 1&2 */ +static const cxl_p1n_reg_t CXL_HAURP_An = {0x80}; +static const cxl_p1n_reg_t CXL_PSL_SPAP_An = {0x88}; +static const cxl_p1n_reg_t CXL_PSL_LLCMD_An = {0x90}; +/* Control Area - CAIA 1&2 */ +static const cxl_p1n_reg_t CXL_PSL_SCNTL_An = {0xA0}; +static const cxl_p1n_reg_t CXL_PSL_CtxTime_An = {0xA8}; +static const cxl_p1n_reg_t CXL_PSL_IVTE_Offset_An = {0xB0}; +static const cxl_p1n_reg_t CXL_PSL_IVTE_Limit_An = {0xB8}; +/* 0xC0:FF Implementation Dependent Area - CAIA 1&2 */ +static const cxl_p1n_reg_t CXL_PSL_FIR_SLICE_An = {0xC0}; +static const cxl_p1n_reg_t CXL_AFU_DEBUG_An = {0xC8}; +/* 0xC0:FF Implementation Dependent Area - CAIA 1 */ +static const cxl_p1n_reg_t CXL_PSL_APCALLOC_A = {0xD0}; +static const cxl_p1n_reg_t CXL_PSL_COALLOC_A = {0xD8}; +static const cxl_p1n_reg_t CXL_PSL_RXCTL_A = {0xE0}; +static const cxl_p1n_reg_t CXL_PSL_SLICE_TRACE = {0xE8}; + +/* PSL Slice Privilege 2 Memory Map */ +/* Configuration and Control Area - CAIA 1&2 */ +static const cxl_p2n_reg_t CXL_PSL_PID_TID_An = {0x000}; +static const cxl_p2n_reg_t CXL_CSRP_An = {0x008}; +/* Configuration and Control Area - CAIA 1 */ +static const cxl_p2n_reg_t CXL_AURP0_An = {0x010}; +static const cxl_p2n_reg_t CXL_AURP1_An = {0x018}; +static const cxl_p2n_reg_t CXL_SSTP0_An = {0x020}; +static const cxl_p2n_reg_t CXL_SSTP1_An = {0x028}; +/* Configuration and Control Area - CAIA 1 */ +static const cxl_p2n_reg_t CXL_PSL_AMR_An = {0x030}; +/* Segment Lookaside Buffer Management - CAIA 1 */ +static const cxl_p2n_reg_t CXL_SLBIE_An = {0x040}; +static const cxl_p2n_reg_t CXL_SLBIA_An = {0x048}; +static const cxl_p2n_reg_t CXL_SLBI_Select_An = {0x050}; +/* Interrupt Registers - CAIA 1&2 */ +static const cxl_p2n_reg_t CXL_PSL_DSISR_An = {0x060}; +static const cxl_p2n_reg_t CXL_PSL_DAR_An = {0x068}; +static const cxl_p2n_reg_t CXL_PSL_DSR_An = {0x070}; +static const cxl_p2n_reg_t CXL_PSL_TFC_An = {0x078}; +static const cxl_p2n_reg_t CXL_PSL_PEHandle_An = {0x080}; +static const cxl_p2n_reg_t CXL_PSL_ErrStat_An = {0x088}; +/* AFU Registers - CAIA 1&2 */ +static const cxl_p2n_reg_t CXL_AFU_Cntl_An = {0x090}; +static const cxl_p2n_reg_t CXL_AFU_ERR_An = {0x098}; +/* Work Element Descriptor - CAIA 1&2 */ +static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; +/* 0x0C0:FFF Implementation Dependent Area */ + +#define CXL_PSL_SPAP_Addr 
0x0ffffffffffff000ULL +#define CXL_PSL_SPAP_Size 0x0000000000000ff0ULL +#define CXL_PSL_SPAP_Size_Shift 4 +#define CXL_PSL_SPAP_V 0x0000000000000001ULL + +/****** CXL_PSL_Control ****************************************************/ +#define CXL_PSL_Control_tb (0x1ull << (63-63)) +#define CXL_PSL_Control_Fr (0x1ull << (63-31)) +#define CXL_PSL_Control_Fs_MASK (0x3ull << (63-29)) +#define CXL_PSL_Control_Fs_Complete (0x3ull << (63-29)) + +/****** CXL_PSL_DLCNTL *****************************************************/ +#define CXL_PSL_DLCNTL_D (0x1ull << (63-28)) +#define CXL_PSL_DLCNTL_C (0x1ull << (63-29)) +#define CXL_PSL_DLCNTL_E (0x1ull << (63-30)) +#define CXL_PSL_DLCNTL_S (0x1ull << (63-31)) +#define CXL_PSL_DLCNTL_CE (CXL_PSL_DLCNTL_C | CXL_PSL_DLCNTL_E) +#define CXL_PSL_DLCNTL_DCES (CXL_PSL_DLCNTL_D | CXL_PSL_DLCNTL_CE | CXL_PSL_DLCNTL_S) + +/****** CXL_PSL_SR_An ******************************************************/ +#define CXL_PSL_SR_An_SF MSR_SF /* 64bit */ +#define CXL_PSL_SR_An_TA (1ull << (63-1)) /* Tags active, GA1: 0 */ +#define CXL_PSL_SR_An_HV MSR_HV /* Hypervisor, GA1: 0 */ +#define CXL_PSL_SR_An_XLAT_hpt (0ull << (63-6))/* Hashed page table (HPT) mode */ +#define CXL_PSL_SR_An_XLAT_roh (2ull << (63-6))/* Radix on HPT mode */ +#define CXL_PSL_SR_An_XLAT_ror (3ull << (63-6))/* Radix on Radix mode */ +#define CXL_PSL_SR_An_BOT (1ull << (63-10)) /* Use the in-memory segment table */ +#define CXL_PSL_SR_An_PR MSR_PR /* Problem state, GA1: 1 */ +#define CXL_PSL_SR_An_ISL (1ull << (63-53)) /* Ignore Segment Large Page */ +#define CXL_PSL_SR_An_TC (1ull << (63-54)) /* Page Table secondary hash */ +#define CXL_PSL_SR_An_US (1ull << (63-56)) /* User state, GA1: X */ +#define CXL_PSL_SR_An_SC (1ull << (63-58)) /* Segment Table secondary hash */ +#define CXL_PSL_SR_An_R MSR_DR /* Relocate, GA1: 1 */ +#define CXL_PSL_SR_An_MP (1ull << (63-62)) /* Master Process */ +#define CXL_PSL_SR_An_LE (1ull << (63-63)) /* Little Endian */ + +/****** CXL_PSL_ID_An ****************************************************/ +#define CXL_PSL_ID_An_F (1ull << (63-31)) +#define CXL_PSL_ID_An_L (1ull << (63-30)) + +/****** CXL_PSL_SERR_An ****************************************************/ +#define CXL_PSL_SERR_An_afuto (1ull << (63-0)) +#define CXL_PSL_SERR_An_afudis (1ull << (63-1)) +#define CXL_PSL_SERR_An_afuov (1ull << (63-2)) +#define CXL_PSL_SERR_An_badsrc (1ull << (63-3)) +#define CXL_PSL_SERR_An_badctx (1ull << (63-4)) +#define CXL_PSL_SERR_An_llcmdis (1ull << (63-5)) +#define CXL_PSL_SERR_An_llcmdto (1ull << (63-6)) +#define CXL_PSL_SERR_An_afupar (1ull << (63-7)) +#define CXL_PSL_SERR_An_afudup (1ull << (63-8)) +#define CXL_PSL_SERR_An_IRQS ( \ + CXL_PSL_SERR_An_afuto | CXL_PSL_SERR_An_afudis | CXL_PSL_SERR_An_afuov | \ + CXL_PSL_SERR_An_badsrc | CXL_PSL_SERR_An_badctx | CXL_PSL_SERR_An_llcmdis | \ + CXL_PSL_SERR_An_llcmdto | CXL_PSL_SERR_An_afupar | CXL_PSL_SERR_An_afudup) +#define CXL_PSL_SERR_An_afuto_mask (1ull << (63-32)) +#define CXL_PSL_SERR_An_afudis_mask (1ull << (63-33)) +#define CXL_PSL_SERR_An_afuov_mask (1ull << (63-34)) +#define CXL_PSL_SERR_An_badsrc_mask (1ull << (63-35)) +#define CXL_PSL_SERR_An_badctx_mask (1ull << (63-36)) +#define CXL_PSL_SERR_An_llcmdis_mask (1ull << (63-37)) +#define CXL_PSL_SERR_An_llcmdto_mask (1ull << (63-38)) +#define CXL_PSL_SERR_An_afupar_mask (1ull << (63-39)) +#define CXL_PSL_SERR_An_afudup_mask (1ull << (63-40)) +#define CXL_PSL_SERR_An_IRQ_MASKS ( \ + CXL_PSL_SERR_An_afuto_mask | CXL_PSL_SERR_An_afudis_mask | CXL_PSL_SERR_An_afuov_mask | \ + 
CXL_PSL_SERR_An_badsrc_mask | CXL_PSL_SERR_An_badctx_mask | CXL_PSL_SERR_An_llcmdis_mask | \ + CXL_PSL_SERR_An_llcmdto_mask | CXL_PSL_SERR_An_afupar_mask | CXL_PSL_SERR_An_afudup_mask) + +#define CXL_PSL_SERR_An_AE (1ull << (63-30)) + +/****** CXL_PSL_SCNTL_An ****************************************************/ +#define CXL_PSL_SCNTL_An_CR (0x1ull << (63-15)) +/* Programming Modes: */ +#define CXL_PSL_SCNTL_An_PM_MASK (0xffffull << (63-31)) +#define CXL_PSL_SCNTL_An_PM_Shared (0x0000ull << (63-31)) +#define CXL_PSL_SCNTL_An_PM_OS (0x0001ull << (63-31)) +#define CXL_PSL_SCNTL_An_PM_Process (0x0002ull << (63-31)) +#define CXL_PSL_SCNTL_An_PM_AFU (0x0004ull << (63-31)) +#define CXL_PSL_SCNTL_An_PM_AFU_PBT (0x0104ull << (63-31)) +/* Purge Status (ro) */ +#define CXL_PSL_SCNTL_An_Ps_MASK (0x3ull << (63-39)) +#define CXL_PSL_SCNTL_An_Ps_Pending (0x1ull << (63-39)) +#define CXL_PSL_SCNTL_An_Ps_Complete (0x3ull << (63-39)) +/* Purge */ +#define CXL_PSL_SCNTL_An_Pc (0x1ull << (63-48)) +/* Suspend Status (ro) */ +#define CXL_PSL_SCNTL_An_Ss_MASK (0x3ull << (63-55)) +#define CXL_PSL_SCNTL_An_Ss_Pending (0x1ull << (63-55)) +#define CXL_PSL_SCNTL_An_Ss_Complete (0x3ull << (63-55)) +/* Suspend Control */ +#define CXL_PSL_SCNTL_An_Sc (0x1ull << (63-63)) + +/* AFU Slice Enable Status (ro) */ +#define CXL_AFU_Cntl_An_ES_MASK (0x7ull << (63-2)) +#define CXL_AFU_Cntl_An_ES_Disabled (0x0ull << (63-2)) +#define CXL_AFU_Cntl_An_ES_Enabled (0x4ull << (63-2)) +/* AFU Slice Enable */ +#define CXL_AFU_Cntl_An_E (0x1ull << (63-3)) +/* AFU Slice Reset status (ro) */ +#define CXL_AFU_Cntl_An_RS_MASK (0x3ull << (63-5)) +#define CXL_AFU_Cntl_An_RS_Pending (0x1ull << (63-5)) +#define CXL_AFU_Cntl_An_RS_Complete (0x2ull << (63-5)) +/* AFU Slice Reset */ +#define CXL_AFU_Cntl_An_RA (0x1ull << (63-7)) + +/****** CXL_SSTP0/1_An ******************************************************/ +/* These top bits are for the segment that CONTAINS the segment table */ +#define CXL_SSTP0_An_B_SHIFT SLB_VSID_SSIZE_SHIFT +#define CXL_SSTP0_An_KS (1ull << (63-2)) +#define CXL_SSTP0_An_KP (1ull << (63-3)) +#define CXL_SSTP0_An_N (1ull << (63-4)) +#define CXL_SSTP0_An_L (1ull << (63-5)) +#define CXL_SSTP0_An_C (1ull << (63-6)) +#define CXL_SSTP0_An_TA (1ull << (63-7)) +#define CXL_SSTP0_An_LP_SHIFT (63-9) /* 2 Bits */ +/* And finally, the virtual address & size of the segment table: */ +#define CXL_SSTP0_An_SegTableSize_SHIFT (63-31) /* 12 Bits */ +#define CXL_SSTP0_An_SegTableSize_MASK \ + (((1ull << 12) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT) +#define CXL_SSTP0_An_STVA_U_MASK ((1ull << (63-49))-1) +#define CXL_SSTP1_An_STVA_L_MASK (~((1ull << (63-55))-1)) +#define CXL_SSTP1_An_V (1ull << (63-63)) + +/****** CXL_PSL_SLBIE_[An] - CAIA 1 **************************************************/ +/* write: */ +#define CXL_SLBIE_C PPC_BIT(36) /* Class */ +#define CXL_SLBIE_SS PPC_BITMASK(37, 38) /* Segment Size */ +#define CXL_SLBIE_SS_SHIFT PPC_BITLSHIFT(38) +#define CXL_SLBIE_TA PPC_BIT(38) /* Tags Active */ +/* read: */ +#define CXL_SLBIE_MAX PPC_BITMASK(24, 31) +#define CXL_SLBIE_PENDING PPC_BITMASK(56, 63) + +/****** Common to all CXL_TLBIA/SLBIA_[An] - CAIA 1 **********************************/ +#define CXL_TLB_SLB_P (1ull) /* Pending (read) */ + +/****** Common to all CXL_TLB/SLB_IA/IE_[An] registers - CAIA 1 **********************/ +#define CXL_TLB_SLB_IQ_ALL (0ull) /* Inv qualifier */ +#define CXL_TLB_SLB_IQ_LPID (1ull) /* Inv qualifier */ +#define CXL_TLB_SLB_IQ_LPIDPID (3ull) /* Inv qualifier */ + +/****** CXL_PSL_AFUSEL 
******************************************************/ +#define CXL_PSL_AFUSEL_A (1ull << (63-55)) /* Adapter wide invalidates affect all AFUs */ + +/****** CXL_PSL_DSISR_An - CAIA 1 ****************************************************/ +#define CXL_PSL_DSISR_An_DS (1ull << (63-0)) /* Segment not found */ +#define CXL_PSL_DSISR_An_DM (1ull << (63-1)) /* PTE not found (See also: M) or protection fault */ +#define CXL_PSL_DSISR_An_ST (1ull << (63-2)) /* Segment Table PTE not found */ +#define CXL_PSL_DSISR_An_UR (1ull << (63-3)) /* AURP PTE not found */ +#define CXL_PSL_DSISR_TRANS (CXL_PSL_DSISR_An_DS | CXL_PSL_DSISR_An_DM | CXL_PSL_DSISR_An_ST | CXL_PSL_DSISR_An_UR) +#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */ +#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */ +#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */ +#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC) +/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */ +#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */ +#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */ +#define CXL_PSL_DSISR_An_A (1ull << (63-37)) /* AFU lock access to write through or cache inhibited storage */ +#define CXL_PSL_DSISR_An_S DSISR_ISSTORE /* Access was afu_wr or afu_zero */ +#define CXL_PSL_DSISR_An_K DSISR_KEYFAULT /* Access not permitted by virtual page class key protection */ + +/****** CXL_PSL_DSISR_An - CAIA 2 ****************************************************/ +#define CXL_PSL9_DSISR_An_TF (1ull << (63-3)) /* Translation fault */ +#define CXL_PSL9_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */ +#define CXL_PSL9_DSISR_An_AE (1ull << (63-5)) /* AFU Error */ +#define CXL_PSL9_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */ +#define CXL_PSL9_DSISR_An_S (1ull << (63-38)) /* TF for a write operation */ +#define CXL_PSL9_DSISR_PENDING (CXL_PSL9_DSISR_An_TF | CXL_PSL9_DSISR_An_PE | CXL_PSL9_DSISR_An_AE | CXL_PSL9_DSISR_An_OC) +/* + * NOTE: Bits 56:63 (Checkout Response Status) are valid when DSISR_An[TF] = 1 + * Status (0:7) Encoding + */ +#define CXL_PSL9_DSISR_An_CO_MASK 0x00000000000000ffULL +#define CXL_PSL9_DSISR_An_SF 0x0000000000000080ULL /* Segment Fault 0b10000000 */ +#define CXL_PSL9_DSISR_An_PF_SLR 0x0000000000000088ULL /* PTE not found (Single Level Radix) 0b10001000 */ +#define CXL_PSL9_DSISR_An_PF_RGC 0x000000000000008CULL /* PTE not found (Radix Guest (child)) 0b10001100 */ +#define CXL_PSL9_DSISR_An_PF_RGP 0x0000000000000090ULL /* PTE not found (Radix Guest (parent)) 0b10010000 */ +#define CXL_PSL9_DSISR_An_PF_HRH 0x0000000000000094ULL /* PTE not found (HPT/Radix Host) 0b10010100 */ +#define CXL_PSL9_DSISR_An_PF_STEG 0x000000000000009CULL /* PTE not found (STEG VA) 0b10011100 */ +#define CXL_PSL9_DSISR_An_URTCH 0x00000000000000B4ULL /* Unsupported Radix Tree Configuration 0b10110100 */ + +/****** CXL_PSL_TFC_An ******************************************************/ +#define CXL_PSL_TFC_An_A (1ull << (63-28)) /* Acknowledge non-translation fault */ +#define CXL_PSL_TFC_An_C (1ull << (63-29)) /* Continue (abort transaction) */ +#define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */ +#define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */ + +/****** CXL_PSL_DEBUG *****************************************************/ +#define CXL_PSL_DEBUG_CDC (1ull << (63-27)) /* Coherent Data cache support */ + 
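
/*
 * Note on the bit macros above and below: the CAIA specification numbers
 * bits IBM-style, with bit 0 as the MOST significant bit of the 64-bit
 * register, hence the recurring (1ull << (63-n)) pattern for "CAIA bit n".
 * For instance CXL_PSL_Control_Fr, CAIA bit 31, is
 *
 *	1ull << (63-31) == 0x0000000100000000ULL
 *
 * A hypothetical helper making this explicit would be:
 *
 *	#define CAIA_BIT(n)	(1ull << (63 - (n)))
 */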
+/****** CXL_XSL9_IERAT_ERAT - CAIA 2 **********************************/ +#define CXL_XSL9_IERAT_MLPID (1ull << (63-0)) /* Match LPID */ +#define CXL_XSL9_IERAT_MPID (1ull << (63-1)) /* Match PID */ +#define CXL_XSL9_IERAT_PRS (1ull << (63-4)) /* PRS bit for Radix invalidations */ +#define CXL_XSL9_IERAT_INVR (1ull << (63-3)) /* Invalidate Radix */ +#define CXL_XSL9_IERAT_IALL (1ull << (63-8)) /* Invalidate All */ +#define CXL_XSL9_IERAT_IINPROG (1ull << (63-63)) /* Invalidate in progress */ + +/* cxl_process_element->software_status */ +#define CXL_PE_SOFTWARE_STATE_V (1ul << (31 - 0)) /* Valid */ +#define CXL_PE_SOFTWARE_STATE_C (1ul << (31 - 29)) /* Complete */ +#define CXL_PE_SOFTWARE_STATE_S (1ul << (31 - 30)) /* Suspend */ +#define CXL_PE_SOFTWARE_STATE_T (1ul << (31 - 31)) /* Terminate */ + +/****** CXL_PSL_RXCTL_An (Implementation Specific) ************************** + * Controls AFU Hang Pulse, which sets the timeout for the AFU to respond to + * the PSL for any response (except MMIO). Timeouts will occur between 1x to 2x + * of the hang pulse frequency. + */ +#define CXL_PSL_RXCTL_AFUHP_4S 0x7000000000000000ULL + +/* SPA->sw_command_status */ +#define CXL_SPA_SW_CMD_MASK 0xffff000000000000ULL +#define CXL_SPA_SW_CMD_TERMINATE 0x0001000000000000ULL +#define CXL_SPA_SW_CMD_REMOVE 0x0002000000000000ULL +#define CXL_SPA_SW_CMD_SUSPEND 0x0003000000000000ULL +#define CXL_SPA_SW_CMD_RESUME 0x0004000000000000ULL +#define CXL_SPA_SW_CMD_ADD 0x0005000000000000ULL +#define CXL_SPA_SW_CMD_UPDATE 0x0006000000000000ULL +#define CXL_SPA_SW_STATE_MASK 0x0000ffff00000000ULL +#define CXL_SPA_SW_STATE_TERMINATED 0x0000000100000000ULL +#define CXL_SPA_SW_STATE_REMOVED 0x0000000200000000ULL +#define CXL_SPA_SW_STATE_SUSPENDED 0x0000000300000000ULL +#define CXL_SPA_SW_STATE_RESUMED 0x0000000400000000ULL +#define CXL_SPA_SW_STATE_ADDED 0x0000000500000000ULL +#define CXL_SPA_SW_STATE_UPDATED 0x0000000600000000ULL +#define CXL_SPA_SW_PSL_ID_MASK 0x00000000ffff0000ULL +#define CXL_SPA_SW_LINK_MASK 0x000000000000ffffULL + +#define CXL_MAX_SLICES 4 +#define MAX_AFU_MMIO_REGS 3 + +#define CXL_MODE_TIME_SLICED 0x4 +#define CXL_SUPPORTED_MODES (CXL_MODE_DEDICATED | CXL_MODE_DIRECTED) + +#define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */ +#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS) +#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS) + +#define CXL_PSL9_TRACEID_MAX 0xAU +#define CXL_PSL9_TRACESTATE_FIN 0x3U + +enum cxl_context_status { + CLOSED, + OPENED, + STARTED +}; + +enum prefault_modes { + CXL_PREFAULT_NONE, + CXL_PREFAULT_WED, + CXL_PREFAULT_ALL, +}; + +enum cxl_attrs { + CXL_ADAPTER_ATTRS, + CXL_AFU_MASTER_ATTRS, + CXL_AFU_ATTRS, +}; + +struct cxl_sste { + __be64 esid_data; + __be64 vsid_data; +}; + +#define to_cxl_adapter(d) container_of(d, struct cxl, dev) +#define to_cxl_afu(d) container_of(d, struct cxl_afu, dev) + +struct cxl_afu_native { + void __iomem *p1n_mmio; + void __iomem *afu_desc_mmio; + irq_hw_number_t psl_hwirq; + unsigned int psl_virq; + struct mutex spa_mutex; + /* + * Only the first part of the SPA is used for the process element + * linked list. The only other part that software needs to worry about + * is sw_command_status, which we store a separate pointer to. 
+ * Everything else in the SPA is only used by hardware
+ */
+ struct cxl_process_element *spa;
+ __be64 *sw_command_status;
+ unsigned int spa_size;
+ int spa_order;
+ int spa_max_procs;
+ u64 pp_offset;
+};
+
+struct cxl_afu_guest {
+ struct cxl_afu *parent;
+ u64 handle;
+ phys_addr_t p2n_phys;
+ u64 p2n_size;
+ int max_ints;
+ bool handle_err;
+ struct delayed_work work_err;
+ int previous_state;
+};
+
+struct cxl_afu {
+ struct cxl_afu_native *native;
+ struct cxl_afu_guest *guest;
+ irq_hw_number_t serr_hwirq;
+ unsigned int serr_virq;
+ char *psl_irq_name;
+ char *err_irq_name;
+ void __iomem *p2n_mmio;
+ phys_addr_t psn_phys;
+ u64 pp_size;
+
+ struct cxl *adapter;
+ struct device dev;
+ struct cdev afu_cdev_s, afu_cdev_m, afu_cdev_d;
+ struct device *chardev_s, *chardev_m, *chardev_d;
+ struct idr contexts_idr;
+ struct dentry *debugfs;
+ struct mutex contexts_lock;
+ spinlock_t afu_cntl_lock;
+
+ /* -1: AFU deconfigured/locked, >= 0: number of readers */
+ atomic_t configured_state;
+
+ /* AFU error buffer fields and bin attribute for sysfs */
+ u64 eb_len, eb_offset;
+ struct bin_attribute attr_eb;
+
+ /* pointer to the vphb */
+ struct pci_controller *phb;
+
+ int pp_irqs;
+ int irqs_max;
+ int num_procs;
+ int max_procs_virtualised;
+ int slice;
+ int modes_supported;
+ int current_mode;
+ int crs_num;
+ u64 crs_len;
+ u64 crs_offset;
+ struct list_head crs;
+ enum prefault_modes prefault_mode;
+ bool psa;
+ bool pp_psa;
+ bool enabled;
+};
+
+
+struct cxl_irq_name {
+ struct list_head list;
+ char *name;
+};
+
+struct irq_avail {
+ irq_hw_number_t offset;
+ irq_hw_number_t range;
+ unsigned long *bitmap;
+};
+
+/*
+ * This is a cxl context. If the PSL is in dedicated mode, there will be one
+ * of these per AFU. If in AFU directed there can be lots of these.
+ */
+struct cxl_context {
+ struct cxl_afu *afu;
+
+ /* Problem state MMIO */
+ phys_addr_t psn_phys;
+ u64 psn_size;
+
+ /* Used to unmap any mmaps when force detaching */
+ struct address_space *mapping;
+ struct mutex mapping_lock;
+ struct page *ff_page;
+ bool mmio_err_ff;
+ bool kernelapi;
+
+ spinlock_t sste_lock; /* Protects segment table entries */
+ struct cxl_sste *sstp;
+ u64 sstp0, sstp1;
+ unsigned int sst_size, sst_lru;
+
+ wait_queue_head_t wq;
+ /* use mm context associated with this pid for ds faults */
+ struct pid *pid;
+ spinlock_t lock; /* Protects pending_irq_mask, pending_fault and fault_addr */
+ /* Only used in PR mode */
+ u64 process_token;
+
+ /* driver private data */
+ void *priv;
+
+ unsigned long *irq_bitmap; /* Accessed from IRQ context */
+ struct cxl_irq_ranges irqs;
+ struct list_head irq_names;
+ u64 fault_addr;
+ u64 fault_dsisr;
+ u64 afu_err;
+
+ /*
+ * This status and its lock protect context start and detach
+ * from racing. The lock also prevents detach from racing with
+ * itself.
+ */
+ enum cxl_context_status status;
+ struct mutex status_mutex;
+
+
+ /* XXX: Is it possible to need multiple work items at once? */
+ struct work_struct fault_work;
+ u64 dsisr;
+ u64 dar;
+
+ struct cxl_process_element *elem;
+
+ /*
+ * pe is the process element handle, assigned by this driver when the
+ * context is initialized.
+ *
+ * external_pe is the PE shown outside of cxl.
+ * On bare-metal, pe=external_pe, because we decide what the handle is.
+ * In a guest, we only find out about the pe used by pHyp when the
+ * context is attached, and that's the value we want to report outside
+ * of cxl.
+ */ + int pe; + int external_pe; + + u32 irq_count; + bool pe_inserted; + bool master; + bool kernel; + bool pending_irq; + bool pending_fault; + bool pending_afu_err; + + /* Used by AFU drivers for driver specific event delivery */ + struct cxl_afu_driver_ops *afu_driver_ops; + atomic_t afu_driver_events; + + struct rcu_head rcu; + + struct mm_struct *mm; + + u16 tidr; + bool assign_tidr; +}; + +struct cxl_irq_info; + +struct cxl_service_layer_ops { + int (*adapter_regs_init)(struct cxl *adapter, struct pci_dev *dev); + int (*invalidate_all)(struct cxl *adapter); + int (*afu_regs_init)(struct cxl_afu *afu); + int (*sanitise_afu_regs)(struct cxl_afu *afu); + int (*register_serr_irq)(struct cxl_afu *afu); + void (*release_serr_irq)(struct cxl_afu *afu); + irqreturn_t (*handle_interrupt)(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info); + irqreturn_t (*fail_irq)(struct cxl_afu *afu, struct cxl_irq_info *irq_info); + int (*activate_dedicated_process)(struct cxl_afu *afu); + int (*attach_afu_directed)(struct cxl_context *ctx, u64 wed, u64 amr); + int (*attach_dedicated_process)(struct cxl_context *ctx, u64 wed, u64 amr); + void (*update_dedicated_ivtes)(struct cxl_context *ctx); + void (*debugfs_add_adapter_regs)(struct cxl *adapter, struct dentry *dir); + void (*debugfs_add_afu_regs)(struct cxl_afu *afu, struct dentry *dir); + void (*psl_irq_dump_registers)(struct cxl_context *ctx); + void (*err_irq_dump_registers)(struct cxl *adapter); + void (*debugfs_stop_trace)(struct cxl *adapter); + void (*write_timebase_ctrl)(struct cxl *adapter); + u64 (*timebase_read)(struct cxl *adapter); + int capi_mode; + bool needs_reset_before_disable; +}; + +struct cxl_native { + u64 afu_desc_off; + u64 afu_desc_size; + void __iomem *p1_mmio; + void __iomem *p2_mmio; + irq_hw_number_t err_hwirq; + unsigned int err_virq; + u64 ps_off; + bool no_data_cache; /* set if no data cache on the card */ + const struct cxl_service_layer_ops *sl_ops; +}; + +struct cxl_guest { + struct platform_device *pdev; + int irq_nranges; + struct cdev cdev; + irq_hw_number_t irq_base_offset; + struct irq_avail *irq_avail; + spinlock_t irq_alloc_lock; + u64 handle; + char *status; + u16 vendor; + u16 device; + u16 subsystem_vendor; + u16 subsystem; +}; + +struct cxl { + struct cxl_native *native; + struct cxl_guest *guest; + spinlock_t afu_list_lock; + struct cxl_afu *afu[CXL_MAX_SLICES]; + struct device dev; + struct dentry *trace; + struct dentry *psl_err_chk; + struct dentry *debugfs; + char *irq_name; + struct bin_attribute cxl_attr; + int adapter_num; + int user_irqs; + u64 ps_size; + u16 psl_rev; + u16 base_image; + u8 vsec_status; + u8 caia_major; + u8 caia_minor; + u8 slices; + bool user_image_loaded; + bool perst_loads_image; + bool perst_select_user; + bool perst_same_image; + bool psl_timebase_synced; + bool tunneled_ops_supported; + + /* + * number of contexts mapped on to this card. Possible values are: + * >0: Number of contexts mapped and new one can be mapped. + * 0: No active contexts and new ones can be mapped. + * -1: No contexts mapped and new ones cannot be mapped. 
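+ *
+ * A sketch of how the helpers declared later in this header drive
+ * this counter (assumption: the real implementations live in the
+ * adapter code and may differ in detail):
+ *
+ *	get():    atomic_add_unless(&contexts_num, 1, -1),
+ *	          i.e. fails with -EBUSY once locked at -1
+ *	put():    decrement, never below 0
+ *	lock():   atomic_cmpxchg(&contexts_num, 0, -1) == 0,
+ *	          i.e. only succeeds with no contexts attached
+ *	unlock(): set back to 0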
+ */ + atomic_t contexts_num; +}; + +int cxl_pci_alloc_one_irq(struct cxl *adapter); +void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq); +int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num); +void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter); +int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq); +int cxl_update_image_control(struct cxl *adapter); +int cxl_pci_reset(struct cxl *adapter); +void cxl_pci_release_afu(struct device *dev); +ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len); + +/* common == phyp + powernv - CAIA 1&2 */ +struct cxl_process_element_common { + __be32 tid; + __be32 pid; + __be64 csrp; + union { + struct { + __be64 aurp0; + __be64 aurp1; + __be64 sstp0; + __be64 sstp1; + } psl8; /* CAIA 1 */ + struct { + u8 reserved2[8]; + u8 reserved3[8]; + u8 reserved4[8]; + u8 reserved5[8]; + } psl9; /* CAIA 2 */ + } u; + __be64 amr; + u8 reserved6[4]; + __be64 wed; +} __packed; + +/* just powernv - CAIA 1&2 */ +struct cxl_process_element { + __be64 sr; + __be64 SPOffset; + union { + __be64 sdr; /* CAIA 1 */ + u8 reserved1[8]; /* CAIA 2 */ + } u; + __be64 haurp; + __be32 ctxtime; + __be16 ivte_offsets[4]; + __be16 ivte_ranges[4]; + __be32 lpid; + struct cxl_process_element_common common; + __be32 software_state; +} __packed; + +static inline bool cxl_adapter_link_ok(struct cxl *cxl, struct cxl_afu *afu) +{ + struct pci_dev *pdev; + + if (cpu_has_feature(CPU_FTR_HVMODE)) { + pdev = to_pci_dev(cxl->dev.parent); + return !pci_channel_offline(pdev); + } + return true; +} + +static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg) +{ + WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE)); + return cxl->native->p1_mmio + cxl_reg_off(reg); +} + +static inline void cxl_p1_write(struct cxl *cxl, cxl_p1_reg_t reg, u64 val) +{ + if (likely(cxl_adapter_link_ok(cxl, NULL))) + out_be64(_cxl_p1_addr(cxl, reg), val); +} + +static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg) +{ + if (likely(cxl_adapter_link_ok(cxl, NULL))) + return in_be64(_cxl_p1_addr(cxl, reg)); + else + return ~0ULL; +} + +static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg) +{ + WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE)); + return afu->native->p1n_mmio + cxl_reg_off(reg); +} + +static inline void cxl_p1n_write(struct cxl_afu *afu, cxl_p1n_reg_t reg, u64 val) +{ + if (likely(cxl_adapter_link_ok(afu->adapter, afu))) + out_be64(_cxl_p1n_addr(afu, reg), val); +} + +static inline u64 cxl_p1n_read(struct cxl_afu *afu, cxl_p1n_reg_t reg) +{ + if (likely(cxl_adapter_link_ok(afu->adapter, afu))) + return in_be64(_cxl_p1n_addr(afu, reg)); + else + return ~0ULL; +} + +static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg) +{ + return afu->p2n_mmio + cxl_reg_off(reg); +} + +static inline void cxl_p2n_write(struct cxl_afu *afu, cxl_p2n_reg_t reg, u64 val) +{ + if (likely(cxl_adapter_link_ok(afu->adapter, afu))) + out_be64(_cxl_p2n_addr(afu, reg), val); +} + +static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg) +{ + if (likely(cxl_adapter_link_ok(afu->adapter, afu))) + return in_be64(_cxl_p2n_addr(afu, reg)); + else + return ~0ULL; +} + +static inline bool cxl_is_power8(void) +{ + if ((pvr_version_is(PVR_POWER8E)) || + (pvr_version_is(PVR_POWER8NVL)) || + (pvr_version_is(PVR_POWER8))) + return true; + return false; +} + +static inline bool cxl_is_power9(void) +{ + if (pvr_version_is(PVR_POWER9)) + return 
true; + return false; +} + +ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, + loff_t off, size_t count); + + +struct cxl_calls { + void (*cxl_slbia)(struct mm_struct *mm); + struct module *owner; +}; +int register_cxl_calls(struct cxl_calls *calls); +void unregister_cxl_calls(struct cxl_calls *calls); +int cxl_update_properties(struct device_node *dn, struct property *new_prop); + +void cxl_remove_adapter_nr(struct cxl *adapter); + +void cxl_release_spa(struct cxl_afu *afu); + +dev_t cxl_get_dev(void); +int cxl_file_init(void); +void cxl_file_exit(void); +int cxl_register_adapter(struct cxl *adapter); +int cxl_register_afu(struct cxl_afu *afu); +int cxl_chardev_d_afu_add(struct cxl_afu *afu); +int cxl_chardev_m_afu_add(struct cxl_afu *afu); +int cxl_chardev_s_afu_add(struct cxl_afu *afu); +void cxl_chardev_afu_remove(struct cxl_afu *afu); + +void cxl_context_detach_all(struct cxl_afu *afu); +void cxl_context_free(struct cxl_context *ctx); +void cxl_context_detach(struct cxl_context *ctx); + +int cxl_sysfs_adapter_add(struct cxl *adapter); +void cxl_sysfs_adapter_remove(struct cxl *adapter); +int cxl_sysfs_afu_add(struct cxl_afu *afu); +void cxl_sysfs_afu_remove(struct cxl_afu *afu); +int cxl_sysfs_afu_m_add(struct cxl_afu *afu); +void cxl_sysfs_afu_m_remove(struct cxl_afu *afu); + +struct cxl *cxl_alloc_adapter(void); +struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice); +int cxl_afu_select_best_mode(struct cxl_afu *afu); + +int cxl_native_register_psl_irq(struct cxl_afu *afu); +void cxl_native_release_psl_irq(struct cxl_afu *afu); +int cxl_native_register_psl_err_irq(struct cxl *adapter); +void cxl_native_release_psl_err_irq(struct cxl *adapter); +int cxl_native_register_serr_irq(struct cxl_afu *afu); +void cxl_native_release_serr_irq(struct cxl_afu *afu); +int afu_register_irqs(struct cxl_context *ctx, u32 count); +void afu_release_irqs(struct cxl_context *ctx, void *cookie); +void afu_irq_name_free(struct cxl_context *ctx); + +int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr); +int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr); +int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu); +int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu); +int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr); +int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr); +void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx); +void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx); + +#ifdef CONFIG_DEBUG_FS + +void cxl_debugfs_init(void); +void cxl_debugfs_exit(void); +void cxl_debugfs_adapter_add(struct cxl *adapter); +void cxl_debugfs_adapter_remove(struct cxl *adapter); +void cxl_debugfs_afu_add(struct cxl_afu *afu); +void cxl_debugfs_afu_remove(struct cxl_afu *afu); +void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir); +void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir); +void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir); +void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir); + +#else /* CONFIG_DEBUG_FS */ + +static inline void __init cxl_debugfs_init(void) +{ +} + +static inline void cxl_debugfs_exit(void) +{ +} + +static inline void cxl_debugfs_adapter_add(struct cxl *adapter) +{ +} + +static inline void cxl_debugfs_adapter_remove(struct cxl *adapter) +{ +} + +static inline void cxl_debugfs_afu_add(struct cxl_afu *afu) +{ +} + +static 
inline void cxl_debugfs_afu_remove(struct cxl_afu *afu)
+{
+}
+
+static inline void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter,
+ struct dentry *dir)
+{
+}
+
+static inline void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter,
+ struct dentry *dir)
+{
+}
+
+static inline void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir)
+{
+}
+
+static inline void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+void cxl_handle_fault(struct work_struct *work);
+void cxl_prefault(struct cxl_context *ctx, u64 wed);
+int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar);
+
+struct cxl *get_cxl_adapter(int num);
+int cxl_alloc_sst(struct cxl_context *ctx);
+void cxl_dump_debug_buffer(void *addr, size_t size);
+
+void init_cxl_native(void);
+
+struct cxl_context *cxl_context_alloc(void);
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
+void cxl_context_set_mapping(struct cxl_context *ctx,
+ struct address_space *mapping);
+void cxl_context_free(struct cxl_context *ctx);
+int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
+unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
+ irq_handler_t handler, void *cookie, const char *name);
+void cxl_unmap_irq(unsigned int virq, void *cookie);
+int __detach_context(struct cxl_context *ctx);
+
+/*
+ * This must match the layout of the H_COLLECT_CA_INT_INFO retbuf defined
+ * in PAPR.
+ * Field pid_tid is now 'reserved' because it is no longer used on
+ * bare-metal. In a guest environment, PSL_PID_An is located in the upper
+ * 32 bits and the PSL_TID_An register in the lower 32 bits.
+ */
+struct cxl_irq_info {
+ u64 dsisr;
+ u64 dar;
+ u64 dsr;
+ u64 reserved;
+ u64 afu_err;
+ u64 errstat;
+ u64 proc_handle;
+ u64 padding[2]; /* to match the expected retbuf size for plpar_hcall9 */
+};
+
+void cxl_assign_psn_space(struct cxl_context *ctx);
+int cxl_invalidate_all_psl9(struct cxl *adapter);
+int cxl_invalidate_all_psl8(struct cxl *adapter);
+irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
+irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
+irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info);
+int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler,
+ void *cookie, irq_hw_number_t *dest_hwirq,
+ unsigned int *dest_virq, const char *name);
+
+int cxl_check_error(struct cxl_afu *afu);
+int cxl_afu_slbia(struct cxl_afu *afu);
+int cxl_data_cache_flush(struct cxl *adapter);
+int cxl_afu_disable(struct cxl_afu *afu);
+int cxl_psl_purge(struct cxl_afu *afu);
+int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
+ u32 *phb_index, u64 *capp_unit_id);
+int cxl_slot_is_switched(struct pci_dev *dev);
+int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg);
+u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9);
+
+void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx);
+void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx);
+void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter);
+void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter);
+int cxl_pci_vphb_add(struct cxl_afu *afu);
+void cxl_pci_vphb_remove(struct cxl_afu *afu);
+void cxl_release_mapping(struct cxl_context *ctx);
+
+extern struct pci_driver cxl_pci_driver;
+extern struct platform_driver cxl_of_driver;
+int afu_allocate_irqs(struct cxl_context *ctx, 
u32 count); + +int afu_open(struct inode *inode, struct file *file); +int afu_release(struct inode *inode, struct file *file); +long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +int afu_mmap(struct file *file, struct vm_area_struct *vm); +__poll_t afu_poll(struct file *file, struct poll_table_struct *poll); +ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off); +extern const struct file_operations afu_fops; + +struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *dev); +void cxl_guest_remove_adapter(struct cxl *adapter); +int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np); +int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np); +ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len); +ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len); +int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np); +void cxl_guest_remove_afu(struct cxl_afu *afu); +int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np); +int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *afu_np); +int cxl_guest_add_chardev(struct cxl *adapter); +void cxl_guest_remove_chardev(struct cxl *adapter); +void cxl_guest_reload_module(struct cxl *adapter); +int cxl_of_probe(struct platform_device *pdev); + +struct cxl_backend_ops { + struct module *module; + int (*adapter_reset)(struct cxl *adapter); + int (*alloc_one_irq)(struct cxl *adapter); + void (*release_one_irq)(struct cxl *adapter, int hwirq); + int (*alloc_irq_ranges)(struct cxl_irq_ranges *irqs, + struct cxl *adapter, unsigned int num); + void (*release_irq_ranges)(struct cxl_irq_ranges *irqs, + struct cxl *adapter); + int (*setup_irq)(struct cxl *adapter, unsigned int hwirq, + unsigned int virq); + irqreturn_t (*handle_psl_slice_error)(struct cxl_context *ctx, + u64 dsisr, u64 errstat); + irqreturn_t (*psl_interrupt)(int irq, void *data); + int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask); + void (*irq_wait)(struct cxl_context *ctx); + int (*attach_process)(struct cxl_context *ctx, bool kernel, + u64 wed, u64 amr); + int (*detach_process)(struct cxl_context *ctx); + void (*update_ivtes)(struct cxl_context *ctx); + bool (*support_attributes)(const char *attr_name, enum cxl_attrs type); + bool (*link_ok)(struct cxl *cxl, struct cxl_afu *afu); + void (*release_afu)(struct device *dev); + ssize_t (*afu_read_err_buffer)(struct cxl_afu *afu, char *buf, + loff_t off, size_t count); + int (*afu_check_and_enable)(struct cxl_afu *afu); + int (*afu_activate_mode)(struct cxl_afu *afu, int mode); + int (*afu_deactivate_mode)(struct cxl_afu *afu, int mode); + int (*afu_reset)(struct cxl_afu *afu); + int (*afu_cr_read8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 *val); + int (*afu_cr_read16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 *val); + int (*afu_cr_read32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 *val); + int (*afu_cr_read64)(struct cxl_afu *afu, int cr_idx, u64 offset, u64 *val); + int (*afu_cr_write8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 val); + int (*afu_cr_write16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 val); + int (*afu_cr_write32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 val); + ssize_t (*read_adapter_vpd)(struct cxl *adapter, void *buf, size_t count); +}; +extern const struct cxl_backend_ops cxl_native_ops; +extern const struct cxl_backend_ops cxl_guest_ops; +extern 
const struct cxl_backend_ops *cxl_ops;
+
+/* check if the given pci_dev is on the cxl vphb bus */
+bool cxl_pci_is_vphb_device(struct pci_dev *dev);
+
+/* decode AFU error bits in the PSL register PSL_SERR_An */
+void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr);
+
+/*
+ * Increments the number of attached contexts on an adapter.
+ * Returns -EBUSY if the adapter_context_lock is taken.
+ */
+int cxl_adapter_context_get(struct cxl *adapter);
+
+/* Decrements the number of attached contexts on an adapter */
+void cxl_adapter_context_put(struct cxl *adapter);
+
+/* If no active contexts then prevents contexts from being attached */
+int cxl_adapter_context_lock(struct cxl *adapter);
+
+/* Unlock the contexts-lock if taken. Warn and force unlock otherwise */
+void cxl_adapter_context_unlock(struct cxl *adapter);
+
+/* Increments the reference count on "struct mm_struct" */
+void cxl_context_mm_count_get(struct cxl_context *ctx);
+
+/* Decrements the reference count on "struct mm_struct" */
+void cxl_context_mm_count_put(struct cxl_context *ctx);
+
+#endif
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
new file mode 100644
index 000000000..2a1783f32
--- /dev/null
+++ b/drivers/misc/cxl/cxllib.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2017 IBM Corp.
+ */
+
+#include <linux/hugetlb.h>
+#include <linux/sched/mm.h>
+#include <asm/pnv-pci.h>
+#include <misc/cxllib.h>
+
+#include "cxl.h"
+
+#define CXL_INVALID_DRA ~0ull
+#define CXL_DUMMY_READ_SIZE 128
+#define CXL_DUMMY_READ_ALIGN 8
+#define CXL_CAPI_WINDOW_START 0x2000000000000ull
+#define CXL_CAPI_WINDOW_LOG_SIZE 48
+#define CXL_XSL_CONFIG_CURRENT_VERSION CXL_XSL_CONFIG_VERSION1
+
+
+bool cxllib_slot_is_supported(struct pci_dev *dev, unsigned long flags)
+{
+ int rc;
+ u32 phb_index;
+ u64 chip_id, capp_unit_id;
+
+ /* No flags currently supported */
+ if (flags)
+ return false;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return false;
+
+ if (!cxl_is_power9())
+ return false;
+
+ if (cxl_slot_is_switched(dev))
+ return false;
+
+ /* on p9, some pci slots are not connected to a CAPP unit */
+ rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id);
+ if (rc)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(cxllib_slot_is_supported);
+
+static DEFINE_MUTEX(dra_mutex);
+static u64 dummy_read_addr = CXL_INVALID_DRA;
+
+static int allocate_dummy_read_buf(void)
+{
+ u64 buf, vaddr;
+ size_t buf_size;
+
+ /*
+ * The dummy read buffer is 128 bytes long, aligned on a
+ * 256-byte boundary, and we need its physical address.
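+ *
+ * A worked example with illustrative numbers (not from the code):
+ * with CXL_DUMMY_READ_ALIGN = 8 the padding below is 1 << 8 = 256
+ * bytes, so buf_size = 128 + 256 = 384. If kzalloc() returned
+ * 0x1008, then vaddr = (0x1008 + 0xff) & ~0xff = 0x1100, and
+ * 0x1100 + 128 = 0x1180 <= 0x1008 + 384 = 0x1188: the aligned
+ * 128-byte window always fits inside the allocation, which is
+ * what the WARN() below checks.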
+ */ + buf_size = CXL_DUMMY_READ_SIZE + (1ull << CXL_DUMMY_READ_ALIGN); + buf = (u64) kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + vaddr = (buf + (1ull << CXL_DUMMY_READ_ALIGN) - 1) & + (~0ull << CXL_DUMMY_READ_ALIGN); + + WARN((vaddr + CXL_DUMMY_READ_SIZE) > (buf + buf_size), + "Dummy read buffer alignment issue"); + dummy_read_addr = virt_to_phys((void *) vaddr); + return 0; +} + +int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg) +{ + int rc; + u32 phb_index; + u64 chip_id, capp_unit_id; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return -EINVAL; + + mutex_lock(&dra_mutex); + if (dummy_read_addr == CXL_INVALID_DRA) { + rc = allocate_dummy_read_buf(); + if (rc) { + mutex_unlock(&dra_mutex); + return rc; + } + } + mutex_unlock(&dra_mutex); + + rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id); + if (rc) + return rc; + + rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &cfg->dsnctl); + if (rc) + return rc; + + cfg->version = CXL_XSL_CONFIG_CURRENT_VERSION; + cfg->log_bar_size = CXL_CAPI_WINDOW_LOG_SIZE; + cfg->bar_addr = CXL_CAPI_WINDOW_START; + cfg->dra = dummy_read_addr; + return 0; +} +EXPORT_SYMBOL_GPL(cxllib_get_xsl_config); + +int cxllib_switch_phb_mode(struct pci_dev *dev, enum cxllib_mode mode, + unsigned long flags) +{ + int rc = 0; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return -EINVAL; + + switch (mode) { + case CXL_MODE_PCI: + /* + * We currently don't support going back to PCI mode + * However, we'll turn the invalidations off, so that + * the firmware doesn't have to ack them and can do + * things like reset, etc.. with no worries. + * So always return EPERM (can't go back to PCI) or + * EBUSY if we couldn't even turn off snooping + */ + rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_OFF); + if (rc) + rc = -EBUSY; + else + rc = -EPERM; + break; + case CXL_MODE_CXL: + /* DMA only supported on TVT1 for the time being */ + if (flags != CXL_MODE_DMA_TVT1) + return -EINVAL; + rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_DMA_TVT1); + if (rc) + return rc; + rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON); + break; + default: + rc = -EINVAL; + } + return rc; +} +EXPORT_SYMBOL_GPL(cxllib_switch_phb_mode); + +/* + * When switching the PHB to capi mode, the TVT#1 entry for + * the Partitionable Endpoint is set in bypass mode, like + * in PCI mode. + * Configure the device dma to use TVT#1, which is done + * by calling dma_set_mask() with a mask large enough. 
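+ *
+ * A minimal calling sketch (illustrative only, not part of the API
+ * contract): a client driver that has already switched the PHB with
+ * cxllib_switch_phb_mode(dev, CXL_MODE_CXL, CXL_MODE_DMA_TVT1) would
+ * follow up with:
+ *
+ *    rc = cxllib_set_device_dma(dev, 0);   // flags must be 0
+ *    if (rc)
+ *        return rc;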
+ */ +int cxllib_set_device_dma(struct pci_dev *dev, unsigned long flags) +{ + int rc; + + if (flags) + return -EINVAL; + + rc = dma_set_mask(&dev->dev, DMA_BIT_MASK(64)); + return rc; +} +EXPORT_SYMBOL_GPL(cxllib_set_device_dma); + +int cxllib_get_PE_attributes(struct task_struct *task, + unsigned long translation_mode, + struct cxllib_pe_attributes *attr) +{ + struct mm_struct *mm = NULL; + + if (translation_mode != CXL_TRANSLATED_MODE && + translation_mode != CXL_REAL_MODE) + return -EINVAL; + + attr->sr = cxl_calculate_sr(false, + task == NULL, + translation_mode == CXL_REAL_MODE, + true); + attr->lpid = mfspr(SPRN_LPID); + if (task) { + mm = get_task_mm(task); + if (mm == NULL) + return -EINVAL; + /* + * Caller is keeping a reference on mm_users for as long + * as XSL uses the memory context + */ + attr->pid = mm->context.id; + mmput(mm); + attr->tid = task->thread.tidr; + } else { + attr->pid = 0; + attr->tid = 0; + } + return 0; +} +EXPORT_SYMBOL_GPL(cxllib_get_PE_attributes); + +static int get_vma_info(struct mm_struct *mm, u64 addr, + u64 *vma_start, u64 *vma_end, + unsigned long *page_size) +{ + struct vm_area_struct *vma = NULL; + int rc = 0; + + mmap_read_lock(mm); + + vma = find_vma(mm, addr); + if (!vma) { + rc = -EFAULT; + goto out; + } + *page_size = vma_kernel_pagesize(vma); + *vma_start = vma->vm_start; + *vma_end = vma->vm_end; +out: + mmap_read_unlock(mm); + return rc; +} + +int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags) +{ + int rc; + u64 dar, vma_start, vma_end; + unsigned long page_size; + + if (mm == NULL) + return -EFAULT; + + /* + * The buffer we have to process can extend over several pages + * and may also cover several VMAs. + * We iterate over all the pages. The page size could vary + * between VMAs. + */ + rc = get_vma_info(mm, addr, &vma_start, &vma_end, &page_size); + if (rc) + return rc; + + for (dar = (addr & ~(page_size - 1)); dar < (addr + size); + dar += page_size) { + if (dar < vma_start || dar >= vma_end) { + /* + * We don't hold mm->mmap_lock while iterating, since + * the lock is required by one of the lower-level page + * fault processing functions and it could + * create a deadlock. + * + * It means the VMAs can be altered between 2 + * loop iterations and we could theoretically + * miss a page (however unlikely). But that's + * not really a problem, as the driver will + * retry access, get another page fault on the + * missing page and call us again. + */ + rc = get_vma_info(mm, dar, &vma_start, &vma_end, + &page_size); + if (rc) + return rc; + } + + rc = cxl_handle_mm_fault(mm, flags, dar); + if (rc) + return -EFAULT; + } + return 0; +} +EXPORT_SYMBOL_GPL(cxllib_handle_fault); diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c new file mode 100644 index 000000000..7b987bf49 --- /dev/null +++ b/drivers/misc/cxl/debugfs.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2014 IBM Corp. 
+ */ + +#include <linux/debugfs.h> +#include <linux/kernel.h> +#include <linux/slab.h> + +#include "cxl.h" + +static struct dentry *cxl_debugfs; + +/* Helpers to export CXL mmaped IO registers via debugfs */ +static int debugfs_io_u64_get(void *data, u64 *val) +{ + *val = in_be64((u64 __iomem *)data); + return 0; +} + +static int debugfs_io_u64_set(void *data, u64 val) +{ + out_be64((u64 __iomem *)data, val); + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set, + "0x%016llx\n"); + +static void debugfs_create_io_x64(const char *name, umode_t mode, + struct dentry *parent, u64 __iomem *value) +{ + debugfs_create_file_unsafe(name, mode, parent, (void __force *)value, + &fops_io_x64); +} + +void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir) +{ + debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR1)); + debugfs_create_io_x64("fir_mask", 0400, dir, + _cxl_p1_addr(adapter, CXL_PSL9_FIR_MASK)); + debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR_CNTL)); + debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_TRACECFG)); + debugfs_create_io_x64("debug", 0600, dir, + _cxl_p1_addr(adapter, CXL_PSL9_DEBUG)); + debugfs_create_io_x64("xsl-debug", 0600, dir, + _cxl_p1_addr(adapter, CXL_XSL9_DBG)); +} + +void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir) +{ + debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1)); + debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2)); + debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL)); + debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE)); +} + +void cxl_debugfs_adapter_add(struct cxl *adapter) +{ + struct dentry *dir; + char buf[32]; + + if (!cxl_debugfs) + return; + + snprintf(buf, 32, "card%i", adapter->adapter_num); + dir = debugfs_create_dir(buf, cxl_debugfs); + adapter->debugfs = dir; + + debugfs_create_io_x64("err_ivte", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_ErrIVTE)); + + if (adapter->native->sl_ops->debugfs_add_adapter_regs) + adapter->native->sl_ops->debugfs_add_adapter_regs(adapter, dir); +} + +void cxl_debugfs_adapter_remove(struct cxl *adapter) +{ + debugfs_remove_recursive(adapter->debugfs); +} + +void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir) +{ + debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An)); +} + +void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir) +{ + debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An)); + debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An)); + + debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An)); + debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An)); + debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An)); + debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE)); +} + +void cxl_debugfs_afu_add(struct cxl_afu *afu) +{ + struct dentry *dir; + char buf[32]; + + if (!afu->adapter->debugfs) + return; + + snprintf(buf, 32, "psl%i.%i", afu->adapter->adapter_num, afu->slice); + dir = debugfs_create_dir(buf, afu->adapter->debugfs); + afu->debugfs = dir; + + debugfs_create_io_x64("sr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SR_An)); + 
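/* PSL fault state (DSISR/DAR) and error status registers for this slice */
+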
debugfs_create_io_x64("dsisr", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DSISR_An)); + debugfs_create_io_x64("dar", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DAR_An)); + + debugfs_create_io_x64("err_status", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_ErrStat_An)); + + if (afu->adapter->native->sl_ops->debugfs_add_afu_regs) + afu->adapter->native->sl_ops->debugfs_add_afu_regs(afu, dir); +} + +void cxl_debugfs_afu_remove(struct cxl_afu *afu) +{ + debugfs_remove_recursive(afu->debugfs); +} + +void __init cxl_debugfs_init(void) +{ + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return; + + cxl_debugfs = debugfs_create_dir("cxl", NULL); +} + +void cxl_debugfs_exit(void) +{ + debugfs_remove_recursive(cxl_debugfs); +} diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c new file mode 100644 index 000000000..01153b743 --- /dev/null +++ b/drivers/misc/cxl/fault.c @@ -0,0 +1,356 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2014 IBM Corp. + */ + +#include <linux/workqueue.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> +#include <linux/pid.h> +#include <linux/mm.h> +#include <linux/moduleparam.h> + +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "cxl" "." +#include <asm/current.h> +#include <asm/copro.h> +#include <asm/mmu.h> + +#include "cxl.h" +#include "trace.h" + +static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb) +{ + return ((sste->vsid_data == cpu_to_be64(slb->vsid)) && + (sste->esid_data == cpu_to_be64(slb->esid))); +} + +/* + * This finds a free SSTE for the given SLB, or returns NULL if it's already in + * the segment table. + */ +static struct cxl_sste *find_free_sste(struct cxl_context *ctx, + struct copro_slb *slb) +{ + struct cxl_sste *primary, *sste, *ret = NULL; + unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */ + unsigned int entry; + unsigned int hash; + + if (slb->vsid & SLB_VSID_B_1T) + hash = (slb->esid >> SID_SHIFT_1T) & mask; + else /* 256M */ + hash = (slb->esid >> SID_SHIFT) & mask; + + primary = ctx->sstp + (hash << 3); + + for (entry = 0, sste = primary; entry < 8; entry++, sste++) { + if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V)) + ret = sste; + if (sste_matches(sste, slb)) + return NULL; + } + if (ret) + return ret; + + /* Nothing free, select an entry to cast out */ + ret = primary + ctx->sst_lru; + ctx->sst_lru = (ctx->sst_lru + 1) & 0x7; + + return ret; +} + +static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb) +{ + /* mask is the group index, we search primary and secondary here. 
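+ * Each hash group holds 8 SSTEs (primary = sstp + (hash << 3) in
+ * find_free_sste() above); when a group is full, a victim entry is
+ * cast out round-robin via ctx->sst_lru.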
*/
+ struct cxl_sste *sste;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->sste_lock, flags);
+ sste = find_free_sste(ctx, slb);
+ if (!sste)
+ goto out_unlock;
+
+ pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
+ sste - ctx->sstp, slb->vsid, slb->esid);
+ trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);
+
+ sste->vsid_data = cpu_to_be64(slb->vsid);
+ sste->esid_data = cpu_to_be64(slb->esid);
+out_unlock:
+ spin_unlock_irqrestore(&ctx->sste_lock, flags);
+}
+
+static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
+ u64 ea)
+{
+ struct copro_slb slb = {0,0};
+ int rc;
+
+ if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
+ cxl_load_segment(ctx, &slb);
+ }
+
+ return rc;
+}
+
+static void cxl_ack_ae(struct cxl_context *ctx)
+{
+ unsigned long flags;
+
+ cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ ctx->pending_fault = true;
+ ctx->fault_addr = ctx->dar;
+ ctx->fault_dsisr = ctx->dsisr;
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ wake_up_all(&ctx->wq);
+}
+
+static int cxl_handle_segment_miss(struct cxl_context *ctx,
+ struct mm_struct *mm, u64 ea)
+{
+ int rc;
+
+ pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
+ trace_cxl_ste_miss(ctx, ea);
+
+ if ((rc = cxl_fault_segment(ctx, mm, ea)))
+ cxl_ack_ae(ctx);
+ else {
+
+ mb(); /* Order seg table write to TFC MMIO write */
+ cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
+{
+ vm_fault_t flt = 0;
+ int result;
+ unsigned long access, flags, inv_flags = 0;
+
+ /*
+ * Add the fault handling cpu to task mm cpumask so that we
+ * can do a safe lockless page table walk when inserting the
+ * hash page table entry. This function gets called with a
+ * valid mm for user space addresses. Hence using the if (mm)
+ * check is sufficient here.
+ */
+ if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+ /*
+ * We need to make sure we walk the table only after
+ * we update the cpumask. The other side of the barrier
+ * is explained in serialize_against_pte_lookup()
+ */
+ smp_mb();
+ }
+ if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
+ pr_devel("copro_handle_mm_fault failed: %#x\n", result);
+ return result;
+ }
+
+ if (!radix_enabled()) {
+ /*
+ * update_mmu_cache() will not have loaded the hash since current->trap
+ * is not a 0x400 or 0x300, so just call hash_page_mm() here.
+ */
+ access = _PAGE_PRESENT | _PAGE_READ;
+ if (dsisr & CXL_PSL_DSISR_An_S)
+ access |= _PAGE_WRITE;
+
+ if (!mm && (get_region_id(dar) != USER_REGION_ID))
+ access |= _PAGE_PRIVILEGED;
+
+ if (dsisr & DSISR_NOHPTE)
+ inv_flags |= HPTE_NOHPTE_UPDATE;
+
+ local_irq_save(flags);
+ hash_page_mm(mm, dar, access, 0x300, inv_flags);
+ local_irq_restore(flags);
+ }
+ return 0;
+}
+
+static void cxl_handle_page_fault(struct cxl_context *ctx,
+ struct mm_struct *mm,
+ u64 dsisr, u64 dar)
+{
+ trace_cxl_pte_miss(ctx, dsisr, dar);
+
+ if (cxl_handle_mm_fault(mm, dsisr, dar)) {
+ cxl_ack_ae(ctx);
+ } else {
+ pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
+ cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
+ }
+}
+
+/*
+ * Returns the mm_struct corresponding to the context ctx.
+ * If mm_users == 0, the context may be in the process of being closed.
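+ * A successful atomic_inc_not_zero(&mm->mm_users) below pins the mm;
+ * callers must balance it with mmput() once the fault has been
+ * handled.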
+ */ +static struct mm_struct *get_mem_context(struct cxl_context *ctx) +{ + if (ctx->mm == NULL) + return NULL; + + if (!atomic_inc_not_zero(&ctx->mm->mm_users)) + return NULL; + + return ctx->mm; +} + +static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr) +{ + if ((cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS))) + return true; + + return false; +} + +static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr) +{ + if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM)) + return true; + + if (cxl_is_power9()) + return true; + + return false; +} + +void cxl_handle_fault(struct work_struct *fault_work) +{ + struct cxl_context *ctx = + container_of(fault_work, struct cxl_context, fault_work); + u64 dsisr = ctx->dsisr; + u64 dar = ctx->dar; + struct mm_struct *mm = NULL; + + if (cpu_has_feature(CPU_FTR_HVMODE)) { + if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr || + cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar || + cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) { + /* Most likely explanation is harmless - a dedicated + * process has detached and these were cleared by the + * PSL purge, but warn about it just in case + */ + dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n"); + return; + } + } + + /* Early return if the context is being / has been detached */ + if (ctx->status == CLOSED) { + cxl_ack_ae(ctx); + return; + } + + pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. " + "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar); + + if (!ctx->kernel) { + + mm = get_mem_context(ctx); + if (mm == NULL) { + pr_devel("%s: unable to get mm for pe=%d pid=%i\n", + __func__, ctx->pe, pid_nr(ctx->pid)); + cxl_ack_ae(ctx); + return; + } else { + pr_devel("Handling page fault for pe=%d pid=%i\n", + ctx->pe, pid_nr(ctx->pid)); + } + } + + if (cxl_is_segment_miss(ctx, dsisr)) + cxl_handle_segment_miss(ctx, mm, dar); + else if (cxl_is_page_fault(ctx, dsisr)) + cxl_handle_page_fault(ctx, mm, dsisr, dar); + else + WARN(1, "cxl_handle_fault has nothing to handle\n"); + + if (mm) + mmput(mm); +} + +static void cxl_prefault_one(struct cxl_context *ctx, u64 ea) +{ + struct mm_struct *mm; + + mm = get_mem_context(ctx); + if (mm == NULL) { + pr_devel("cxl_prefault_one unable to get mm %i\n", + pid_nr(ctx->pid)); + return; + } + + cxl_fault_segment(ctx, mm, ea); + + mmput(mm); +} + +static u64 next_segment(u64 ea, u64 vsid) +{ + if (vsid & SLB_VSID_B_1T) + ea |= (1ULL << 40) - 1; + else + ea |= (1ULL << 28) - 1; + + return ea + 1; +} + +static void cxl_prefault_vma(struct cxl_context *ctx) +{ + u64 ea, last_esid = 0; + struct copro_slb slb; + struct vm_area_struct *vma; + int rc; + struct mm_struct *mm; + + mm = get_mem_context(ctx); + if (mm == NULL) { + pr_devel("cxl_prefault_vm unable to get mm %i\n", + pid_nr(ctx->pid)); + return; + } + + mmap_read_lock(mm); + for (vma = mm->mmap; vma; vma = vma->vm_next) { + for (ea = vma->vm_start; ea < vma->vm_end; + ea = next_segment(ea, slb.vsid)) { + rc = copro_calculate_slb(mm, ea, &slb); + if (rc) + continue; + + if (last_esid == slb.esid) + continue; + + cxl_load_segment(ctx, &slb); + last_esid = slb.esid; + } + } + mmap_read_unlock(mm); + + mmput(mm); +} + +void cxl_prefault(struct cxl_context *ctx, u64 wed) +{ + switch (ctx->afu->prefault_mode) { + case CXL_PREFAULT_WED: + cxl_prefault_one(ctx, wed); + break; + case CXL_PREFAULT_ALL: + cxl_prefault_vma(ctx); + break; + default: + break; + } +} diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c new file mode 100644 index 
000000000..bd3bd3233
--- /dev/null
+++ b/drivers/misc/cxl/file.c
@@ -0,0 +1,699 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2014 IBM Corp.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/bitmap.h>
+#include <linux/sched/signal.h>
+#include <linux/poll.h>
+#include <linux/pid.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
+#include <asm/cputable.h>
+#include <asm/current.h>
+#include <asm/copro.h>
+
+#include "cxl.h"
+#include "trace.h"
+
+#define CXL_NUM_MINORS 256 /* Total to reserve */
+
+#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
+#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
+#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
+#define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu))
+#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
+#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))
+
+#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)
+
+#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)
+
+static dev_t cxl_dev;
+
+static struct class *cxl_class;
+
+static int __afu_open(struct inode *inode, struct file *file, bool master)
+{
+ struct cxl *adapter;
+ struct cxl_afu *afu;
+ struct cxl_context *ctx;
+ int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
+ int slice = CXL_DEVT_AFU(inode->i_rdev);
+ int rc = -ENODEV;
+
+ pr_devel("afu_open afu%i.%i\n", slice, adapter_num);
+
+ if (!(adapter = get_cxl_adapter(adapter_num)))
+ return -ENODEV;
+
+ if (slice > adapter->slices)
+ goto err_put_adapter;
+
+ spin_lock(&adapter->afu_list_lock);
+ if (!(afu = adapter->afu[slice])) {
+ spin_unlock(&adapter->afu_list_lock);
+ goto err_put_adapter;
+ }
+
+ /*
+ * taking a ref to the afu so that it doesn't go away
+ * for the rest of the function. This ref is released before
+ * we return.
+ */
+ cxl_afu_get(afu);
+ spin_unlock(&adapter->afu_list_lock);
+
+ if (!afu->current_mode)
+ goto err_put_afu;
+
+ if (!cxl_ops->link_ok(adapter, afu)) {
+ rc = -EIO;
+ goto err_put_afu;
+ }
+
+ if (!(ctx = cxl_context_alloc())) {
+ rc = -ENOMEM;
+ goto err_put_afu;
+ }
+
+ rc = cxl_context_init(ctx, afu, master);
+ if (rc)
+ goto err_put_afu;
+
+ cxl_context_set_mapping(ctx, inode->i_mapping);
+
+ pr_devel("afu_open pe: %i\n", ctx->pe);
+ file->private_data = ctx;
+
+ /* indicate success */
+ rc = 0;
+
+err_put_afu:
+ /* release the ref taken earlier */
+ cxl_afu_put(afu);
+err_put_adapter:
+ put_device(&adapter->dev);
+ return rc;
+}
+
+int afu_open(struct inode *inode, struct file *file)
+{
+ return __afu_open(inode, file, false);
+}
+
+static int afu_master_open(struct inode *inode, struct file *file)
+{
+ return __afu_open(inode, file, true);
+}
+
+int afu_release(struct inode *inode, struct file *file)
+{
+ struct cxl_context *ctx = file->private_data;
+
+ pr_devel("%s: closing cxl file descriptor. pe: %i\n",
+ __func__, ctx->pe);
+ cxl_context_detach(ctx);
+
+
+ /*
+ * Delete the context's mapping pointer, unless it's created by the
+ * kernel API, in which case leave it so it can be freed by reclaim_ctx()
+ */
+ if (!ctx->kernelapi) {
+ mutex_lock(&ctx->mapping_lock);
+ ctx->mapping = NULL;
+ mutex_unlock(&ctx->mapping_lock);
+ }
+
+ /*
+ * At this point all bottom halves have finished and we should be
+ * getting no more IRQs from the hardware for this context.
Once it's + * removed from the IDR (and RCU synchronised) it's safe to free the + * sstp and context. + */ + cxl_context_free(ctx); + + return 0; +} + +static long afu_ioctl_start_work(struct cxl_context *ctx, + struct cxl_ioctl_start_work __user *uwork) +{ + struct cxl_ioctl_start_work work; + u64 amr = 0; + int rc; + + pr_devel("%s: pe: %i\n", __func__, ctx->pe); + + /* Do this outside the status_mutex to avoid a circular dependency with + * the locking in cxl_mmap_fault() */ + if (copy_from_user(&work, uwork, sizeof(work))) + return -EFAULT; + + mutex_lock(&ctx->status_mutex); + if (ctx->status != OPENED) { + rc = -EIO; + goto out; + } + + /* + * if any of the reserved fields are set or any of the unused + * flags are set it's invalid + */ + if (work.reserved1 || work.reserved2 || work.reserved3 || + work.reserved4 || work.reserved5 || + (work.flags & ~CXL_START_WORK_ALL)) { + rc = -EINVAL; + goto out; + } + + if (!(work.flags & CXL_START_WORK_NUM_IRQS)) + work.num_interrupts = ctx->afu->pp_irqs; + else if ((work.num_interrupts < ctx->afu->pp_irqs) || + (work.num_interrupts > ctx->afu->irqs_max)) { + rc = -EINVAL; + goto out; + } + + if ((rc = afu_register_irqs(ctx, work.num_interrupts))) + goto out; + + if (work.flags & CXL_START_WORK_AMR) + amr = work.amr & mfspr(SPRN_UAMOR); + + if (work.flags & CXL_START_WORK_TID) + ctx->assign_tidr = true; + + ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF); + + /* + * Increment the mapped context count for adapter. This also checks + * if adapter_context_lock is taken. + */ + rc = cxl_adapter_context_get(ctx->afu->adapter); + if (rc) { + afu_release_irqs(ctx, ctx); + goto out; + } + + /* + * We grab the PID here and not in the file open to allow for the case + * where a process (master, some daemon, etc) has opened the chardev on + * behalf of another process, so the AFU's mm gets bound to the process + * that performs this ioctl and not the process that opened the file. + * Also we grab the PID of the group leader so that if the task that + * has performed the attach operation exits the mm context of the + * process is still accessible. + */ + ctx->pid = get_task_pid(current, PIDTYPE_PID); + + /* acquire a reference to the task's mm */ + ctx->mm = get_task_mm(current); + + /* ensure this mm_struct can't be freed */ + cxl_context_mm_count_get(ctx); + + if (ctx->mm) { + /* decrement the use count from above */ + mmput(ctx->mm); + /* make TLBIs for this context global */ + mm_context_add_copro(ctx->mm); + } + + /* + * Increment driver use count. Enables global TLBIs for hash + * and callbacks to handle the segment table + */ + cxl_ctx_get(); + + /* + * A barrier is needed to make sure all TLBIs are global + * before we attach and the context starts being used by the + * adapter. + * + * Needed after mm_context_add_copro() for radix and + * cxl_ctx_get() for hash/p8. + * + * The barrier should really be mb(), since it involves a + * device. However, it's only useful when we have local + * vs. global TLBIs, i.e SMP=y. So keep smp_mb(). 
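+ *
+ * For reference, an illustrative user-space sequence that reaches
+ * this point (the ioctl and field names are from the cxl uapi; the
+ * device path and wed value are made-up examples):
+ *
+ *    int fd = open("/dev/cxl/afu0.0s", O_RDWR);
+ *    struct cxl_ioctl_start_work work = { 0 };
+ *    work.work_element_descriptor = wed;
+ *    ioctl(fd, CXL_IOCTL_START_WORK, &work);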
+ */
+ smp_mb();
+
+ trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
+
+ if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
+ amr))) {
+ afu_release_irqs(ctx, ctx);
+ cxl_adapter_context_put(ctx->afu->adapter);
+ put_pid(ctx->pid);
+ ctx->pid = NULL;
+ cxl_ctx_put();
+ cxl_context_mm_count_put(ctx);
+ if (ctx->mm)
+ mm_context_remove_copro(ctx->mm);
+ goto out;
+ }
+
+ rc = 0;
+ if (work.flags & CXL_START_WORK_TID) {
+ work.tid = ctx->tidr;
+ if (copy_to_user(uwork, &work, sizeof(work)))
+ rc = -EFAULT;
+ }
+
+ ctx->status = STARTED;
+
+out:
+ mutex_unlock(&ctx->status_mutex);
+ return rc;
+}
+
+static long afu_ioctl_process_element(struct cxl_context *ctx,
+ int __user *upe)
+{
+ pr_devel("%s: pe: %i\n", __func__, ctx->pe);
+
+ if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long afu_ioctl_get_afu_id(struct cxl_context *ctx,
+ struct cxl_afu_id __user *upafuid)
+{
+ struct cxl_afu_id afuid = { 0 };
+
+ afuid.card_id = ctx->afu->adapter->adapter_num;
+ afuid.afu_offset = ctx->afu->slice;
+ afuid.afu_mode = ctx->afu->current_mode;
+
+ /* set the flag bit in case the afu is a slave */
+ if (ctx->afu->current_mode == CXL_MODE_DIRECTED && !ctx->master)
+ afuid.flags |= CXL_AFUID_FLAG_SLAVE;
+
+ if (copy_to_user(upafuid, &afuid, sizeof(afuid)))
+ return -EFAULT;
+
+ return 0;
+}
+
+long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct cxl_context *ctx = file->private_data;
+
+ if (ctx->status == CLOSED)
+ return -EIO;
+
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
+ return -EIO;
+
+ pr_devel("afu_ioctl\n");
+ switch (cmd) {
+ case CXL_IOCTL_START_WORK:
+ return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
+ case CXL_IOCTL_GET_PROCESS_ELEMENT:
+ return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
+ case CXL_IOCTL_GET_AFU_ID:
+ return afu_ioctl_get_afu_id(ctx, (struct cxl_afu_id __user *)
+ arg);
+ }
+ return -EINVAL;
+}
+
+static long afu_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return afu_ioctl(file, cmd, arg);
+}
+
+int afu_mmap(struct file *file, struct vm_area_struct *vm)
+{
+ struct cxl_context *ctx = file->private_data;
+
+ /* AFU must be started before we can MMIO */
+ if (ctx->status != STARTED)
+ return -EIO;
+
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
+ return -EIO;
+
+ return cxl_context_iomap(ctx, vm);
+}
+
+static inline bool ctx_event_pending(struct cxl_context *ctx)
+{
+ if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err)
+ return true;
+
+ if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events))
+ return true;
+
+ return false;
+}
+
+__poll_t afu_poll(struct file *file, struct poll_table_struct *poll)
+{
+ struct cxl_context *ctx = file->private_data;
+ __poll_t mask = 0;
+ unsigned long flags;
+
+
+ poll_wait(file, &ctx->wq, poll);
+
+ pr_devel("afu_poll wait done pe: %i\n", ctx->pe);
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ if (ctx_event_pending(ctx))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ else if (ctx->status == CLOSED)
+ /* Only error on closed when there are no further events pending
+ */
+ mask |= EPOLLERR;
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);
+
+ return mask;
+}
+
+static ssize_t afu_driver_event_copy(struct cxl_context *ctx,
+ char __user *buf,
+ struct cxl_event *event,
+ struct cxl_event_afu_driver_reserved *pl)
+{
+ /* Check event */
+ if (!pl) {
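/* no payload returned by fetch_event(): report it to the driver and fail */
+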
ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL); + return -EFAULT; + } + + /* Check event size */ + event->header.size += pl->data_size; + if (event->header.size > CXL_READ_MIN_SIZE) { + ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL); + return -EFAULT; + } + + /* Copy event header */ + if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) { + ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT); + return -EFAULT; + } + + /* Copy event data */ + buf += sizeof(struct cxl_event_header); + if (copy_to_user(buf, &pl->data, pl->data_size)) { + ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT); + return -EFAULT; + } + + ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */ + return event->header.size; +} + +ssize_t afu_read(struct file *file, char __user *buf, size_t count, + loff_t *off) +{ + struct cxl_context *ctx = file->private_data; + struct cxl_event_afu_driver_reserved *pl = NULL; + struct cxl_event event; + unsigned long flags; + int rc; + DEFINE_WAIT(wait); + + if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) + return -EIO; + + if (count < CXL_READ_MIN_SIZE) + return -EINVAL; + + spin_lock_irqsave(&ctx->lock, flags); + + for (;;) { + prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE); + if (ctx_event_pending(ctx) || (ctx->status == CLOSED)) + break; + + if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { + rc = -EIO; + goto out; + } + + if (file->f_flags & O_NONBLOCK) { + rc = -EAGAIN; + goto out; + } + + if (signal_pending(current)) { + rc = -ERESTARTSYS; + goto out; + } + + spin_unlock_irqrestore(&ctx->lock, flags); + pr_devel("afu_read going to sleep...\n"); + schedule(); + pr_devel("afu_read woken up\n"); + spin_lock_irqsave(&ctx->lock, flags); + } + + finish_wait(&ctx->wq, &wait); + + memset(&event, 0, sizeof(event)); + event.header.process_element = ctx->pe; + event.header.size = sizeof(struct cxl_event_header); + if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) { + pr_devel("afu_read delivering AFU driver specific event\n"); + pl = ctx->afu_driver_ops->fetch_event(ctx); + atomic_dec(&ctx->afu_driver_events); + event.header.type = CXL_EVENT_AFU_DRIVER; + } else if (ctx->pending_irq) { + pr_devel("afu_read delivering AFU interrupt\n"); + event.header.size += sizeof(struct cxl_event_afu_interrupt); + event.header.type = CXL_EVENT_AFU_INTERRUPT; + event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1; + clear_bit(event.irq.irq - 1, ctx->irq_bitmap); + if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count)) + ctx->pending_irq = false; + } else if (ctx->pending_fault) { + pr_devel("afu_read delivering data storage fault\n"); + event.header.size += sizeof(struct cxl_event_data_storage); + event.header.type = CXL_EVENT_DATA_STORAGE; + event.fault.addr = ctx->fault_addr; + event.fault.dsisr = ctx->fault_dsisr; + ctx->pending_fault = false; + } else if (ctx->pending_afu_err) { + pr_devel("afu_read delivering afu error\n"); + event.header.size += sizeof(struct cxl_event_afu_error); + event.header.type = CXL_EVENT_AFU_ERROR; + event.afu_error.error = ctx->afu_err; + ctx->pending_afu_err = false; + } else if (ctx->status == CLOSED) { + pr_devel("afu_read fatal error\n"); + spin_unlock_irqrestore(&ctx->lock, flags); + return -EIO; + } else + WARN(1, "afu_read must be buggy\n"); + + spin_unlock_irqrestore(&ctx->lock, flags); + + if (event.header.type == CXL_EVENT_AFU_DRIVER) + return afu_driver_event_copy(ctx, buf, &event, pl); + + if (copy_to_user(buf, &event, event.header.size)) + return -EFAULT; + return 
event.header.size; + +out: + finish_wait(&ctx->wq, &wait); + spin_unlock_irqrestore(&ctx->lock, flags); + return rc; +} + +/* + * Note: if this is updated, we need to update api.c to patch the new ones in + * too + */ +const struct file_operations afu_fops = { + .owner = THIS_MODULE, + .open = afu_open, + .poll = afu_poll, + .read = afu_read, + .release = afu_release, + .unlocked_ioctl = afu_ioctl, + .compat_ioctl = afu_compat_ioctl, + .mmap = afu_mmap, +}; + +static const struct file_operations afu_master_fops = { + .owner = THIS_MODULE, + .open = afu_master_open, + .poll = afu_poll, + .read = afu_read, + .release = afu_release, + .unlocked_ioctl = afu_ioctl, + .compat_ioctl = afu_compat_ioctl, + .mmap = afu_mmap, +}; + + +static char *cxl_devnode(struct device *dev, umode_t *mode) +{ + if (cpu_has_feature(CPU_FTR_HVMODE) && + CXL_DEVT_IS_CARD(dev->devt)) { + /* + * These minor numbers will eventually be used to program the + * PSL and AFUs once we have dynamic reprogramming support + */ + return NULL; + } + return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev)); +} + +extern struct class *cxl_class; + +static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev, + struct device **chardev, char *postfix, char *desc, + const struct file_operations *fops) +{ + struct device *dev; + int rc; + + cdev_init(cdev, fops); + if ((rc = cdev_add(cdev, devt, 1))) { + dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc); + return rc; + } + + dev = device_create(cxl_class, &afu->dev, devt, afu, + "afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix); + if (IS_ERR(dev)) { + dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc); + rc = PTR_ERR(dev); + goto err; + } + + *chardev = dev; + + return 0; +err: + cdev_del(cdev); + return rc; +} + +int cxl_chardev_d_afu_add(struct cxl_afu *afu) +{ + return cxl_add_chardev(afu, CXL_AFU_MKDEV_D(afu), &afu->afu_cdev_d, + &afu->chardev_d, "d", "dedicated", + &afu_master_fops); /* Uses master fops */ +} + +int cxl_chardev_m_afu_add(struct cxl_afu *afu) +{ + return cxl_add_chardev(afu, CXL_AFU_MKDEV_M(afu), &afu->afu_cdev_m, + &afu->chardev_m, "m", "master", + &afu_master_fops); +} + +int cxl_chardev_s_afu_add(struct cxl_afu *afu) +{ + return cxl_add_chardev(afu, CXL_AFU_MKDEV_S(afu), &afu->afu_cdev_s, + &afu->chardev_s, "s", "shared", + &afu_fops); +} + +void cxl_chardev_afu_remove(struct cxl_afu *afu) +{ + if (afu->chardev_d) { + cdev_del(&afu->afu_cdev_d); + device_unregister(afu->chardev_d); + afu->chardev_d = NULL; + } + if (afu->chardev_m) { + cdev_del(&afu->afu_cdev_m); + device_unregister(afu->chardev_m); + afu->chardev_m = NULL; + } + if (afu->chardev_s) { + cdev_del(&afu->afu_cdev_s); + device_unregister(afu->chardev_s); + afu->chardev_s = NULL; + } +} + +int cxl_register_afu(struct cxl_afu *afu) +{ + afu->dev.class = cxl_class; + + return device_register(&afu->dev); +} + +int cxl_register_adapter(struct cxl *adapter) +{ + adapter->dev.class = cxl_class; + + /* + * Future: When we support dynamically reprogramming the PSL & AFU we + * will expose the interface to do that via a chardev: + * adapter->dev.devt = CXL_CARD_MKDEV(adapter); + */ + + return device_register(&adapter->dev); +} + +dev_t cxl_get_dev(void) +{ + return cxl_dev; +} + +int __init cxl_file_init(void) +{ + int rc; + + /* + * If these change we really need to update API. Either change some + * flags or update API version number CXL_API_VERSION. 
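+ *
+ * The BUILD_BUG_ON() checks below pin the sizes of the structures
+ * shared with user space, so an accidental ABI change fails at
+ * compile time.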
+ */ + BUILD_BUG_ON(CXL_API_VERSION != 3); + BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64); + BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8); + BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8); + BUILD_BUG_ON(sizeof(struct cxl_event_data_storage) != 32); + BUILD_BUG_ON(sizeof(struct cxl_event_afu_error) != 16); + + if ((rc = alloc_chrdev_region(&cxl_dev, 0, CXL_NUM_MINORS, "cxl"))) { + pr_err("Unable to allocate CXL major number: %i\n", rc); + return rc; + } + + pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev)); + + cxl_class = class_create(THIS_MODULE, "cxl"); + if (IS_ERR(cxl_class)) { + pr_err("Unable to create CXL class\n"); + rc = PTR_ERR(cxl_class); + goto err; + } + cxl_class->devnode = cxl_devnode; + + return 0; + +err: + unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS); + return rc; +} + +void cxl_file_exit(void) +{ + unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS); + class_destroy(cxl_class); +} diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c new file mode 100644 index 000000000..5b93ff51d --- /dev/null +++ b/drivers/misc/cxl/flash.c @@ -0,0 +1,537 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/semaphore.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <asm/rtas.h> + +#include "cxl.h" +#include "hcalls.h" + +#define DOWNLOAD_IMAGE 1 +#define VALIDATE_IMAGE 2 + +struct ai_header { + u16 version; + u8 reserved0[6]; + u16 vendor; + u16 device; + u16 subsystem_vendor; + u16 subsystem; + u64 image_offset; + u64 image_length; + u8 reserved1[96]; +}; + +static struct semaphore sem; +static unsigned long *buffer[CXL_AI_MAX_ENTRIES]; +static struct sg_list *le; +static u64 continue_token; +static unsigned int transfer; + +struct update_props_workarea { + __be32 phandle; + __be32 state; + __be64 reserved; + __be32 nprops; +} __packed; + +struct update_nodes_workarea { + __be32 state; + __be64 unit_address; + __be32 reserved; +} __packed; + +#define DEVICE_SCOPE 3 +#define NODE_ACTION_MASK 0xff000000 +#define NODE_COUNT_MASK 0x00ffffff +#define OPCODE_DELETE 0x01000000 +#define OPCODE_UPDATE 0x02000000 +#define OPCODE_ADD 0x03000000 + +static int rcall(int token, char *buf, s32 scope) +{ + int rc; + + spin_lock(&rtas_data_buf_lock); + + memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE); + rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope); + memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE); + + spin_unlock(&rtas_data_buf_lock); + return rc; +} + +static int update_property(struct device_node *dn, const char *name, + u32 vd, char *value) +{ + struct property *new_prop; + u32 *val; + int rc; + + new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL); + if (!new_prop) + return -ENOMEM; + + new_prop->name = kstrdup(name, GFP_KERNEL); + if (!new_prop->name) { + kfree(new_prop); + return -ENOMEM; + } + + new_prop->length = vd; + new_prop->value = kzalloc(new_prop->length, GFP_KERNEL); + if (!new_prop->value) { + kfree(new_prop->name); + kfree(new_prop); + return -ENOMEM; + } + memcpy(new_prop->value, value, vd); + + val = (u32 *)new_prop->value; + rc = cxl_update_properties(dn, new_prop); + pr_devel("%pOFn: update property (%s, length: %i, value: %#x)\n", + dn, name, vd, be32_to_cpu(*val)); + + if (rc) { + kfree(new_prop->name); + kfree(new_prop->value); + kfree(new_prop); + } + return rc; +} + +static int update_node(__be32 phandle, s32 scope) +{ + struct update_props_workarea *upwa; + struct device_node *dn; + int i, rc, ret; + char *prop_data; + char *buf; + int token; 
+ u32 nprops; + u32 vd; + + token = rtas_token("ibm,update-properties"); + if (token == RTAS_UNKNOWN_SERVICE) + return -EINVAL; + + buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + dn = of_find_node_by_phandle(be32_to_cpu(phandle)); + if (!dn) { + kfree(buf); + return -ENOENT; + } + + upwa = (struct update_props_workarea *)&buf[0]; + upwa->phandle = phandle; + do { + rc = rcall(token, buf, scope); + if (rc < 0) + break; + + prop_data = buf + sizeof(*upwa); + nprops = be32_to_cpu(upwa->nprops); + + if (*prop_data == 0) { + prop_data++; + vd = be32_to_cpu(*(__be32 *)prop_data); + prop_data += vd + sizeof(vd); + nprops--; + } + + for (i = 0; i < nprops; i++) { + char *prop_name; + + prop_name = prop_data; + prop_data += strlen(prop_name) + 1; + vd = be32_to_cpu(*(__be32 *)prop_data); + prop_data += sizeof(vd); + + if ((vd != 0x00000000) && (vd != 0x80000000)) { + ret = update_property(dn, prop_name, vd, + prop_data); + if (ret) + pr_err("cxl: Could not update property %s - %i\n", + prop_name, ret); + + prop_data += vd; + } + } + } while (rc == 1); + + of_node_put(dn); + kfree(buf); + return rc; +} + +static int update_devicetree(struct cxl *adapter, s32 scope) +{ + struct update_nodes_workarea *unwa; + u32 action, node_count; + int token, rc, i; + __be32 *data, phandle; + char *buf; + + token = rtas_token("ibm,update-nodes"); + if (token == RTAS_UNKNOWN_SERVICE) + return -EINVAL; + + buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + unwa = (struct update_nodes_workarea *)&buf[0]; + unwa->unit_address = cpu_to_be64(adapter->guest->handle); + do { + rc = rcall(token, buf, scope); + if (rc && rc != 1) + break; + + data = (__be32 *)buf + 4; + while (be32_to_cpu(*data) & NODE_ACTION_MASK) { + action = be32_to_cpu(*data) & NODE_ACTION_MASK; + node_count = be32_to_cpu(*data) & NODE_COUNT_MASK; + pr_devel("device reconfiguration - action: %#x, nodes: %#x\n", + action, node_count); + data++; + + for (i = 0; i < node_count; i++) { + phandle = *data++; + + switch (action) { + case OPCODE_DELETE: + /* nothing to do */ + break; + case OPCODE_UPDATE: + update_node(phandle, scope); + break; + case OPCODE_ADD: + /* nothing to do, just move pointer */ + data++; + break; + } + } + } + } while (rc == 1); + + kfree(buf); + return 0; +} + +static int handle_image(struct cxl *adapter, int operation, + long (*fct)(u64, u64, u64, u64 *), + struct cxl_adapter_image *ai) +{ + size_t mod, s_copy, len_chunk = 0; + struct ai_header *header = NULL; + unsigned int entries = 0, i; + void *dest, *from; + int rc = 0, need_header; + + /* base adapter image header */ + need_header = (ai->flags & CXL_AI_NEED_HEADER); + if (need_header) { + header = kzalloc(sizeof(struct ai_header), GFP_KERNEL); + if (!header) + return -ENOMEM; + header->version = cpu_to_be16(1); + header->vendor = cpu_to_be16(adapter->guest->vendor); + header->device = cpu_to_be16(adapter->guest->device); + header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor); + header->subsystem = cpu_to_be16(adapter->guest->subsystem); + header->image_offset = cpu_to_be64(CXL_AI_HEADER_SIZE); + header->image_length = cpu_to_be64(ai->len_image); + } + + /* number of entries in the list */ + len_chunk = ai->len_data; + if (need_header) + len_chunk += CXL_AI_HEADER_SIZE; + + entries = len_chunk / CXL_AI_BUFFER_SIZE; + mod = len_chunk % CXL_AI_BUFFER_SIZE; + if (mod) + entries++; + + if (entries > CXL_AI_MAX_ENTRIES) { + rc = -EINVAL; + goto err; + } + + /* < -- MAX_CHUNK_SIZE = 4096 * 256 = 1048576 
bytes --> + * chunk 0 ---------------------------------------------------- + * | header | data | + * ---------------------------------------------------- + * chunk 1 ---------------------------------------------------- + * | data | + * ---------------------------------------------------- + * .... + * chunk n ---------------------------------------------------- + * | data | + * ---------------------------------------------------- + */ + from = (void *) ai->data; + for (i = 0; i < entries; i++) { + dest = buffer[i]; + s_copy = CXL_AI_BUFFER_SIZE; + + if ((need_header) && (i == 0)) { + /* add adapter image header */ + memcpy(buffer[i], header, sizeof(struct ai_header)); + s_copy = CXL_AI_BUFFER_SIZE - CXL_AI_HEADER_SIZE; + dest += CXL_AI_HEADER_SIZE; /* image offset */ + } + if ((i == (entries - 1)) && mod) + s_copy = mod; + + /* copy data */ + if (copy_from_user(dest, from, s_copy)) + goto err; + + /* fill in the list */ + le[i].phys_addr = cpu_to_be64(virt_to_phys(buffer[i])); + le[i].len = cpu_to_be64(CXL_AI_BUFFER_SIZE); + if ((i == (entries - 1)) && mod) + le[i].len = cpu_to_be64(mod); + from += s_copy; + } + pr_devel("%s (op: %i, need header: %i, entries: %i, token: %#llx)\n", + __func__, operation, need_header, entries, continue_token); + + /* + * download/validate the adapter image to the coherent + * platform facility + */ + rc = fct(adapter->guest->handle, virt_to_phys(le), entries, + &continue_token); + if (rc == 0) /* success of download/validation operation */ + continue_token = 0; + +err: + kfree(header); + + return rc; +} + +static int transfer_image(struct cxl *adapter, int operation, + struct cxl_adapter_image *ai) +{ + int rc = 0; + int afu; + + switch (operation) { + case DOWNLOAD_IMAGE: + rc = handle_image(adapter, operation, + &cxl_h_download_adapter_image, ai); + if (rc < 0) { + pr_devel("resetting adapter\n"); + cxl_h_reset_adapter(adapter->guest->handle); + } + return rc; + + case VALIDATE_IMAGE: + rc = handle_image(adapter, operation, + &cxl_h_validate_adapter_image, ai); + if (rc < 0) { + pr_devel("resetting adapter\n"); + cxl_h_reset_adapter(adapter->guest->handle); + return rc; + } + if (rc == 0) { + pr_devel("remove current afu\n"); + for (afu = 0; afu < adapter->slices; afu++) + cxl_guest_remove_afu(adapter->afu[afu]); + + pr_devel("resetting adapter\n"); + cxl_h_reset_adapter(adapter->guest->handle); + + /* The entire image has now been + * downloaded and the validation has + * been successfully performed. 
+ * After that, the partition should call + * ibm,update-nodes and + * ibm,update-properties to receive the + * current configuration + */ + rc = update_devicetree(adapter, DEVICE_SCOPE); + transfer = 1; + } + return rc; + } + + return -EINVAL; +} + +static long ioctl_transfer_image(struct cxl *adapter, int operation, + struct cxl_adapter_image __user *uai) +{ + struct cxl_adapter_image ai; + + pr_devel("%s\n", __func__); + + if (copy_from_user(&ai, uai, sizeof(struct cxl_adapter_image))) + return -EFAULT; + + /* + * Make sure reserved fields and bits are set to 0 + */ + if (ai.reserved1 || ai.reserved2 || ai.reserved3 || ai.reserved4 || + (ai.flags & ~CXL_AI_ALL)) + return -EINVAL; + + return transfer_image(adapter, operation, &ai); +} + +static int device_open(struct inode *inode, struct file *file) +{ + int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev); + struct cxl *adapter; + int rc = 0, i; + + pr_devel("in %s\n", __func__); + + BUG_ON(sizeof(struct ai_header) != CXL_AI_HEADER_SIZE); + + /* Allows one process to open the device by using a semaphore */ + if (down_interruptible(&sem) != 0) + return -EPERM; + + if (!(adapter = get_cxl_adapter(adapter_num))) { + rc = -ENODEV; + goto err_unlock; + } + + file->private_data = adapter; + continue_token = 0; + transfer = 0; + + for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) + buffer[i] = NULL; + + /* aligned buffer containing list entries which describes up to + * 1 megabyte of data (256 entries of 4096 bytes each) + * Logical real address of buffer 0 - Buffer 0 length in bytes + * Logical real address of buffer 1 - Buffer 1 length in bytes + * Logical real address of buffer 2 - Buffer 2 length in bytes + * .... + * .... + * Logical real address of buffer N - Buffer N length in bytes + */ + le = (struct sg_list *)get_zeroed_page(GFP_KERNEL); + if (!le) { + rc = -ENOMEM; + goto err; + } + + for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) { + buffer[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL); + if (!buffer[i]) { + rc = -ENOMEM; + goto err1; + } + } + + return 0; + +err1: + for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) { + if (buffer[i]) + free_page((unsigned long) buffer[i]); + } + + if (le) + free_page((unsigned long) le); +err: + put_device(&adapter->dev); +err_unlock: + up(&sem); + + return rc; +} + +static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct cxl *adapter = file->private_data; + + pr_devel("in %s\n", __func__); + + if (cmd == CXL_IOCTL_DOWNLOAD_IMAGE) + return ioctl_transfer_image(adapter, + DOWNLOAD_IMAGE, + (struct cxl_adapter_image __user *)arg); + else if (cmd == CXL_IOCTL_VALIDATE_IMAGE) + return ioctl_transfer_image(adapter, + VALIDATE_IMAGE, + (struct cxl_adapter_image __user *)arg); + else + return -EINVAL; +} + +static int device_close(struct inode *inode, struct file *file) +{ + struct cxl *adapter = file->private_data; + int i; + + pr_devel("in %s\n", __func__); + + for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) { + if (buffer[i]) + free_page((unsigned long) buffer[i]); + } + + if (le) + free_page((unsigned long) le); + + up(&sem); + put_device(&adapter->dev); + continue_token = 0; + + /* reload the module */ + if (transfer) + cxl_guest_reload_module(adapter); + else { + pr_devel("resetting adapter\n"); + cxl_h_reset_adapter(adapter->guest->handle); + } + + transfer = 0; + return 0; +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = device_open, + .unlocked_ioctl = device_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .release = device_close, +}; + +void 
cxl_guest_remove_chardev(struct cxl *adapter) +{ + cdev_del(&adapter->guest->cdev); +} + +int cxl_guest_add_chardev(struct cxl *adapter) +{ + dev_t devt; + int rc; + + devt = MKDEV(MAJOR(cxl_get_dev()), CXL_CARD_MINOR(adapter)); + cdev_init(&adapter->guest->cdev, &fops); + if ((rc = cdev_add(&adapter->guest->cdev, devt, 1))) { + dev_err(&adapter->dev, + "Unable to add chardev on adapter (card%i): %i\n", + adapter->adapter_num, rc); + goto err; + } + adapter->dev.devt = devt; + sema_init(&sem, 1); +err: + return rc; +} diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c new file mode 100644 index 000000000..633437682 --- /dev/null +++ b/drivers/misc/cxl/guest.c @@ -0,0 +1,1202 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2015 IBM Corp. + */ + +#include <linux/spinlock.h> +#include <linux/uaccess.h> +#include <linux/delay.h> + +#include "cxl.h" +#include "hcalls.h" +#include "trace.h" + +#define CXL_ERROR_DETECTED_EVENT 1 +#define CXL_SLOT_RESET_EVENT 2 +#define CXL_RESUME_EVENT 3 + +static void pci_error_handlers(struct cxl_afu *afu, + int bus_error_event, + pci_channel_state_t state) +{ + struct pci_dev *afu_dev; + + if (afu->phb == NULL) + return; + + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { + if (!afu_dev->driver) + continue; + + switch (bus_error_event) { + case CXL_ERROR_DETECTED_EVENT: + afu_dev->error_state = state; + + if (afu_dev->driver->err_handler && + afu_dev->driver->err_handler->error_detected) + afu_dev->driver->err_handler->error_detected(afu_dev, state); + break; + case CXL_SLOT_RESET_EVENT: + afu_dev->error_state = state; + + if (afu_dev->driver->err_handler && + afu_dev->driver->err_handler->slot_reset) + afu_dev->driver->err_handler->slot_reset(afu_dev); + break; + case CXL_RESUME_EVENT: + if (afu_dev->driver->err_handler && + afu_dev->driver->err_handler->resume) + afu_dev->driver->err_handler->resume(afu_dev); + break; + } + } +} + +static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, + u64 errstat) +{ + pr_devel("in %s\n", __func__); + dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat); + + return cxl_ops->ack_irq(ctx, 0, errstat); +} + +static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu, + void *buf, size_t len) +{ + unsigned int entries, mod; + unsigned long **vpd_buf = NULL; + struct sg_list *le; + int rc = 0, i, tocopy; + u64 out = 0; + + if (buf == NULL) + return -EINVAL; + + /* number of entries in the list */ + entries = len / SG_BUFFER_SIZE; + mod = len % SG_BUFFER_SIZE; + if (mod) + entries++; + + if (entries > SG_MAX_ENTRIES) { + entries = SG_MAX_ENTRIES; + len = SG_MAX_ENTRIES * SG_BUFFER_SIZE; + mod = 0; + } + + vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL); + if (!vpd_buf) + return -ENOMEM; + + le = (struct sg_list *)get_zeroed_page(GFP_KERNEL); + if (!le) { + rc = -ENOMEM; + goto err1; + } + + for (i = 0; i < entries; i++) { + vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL); + if (!vpd_buf[i]) { + rc = -ENOMEM; + goto err2; + } + le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i])); + le[i].len = cpu_to_be64(SG_BUFFER_SIZE); + if ((i == (entries - 1)) && mod) + le[i].len = cpu_to_be64(mod); + } + + if (adapter) + rc = cxl_h_collect_vpd_adapter(adapter->guest->handle, + virt_to_phys(le), entries, &out); + else + rc = cxl_h_collect_vpd(afu->guest->handle, 0, + virt_to_phys(le), entries, &out); + pr_devel("length of available (entries: %i), vpd: %#llx\n", + entries, out); + + if (!rc) { + /* + * 
hcall returns in 'out' the size of available VPDs. + * It fills the buffer with as much data as possible. + */ + if (out < len) + len = out; + rc = len; + if (out) { + for (i = 0; i < entries; i++) { + if (len < SG_BUFFER_SIZE) + tocopy = len; + else + tocopy = SG_BUFFER_SIZE; + memcpy(buf, vpd_buf[i], tocopy); + buf += tocopy; + len -= tocopy; + } + } + } +err2: + for (i = 0; i < entries; i++) { + if (vpd_buf[i]) + free_page((unsigned long) vpd_buf[i]); + } + free_page((unsigned long) le); +err1: + kfree(vpd_buf); + return rc; +} + +static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info) +{ + return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info); +} + +static irqreturn_t guest_psl_irq(int irq, void *data) +{ + struct cxl_context *ctx = data; + struct cxl_irq_info irq_info; + int rc; + + pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq); + rc = guest_get_irq_info(ctx, &irq_info); + if (rc) { + WARN(1, "Unable to get IRQ info: %i\n", rc); + return IRQ_HANDLED; + } + + rc = cxl_irq_psl8(irq, ctx, &irq_info); + return rc; +} + +static int afu_read_error_state(struct cxl_afu *afu, int *state_out) +{ + u64 state; + int rc = 0; + + if (!afu) + return -EIO; + + rc = cxl_h_read_error_state(afu->guest->handle, &state); + if (!rc) { + WARN_ON(state != H_STATE_NORMAL && + state != H_STATE_DISABLE && + state != H_STATE_TEMP_UNAVAILABLE && + state != H_STATE_PERM_UNAVAILABLE); + *state_out = state & 0xffffffff; + } + return rc; +} + +static irqreturn_t guest_slice_irq_err(int irq, void *data) +{ + struct cxl_afu *afu = data; + int rc; + u64 serr, afu_error, dsisr; + + rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr); + if (rc) { + dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc); + return IRQ_HANDLED; + } + afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An); + dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); + cxl_afu_decode_psl_serr(afu, serr); + dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error); + dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr); + + rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr); + if (rc) + dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n", + rc); + + return IRQ_HANDLED; +} + + +static int irq_alloc_range(struct cxl *adapter, int len, int *irq) +{ + int i, n; + struct irq_avail *cur; + + for (i = 0; i < adapter->guest->irq_nranges; i++) { + cur = &adapter->guest->irq_avail[i]; + n = bitmap_find_next_zero_area(cur->bitmap, cur->range, + 0, len, 0); + if (n < cur->range) { + bitmap_set(cur->bitmap, n, len); + *irq = cur->offset + n; + pr_devel("guest: allocate IRQs %#x->%#x\n", + *irq, *irq + len - 1); + + return 0; + } + } + return -ENOSPC; +} + +static int irq_free_range(struct cxl *adapter, int irq, int len) +{ + int i, n; + struct irq_avail *cur; + + if (len == 0) + return -ENOENT; + + for (i = 0; i < adapter->guest->irq_nranges; i++) { + cur = &adapter->guest->irq_avail[i]; + if (irq >= cur->offset && + (irq + len) <= (cur->offset + cur->range)) { + n = irq - cur->offset; + bitmap_clear(cur->bitmap, n, len); + pr_devel("guest: release IRQs %#x->%#x\n", + irq, irq + len - 1); + return 0; + } + } + return -ENOENT; +} + +static int guest_reset(struct cxl *adapter) +{ + struct cxl_afu *afu = NULL; + int i, rc; + + pr_devel("Adapter reset request\n"); + spin_lock(&adapter->afu_list_lock); + for (i = 0; i < adapter->slices; i++) { + if ((afu = adapter->afu[i])) { + pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, + pci_channel_io_frozen); + cxl_context_detach_all(afu); + 
} + } + + rc = cxl_h_reset_adapter(adapter->guest->handle); + for (i = 0; i < adapter->slices; i++) { + if (!rc && (afu = adapter->afu[i])) { + pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, + pci_channel_io_normal); + pci_error_handlers(afu, CXL_RESUME_EVENT, 0); + } + } + spin_unlock(&adapter->afu_list_lock); + return rc; +} + +static int guest_alloc_one_irq(struct cxl *adapter) +{ + int irq; + + spin_lock(&adapter->guest->irq_alloc_lock); + if (irq_alloc_range(adapter, 1, &irq)) + irq = -ENOSPC; + spin_unlock(&adapter->guest->irq_alloc_lock); + return irq; +} + +static void guest_release_one_irq(struct cxl *adapter, int irq) +{ + spin_lock(&adapter->guest->irq_alloc_lock); + irq_free_range(adapter, irq, 1); + spin_unlock(&adapter->guest->irq_alloc_lock); +} + +static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs, + struct cxl *adapter, unsigned int num) +{ + int i, try, irq; + + memset(irqs, 0, sizeof(struct cxl_irq_ranges)); + + spin_lock(&adapter->guest->irq_alloc_lock); + for (i = 0; i < CXL_IRQ_RANGES && num; i++) { + try = num; + while (try) { + if (irq_alloc_range(adapter, try, &irq) == 0) + break; + try /= 2; + } + if (!try) + goto error; + irqs->offset[i] = irq; + irqs->range[i] = try; + num -= try; + } + if (num) + goto error; + spin_unlock(&adapter->guest->irq_alloc_lock); + return 0; + +error: + for (i = 0; i < CXL_IRQ_RANGES; i++) + irq_free_range(adapter, irqs->offset[i], irqs->range[i]); + spin_unlock(&adapter->guest->irq_alloc_lock); + return -ENOSPC; +} + +static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs, + struct cxl *adapter) +{ + int i; + + spin_lock(&adapter->guest->irq_alloc_lock); + for (i = 0; i < CXL_IRQ_RANGES; i++) + irq_free_range(adapter, irqs->offset[i], irqs->range[i]); + spin_unlock(&adapter->guest->irq_alloc_lock); +} + +static int guest_register_serr_irq(struct cxl_afu *afu) +{ + afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", + dev_name(&afu->dev)); + if (!afu->err_irq_name) + return -ENOMEM; + + if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq, + guest_slice_irq_err, afu, afu->err_irq_name))) { + kfree(afu->err_irq_name); + afu->err_irq_name = NULL; + return -ENOMEM; + } + + return 0; +} + +static void guest_release_serr_irq(struct cxl_afu *afu) +{ + cxl_unmap_irq(afu->serr_virq, afu); + cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); + kfree(afu->err_irq_name); +} + +static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) +{ + return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token, + tfc >> 32, (psl_reset_mask != 0)); +} + +static void disable_afu_irqs(struct cxl_context *ctx) +{ + irq_hw_number_t hwirq; + unsigned int virq; + int r, i; + + pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice); + for (r = 0; r < CXL_IRQ_RANGES; r++) { + hwirq = ctx->irqs.offset[r]; + for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { + virq = irq_find_mapping(NULL, hwirq); + disable_irq(virq); + } + } +} + +static void enable_afu_irqs(struct cxl_context *ctx) +{ + irq_hw_number_t hwirq; + unsigned int virq; + int r, i; + + pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice); + for (r = 0; r < CXL_IRQ_RANGES; r++) { + hwirq = ctx->irqs.offset[r]; + for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { + virq = irq_find_mapping(NULL, hwirq); + enable_irq(virq); + } + } +} + +static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx, + u64 offset, u64 *val) +{ + unsigned long cr; + char c; + int rc = 0; + + if (afu->crs_len < sz) + return -ENOENT; + 
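+	/*
+	 * The record is fetched below via the GET_CONFIG hcall into a
+	 * scratch page, then the requested scalar is byte-swapped out
+	 * of it: AFU configuration records follow the little-endian
+	 * PCI config space layout.
+	 */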
+ if (unlikely(offset >= afu->crs_len)) + return -ERANGE; + + cr = get_zeroed_page(GFP_KERNEL); + if (!cr) + return -ENOMEM; + + rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset, + virt_to_phys((void *)cr), sz); + if (rc) + goto err; + + switch (sz) { + case 1: + c = *((char *) cr); + *val = c; + break; + case 2: + *val = in_le16((u16 *)cr); + break; + case 4: + *val = in_le32((unsigned *)cr); + break; + case 8: + *val = in_le64((u64 *)cr); + break; + default: + WARN_ON(1); + } +err: + free_page(cr); + return rc; +} + +static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset, + u32 *out) +{ + int rc; + u64 val; + + rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val); + if (!rc) + *out = (u32) val; + return rc; +} + +static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset, + u16 *out) +{ + int rc; + u64 val; + + rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val); + if (!rc) + *out = (u16) val; + return rc; +} + +static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset, + u8 *out) +{ + int rc; + u64 val; + + rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val); + if (!rc) + *out = (u8) val; + return rc; +} + +static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset, + u64 *out) +{ + return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out); +} + +static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in) +{ + /* config record is not writable from guest */ + return -EPERM; +} + +static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in) +{ + /* config record is not writable from guest */ + return -EPERM; +} + +static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) +{ + /* config record is not writable from guest */ + return -EPERM; +} + +static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) +{ + struct cxl_process_element_hcall *elem; + struct cxl *adapter = ctx->afu->adapter; + const struct cred *cred; + u32 pid, idx; + int rc, r, i; + u64 mmio_addr, mmio_size; + __be64 flags = 0; + + /* Must be 8 byte aligned and cannot cross a 4096 byte boundary */ + if (!(elem = (struct cxl_process_element_hcall *) + get_zeroed_page(GFP_KERNEL))) + return -ENOMEM; + + elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION); + if (ctx->kernel) { + pid = 0; + flags |= CXL_PE_TRANSLATION_ENABLED; + flags |= CXL_PE_PRIVILEGED_PROCESS; + if (mfmsr() & MSR_SF) + flags |= CXL_PE_64_BIT; + } else { + pid = current->pid; + flags |= CXL_PE_PROBLEM_STATE; + flags |= CXL_PE_TRANSLATION_ENABLED; + if (!test_tsk_thread_flag(current, TIF_32BIT)) + flags |= CXL_PE_64_BIT; + cred = get_current_cred(); + if (uid_eq(cred->euid, GLOBAL_ROOT_UID)) + flags |= CXL_PE_PRIVILEGED_PROCESS; + put_cred(cred); + } + elem->flags = cpu_to_be64(flags); + elem->common.tid = cpu_to_be32(0); /* Unused */ + elem->common.pid = cpu_to_be32(pid); + elem->common.csrp = cpu_to_be64(0); /* disable */ + elem->common.u.psl8.aurp0 = cpu_to_be64(0); /* disable */ + elem->common.u.psl8.aurp1 = cpu_to_be64(0); /* disable */ + + cxl_prefault(ctx, wed); + + elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0); + elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1); + + /* + * Ensure we have at least one interrupt allocated to take faults for + * kernel contexts that may not have allocated any AFU IRQs at all: + */ + if (ctx->irqs.range[0] == 0) { + rc = afu_register_irqs(ctx, 0); + if (rc) + goto out_free; + } + + for (r = 0; r < CXL_IRQ_RANGES; r++) { + for (i = 0; i < 
ctx->irqs.range[r]; i++) { + if (r == 0 && i == 0) { + elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]); + } else { + idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset; + elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8); + } + } + } + elem->common.amr = cpu_to_be64(amr); + elem->common.wed = cpu_to_be64(wed); + + disable_afu_irqs(ctx); + + rc = cxl_h_attach_process(ctx->afu->guest->handle, elem, + &ctx->process_token, &mmio_addr, &mmio_size); + if (rc == H_SUCCESS) { + if (ctx->master || !ctx->afu->pp_psa) { + ctx->psn_phys = ctx->afu->psn_phys; + ctx->psn_size = ctx->afu->adapter->ps_size; + } else { + ctx->psn_phys = mmio_addr; + ctx->psn_size = mmio_size; + } + if (ctx->afu->pp_psa && mmio_size && + ctx->afu->pp_size == 0) { + /* + * There's no property in the device tree to read the + * pp_size. We only find out at the 1st attach. + * Compared to bare-metal, it is too late and we + * should really lock here. However, on powerVM, + * pp_size is really only used to display in /sys. + * Being discussed with pHyp for their next release. + */ + ctx->afu->pp_size = mmio_size; + } + /* from PAPR: process element is bytes 4-7 of process token */ + ctx->external_pe = ctx->process_token & 0xFFFFFFFF; + pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx", + ctx->pe, ctx->external_pe, ctx->psn_size); + ctx->pe_inserted = true; + enable_afu_irqs(ctx); + } + +out_free: + free_page((u64)elem); + return rc; +} + +static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr) +{ + pr_devel("in %s\n", __func__); + + ctx->kernel = kernel; + if (ctx->afu->current_mode == CXL_MODE_DIRECTED) + return attach_afu_directed(ctx, wed, amr); + + /* dedicated mode not supported on FW840 */ + + return -EINVAL; +} + +static int detach_afu_directed(struct cxl_context *ctx) +{ + if (!ctx->pe_inserted) + return 0; + if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token)) + return -1; + return 0; +} + +static int guest_detach_process(struct cxl_context *ctx) +{ + pr_devel("in %s\n", __func__); + trace_cxl_detach(ctx); + + if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) + return -EIO; + + if (ctx->afu->current_mode == CXL_MODE_DIRECTED) + return detach_afu_directed(ctx); + + return -EINVAL; +} + +static void guest_release_afu(struct device *dev) +{ + struct cxl_afu *afu = to_cxl_afu(dev); + + pr_devel("%s\n", __func__); + + idr_destroy(&afu->contexts_idr); + + kfree(afu->guest); + kfree(afu); +} + +ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len) +{ + return guest_collect_vpd(NULL, afu, buf, len); +} + +#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE +static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf, + loff_t off, size_t count) +{ + void *tbuf = NULL; + int rc = 0; + + tbuf = (void *) get_zeroed_page(GFP_KERNEL); + if (!tbuf) + return -ENOMEM; + + rc = cxl_h_get_afu_err(afu->guest->handle, + off & 0x7, + virt_to_phys(tbuf), + count); + if (rc) + goto err; + + if (count > ERR_BUFF_MAX_COPY_SIZE) + count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); + memcpy(buf, tbuf, count); +err: + free_page((u64)tbuf); + + return rc; +} + +static int guest_afu_check_and_enable(struct cxl_afu *afu) +{ + return 0; +} + +static bool guest_support_attributes(const char *attr_name, + enum cxl_attrs type) +{ + switch (type) { + case CXL_ADAPTER_ATTRS: + if ((strcmp(attr_name, "base_image") == 0) || + (strcmp(attr_name, "load_image_on_perst") == 0) || + (strcmp(attr_name, "perst_reloads_same_image") == 0) || + 
(strcmp(attr_name, "image_loaded") == 0)) + return false; + break; + case CXL_AFU_MASTER_ATTRS: + if ((strcmp(attr_name, "pp_mmio_off") == 0)) + return false; + break; + case CXL_AFU_ATTRS: + break; + default: + break; + } + + return true; +} + +static int activate_afu_directed(struct cxl_afu *afu) +{ + int rc; + + dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice); + + afu->current_mode = CXL_MODE_DIRECTED; + + afu->num_procs = afu->max_procs_virtualised; + + if ((rc = cxl_chardev_m_afu_add(afu))) + return rc; + + if ((rc = cxl_sysfs_afu_m_add(afu))) + goto err; + + if ((rc = cxl_chardev_s_afu_add(afu))) + goto err1; + + return 0; +err1: + cxl_sysfs_afu_m_remove(afu); +err: + cxl_chardev_afu_remove(afu); + return rc; +} + +static int guest_afu_activate_mode(struct cxl_afu *afu, int mode) +{ + if (!mode) + return 0; + if (!(mode & afu->modes_supported)) + return -EINVAL; + + if (mode == CXL_MODE_DIRECTED) + return activate_afu_directed(afu); + + if (mode == CXL_MODE_DEDICATED) + dev_err(&afu->dev, "Dedicated mode not supported\n"); + + return -EINVAL; +} + +static int deactivate_afu_directed(struct cxl_afu *afu) +{ + dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice); + + afu->current_mode = 0; + afu->num_procs = 0; + + cxl_sysfs_afu_m_remove(afu); + cxl_chardev_afu_remove(afu); + + cxl_ops->afu_reset(afu); + + return 0; +} + +static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode) +{ + if (!mode) + return 0; + if (!(mode & afu->modes_supported)) + return -EINVAL; + + if (mode == CXL_MODE_DIRECTED) + return deactivate_afu_directed(afu); + return 0; +} + +static int guest_afu_reset(struct cxl_afu *afu) +{ + pr_devel("AFU(%d) reset request\n", afu->slice); + return cxl_h_reset_afu(afu->guest->handle); +} + +static int guest_map_slice_regs(struct cxl_afu *afu) +{ + if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) { + dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n", + afu->slice); + return -ENOMEM; + } + return 0; +} + +static void guest_unmap_slice_regs(struct cxl_afu *afu) +{ + if (afu->p2n_mmio) + iounmap(afu->p2n_mmio); +} + +static int afu_update_state(struct cxl_afu *afu) +{ + int rc, cur_state; + + rc = afu_read_error_state(afu, &cur_state); + if (rc) + return rc; + + if (afu->guest->previous_state == cur_state) + return 0; + + pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state); + + switch (cur_state) { + case H_STATE_NORMAL: + afu->guest->previous_state = cur_state; + break; + + case H_STATE_DISABLE: + pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, + pci_channel_io_frozen); + + cxl_context_detach_all(afu); + if ((rc = cxl_ops->afu_reset(afu))) + pr_devel("reset hcall failed %d\n", rc); + + rc = afu_read_error_state(afu, &cur_state); + if (!rc && cur_state == H_STATE_NORMAL) { + pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, + pci_channel_io_normal); + pci_error_handlers(afu, CXL_RESUME_EVENT, 0); + } + afu->guest->previous_state = 0; + break; + + case H_STATE_TEMP_UNAVAILABLE: + afu->guest->previous_state = cur_state; + break; + + case H_STATE_PERM_UNAVAILABLE: + dev_err(&afu->dev, "AFU is in permanent error state\n"); + pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, + pci_channel_io_perm_failure); + afu->guest->previous_state = cur_state; + break; + + default: + pr_err("Unexpected AFU(%d) error state: %#x\n", + afu->slice, cur_state); + return -EINVAL; + } + + return rc; +} + +static void afu_handle_errstate(struct work_struct *work) +{ + struct cxl_afu_guest *afu_guest = + 
container_of(to_delayed_work(work), struct cxl_afu_guest, work_err); + + if (!afu_update_state(afu_guest->parent) && + afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE) + return; + + if (afu_guest->handle_err) + schedule_delayed_work(&afu_guest->work_err, + msecs_to_jiffies(3000)); +} + +static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu) +{ + int state; + + if (afu && (!afu_read_error_state(afu, &state))) { + if (state == H_STATE_NORMAL) + return true; + } + + return false; +} + +static int afu_properties_look_ok(struct cxl_afu *afu) +{ + if (afu->pp_irqs < 0) { + dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n"); + return -EINVAL; + } + + if (afu->max_procs_virtualised < 1) { + dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n"); + return -EINVAL; + } + + return 0; +} + +int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np) +{ + struct cxl_afu *afu; + bool free = true; + int rc; + + pr_devel("in %s - AFU(%d)\n", __func__, slice); + if (!(afu = cxl_alloc_afu(adapter, slice))) + return -ENOMEM; + + if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) { + kfree(afu); + return -ENOMEM; + } + + if ((rc = dev_set_name(&afu->dev, "afu%i.%i", + adapter->adapter_num, + slice))) + goto err1; + + adapter->slices++; + + if ((rc = cxl_of_read_afu_handle(afu, afu_np))) + goto err1; + + if ((rc = cxl_ops->afu_reset(afu))) + goto err1; + + if ((rc = cxl_of_read_afu_properties(afu, afu_np))) + goto err1; + + if ((rc = afu_properties_look_ok(afu))) + goto err1; + + if ((rc = guest_map_slice_regs(afu))) + goto err1; + + if ((rc = guest_register_serr_irq(afu))) + goto err2; + + /* + * After we call this function we must not free the afu directly, even + * if it returns an error! + */ + if ((rc = cxl_register_afu(afu))) + goto err_put_dev; + + if ((rc = cxl_sysfs_afu_add(afu))) + goto err_del_dev; + + /* + * pHyp doesn't expose the programming models supported by the + * AFU. pHyp currently only supports directed mode. If it adds + * dedicated mode later, this version of cxl has no way to + * detect it. So we'll initialize the driver, but the first + * attach will fail. + * Being discussed with pHyp to do better (likely new property) + */ + if (afu->max_procs_virtualised == 1) + afu->modes_supported = CXL_MODE_DEDICATED; + else + afu->modes_supported = CXL_MODE_DIRECTED; + + if ((rc = cxl_afu_select_best_mode(afu))) + goto err_remove_sysfs; + + adapter->afu[afu->slice] = afu; + + afu->enabled = true; + + /* + * wake up the cpu periodically to check the state + * of the AFU using "afu" stored in the guest structure. 
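+	 * afu_handle_errstate() above re-arms itself every 3 seconds for
+	 * as long as handle_err is set; the first check is scheduled one
+	 * second after init, below.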
+ */ + afu->guest->parent = afu; + afu->guest->handle_err = true; + INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate); + schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000)); + + if ((rc = cxl_pci_vphb_add(afu))) + dev_info(&afu->dev, "Can't register vPHB\n"); + + return 0; + +err_remove_sysfs: + cxl_sysfs_afu_remove(afu); +err_del_dev: + device_del(&afu->dev); +err_put_dev: + put_device(&afu->dev); + free = false; + guest_release_serr_irq(afu); +err2: + guest_unmap_slice_regs(afu); +err1: + if (free) { + kfree(afu->guest); + kfree(afu); + } + return rc; +} + +void cxl_guest_remove_afu(struct cxl_afu *afu) +{ + if (!afu) + return; + + /* flush and stop pending job */ + afu->guest->handle_err = false; + flush_delayed_work(&afu->guest->work_err); + + cxl_pci_vphb_remove(afu); + cxl_sysfs_afu_remove(afu); + + spin_lock(&afu->adapter->afu_list_lock); + afu->adapter->afu[afu->slice] = NULL; + spin_unlock(&afu->adapter->afu_list_lock); + + cxl_context_detach_all(afu); + cxl_ops->afu_deactivate_mode(afu, afu->current_mode); + guest_release_serr_irq(afu); + guest_unmap_slice_regs(afu); + + device_unregister(&afu->dev); +} + +static void free_adapter(struct cxl *adapter) +{ + struct irq_avail *cur; + int i; + + if (adapter->guest) { + if (adapter->guest->irq_avail) { + for (i = 0; i < adapter->guest->irq_nranges; i++) { + cur = &adapter->guest->irq_avail[i]; + kfree(cur->bitmap); + } + kfree(adapter->guest->irq_avail); + } + kfree(adapter->guest->status); + kfree(adapter->guest); + } + cxl_remove_adapter_nr(adapter); + kfree(adapter); +} + +static int properties_look_ok(struct cxl *adapter) +{ + /* The absence of this property means that the operational + * status is unknown or okay + */ + if (strlen(adapter->guest->status) && + strcmp(adapter->guest->status, "okay")) { + pr_err("ABORTING:Bad operational status of the device\n"); + return -EINVAL; + } + + return 0; +} + +ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len) +{ + return guest_collect_vpd(adapter, NULL, buf, len); +} + +void cxl_guest_remove_adapter(struct cxl *adapter) +{ + pr_devel("in %s\n", __func__); + + cxl_sysfs_adapter_remove(adapter); + + cxl_guest_remove_chardev(adapter); + device_unregister(&adapter->dev); +} + +static void release_adapter(struct device *dev) +{ + free_adapter(to_cxl_adapter(dev)); +} + +struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev) +{ + struct cxl *adapter; + bool free = true; + int rc; + + if (!(adapter = cxl_alloc_adapter())) + return ERR_PTR(-ENOMEM); + + if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) { + free_adapter(adapter); + return ERR_PTR(-ENOMEM); + } + + adapter->slices = 0; + adapter->guest->pdev = pdev; + adapter->dev.parent = &pdev->dev; + adapter->dev.release = release_adapter; + dev_set_drvdata(&pdev->dev, adapter); + + /* + * Hypervisor controls PSL timebase initialization (p1 register). + * On FW840, PSL is initialized. + */ + adapter->psl_timebase_synced = true; + + if ((rc = cxl_of_read_adapter_handle(adapter, np))) + goto err1; + + if ((rc = cxl_of_read_adapter_properties(adapter, np))) + goto err1; + + if ((rc = properties_look_ok(adapter))) + goto err1; + + if ((rc = cxl_guest_add_chardev(adapter))) + goto err1; + + /* + * After we call this function we must not free the adapter directly, + * even if it returns an error! 
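+	 * From this point the final kfree() happens in the device release
+	 * callback (release_adapter() -> free_adapter()) once the last
+	 * reference is dropped, which is why the error path below uses
+	 * put_device() rather than freeing directly.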
+ */ + if ((rc = cxl_register_adapter(adapter))) + goto err_put_dev; + + if ((rc = cxl_sysfs_adapter_add(adapter))) + goto err_del_dev; + + /* release the context lock as the adapter is configured */ + cxl_adapter_context_unlock(adapter); + + return adapter; + +err_del_dev: + device_del(&adapter->dev); +err_put_dev: + put_device(&adapter->dev); + free = false; + cxl_guest_remove_chardev(adapter); +err1: + if (free) + free_adapter(adapter); + return ERR_PTR(rc); +} + +void cxl_guest_reload_module(struct cxl *adapter) +{ + struct platform_device *pdev; + + pdev = adapter->guest->pdev; + cxl_guest_remove_adapter(adapter); + + cxl_of_probe(pdev); +} + +const struct cxl_backend_ops cxl_guest_ops = { + .module = THIS_MODULE, + .adapter_reset = guest_reset, + .alloc_one_irq = guest_alloc_one_irq, + .release_one_irq = guest_release_one_irq, + .alloc_irq_ranges = guest_alloc_irq_ranges, + .release_irq_ranges = guest_release_irq_ranges, + .setup_irq = NULL, + .handle_psl_slice_error = guest_handle_psl_slice_error, + .psl_interrupt = guest_psl_irq, + .ack_irq = guest_ack_irq, + .attach_process = guest_attach_process, + .detach_process = guest_detach_process, + .update_ivtes = NULL, + .support_attributes = guest_support_attributes, + .link_ok = guest_link_ok, + .release_afu = guest_release_afu, + .afu_read_err_buffer = guest_afu_read_err_buffer, + .afu_check_and_enable = guest_afu_check_and_enable, + .afu_activate_mode = guest_afu_activate_mode, + .afu_deactivate_mode = guest_afu_deactivate_mode, + .afu_reset = guest_afu_reset, + .afu_cr_read8 = guest_afu_cr_read8, + .afu_cr_read16 = guest_afu_cr_read16, + .afu_cr_read32 = guest_afu_cr_read32, + .afu_cr_read64 = guest_afu_cr_read64, + .afu_cr_write8 = guest_afu_cr_write8, + .afu_cr_write16 = guest_afu_cr_write16, + .afu_cr_write32 = guest_afu_cr_write32, + .read_adapter_vpd = cxl_guest_read_adapter_vpd, +}; diff --git a/drivers/misc/cxl/hcalls.c b/drivers/misc/cxl/hcalls.c new file mode 100644 index 000000000..aba5e20ee --- /dev/null +++ b/drivers/misc/cxl/hcalls.c @@ -0,0 +1,643 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2015 IBM Corp. 
+ */ + + +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/delay.h> +#include <asm/byteorder.h> +#include "hcalls.h" +#include "trace.h" + +#define CXL_HCALL_TIMEOUT 60000 +#define CXL_HCALL_TIMEOUT_DOWNLOAD 120000 + +#define H_ATTACH_CA_PROCESS 0x344 +#define H_CONTROL_CA_FUNCTION 0x348 +#define H_DETACH_CA_PROCESS 0x34C +#define H_COLLECT_CA_INT_INFO 0x350 +#define H_CONTROL_CA_FAULTS 0x354 +#define H_DOWNLOAD_CA_FUNCTION 0x35C +#define H_DOWNLOAD_CA_FACILITY 0x364 +#define H_CONTROL_CA_FACILITY 0x368 + +#define H_CONTROL_CA_FUNCTION_RESET 1 /* perform a reset */ +#define H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS 2 /* suspend a process from being executed */ +#define H_CONTROL_CA_FUNCTION_RESUME_PROCESS 3 /* resume a process to be executed */ +#define H_CONTROL_CA_FUNCTION_READ_ERR_STATE 4 /* read the error state */ +#define H_CONTROL_CA_FUNCTION_GET_AFU_ERR 5 /* collect the AFU error buffer */ +#define H_CONTROL_CA_FUNCTION_GET_CONFIG 6 /* collect configuration record */ +#define H_CONTROL_CA_FUNCTION_GET_DOWNLOAD_STATE 7 /* query to return download status */ +#define H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS 8 /* terminate the process before completion */ +#define H_CONTROL_CA_FUNCTION_COLLECT_VPD 9 /* collect VPD */ +#define H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT 11 /* read the function-wide error data based on an interrupt */ +#define H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT 12 /* acknowledge function-wide error data based on an interrupt */ +#define H_CONTROL_CA_FUNCTION_GET_ERROR_LOG 13 /* retrieve the Platform Log ID (PLID) of an error log */ + +#define H_CONTROL_CA_FAULTS_RESPOND_PSL 1 +#define H_CONTROL_CA_FAULTS_RESPOND_AFU 2 + +#define H_CONTROL_CA_FACILITY_RESET 1 /* perform a reset */ +#define H_CONTROL_CA_FACILITY_COLLECT_VPD 2 /* collect VPD */ + +#define H_DOWNLOAD_CA_FACILITY_DOWNLOAD 1 /* download adapter image */ +#define H_DOWNLOAD_CA_FACILITY_VALIDATE 2 /* validate adapter image */ + + +#define _CXL_LOOP_HCALL(call, rc, retbuf, fn, ...) \ + { \ + unsigned int delay, total_delay = 0; \ + u64 token = 0; \ + \ + memset(retbuf, 0, sizeof(retbuf)); \ + while (1) { \ + rc = call(fn, retbuf, __VA_ARGS__, token); \ + token = retbuf[0]; \ + if (rc != H_BUSY && !H_IS_LONG_BUSY(rc)) \ + break; \ + \ + if (rc == H_BUSY) \ + delay = 10; \ + else \ + delay = get_longbusy_msecs(rc); \ + \ + total_delay += delay; \ + if (total_delay > CXL_HCALL_TIMEOUT) { \ + WARN(1, "Warning: Giving up waiting for CXL hcall " \ + "%#x after %u msec\n", fn, total_delay); \ + rc = H_BUSY; \ + break; \ + } \ + msleep(delay); \ + } \ + } +#define CXL_H_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall, __VA_ARGS__) +#define CXL_H9_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall9, __VA_ARGS__) + +#define _PRINT_MSG(rc, format, ...) 
\ + { \ + if ((rc != H_SUCCESS) && (rc != H_CONTINUE)) \ + pr_err(format, __VA_ARGS__); \ + else \ + pr_devel(format, __VA_ARGS__); \ + } \ + + +static char *afu_op_names[] = { + "UNKNOWN_OP", /* 0 undefined */ + "RESET", /* 1 */ + "SUSPEND_PROCESS", /* 2 */ + "RESUME_PROCESS", /* 3 */ + "READ_ERR_STATE", /* 4 */ + "GET_AFU_ERR", /* 5 */ + "GET_CONFIG", /* 6 */ + "GET_DOWNLOAD_STATE", /* 7 */ + "TERMINATE_PROCESS", /* 8 */ + "COLLECT_VPD", /* 9 */ + "UNKNOWN_OP", /* 10 undefined */ + "GET_FUNCTION_ERR_INT", /* 11 */ + "ACK_FUNCTION_ERR_INT", /* 12 */ + "GET_ERROR_LOG", /* 13 */ +}; + +static char *control_adapter_op_names[] = { + "UNKNOWN_OP", /* 0 undefined */ + "RESET", /* 1 */ + "COLLECT_VPD", /* 2 */ +}; + +static char *download_op_names[] = { + "UNKNOWN_OP", /* 0 undefined */ + "DOWNLOAD", /* 1 */ + "VALIDATE", /* 2 */ +}; + +static char *op_str(unsigned int op, char *name_array[], int array_len) +{ + if (op >= array_len) + return "UNKNOWN_OP"; + return name_array[op]; +} + +#define OP_STR(op, name_array) op_str(op, name_array, ARRAY_SIZE(name_array)) + +#define OP_STR_AFU(op) OP_STR(op, afu_op_names) +#define OP_STR_CONTROL_ADAPTER(op) OP_STR(op, control_adapter_op_names) +#define OP_STR_DOWNLOAD_ADAPTER(op) OP_STR(op, download_op_names) + + +long cxl_h_attach_process(u64 unit_address, + struct cxl_process_element_hcall *element, + u64 *process_token, u64 *mmio_addr, u64 *mmio_size) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_ATTACH_CA_PROCESS, unit_address, virt_to_phys(element)); + _PRINT_MSG(rc, "cxl_h_attach_process(%#.16llx, %#.16lx): %li\n", + unit_address, virt_to_phys(element), rc); + trace_cxl_hcall_attach(unit_address, virt_to_phys(element), retbuf[0], retbuf[1], retbuf[2], rc); + + pr_devel("token: 0x%.8lx mmio_addr: 0x%lx mmio_size: 0x%lx\nProcess Element Structure:\n", + retbuf[0], retbuf[1], retbuf[2]); + cxl_dump_debug_buffer(element, sizeof(*element)); + + switch (rc) { + case H_SUCCESS: /* The process info is attached to the coherent platform function */ + *process_token = retbuf[0]; + if (mmio_addr) + *mmio_addr = retbuf[1]; + if (mmio_size) + *mmio_size = retbuf[2]; + return 0; + case H_PARAMETER: /* An incorrect parameter was supplied. */ + case H_FUNCTION: /* The function is not supported. */ + return -EINVAL; + case H_AUTHORITY: /* The partition does not have authority to perform this hcall */ + case H_RESOURCE: /* The coherent platform function does not have enough additional resource to attach the process */ + case H_HARDWARE: /* A hardware event prevented the attach operation */ + case H_STATE: /* The coherent platform function is not in a valid state */ + case H_BUSY: + return -EBUSY; + default: + WARN(1, "Unexpected return code: %lx", rc); + return -EINVAL; + } +} + +/* + * cxl_h_detach_process - Detach a process element from a coherent + * platform function. + */ +long cxl_h_detach_process(u64 unit_address, u64 process_token) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_DETACH_CA_PROCESS, unit_address, process_token); + _PRINT_MSG(rc, "cxl_h_detach_process(%#.16llx, 0x%.8llx): %li\n", unit_address, process_token, rc); + trace_cxl_hcall_detach(unit_address, process_token, rc); + + switch (rc) { + case H_SUCCESS: /* The process was detached from the coherent platform function */ + return 0; + case H_PARAMETER: /* An incorrect parameter was supplied. 
 */
+		return -EINVAL;
+	case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+	case H_RESOURCE: /* The function has page table mappings for MMIO */
+	case H_HARDWARE: /* A hardware event prevented the detach operation */
+	case H_STATE: /* The coherent platform function is not in a valid state */
+	case H_BUSY:
+		return -EBUSY;
+	default:
+		WARN(1, "Unexpected return code: %lx", rc);
+		return -EINVAL;
+	}
+}
+
+/*
+ * cxl_h_control_function - This H_CONTROL_CA_FUNCTION hypervisor call allows
+ *                          the partition to manipulate or query
+ *                          certain coherent platform function behaviors.
+ */
+static long cxl_h_control_function(u64 unit_address, u64 op,
+				   u64 p1, u64 p2, u64 p3, u64 p4, u64 *out)
+{
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+	long rc;
+
+	CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FUNCTION, unit_address, op, p1, p2, p3, p4);
+	_PRINT_MSG(rc, "cxl_h_control_function(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n",
+		   unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc);
+	trace_cxl_hcall_control_function(unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc);
+
+	switch (rc) {
+	case H_SUCCESS: /* The operation is completed for the coherent platform function */
+		if ((op == H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT ||
+		     op == H_CONTROL_CA_FUNCTION_READ_ERR_STATE ||
+		     op == H_CONTROL_CA_FUNCTION_COLLECT_VPD))
+			*out = retbuf[0];
+		return 0;
+	case H_PARAMETER: /* An incorrect parameter was supplied. */
+	case H_FUNCTION: /* The function is not supported. */
+	case H_NOT_FOUND: /* The operation supplied was not valid */
+	case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */
+	case H_SG_LIST: /* A block list entry was invalid */
+		return -EINVAL;
+	case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+	case H_RESOURCE: /* The function has page table mappings for MMIO */
+	case H_HARDWARE: /* A hardware event prevented the attach operation */
+	case H_STATE: /* The coherent platform function is not in a valid state */
+	case H_BUSY:
+		return -EBUSY;
+	default:
+		WARN(1, "Unexpected return code: %lx", rc);
+		return -EINVAL;
+	}
+}
+
+/*
+ * cxl_h_reset_afu - Perform a reset to the coherent platform function.
+ */
+long cxl_h_reset_afu(u64 unit_address)
+{
+	return cxl_h_control_function(unit_address,
+				H_CONTROL_CA_FUNCTION_RESET,
+				0, 0, 0, 0,
+				NULL);
+}
+
+/*
+ * cxl_h_suspend_process - Suspend a process from being executed
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ *              process was attached.
+ */
+long cxl_h_suspend_process(u64 unit_address, u64 process_token)
+{
+	return cxl_h_control_function(unit_address,
+				H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS,
+				process_token, 0, 0, 0,
+				NULL);
+}
+
+/*
+ * cxl_h_resume_process - Resume a process to be executed
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ *              process was attached.
+ */
+long cxl_h_resume_process(u64 unit_address, u64 process_token)
+{
+	return cxl_h_control_function(unit_address,
+				H_CONTROL_CA_FUNCTION_RESUME_PROCESS,
+				process_token, 0, 0, 0,
+				NULL);
+}
+
+/*
+ * cxl_h_read_error_state - Checks the error state of the coherent
+ *                          platform function.
+ * R4 contains the error state + */ +long cxl_h_read_error_state(u64 unit_address, u64 *state) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_READ_ERR_STATE, + 0, 0, 0, 0, + state); +} + +/* + * cxl_h_get_afu_err - collect the AFU error buffer + * Parameter1 = byte offset into error buffer to retrieve, valid values + * are between 0 and (ibm,error-buffer-size - 1) + * Parameter2 = 4K aligned real address of error buffer, to be filled in + * Parameter3 = length of error buffer, valid values are 4K or less + */ +long cxl_h_get_afu_err(u64 unit_address, u64 offset, + u64 buf_address, u64 len) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_GET_AFU_ERR, + offset, buf_address, len, 0, + NULL); +} + +/* + * cxl_h_get_config - collect configuration record for the + * coherent platform function + * Parameter1 = # of configuration record to retrieve, valid values are + * between 0 and (ibm,#config-records - 1) + * Parameter2 = byte offset into configuration record to retrieve, + * valid values are between 0 and (ibm,config-record-size - 1) + * Parameter3 = 4K aligned real address of configuration record buffer, + * to be filled in + * Parameter4 = length of configuration buffer, valid values are 4K or less + */ +long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset, + u64 buf_address, u64 len) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_GET_CONFIG, + cr_num, offset, buf_address, len, + NULL); +} + +/* + * cxl_h_terminate_process - Terminate the process before completion + * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when + * process was attached. + */ +long cxl_h_terminate_process(u64 unit_address, u64 process_token) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS, + process_token, 0, 0, 0, + NULL); +} + +/* + * cxl_h_collect_vpd - Collect VPD for the coherent platform function. + * Parameter1 = # of VPD record to retrieve, valid values are between 0 + * and (ibm,#config-records - 1). + * Parameter2 = 4K naturally aligned real buffer containing block + * list entries + * Parameter3 = number of block list entries in the block list, valid + * values are between 0 and 256 + */ +long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address, + u64 num, u64 *out) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_COLLECT_VPD, + record, list_address, num, 0, + out); +} + +/* + * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt + */ +long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT, + 0, 0, 0, 0, reg); +} + +/* + * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data + * based on an interrupt + * Parameter1 = value to write to the function-wide error interrupt register + */ +long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT, + value, 0, 0, 0, + NULL); +} + +/* + * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of + * an error log + */ +long cxl_h_get_error_log(u64 unit_address, u64 value) +{ + return cxl_h_control_function(unit_address, + H_CONTROL_CA_FUNCTION_GET_ERROR_LOG, + 0, 0, 0, 0, + NULL); +} + +/* + * cxl_h_collect_int_info - Collect interrupt info about a coherent + * platform function after an interrupt occurred. 
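+ * The hypervisor hands the state back in the hcall return registers;
+ * struct cxl_irq_info is sized to overlay all nine of them (dsisr,
+ * dar, dsr, afu_err, errstat, ...), which the BUG_ON below verifies.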
+ */ +long cxl_h_collect_int_info(u64 unit_address, u64 process_token, + struct cxl_irq_info *info) +{ + long rc; + + BUG_ON(sizeof(*info) != sizeof(unsigned long[PLPAR_HCALL9_BUFSIZE])); + + rc = plpar_hcall9(H_COLLECT_CA_INT_INFO, (unsigned long *) info, + unit_address, process_token); + _PRINT_MSG(rc, "cxl_h_collect_int_info(%#.16llx, 0x%llx): %li\n", + unit_address, process_token, rc); + trace_cxl_hcall_collect_int_info(unit_address, process_token, rc); + + switch (rc) { + case H_SUCCESS: /* The interrupt info is returned in return registers. */ + pr_devel("dsisr:%#llx, dar:%#llx, dsr:%#llx, pid_tid:%#llx, afu_err:%#llx, errstat:%#llx\n", + info->dsisr, info->dar, info->dsr, info->reserved, + info->afu_err, info->errstat); + return 0; + case H_PARAMETER: /* An incorrect parameter was supplied. */ + return -EINVAL; + case H_AUTHORITY: /* The partition does not have authority to perform this hcall. */ + case H_HARDWARE: /* A hardware event prevented the collection of the interrupt info.*/ + case H_STATE: /* The coherent platform function is not in a valid state to collect interrupt info. */ + return -EBUSY; + default: + WARN(1, "Unexpected return code: %lx", rc); + return -EINVAL; + } +} + +/* + * cxl_h_control_faults - Control the operation of a coherent platform + * function after a fault occurs. + * + * Parameters + * control-mask: value to control the faults + * looks like PSL_TFC_An shifted >> 32 + * reset-mask: mask to control reset of function faults + * Set reset_mask = 1 to reset PSL errors + */ +long cxl_h_control_faults(u64 unit_address, u64 process_token, + u64 control_mask, u64 reset_mask) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + memset(retbuf, 0, sizeof(retbuf)); + + rc = plpar_hcall(H_CONTROL_CA_FAULTS, retbuf, unit_address, + H_CONTROL_CA_FAULTS_RESPOND_PSL, process_token, + control_mask, reset_mask); + _PRINT_MSG(rc, "cxl_h_control_faults(%#.16llx, 0x%llx, %#llx, %#llx): %li (%#lx)\n", + unit_address, process_token, control_mask, reset_mask, + rc, retbuf[0]); + trace_cxl_hcall_control_faults(unit_address, process_token, + control_mask, reset_mask, retbuf[0], rc); + + switch (rc) { + case H_SUCCESS: /* Faults were successfully controlled for the function. */ + return 0; + case H_PARAMETER: /* An incorrect parameter was supplied. */ + return -EINVAL; + case H_HARDWARE: /* A hardware event prevented the control of faults. */ + case H_STATE: /* The function was in an invalid state. */ + case H_AUTHORITY: /* The partition does not have authority to perform this hcall; the coherent platform facilities may need to be licensed. */ + return -EBUSY; + case H_FUNCTION: /* The function is not supported */ + case H_NOT_FOUND: /* The operation supplied was not valid */ + return -EINVAL; + default: + WARN(1, "Unexpected return code: %lx", rc); + return -EINVAL; + } +} + +/* + * cxl_h_control_facility - This H_CONTROL_CA_FACILITY hypervisor call + * allows the partition to manipulate or query + * certain coherent platform facility behaviors. 
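+ * Valid operations are H_CONTROL_CA_FACILITY_RESET and
+ * H_CONTROL_CA_FACILITY_COLLECT_VPD; for COLLECT_VPD the
+ * size of the available VPD is returned through *out.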
+ */
+static long cxl_h_control_facility(u64 unit_address, u64 op,
+				   u64 p1, u64 p2, u64 p3, u64 p4, u64 *out)
+{
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+	long rc;
+
+	CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FACILITY, unit_address, op, p1, p2, p3, p4);
+	_PRINT_MSG(rc, "cxl_h_control_facility(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n",
+		   unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc);
+	trace_cxl_hcall_control_facility(unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc);
+
+	switch (rc) {
+	case H_SUCCESS: /* The operation is completed for the coherent platform facility */
+		if (op == H_CONTROL_CA_FACILITY_COLLECT_VPD)
+			*out = retbuf[0];
+		return 0;
+	case H_PARAMETER: /* An incorrect parameter was supplied. */
+	case H_FUNCTION: /* The function is not supported. */
+	case H_NOT_FOUND: /* The operation supplied was not valid */
+	case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */
+	case H_SG_LIST: /* A block list entry was invalid */
+		return -EINVAL;
+	case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+	case H_RESOURCE: /* The function has page table mappings for MMIO */
+	case H_HARDWARE: /* A hardware event prevented the attach operation */
+	case H_STATE: /* The coherent platform facility is not in a valid state */
+	case H_BUSY:
+		return -EBUSY;
+	default:
+		WARN(1, "Unexpected return code: %lx", rc);
+		return -EINVAL;
+	}
+}
+
+/*
+ * cxl_h_reset_adapter - Perform a reset to the coherent platform facility.
+ */
+long cxl_h_reset_adapter(u64 unit_address)
+{
+	return cxl_h_control_facility(unit_address,
+				H_CONTROL_CA_FACILITY_RESET,
+				0, 0, 0, 0,
+				NULL);
+}
+
+/*
+ * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility.
+ * Parameter1 = 4K naturally aligned real buffer containing block
+ *              list entries
+ * Parameter2 = number of block list entries in the block list, valid
+ *              values are between 0 and 256
+ */
+long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
+			       u64 num, u64 *out)
+{
+	return cxl_h_control_facility(unit_address,
+				H_CONTROL_CA_FACILITY_COLLECT_VPD,
+				list_address, num, 0, 0,
+				out);
+}
+
+/*
+ * cxl_h_download_facility - This H_DOWNLOAD_CA_FACILITY
+ *                    hypervisor call provides platform support for
+ *                    downloading a base adapter image to the coherent
+ *                    platform facility, and for validating the entire
+ *                    image after the download.
+ * Parameters
+ *    op: operation to perform to the coherent platform facility
+ *      Download: operation = 1, the base image in the coherent platform
+ *                facility is first erased, and then
+ *                programmed using the image supplied
+ *                in the scatter/gather list.
+ *      Validate: operation = 2, the base image in the coherent platform
+ *                facility is compared with the image
+ *                supplied in the scatter/gather list.
+ *    list_address: 4K naturally aligned real buffer containing
+ *                  scatter/gather list entries.
+ *    num: number of block list entries in the scatter/gather list.
+ */
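+/*
+ * Illustrative call sequence only (placeholder names, not code from this
+ * file): a flash-update caller is expected to start with a zeroed
+ * continuation token and keep re-issuing the download while the wrapper
+ * below returns 1 (H_CONTINUE), feeding the token back through 'out':
+ *
+ *	u64 token = 0;
+ *	long rc;
+ *
+ *	do {
+ *		rc = cxl_h_download_adapter_image(unit_address, list_phys,
+ *						  num_entries, &token);
+ *	} while (rc == 1);
+ */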
+static long cxl_h_download_facility(u64 unit_address, u64 op,
+				    u64 list_address, u64 num,
+				    u64 *out)
+{
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+	unsigned int delay, total_delay = 0;
+	u64 token = 0;
+	long rc;
+
+	if (*out != 0)
+		token = *out;
+
+	memset(retbuf, 0, sizeof(retbuf));
+	while (1) {
+		rc = plpar_hcall(H_DOWNLOAD_CA_FACILITY, retbuf,
+				 unit_address, op, list_address, num,
+				 token);
+		token = retbuf[0];
+		if (rc != H_BUSY && !H_IS_LONG_BUSY(rc))
+			break;
+
+		if (rc != H_BUSY) {
+			delay = get_longbusy_msecs(rc);
+			total_delay += delay;
+			if (total_delay > CXL_HCALL_TIMEOUT_DOWNLOAD) {
+				WARN(1, "Warning: Giving up waiting for CXL hcall "
+					"%#x after %u msec\n",
+					H_DOWNLOAD_CA_FACILITY, total_delay);
+				rc = H_BUSY;
+				break;
+			}
+			msleep(delay);
+		}
+	}
+	_PRINT_MSG(rc, "cxl_h_download_facility(%#.16llx, %s(%#llx, %#llx), %#lx): %li\n",
+		   unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc);
+	trace_cxl_hcall_download_facility(unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc);
+
+	switch (rc) {
+	case H_SUCCESS: /* The operation is completed for the coherent platform facility */
+		return 0;
+	case H_PARAMETER: /* An incorrect parameter was supplied */
+	case H_FUNCTION: /* The function is not supported. */
+	case H_SG_LIST: /* A block list entry was invalid */
+	case H_BAD_DATA: /* Image verification failed */
+		return -EINVAL;
+	case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+	case H_RESOURCE: /* The function has page table mappings for MMIO */
+	case H_HARDWARE: /* A hardware event prevented the attach operation */
+	case H_STATE: /* The coherent platform facility is not in a valid state */
+	case H_BUSY:
+		return -EBUSY;
+	case H_CONTINUE:
+		*out = retbuf[0];
+		return 1;  /* More data is needed for the complete image */
+	default:
+		WARN(1, "Unexpected return code: %lx", rc);
+		return -EINVAL;
+	}
+}
+
+/*
+ * cxl_h_download_adapter_image - Download the base image to the coherent
+ *                                platform facility.
+ */
+long cxl_h_download_adapter_image(u64 unit_address,
+				  u64 list_address, u64 num,
+				  u64 *out)
+{
+	return cxl_h_download_facility(unit_address,
+				       H_DOWNLOAD_CA_FACILITY_DOWNLOAD,
+				       list_address, num, out);
+}
+
+/*
+ * cxl_h_validate_adapter_image - Validate the base image in the coherent
+ *                                platform facility.
+ */
+long cxl_h_validate_adapter_image(u64 unit_address,
+				  u64 list_address, u64 num,
+				  u64 *out)
+{
+	return cxl_h_download_facility(unit_address,
+				       H_DOWNLOAD_CA_FACILITY_VALIDATE,
+				       list_address, num, out);
+}
diff --git a/drivers/misc/cxl/hcalls.h b/drivers/misc/cxl/hcalls.h
new file mode 100644
index 000000000..d200465dc
--- /dev/null
+++ b/drivers/misc/cxl/hcalls.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2015 IBM Corp.
+ */
+
+#ifndef _HCALLS_H
+#define _HCALLS_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/hvcall.h>
+#include "cxl.h"
+
+#define SG_BUFFER_SIZE 4096
+#define SG_MAX_ENTRIES 256
+
+struct sg_list {
+	u64 phys_addr;
+	u64 len;
+};
+
+/*
+ * This is straight out of PAPR, but replacing some of the compound fields with
+ * a single field, where they were identical to the register layout.
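+ * The multi-byte fields are stored big-endian, as the hypervisor
+ * expects (hence the __be32/__be64 types below and the cpu_to_be*()
+ * conversions when the element is built in guest.c).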
+ * + * The 'flags' parameter regroups the various bit-fields + */ +#define CXL_PE_CSRP_VALID (1ULL << 63) +#define CXL_PE_PROBLEM_STATE (1ULL << 62) +#define CXL_PE_SECONDARY_SEGMENT_TBL_SRCH (1ULL << 61) +#define CXL_PE_TAGS_ACTIVE (1ULL << 60) +#define CXL_PE_USER_STATE (1ULL << 59) +#define CXL_PE_TRANSLATION_ENABLED (1ULL << 58) +#define CXL_PE_64_BIT (1ULL << 57) +#define CXL_PE_PRIVILEGED_PROCESS (1ULL << 56) + +#define CXL_PROCESS_ELEMENT_VERSION 1 +struct cxl_process_element_hcall { + __be64 version; + __be64 flags; + u8 reserved0[12]; + __be32 pslVirtualIsn; + u8 applicationVirtualIsnBitmap[256]; + u8 reserved1[144]; + struct cxl_process_element_common common; + u8 reserved4[12]; +} __packed; + +#define H_STATE_NORMAL 1 +#define H_STATE_DISABLE 2 +#define H_STATE_TEMP_UNAVAILABLE 3 +#define H_STATE_PERM_UNAVAILABLE 4 + +/* NOTE: element must be a logical real address, and must be pinned */ +long cxl_h_attach_process(u64 unit_address, struct cxl_process_element_hcall *element, + u64 *process_token, u64 *mmio_addr, u64 *mmio_size); + +/** + * cxl_h_detach_process - Detach a process element from a coherent + * platform function. + */ +long cxl_h_detach_process(u64 unit_address, u64 process_token); + +/** + * cxl_h_reset_afu - Perform a reset to the coherent platform function. + */ +long cxl_h_reset_afu(u64 unit_address); + +/** + * cxl_h_suspend_process - Suspend a process from being executed + * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when + * process was attached. + */ +long cxl_h_suspend_process(u64 unit_address, u64 process_token); + +/** + * cxl_h_resume_process - Resume a process to be executed + * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when + * process was attached. + */ +long cxl_h_resume_process(u64 unit_address, u64 process_token); + +/** + * cxl_h_read_error_state - Reads the error state of the coherent + * platform function. + * R4 contains the error state + */ +long cxl_h_read_error_state(u64 unit_address, u64 *state); + +/** + * cxl_h_get_afu_err - collect the AFU error buffer + * Parameter1 = byte offset into error buffer to retrieve, valid values + * are between 0 and (ibm,error-buffer-size - 1) + * Parameter2 = 4K aligned real address of error buffer, to be filled in + * Parameter3 = length of error buffer, valid values are 4K or less + */ +long cxl_h_get_afu_err(u64 unit_address, u64 offset, u64 buf_address, u64 len); + +/** + * cxl_h_get_config - collect configuration record for the + * coherent platform function + * Parameter1 = # of configuration record to retrieve, valid values are + * between 0 and (ibm,#config-records - 1) + * Parameter2 = byte offset into configuration record to retrieve, + * valid values are between 0 and (ibm,config-record-size - 1) + * Parameter3 = 4K aligned real address of configuration record buffer, + * to be filled in + * Parameter4 = length of configuration buffer, valid values are 4K or less + */ +long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset, + u64 buf_address, u64 len); + +/** + * cxl_h_terminate_process - Terminate the process before completion + * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when + * process was attached. + */ +long cxl_h_terminate_process(u64 unit_address, u64 process_token); + +/** + * cxl_h_collect_vpd - Collect VPD for the coherent platform function. + * Parameter1 = # of VPD record to retrieve, valid values are between 0 + * and (ibm,#config-records - 1). 
+ * Parameter2 = 4K naturally aligned real buffer containing block
+ *              list entries
+ * Parameter3 = number of block list entries in the block list, valid
+ *              values are between 0 and 256
+ */
+long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address,
+		       u64 num, u64 *out);
+
+/**
+ * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt
+ */
+long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg);
+
+/**
+ * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data
+ *                                based on an interrupt
+ * Parameter1 = value to write to the function-wide error interrupt register
+ */
+long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value);
+
+/**
+ * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of
+ *                       an error log
+ */
+long cxl_h_get_error_log(u64 unit_address, u64 value);
+
+/**
+ * cxl_h_collect_int_info - Collect interrupt info about a coherent
+ *                          platform function after an interrupt occurred.
+ */
+long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
+			    struct cxl_irq_info *info);
+
+/**
+ * cxl_h_control_faults - Control the operation of a coherent platform
+ *                        function after a fault occurs.
+ *
+ * Parameters
+ *    control-mask: value to control the faults
+ *                  looks like PSL_TFC_An shifted >> 32
+ *    reset-mask: mask to control reset of function faults
+ *                Set reset_mask = 1 to reset PSL errors
+ */
+long cxl_h_control_faults(u64 unit_address, u64 process_token,
+			  u64 control_mask, u64 reset_mask);
+
+/**
+ * cxl_h_reset_adapter - Perform a reset to the coherent platform facility.
+ */
+long cxl_h_reset_adapter(u64 unit_address);
+
+/**
+ * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility.
+ * Parameter1 = 4K naturally aligned real buffer containing block
+ *              list entries
+ * Parameter2 = number of block list entries in the block list, valid
+ *              values are between 0 and 256
+ */
+long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
+			       u64 num, u64 *out);
+
+/**
+ * cxl_h_download_adapter_image - Download the base image to the coherent
+ *                                platform facility.
+ */
+long cxl_h_download_adapter_image(u64 unit_address,
+				  u64 list_address, u64 num,
+				  u64 *out);
+
+/**
+ * cxl_h_validate_adapter_image - Validate the base image in the coherent
+ *                                platform facility.
+ */
+long cxl_h_validate_adapter_image(u64 unit_address,
+				  u64 list_address, u64 num,
+				  u64 *out);
+#endif /* _HCALLS_H */
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
new file mode 100644
index 000000000..2e4dcfebf
--- /dev/null
+++ b/drivers/misc/cxl/irq.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2014 IBM Corp.
+ */ + +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/slab.h> +#include <linux/pid.h> +#include <asm/cputable.h> +#include <misc/cxl-base.h> + +#include "cxl.h" +#include "trace.h" + +static int afu_irq_range_start(void) +{ + if (cpu_has_feature(CPU_FTR_HVMODE)) + return 1; + return 0; +} + +static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar) +{ + ctx->dsisr = dsisr; + ctx->dar = dar; + schedule_work(&ctx->fault_work); + return IRQ_HANDLED; +} + +irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info) +{ + u64 dsisr, dar; + + dsisr = irq_info->dsisr; + dar = irq_info->dar; + + trace_cxl_psl9_irq(ctx, irq, dsisr, dar); + + pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar); + + if (dsisr & CXL_PSL9_DSISR_An_TF) { + pr_devel("CXL interrupt: Scheduling translation fault handling for later (pe: %i)\n", ctx->pe); + return schedule_cxl_fault(ctx, dsisr, dar); + } + + if (dsisr & CXL_PSL9_DSISR_An_PE) + return cxl_ops->handle_psl_slice_error(ctx, dsisr, + irq_info->errstat); + if (dsisr & CXL_PSL9_DSISR_An_AE) { + pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err); + + if (ctx->pending_afu_err) { + /* + * This shouldn't happen - the PSL treats these errors + * as fatal and will have reset the AFU, so there's not + * much point buffering multiple AFU errors. + * OTOH if we DO ever see a storm of these come in it's + * probably best that we log them somewhere: + */ + dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n", + ctx->pe, irq_info->afu_err); + } else { + spin_lock(&ctx->lock); + ctx->afu_err = irq_info->afu_err; + ctx->pending_afu_err = 1; + spin_unlock(&ctx->lock); + + wake_up_all(&ctx->wq); + } + + cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0); + return IRQ_HANDLED; + } + if (dsisr & CXL_PSL9_DSISR_An_OC) + pr_devel("CXL interrupt: OS Context Warning\n"); + + WARN(1, "Unhandled CXL PSL IRQ\n"); + return IRQ_HANDLED; +} + +irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info) +{ + u64 dsisr, dar; + + dsisr = irq_info->dsisr; + dar = irq_info->dar; + + trace_cxl_psl_irq(ctx, irq, dsisr, dar); + + pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar); + + if (dsisr & CXL_PSL_DSISR_An_DS) { + /* + * We don't inherently need to sleep to handle this, but we do + * need to get a ref to the task's mm, which we can't do from + * irq context without the potential for a deadlock since it + * takes the task_lock. An alternate option would be to keep a + * reference to the task's mm the entire time it has cxl open, + * but to do that we need to solve the issue where we hold a + * ref to the mm, but the mm can hold a ref to the fd after an + * mmap preventing anything from being cleaned up. 
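+	 * The deferred work runs from ctx->fault_work, queued via
+	 * schedule_cxl_fault() below.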
+ */ + pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe); + return schedule_cxl_fault(ctx, dsisr, dar); + } + + if (dsisr & CXL_PSL_DSISR_An_M) + pr_devel("CXL interrupt: PTE not found\n"); + if (dsisr & CXL_PSL_DSISR_An_P) + pr_devel("CXL interrupt: Storage protection violation\n"); + if (dsisr & CXL_PSL_DSISR_An_A) + pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n"); + if (dsisr & CXL_PSL_DSISR_An_S) + pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n"); + if (dsisr & CXL_PSL_DSISR_An_K) + pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n"); + + if (dsisr & CXL_PSL_DSISR_An_DM) { + /* + * In some cases we might be able to handle the fault + * immediately if hash_page would succeed, but we still need + * the task's mm, which as above we can't get without a lock + */ + pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe); + return schedule_cxl_fault(ctx, dsisr, dar); + } + if (dsisr & CXL_PSL_DSISR_An_ST) + WARN(1, "CXL interrupt: Segment Table PTE not found\n"); + if (dsisr & CXL_PSL_DSISR_An_UR) + pr_devel("CXL interrupt: AURP PTE not found\n"); + if (dsisr & CXL_PSL_DSISR_An_PE) + return cxl_ops->handle_psl_slice_error(ctx, dsisr, + irq_info->errstat); + if (dsisr & CXL_PSL_DSISR_An_AE) { + pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err); + + if (ctx->pending_afu_err) { + /* + * This shouldn't happen - the PSL treats these errors + * as fatal and will have reset the AFU, so there's not + * much point buffering multiple AFU errors. + * OTOH if we DO ever see a storm of these come in it's + * probably best that we log them somewhere: + */ + dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error " + "undelivered to pe %i: 0x%016llx\n", + ctx->pe, irq_info->afu_err); + } else { + spin_lock(&ctx->lock); + ctx->afu_err = irq_info->afu_err; + ctx->pending_afu_err = true; + spin_unlock(&ctx->lock); + + wake_up_all(&ctx->wq); + } + + cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0); + return IRQ_HANDLED; + } + if (dsisr & CXL_PSL_DSISR_An_OC) + pr_devel("CXL interrupt: OS Context Warning\n"); + + WARN(1, "Unhandled CXL PSL IRQ\n"); + return IRQ_HANDLED; +} + +static irqreturn_t cxl_irq_afu(int irq, void *data) +{ + struct cxl_context *ctx = data; + irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq)); + int irq_off, afu_irq = 0; + __u16 range; + int r; + + /* + * Look for the interrupt number. + * On bare-metal, we know range 0 only contains the PSL + * interrupt so we could start counting at range 1 and initialize + * afu_irq at 1. + * In a guest, range 0 also contains AFU interrupts, so it must + * be counted for. Therefore we initialize afu_irq at 0 to take into + * account the PSL interrupt. + * + * For code-readability, it just seems easier to go over all + * the ranges on bare-metal and guest. The end result is the same. 
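+	 *
+	 * Worked example (illustrative numbers): with ranges
+	 * {offset = 0x100, range = 4} and {offset = 0x200, range = 2}, a
+	 * guest hwirq of 0x201 walks past the 4 interrupts of range 0, then
+	 * matches range 1 at irq_off = 1, giving afu_irq = 4 + 1 = 5.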
+ */
+	for (r = 0; r < CXL_IRQ_RANGES; r++) {
+		irq_off = hwirq - ctx->irqs.offset[r];
+		range = ctx->irqs.range[r];
+		if (irq_off >= 0 && irq_off < range) {
+			afu_irq += irq_off;
+			break;
+		}
+		afu_irq += range;
+	}
+	if (unlikely(r >= CXL_IRQ_RANGES)) {
+		WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
+		     ctx->pe, irq, hwirq);
+		return IRQ_HANDLED;
+	}
+
+	trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
+	pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
+	       afu_irq, ctx->pe, irq, hwirq);
+
+	if (unlikely(!ctx->irq_bitmap)) {
+		WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
+		return IRQ_HANDLED;
+	}
+	spin_lock(&ctx->lock);
+	set_bit(afu_irq - 1, ctx->irq_bitmap);
+	ctx->pending_irq = true;
+	spin_unlock(&ctx->lock);
+
+	wake_up_all(&ctx->wq);
+
+	return IRQ_HANDLED;
+}
+
+unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
+			 irq_handler_t handler, void *cookie, const char *name)
+{
+	unsigned int virq;
+	int result;
+
+	/* IRQ Domain? */
+	virq = irq_create_mapping(NULL, hwirq);
+	if (!virq) {
+		dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
+		return 0;
+	}
+
+	if (cxl_ops->setup_irq)
+		cxl_ops->setup_irq(adapter, hwirq, virq);
+
+	pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);
+
+	result = request_irq(virq, handler, 0, name, cookie);
+	if (result) {
+		dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
+		return 0;
+	}
+
+	return virq;
+}
+
+void cxl_unmap_irq(unsigned int virq, void *cookie)
+{
+	free_irq(virq, cookie);
+}
+
+int cxl_register_one_irq(struct cxl *adapter,
+			 irq_handler_t handler,
+			 void *cookie,
+			 irq_hw_number_t *dest_hwirq,
+			 unsigned int *dest_virq,
+			 const char *name)
+{
+	int hwirq, virq;
+
+	if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0)
+		return hwirq;
+
+	if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
+		goto err;
+
+	*dest_hwirq = hwirq;
+	*dest_virq = virq;
+
+	return 0;
+
+err:
+	cxl_ops->release_one_irq(adapter, hwirq);
+	return -ENOMEM;
+}
+
+void afu_irq_name_free(struct cxl_context *ctx)
+{
+	struct cxl_irq_name *irq_name, *tmp;
+
+	list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
+		kfree(irq_name->name);
+		list_del(&irq_name->list);
+		kfree(irq_name);
+	}
+}
+
+int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
+{
+	int rc, r, i, j = 1;
+	struct cxl_irq_name *irq_name;
+	int alloc_count;
+
+	/*
+	 * In native mode, range 0 is reserved for the multiplexed
+	 * PSL interrupt. It has been allocated when the AFU was initialized.
+	 *
+	 * In a guest, the PSL interrupt is not multiplexed, but per-context,
+	 * and is the first interrupt from range 0. It still needs to be
+	 * allocated, so bump the count by one.
+	 */
+	if (cpu_has_feature(CPU_FTR_HVMODE))
+		alloc_count = count;
+	else
+		alloc_count = count + 1;
+
+	if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
+							alloc_count)))
+		return rc;
+
+	if (cpu_has_feature(CPU_FTR_HVMODE)) {
+		/* Multiplexed PSL Interrupt */
+		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
+		ctx->irqs.range[0] = 1;
+	}
+
+	ctx->irq_count = count;
+	ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
+				  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
+	if (!ctx->irq_bitmap)
+		goto out;
+
+	/*
+	 * Allocate names first. If any fail, bail out before allocating
+	 * actual hardware IRQs.
+ */ + for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) { + for (i = 0; i < ctx->irqs.range[r]; i++) { + irq_name = kmalloc(sizeof(struct cxl_irq_name), + GFP_KERNEL); + if (!irq_name) + goto out; + irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i", + dev_name(&ctx->afu->dev), + ctx->pe, j); + if (!irq_name->name) { + kfree(irq_name); + goto out; + } + /* Add to tail so the next lookup gets the correct order */ + list_add_tail(&irq_name->list, &ctx->irq_names); + j++; + } + } + return 0; + +out: + cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter); + bitmap_free(ctx->irq_bitmap); + afu_irq_name_free(ctx); + return -ENOMEM; +} + +static void afu_register_hwirqs(struct cxl_context *ctx) +{ + irq_hw_number_t hwirq; + struct cxl_irq_name *irq_name; + int r, i; + irqreturn_t (*handler)(int irq, void *data); + + /* We've allocated all memory now, so let's do the irq allocations */ + irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list); + for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) { + hwirq = ctx->irqs.offset[r]; + for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { + if (r == 0 && i == 0) + /* + * The very first interrupt of range 0 is + * always the PSL interrupt, but we only + * need to connect a handler for guests, + * because there's one PSL interrupt per + * context. + * On bare-metal, the PSL interrupt is + * multiplexed and was set up when the AFU + * was configured. + */ + handler = cxl_ops->psl_interrupt; + else + handler = cxl_irq_afu; + cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx, + irq_name->name); + irq_name = list_next_entry(irq_name, list); + } + } +} + +int afu_register_irqs(struct cxl_context *ctx, u32 count) +{ + int rc; + + rc = afu_allocate_irqs(ctx, count); + if (rc) + return rc; + + afu_register_hwirqs(ctx); + return 0; +} + +void afu_release_irqs(struct cxl_context *ctx, void *cookie) +{ + irq_hw_number_t hwirq; + unsigned int virq; + int r, i; + + for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) { + hwirq = ctx->irqs.offset[r]; + for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { + virq = irq_find_mapping(NULL, hwirq); + if (virq) + cxl_unmap_irq(virq, cookie); + } + } + + afu_irq_name_free(ctx); + cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter); + + ctx->irq_count = 0; +} + +void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr) +{ + dev_crit(&afu->dev, + "PSL Slice error received. 
Check AFU for root cause.\n"); + dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); + if (serr & CXL_PSL_SERR_An_afuto) + dev_crit(&afu->dev, "AFU MMIO Timeout\n"); + if (serr & CXL_PSL_SERR_An_afudis) + dev_crit(&afu->dev, + "MMIO targeted Accelerator that was not enabled\n"); + if (serr & CXL_PSL_SERR_An_afuov) + dev_crit(&afu->dev, "AFU CTAG Overflow\n"); + if (serr & CXL_PSL_SERR_An_badsrc) + dev_crit(&afu->dev, "Bad Interrupt Source\n"); + if (serr & CXL_PSL_SERR_An_badctx) + dev_crit(&afu->dev, "Bad Context Handle\n"); + if (serr & CXL_PSL_SERR_An_llcmdis) + dev_crit(&afu->dev, "LLCMD to Disabled AFU\n"); + if (serr & CXL_PSL_SERR_An_llcmdto) + dev_crit(&afu->dev, "LLCMD Timeout to AFU\n"); + if (serr & CXL_PSL_SERR_An_afupar) + dev_crit(&afu->dev, "AFU MMIO Parity Error\n"); + if (serr & CXL_PSL_SERR_An_afudup) + dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n"); + if (serr & CXL_PSL_SERR_An_AE) + dev_crit(&afu->dev, + "AFU asserted JDONE with JERROR in AFU Directed Mode\n"); +} diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c new file mode 100644 index 000000000..43b312d06 --- /dev/null +++ b/drivers/misc/cxl/main.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2014 IBM Corp. + */ + +#include <linux/spinlock.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/idr.h> +#include <linux/pci.h> +#include <linux/sched/task.h> + +#include <asm/cputable.h> +#include <asm/mmu.h> +#include <misc/cxl-base.h> + +#include "cxl.h" +#include "trace.h" + +static DEFINE_SPINLOCK(adapter_idr_lock); +static DEFINE_IDR(cxl_adapter_idr); + +uint cxl_verbose; +module_param_named(verbose, cxl_verbose, uint, 0600); +MODULE_PARM_DESC(verbose, "Enable verbose dmesg output"); + +const struct cxl_backend_ops *cxl_ops; + +int cxl_afu_slbia(struct cxl_afu *afu) +{ + unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); + + pr_devel("cxl_afu_slbia issuing SLBIA command\n"); + cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL); + while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) { + if (time_after_eq(jiffies, timeout)) { + dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n"); + return -EBUSY; + } + /* If the adapter has gone down, we can assume that we + * will PERST it and that will invalidate everything. 
+ */ + if (!cxl_ops->link_ok(afu->adapter, afu)) + return -EIO; + cpu_relax(); + } + return 0; +} + +static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm) +{ + unsigned long flags; + + if (ctx->mm != mm) + return; + + pr_devel("%s matched mm - card: %i afu: %i pe: %i\n", __func__, + ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe); + + spin_lock_irqsave(&ctx->sste_lock, flags); + trace_cxl_slbia(ctx); + memset(ctx->sstp, 0, ctx->sst_size); + spin_unlock_irqrestore(&ctx->sste_lock, flags); + mb(); + cxl_afu_slbia(ctx->afu); +} + +static inline void cxl_slbia_core(struct mm_struct *mm) +{ + struct cxl *adapter; + struct cxl_afu *afu; + struct cxl_context *ctx; + int card, slice, id; + + pr_devel("%s called\n", __func__); + + spin_lock(&adapter_idr_lock); + idr_for_each_entry(&cxl_adapter_idr, adapter, card) { + /* XXX: Make this lookup faster with link from mm to ctx */ + spin_lock(&adapter->afu_list_lock); + for (slice = 0; slice < adapter->slices; slice++) { + afu = adapter->afu[slice]; + if (!afu || !afu->enabled) + continue; + rcu_read_lock(); + idr_for_each_entry(&afu->contexts_idr, ctx, id) + _cxl_slbia(ctx, mm); + rcu_read_unlock(); + } + spin_unlock(&adapter->afu_list_lock); + } + spin_unlock(&adapter_idr_lock); +} + +static struct cxl_calls cxl_calls = { + .cxl_slbia = cxl_slbia_core, + .owner = THIS_MODULE, +}; + +int cxl_alloc_sst(struct cxl_context *ctx) +{ + unsigned long vsid; + u64 ea_mask, size, sstp0, sstp1; + + sstp0 = 0; + sstp1 = 0; + + ctx->sst_size = PAGE_SIZE; + ctx->sst_lru = 0; + ctx->sstp = (struct cxl_sste *)get_zeroed_page(GFP_KERNEL); + if (!ctx->sstp) { + pr_err("cxl_alloc_sst: Unable to allocate segment table\n"); + return -ENOMEM; + } + pr_devel("SSTP allocated at 0x%p\n", ctx->sstp); + + vsid = get_kernel_vsid((u64)ctx->sstp, mmu_kernel_ssize) << 12; + + sstp0 |= (u64)mmu_kernel_ssize << CXL_SSTP0_An_B_SHIFT; + sstp0 |= (SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp) << 50; + + size = (((u64)ctx->sst_size >> 8) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT; + if (unlikely(size & ~CXL_SSTP0_An_SegTableSize_MASK)) { + WARN(1, "Impossible segment table size\n"); + return -EINVAL; + } + sstp0 |= size; + + if (mmu_kernel_ssize == MMU_SEGSIZE_256M) + ea_mask = 0xfffff00ULL; + else + ea_mask = 0xffffffff00ULL; + + sstp0 |= vsid >> (50-14); /* Top 14 bits of VSID */ + sstp1 |= (vsid << (64-(50-14))) & ~ea_mask; + sstp1 |= (u64)ctx->sstp & ea_mask; + sstp1 |= CXL_SSTP1_An_V; + + pr_devel("Looked up %#llx: slbfee. 
%#llx (ssize: %x, vsid: %#lx), copied to SSTP0: %#llx, SSTP1: %#llx\n", + (u64)ctx->sstp, (u64)ctx->sstp & ESID_MASK, mmu_kernel_ssize, vsid, sstp0, sstp1); + + /* Store the calculated sstp hardware pointers for use later */ + ctx->sstp0 = sstp0; + ctx->sstp1 = sstp1; + + return 0; +} + +/* print buffer content as integers when debugging */ +void cxl_dump_debug_buffer(void *buf, size_t buf_len) +{ +#ifdef DEBUG + int i, *ptr; + + /* + * We want to group up to 4 integers per line, which means they + * need to be in the same pr_devel() statement + */ + ptr = (int *) buf; + for (i = 0; i * 4 < buf_len; i += 4) { + if ((i + 3) * 4 < buf_len) + pr_devel("%.8x %.8x %.8x %.8x\n", ptr[i], ptr[i + 1], + ptr[i + 2], ptr[i + 3]); + else if ((i + 2) * 4 < buf_len) + pr_devel("%.8x %.8x %.8x\n", ptr[i], ptr[i + 1], + ptr[i + 2]); + else if ((i + 1) * 4 < buf_len) + pr_devel("%.8x %.8x\n", ptr[i], ptr[i + 1]); + else + pr_devel("%.8x\n", ptr[i]); + } +#endif /* DEBUG */ +} + +/* Find a CXL adapter by its number and increase its refcount */ +struct cxl *get_cxl_adapter(int num) +{ + struct cxl *adapter; + + spin_lock(&adapter_idr_lock); + if ((adapter = idr_find(&cxl_adapter_idr, num))) + get_device(&adapter->dev); + spin_unlock(&adapter_idr_lock); + + return adapter; +} + +static int cxl_alloc_adapter_nr(struct cxl *adapter) +{ + int i; + + idr_preload(GFP_KERNEL); + spin_lock(&adapter_idr_lock); + i = idr_alloc(&cxl_adapter_idr, adapter, 0, 0, GFP_NOWAIT); + spin_unlock(&adapter_idr_lock); + idr_preload_end(); + if (i < 0) + return i; + + adapter->adapter_num = i; + + return 0; +} + +void cxl_remove_adapter_nr(struct cxl *adapter) +{ + idr_remove(&cxl_adapter_idr, adapter->adapter_num); +} + +struct cxl *cxl_alloc_adapter(void) +{ + struct cxl *adapter; + + if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL))) + return NULL; + + spin_lock_init(&adapter->afu_list_lock); + + if (cxl_alloc_adapter_nr(adapter)) + goto err1; + + if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)) + goto err2; + + /* start with context lock taken */ + atomic_set(&adapter->contexts_num, -1); + + return adapter; +err2: + cxl_remove_adapter_nr(adapter); +err1: + kfree(adapter); + return NULL; +} + +struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice) +{ + struct cxl_afu *afu; + + if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL))) + return NULL; + + afu->adapter = adapter; + afu->dev.parent = &adapter->dev; + afu->dev.release = cxl_ops->release_afu; + afu->slice = slice; + idr_init(&afu->contexts_idr); + mutex_init(&afu->contexts_lock); + spin_lock_init(&afu->afu_cntl_lock); + atomic_set(&afu->configured_state, -1); + afu->prefault_mode = CXL_PREFAULT_NONE; + afu->irqs_max = afu->adapter->user_irqs; + + return afu; +} + +int cxl_afu_select_best_mode(struct cxl_afu *afu) +{ + if (afu->modes_supported & CXL_MODE_DIRECTED) + return cxl_ops->afu_activate_mode(afu, CXL_MODE_DIRECTED); + + if (afu->modes_supported & CXL_MODE_DEDICATED) + return cxl_ops->afu_activate_mode(afu, CXL_MODE_DEDICATED); + + dev_warn(&afu->dev, "No supported programming modes available\n"); + /* We don't fail this so the user can inspect sysfs */ + return 0; +}
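+/*
+ * The adapter's contexts_num atomic used below doubles as a usage count
+ * and a lock: -1 means "locked, no new contexts" (the state adapters
+ * start in, see cxl_alloc_adapter() above), 0 means unlocked with no
+ * active contexts, and N > 0 means N active contexts. A minimal
+ * stand-alone sketch of the same scheme using C11 atomics follows
+ * (illustrative only, not driver code):
+ */
+#if 0 /* illustrative sketch only */
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stdio.h>
+
+static atomic_int contexts_num = -1; /* adapters start locked */
+
+static bool ctx_get(void)
+{
+	int old = atomic_load(&contexts_num);
+
+	/* increment unless negative, like atomic_inc_unless_negative() */
+	while (old >= 0)
+		if (atomic_compare_exchange_weak(&contexts_num, &old, old + 1))
+			return true;
+	return false; /* adapter is locked */
+}
+
+static void ctx_put(void)
+{
+	int old = atomic_load(&contexts_num);
+
+	/* decrement if positive, like atomic_dec_if_positive() */
+	while (old > 0 &&
+	       !atomic_compare_exchange_weak(&contexts_num, &old, old - 1))
+		;
+}
+
+static bool ctx_lock(void)
+{
+	int expected = 0; /* only lockable when no contexts are active */
+
+	return atomic_compare_exchange_strong(&contexts_num, &expected, -1);
+}
+
+static void ctx_unlock(void)
+{
+	int expected = -1;
+
+	atomic_compare_exchange_strong(&contexts_num, &expected, 0);
+}
+
+int main(void)
+{
+	printf("%d\n", ctx_get());  /* 0: still locked after allocation */
+	ctx_unlock();
+	printf("%d\n", ctx_get());  /* 1: one context now active */
+	printf("%d\n", ctx_lock()); /* 0: can't lock with a context active */
+	ctx_put();
+	printf("%d\n", ctx_lock()); /* 1 */
+	return 0;
+}
+#endif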
+int cxl_adapter_context_get(struct cxl *adapter) +{ + int rc; + + rc = atomic_inc_unless_negative(&adapter->contexts_num); + return rc ? 0 : -EBUSY; +} + +void cxl_adapter_context_put(struct cxl *adapter) +{ + atomic_dec_if_positive(&adapter->contexts_num); +} + +int cxl_adapter_context_lock(struct cxl *adapter) +{ + int rc; + /* no active contexts -> contexts_num == 0 */ + rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1); + return rc ? -EBUSY : 0; +} + +void cxl_adapter_context_unlock(struct cxl *adapter) +{ + int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0); + + /* + * contexts lock taken -> contexts_num == -1 + * If not true then show a warning and force-reset the lock. + * This will happen when context_unlock was requested without + * doing a context_lock. + */ + if (val != -1) { + atomic_set(&adapter->contexts_num, 0); + WARN(1, "Adapter context unlocked with %d active contexts", + val); + } +} + +static int __init init_cxl(void) +{ + int rc = 0; + + if (!tlbie_capable) + return -EINVAL; + + if ((rc = cxl_file_init())) + return rc; + + cxl_debugfs_init(); + + /* + * We don't register the callback on P9; the SLB callback is only + * used for the PSL8 MMU and CX4. + */ + if (cxl_is_power8()) { + rc = register_cxl_calls(&cxl_calls); + if (rc) + goto err; + } + + if (cpu_has_feature(CPU_FTR_HVMODE)) { + cxl_ops = &cxl_native_ops; + rc = pci_register_driver(&cxl_pci_driver); + } +#ifdef CONFIG_PPC_PSERIES + else { + cxl_ops = &cxl_guest_ops; + rc = platform_driver_register(&cxl_of_driver); + } +#endif + if (rc) + goto err1; + + return 0; +err1: + if (cxl_is_power8()) + unregister_cxl_calls(&cxl_calls); +err: + cxl_debugfs_exit(); + cxl_file_exit(); + + return rc; +} + +static void exit_cxl(void) +{ + if (cpu_has_feature(CPU_FTR_HVMODE)) + pci_unregister_driver(&cxl_pci_driver); +#ifdef CONFIG_PPC_PSERIES + else + platform_driver_unregister(&cxl_of_driver); +#endif + + cxl_debugfs_exit(); + cxl_file_exit(); + if (cxl_is_power8()) + unregister_cxl_calls(&cxl_calls); + idr_destroy(&cxl_adapter_idr); +} + +module_init(init_cxl); +module_exit(exit_cxl); + +MODULE_DESCRIPTION("IBM Coherent Accelerator"); +MODULE_AUTHOR("Ian Munsie <imunsie@au1.ibm.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c new file mode 100644 index 000000000..1a7f22836 --- /dev/null +++ b/drivers/misc/cxl/native.c @@ -0,0 +1,1596 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2014 IBM Corp. + */ + +#include <linux/spinlock.h> +#include <linux/sched.h> +#include <linux/sched/clock.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/mm.h> +#include <linux/uaccess.h> +#include <linux/delay.h> +#include <asm/synch.h> +#include <asm/switch_to.h> +#include <misc/cxl-base.h> + +#include "cxl.h" +#include "trace.h" + +static int afu_control(struct cxl_afu *afu, u64 command, u64 clear, + u64 result, u64 mask, bool enabled) +{ + u64 AFU_Cntl; + unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); + int rc = 0; + + spin_lock(&afu->afu_cntl_lock); + pr_devel("AFU command starting: %llx\n", command); + + trace_cxl_afu_ctrl(afu, command); + + AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); + cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command); + + AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); + while ((AFU_Cntl & mask) != result) { + if (time_after_eq(jiffies, timeout)) { + dev_warn(&afu->dev, "WARNING: AFU control timed out!\n"); + rc = -EBUSY; + goto out; + } + + if (!cxl_ops->link_ok(afu->adapter, afu)) { + afu->enabled = enabled; + rc = -EIO; + goto out; + } + + pr_devel_ratelimited("AFU control... 
(0x%016llx)\n", + AFU_Cntl | command); + cpu_relax(); + AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); + } + + if (AFU_Cntl & CXL_AFU_Cntl_An_RA) { + /* + * Workaround for a bug in the XSL used in the Mellanox CX4 + * that fails to clear the RA bit after an AFU reset, + * preventing subsequent AFU resets from working. + */ + cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA); + } + + pr_devel("AFU command complete: %llx\n", command); + afu->enabled = enabled; +out: + trace_cxl_afu_ctrl_done(afu, command, rc); + spin_unlock(&afu->afu_cntl_lock); + + return rc; +} + +static int afu_enable(struct cxl_afu *afu) +{ + pr_devel("AFU enable request\n"); + + return afu_control(afu, CXL_AFU_Cntl_An_E, 0, + CXL_AFU_Cntl_An_ES_Enabled, + CXL_AFU_Cntl_An_ES_MASK, true); +} + +int cxl_afu_disable(struct cxl_afu *afu) +{ + pr_devel("AFU disable request\n"); + + return afu_control(afu, 0, CXL_AFU_Cntl_An_E, + CXL_AFU_Cntl_An_ES_Disabled, + CXL_AFU_Cntl_An_ES_MASK, false); +} + +/* This will disable as well as reset */ +static int native_afu_reset(struct cxl_afu *afu) +{ + int rc; + u64 serr; + + pr_devel("AFU reset request\n"); + + rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0, + CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled, + CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK, + false); + + /* + * Re-enable any masked interrupts when the AFU is not + * activated to avoid side effects after attaching a process + * in dedicated mode. + */ + if (afu->current_mode == 0) { + serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); + serr &= ~CXL_PSL_SERR_An_IRQ_MASKS; + cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); + } + + return rc; +} + +static int native_afu_check_and_enable(struct cxl_afu *afu) +{ + if (!cxl_ops->link_ok(afu->adapter, afu)) { + WARN(1, "Refusing to enable afu while link down!\n"); + return -EIO; + } + if (afu->enabled) + return 0; + return afu_enable(afu); +} + +int cxl_psl_purge(struct cxl_afu *afu) +{ + u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An); + u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); + u64 dsisr, dar; + u64 start, end; + u64 trans_fault = 0x0ULL; + unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); + int rc = 0; + + trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc); + + pr_devel("PSL purge request\n"); + + if (cxl_is_power8()) + trans_fault = CXL_PSL_DSISR_TRANS; + if (cxl_is_power9()) + trans_fault = CXL_PSL9_DSISR_An_TF; + + if (!cxl_ops->link_ok(afu->adapter, afu)) { + dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n"); + rc = -EIO; + goto out; + } + + if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) { + WARN(1, "psl_purge request while AFU not disabled!\n"); + cxl_afu_disable(afu); + } + + cxl_p1n_write(afu, CXL_PSL_SCNTL_An, + PSL_CNTL | CXL_PSL_SCNTL_An_Pc); + start = local_clock(); + PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An); + while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK) + == CXL_PSL_SCNTL_An_Ps_Pending) { + if (time_after_eq(jiffies, timeout)) { + dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n"); + rc = -EBUSY; + goto out; + } + if (!cxl_ops->link_ok(afu->adapter, afu)) { + rc = -EIO; + goto out; + } + + dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); + pr_devel_ratelimited("PSL purging... 
PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n", + PSL_CNTL, dsisr); + + if (dsisr & trans_fault) { + dar = cxl_p2n_read(afu, CXL_PSL_DAR_An); + dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", + dsisr, dar); + cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); + } else if (dsisr) { + dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", + dsisr); + cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); + } else { + cpu_relax(); + } + PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An); + } + end = local_clock(); + pr_devel("PSL purged in %lld ns\n", end - start); + + cxl_p1n_write(afu, CXL_PSL_SCNTL_An, + PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc); +out: + trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc); + return rc; +} + +static int spa_max_procs(int spa_size) +{ + /* + * From the CAIA: + * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255 + * Most of that junk is really just an overly-complicated way of saying + * the last 256 bytes are __aligned(128), so it's really: + * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255 + * and + * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1 + * so + * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256 + * Ignore the alignment (which is safe in this case as long as we are + * careful with our rounding) and solve for n: + */ + return ((spa_size / 8) - 96) / 17; +} + +static int cxl_alloc_spa(struct cxl_afu *afu, int mode) +{ + unsigned spa_size; + + /* Work out how many pages to allocate */ + afu->native->spa_order = -1; + do { + afu->native->spa_order++; + spa_size = (1 << afu->native->spa_order) * PAGE_SIZE; + + if (spa_size > 0x100000) { + dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n", + afu->native->spa_max_procs, afu->native->spa_size); + if (mode != CXL_MODE_DEDICATED) + afu->num_procs = afu->native->spa_max_procs; + break; + } + + afu->native->spa_size = spa_size; + afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size); + } while (afu->native->spa_max_procs < afu->num_procs); + + if (!(afu->native->spa = (struct cxl_process_element *) + __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) { + pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n"); + return -ENOMEM; + } + pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n", + 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs); + + return 0; +} + +static void attach_spa(struct cxl_afu *afu) +{ + u64 spap; + + afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa + + ((afu->native->spa_max_procs + 3) * 128)); + + spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr; + spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size; + spap |= CXL_PSL_SPAP_V; + pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", + afu->native->spa, afu->native->spa_max_procs, + afu->native->sw_command_status, spap); + cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap); +} + +static inline void detach_spa(struct cxl_afu *afu) +{ + cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0); +} + +void cxl_release_spa(struct cxl_afu *afu) +{ + if (afu->native->spa) { + free_pages((unsigned long) afu->native->spa, + afu->native->spa_order); + afu->native->spa = NULL; + } +} + +/* + * Invalidation of all ERAT entries is no longer required by CAIA2. Use + * only for debug. + */
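+/*
+ * A quick stand-alone check of the spa_max_procs() arithmetic above
+ * (illustrative user-space C, not driver code). The closed form
+ * ((spa_size / 8) - 96) / 17 comes from solving 136*n + 768 <= spa_size,
+ * using the conservative size bound ((n+4) * 128) + (n*8) + 256 from the
+ * comment in spa_max_procs().
+ */
+#if 0 /* illustrative sketch only */
+#include <assert.h>
+#include <stdio.h>
+
+static int spa_max_procs(int spa_size)
+{
+	return ((spa_size / 8) - 96) / 17;
+}
+
+/* Conservative space needed for n processes: ((n+4)*128) + n*8 + 256 */
+static int spa_bytes(int n)
+{
+	return (n + 4) * 128 + n * 8 + 256;
+}
+
+int main(void)
+{
+	int size;
+
+	for (size = 4096; size <= 0x100000; size *= 2) {
+		int n = spa_max_procs(size);
+
+		assert(spa_bytes(n) <= size);    /* n processes fit */
+		assert(spa_bytes(n + 1) > size); /* and n is maximal */
+		printf("spa_size %7d -> max_procs %d\n", size, n);
+	}
+	return 0;
+}
+#endif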
+int cxl_invalidate_all_psl9(struct cxl *adapter) +{ + unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); + u64 ierat; + + pr_devel("CXL adapter - invalidation of all ERAT entries\n"); + + /* Invalidates all ERAT entries for Radix or HPT */ + ierat = CXL_XSL9_IERAT_IALL; + if (radix_enabled()) + ierat |= CXL_XSL9_IERAT_INVR; + cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat); + + while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) { + if (time_after_eq(jiffies, timeout)) { + dev_warn(&adapter->dev, + "WARNING: CXL adapter invalidation of all ERAT entries timed out!\n"); + return -EBUSY; + } + if (!cxl_ops->link_ok(adapter, NULL)) + return -EIO; + cpu_relax(); + } + return 0; +} + +int cxl_invalidate_all_psl8(struct cxl *adapter) +{ + unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); + + pr_devel("CXL adapter wide TLBIA & SLBIA\n"); + + cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A); + + cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL); + while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) { + if (time_after_eq(jiffies, timeout)) { + dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n"); + return -EBUSY; + } + if (!cxl_ops->link_ok(adapter, NULL)) + return -EIO; + cpu_relax(); + } + + cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL); + while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) { + if (time_after_eq(jiffies, timeout)) { + dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n"); + return -EBUSY; + } + if (!cxl_ops->link_ok(adapter, NULL)) + return -EIO; + cpu_relax(); + } + return 0; +} + +int cxl_data_cache_flush(struct cxl *adapter) +{ + u64 reg; + unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); + + /* + * Do a data cache flush only if the data cache is available. + * PSL9D has no data cache, so a flush operation there would + * time out. + */ + if (adapter->native->no_data_cache) { + pr_devel("No PSL data cache. Ignoring cache flush req.\n"); + return 0; + } + + pr_devel("Flushing data cache\n"); + reg = cxl_p1_read(adapter, CXL_PSL_Control); + reg |= CXL_PSL_Control_Fr; + cxl_p1_write(adapter, CXL_PSL_Control, reg); + + reg = cxl_p1_read(adapter, CXL_PSL_Control); + while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) { + if (time_after_eq(jiffies, timeout)) { + dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n"); + return -EBUSY; + } + + if (!cxl_ops->link_ok(adapter, NULL)) { + dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n"); + return -EIO; + } + cpu_relax(); + reg = cxl_p1_read(adapter, CXL_PSL_Control); + } + + reg &= ~CXL_PSL_Control_Fr; + cxl_p1_write(adapter, CXL_PSL_Control, reg); + return 0; +} + +static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1) +{ + int rc; + + /* 1. Disable SSTP by writing 0 to SSTP1[V] */ + cxl_p2n_write(afu, CXL_SSTP1_An, 0); + + /* 2. Invalidate all SLB entries */ + if ((rc = cxl_afu_slbia(afu))) + return rc; + + /* 3. Set SSTP0_An */ + cxl_p2n_write(afu, CXL_SSTP0_An, sstp0); + + /* 4. Set SSTP1_An */ + cxl_p2n_write(afu, CXL_SSTP1_An, sstp1); + + return 0; +}
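+/*
+ * cxl_afu_slbia(), afu_control() and the two invalidation helpers above
+ * all follow the same poll-until-idle idiom: kick the operation, then
+ * spin on a status register with a jiffies-based timeout and a
+ * link_ok() bail-out. A stand-alone distillation of that pattern
+ * follows (hypothetical read_hw_status() stub, wall-clock time instead
+ * of jiffies; illustrative only, not driver code):
+ */
+#if 0 /* illustrative sketch only */
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <time.h>
+
+#define POLL_TIMEOUT_SEC 5
+#define STATUS_BUSY 0x1u
+
+static int reads;
+
+static uint64_t read_hw_status(void)
+{
+	/* stub standing in for an MMIO read: goes idle after three polls */
+	return ++reads < 3 ? STATUS_BUSY : 0;
+}
+
+static int wait_until_idle(void)
+{
+	time_t timeout = time(NULL) + POLL_TIMEOUT_SEC;
+
+	while (read_hw_status() & STATUS_BUSY) {
+		if (time(NULL) >= timeout)
+			return -EBUSY; /* hardware never went idle */
+		/* the driver also re-checks link_ok() and calls
+		 * cpu_relax() at this point in the loop */
+	}
+	return 0;
+}
+
+int main(void)
+{
+	printf("wait_until_idle() = %d after %d reads\n",
+	       wait_until_idle(), reads);
+	return 0;
+}
+#endif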
+/* Using the per-slice version may improve performance here (i.e. SLBIA_An) */ +static void slb_invalid(struct cxl_context *ctx) +{ + struct cxl *adapter = ctx->afu->adapter; + u64 slbia; + + WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex)); + + cxl_p1_write(adapter, CXL_PSL_LBISEL, + ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) | + be32_to_cpu(ctx->elem->lpid)); + cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID); + + while (1) { + if (!cxl_ops->link_ok(adapter, NULL)) + break; + slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA); + if (!(slbia & CXL_TLB_SLB_P)) + break; + cpu_relax(); + } +} + +static int do_process_element_cmd(struct cxl_context *ctx, + u64 cmd, u64 pe_state) +{ + u64 state; + unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); + int rc = 0; + + trace_cxl_llcmd(ctx, cmd); + + WARN_ON(!ctx->afu->enabled); + + ctx->elem->software_state = cpu_to_be32(pe_state); + smp_wmb(); + *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe); + smp_mb(); + cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe); + while (1) { + if (time_after_eq(jiffies, timeout)) { + dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n"); + rc = -EBUSY; + goto out; + } + if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { + dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n"); + rc = -EIO; + goto out; + } + state = be64_to_cpup(ctx->afu->native->sw_command_status); + if (state == ~0ULL) { + pr_err("cxl: Error adding process element to AFU\n"); + rc = -1; + goto out; + } + if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) == + (cmd | (cmd >> 16) | ctx->pe)) + break; + /* + * The command won't finish in the PSL if there are + * outstanding DSIs, so we need to yield here so that + * any outstanding DSIs can be serviced. Tuning + * possibility: we could wait for a while before + * scheduling. + */ + schedule(); + + } +out: + trace_cxl_llcmd_done(ctx, cmd, rc); + return rc; +} + +static int add_process_element(struct cxl_context *ctx) +{ + int rc = 0; + + mutex_lock(&ctx->afu->native->spa_mutex); + pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe); + if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V))) + ctx->pe_inserted = true; + pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe); + mutex_unlock(&ctx->afu->native->spa_mutex); + return rc; +} + +static int terminate_process_element(struct cxl_context *ctx) +{ + int rc = 0; + + /* fast path terminate if it's already invalid */ + if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V))) + return rc; + + mutex_lock(&ctx->afu->native->spa_mutex); + pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe); + /* We could be asked to terminate when the hw is down. That + * should always succeed: it's not running if the hw has gone + * away and is being reset. + */ + if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) + rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE, + CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T); + ctx->elem->software_state = 0; /* Remove Valid bit */ + pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe); + mutex_unlock(&ctx->afu->native->spa_mutex); + return rc; +} + +static int remove_process_element(struct cxl_context *ctx) +{ + int rc = 0; + + mutex_lock(&ctx->afu->native->spa_mutex); + pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe); + + /* We could be asked to remove when the hw is down. Again, if + * the hw is down, the PE is gone, so we succeed. 
+ */ + if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) + rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0); + + if (!rc) + ctx->pe_inserted = false; + if (cxl_is_power8()) + slb_invalid(ctx); + pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe); + mutex_unlock(&ctx->afu->native->spa_mutex); + + return rc; +} + +void cxl_assign_psn_space(struct cxl_context *ctx) +{ + if (!ctx->afu->pp_size || ctx->master) { + ctx->psn_phys = ctx->afu->psn_phys; + ctx->psn_size = ctx->afu->adapter->ps_size; + } else { + ctx->psn_phys = ctx->afu->psn_phys + + (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe); + ctx->psn_size = ctx->afu->pp_size; + } +} + +static int activate_afu_directed(struct cxl_afu *afu) +{ + int rc; + + dev_info(&afu->dev, "Activating AFU directed mode\n"); + + afu->num_procs = afu->max_procs_virtualised; + if (afu->native->spa == NULL) { + if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED)) + return -ENOMEM; + } + attach_spa(afu); + + cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU); + if (cxl_is_power8()) + cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL); + cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L); + + afu->current_mode = CXL_MODE_DIRECTED; + + if ((rc = cxl_chardev_m_afu_add(afu))) + return rc; + + if ((rc = cxl_sysfs_afu_m_add(afu))) + goto err; + + if ((rc = cxl_chardev_s_afu_add(afu))) + goto err1; + + return 0; +err1: + cxl_sysfs_afu_m_remove(afu); +err: + cxl_chardev_afu_remove(afu); + return rc; +} + +#ifdef CONFIG_CPU_LITTLE_ENDIAN +#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE) +#else +#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE)) +#endif + +u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9) +{ + u64 sr = 0; + + set_endian(sr); + if (master) + sr |= CXL_PSL_SR_An_MP; + if (mfspr(SPRN_LPCR) & LPCR_TC) + sr |= CXL_PSL_SR_An_TC; + + if (kernel) { + if (!real_mode) + sr |= CXL_PSL_SR_An_R; + sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV; + } else { + sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R; + if (radix_enabled()) + sr |= CXL_PSL_SR_An_HV; + else + sr &= ~(CXL_PSL_SR_An_HV); + if (!test_tsk_thread_flag(current, TIF_32BIT)) + sr |= CXL_PSL_SR_An_SF; + } + if (p9) { + if (radix_enabled()) + sr |= CXL_PSL_SR_An_XLAT_ror; + else + sr |= CXL_PSL_SR_An_XLAT_hpt; + } + return sr; +} + +static u64 calculate_sr(struct cxl_context *ctx) +{ + return cxl_calculate_sr(ctx->master, ctx->kernel, false, + cxl_is_power9()); +} + +static void update_ivtes_directed(struct cxl_context *ctx) +{ + bool need_update = (ctx->status == STARTED); + int r; + + if (need_update) { + WARN_ON(terminate_process_element(ctx)); + WARN_ON(remove_process_element(ctx)); + } + + for (r = 0; r < CXL_IRQ_RANGES; r++) { + ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]); + ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]); + } + + /* + * Theoretically we could use the update llcmd, instead of a + * terminate/remove/add (or if an atomic update was required we could + * do a suspend/update/resume), however it seems there might be issues + * with the update llcmd on some cards (including those using an XSL on + * an ASIC) so for now it's safest to go with the commands that are + * known to work. In the future if we come across a situation where the + * card may be performing transactions using the same PE while we are + * doing this update we might need to revisit this. 
+ */ + if (need_update) + WARN_ON(add_process_element(ctx)); +} + +static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr) +{ + u32 pid; + int rc; + + cxl_assign_psn_space(ctx); + + ctx->elem->ctxtime = 0; /* disable */ + ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID)); + ctx->elem->haurp = 0; /* disable */ + + if (ctx->kernel) + pid = 0; + else { + if (ctx->mm == NULL) { + pr_devel("%s: unable to get mm for pe=%d pid=%i\n", + __func__, ctx->pe, pid_nr(ctx->pid)); + return -EINVAL; + } + pid = ctx->mm->context.id; + } + + /* Assign a unique TIDR (thread id) for the current thread */ + if (!(ctx->tidr) && (ctx->assign_tidr)) { + rc = set_thread_tidr(current); + if (rc) + return -ENODEV; + ctx->tidr = current->thread.tidr; + pr_devel("%s: current tidr: %d\n", __func__, ctx->tidr); + } + + ctx->elem->common.tid = cpu_to_be32(ctx->tidr); + ctx->elem->common.pid = cpu_to_be32(pid); + + ctx->elem->sr = cpu_to_be64(calculate_sr(ctx)); + + ctx->elem->common.csrp = 0; /* disable */ + + cxl_prefault(ctx, wed); + + /* + * Ensure we have the multiplexed PSL interrupt set up to take faults + * for kernel contexts that may not have allocated any AFU IRQs at all: + */ + if (ctx->irqs.range[0] == 0) { + ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq; + ctx->irqs.range[0] = 1; + } + + ctx->elem->common.amr = cpu_to_be64(amr); + ctx->elem->common.wed = cpu_to_be64(wed); + + return 0; +} + +int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr) +{ + int result; + + /* fill the process element entry */ + result = process_element_entry_psl9(ctx, wed, amr); + if (result) + return result; + + update_ivtes_directed(ctx); + + /* first guy needs to enable */ + result = cxl_ops->afu_check_and_enable(ctx->afu); + if (result) + return result; + + return add_process_element(ctx); +} + +int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr) +{ + u32 pid; + int result; + + cxl_assign_psn_space(ctx); + + ctx->elem->ctxtime = 0; /* disable */ + ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID)); + ctx->elem->haurp = 0; /* disable */ + ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1)); + + pid = current->pid; + if (ctx->kernel) + pid = 0; + ctx->elem->common.tid = 0; + ctx->elem->common.pid = cpu_to_be32(pid); + + ctx->elem->sr = cpu_to_be64(calculate_sr(ctx)); + + ctx->elem->common.csrp = 0; /* disable */ + ctx->elem->common.u.psl8.aurp0 = 0; /* disable */ + ctx->elem->common.u.psl8.aurp1 = 0; /* disable */ + + cxl_prefault(ctx, wed); + + ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0); + ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1); + + /* + * Ensure we have the multiplexed PSL interrupt set up to take faults + * for kernel contexts that may not have allocated any AFU IRQs at all: + */ + if (ctx->irqs.range[0] == 0) { + ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq; + ctx->irqs.range[0] = 1; + } + + update_ivtes_directed(ctx); + + ctx->elem->common.amr = cpu_to_be64(amr); + ctx->elem->common.wed = cpu_to_be64(wed); + + /* first guy needs to enable */ + if ((result = cxl_ops->afu_check_and_enable(ctx->afu))) + return result; + + return add_process_element(ctx); +} + +static int deactivate_afu_directed(struct cxl_afu *afu) +{ + dev_info(&afu->dev, "Deactivating AFU directed mode\n"); + + afu->current_mode = 0; + afu->num_procs = 0; + + cxl_sysfs_afu_m_remove(afu); + cxl_chardev_afu_remove(afu); + + /* + * The CAIA section 2.2.1 indicates that the procedure for starting and + * stopping an AFU in AFU directed mode is AFU specific, 
which is not + * ideal since this code is generic and with one exception has no + * knowledge of the AFU. This is in contrast to the procedure for + * disabling a dedicated process AFU, which is documented to just + * require a reset. The architecture does indicate that both an AFU + * reset and an AFU disable should result in the AFU being disabled and + * we do both followed by a PSL purge for safety. + * + * Notably we used to have some issues with the disable sequence on PSL + * cards, which is why we ended up using this heavy weight procedure in + * the first place, however a bug was discovered that had rendered the + * disable operation ineffective, so it is conceivable that was the + * sole explanation for those difficulties. Careful regression testing + * is recommended if anyone attempts to remove or reorder these + * operations. + * + * The XSL on the Mellanox CX4 behaves a little differently from the + * PSL based cards and will time out an AFU reset if the AFU is still + * enabled. That card is special in that we do have a means to identify + * it from this code, so in that case we skip the reset and just use a + * disable/purge to avoid the timeout and corresponding noise in the + * kernel log. + */ + if (afu->adapter->native->sl_ops->needs_reset_before_disable) + cxl_ops->afu_reset(afu); + cxl_afu_disable(afu); + cxl_psl_purge(afu); + + return 0; +} + +int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu) +{ + dev_info(&afu->dev, "Activating dedicated process mode\n"); + + /* + * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the + * XSL and AFU are programmed to work with a single context. + * The context information should be configured in the SPA area + * index 0 (so PSL_SPAP must be configured before enabling the + * AFU). 
+ */ + afu->num_procs = 1; + if (afu->native->spa == NULL) { + if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED)) + return -ENOMEM; + } + attach_spa(afu); + + cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process); + cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L); + + afu->current_mode = CXL_MODE_DEDICATED; + + return cxl_chardev_d_afu_add(afu); +} + +int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu) +{ + dev_info(&afu->dev, "Activating dedicated process mode\n"); + + cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process); + + cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */ + cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0); /* disable */ + cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL); + cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID)); + cxl_p1n_write(afu, CXL_HAURP_An, 0); /* disable */ + cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1)); + + cxl_p2n_write(afu, CXL_CSRP_An, 0); /* disable */ + cxl_p2n_write(afu, CXL_AURP0_An, 0); /* disable */ + cxl_p2n_write(afu, CXL_AURP1_An, 0); /* disable */ + + afu->current_mode = CXL_MODE_DEDICATED; + afu->num_procs = 1; + + return cxl_chardev_d_afu_add(afu); +} + +void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx) +{ + int r; + + for (r = 0; r < CXL_IRQ_RANGES; r++) { + ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]); + ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]); + } +} + +void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx) +{ + struct cxl_afu *afu = ctx->afu; + + cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, + (((u64)ctx->irqs.offset[0] & 0xffff) << 48) | + (((u64)ctx->irqs.offset[1] & 0xffff) << 32) | + (((u64)ctx->irqs.offset[2] & 0xffff) << 16) | + ((u64)ctx->irqs.offset[3] & 0xffff)); + cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64) + (((u64)ctx->irqs.range[0] & 0xffff) << 48) | + (((u64)ctx->irqs.range[1] & 0xffff) << 32) | + (((u64)ctx->irqs.range[2] & 0xffff) << 16) | + ((u64)ctx->irqs.range[3] & 0xffff)); +}
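+/*
+ * cxl_update_dedicated_ivtes_psl8() above packs the four 16-bit IVTE
+ * offsets (and, likewise, ranges) into a single 64-bit register image,
+ * range 0 in the top 16 bits. A stand-alone round-trip sketch of that
+ * packing (illustrative only, not driver code):
+ */
+#if 0 /* illustrative sketch only */
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+
+static uint64_t pack_ivte(const uint16_t v[4])
+{
+	return ((uint64_t)v[0] << 48) | ((uint64_t)v[1] << 32) |
+	       ((uint64_t)v[2] << 16) | (uint64_t)v[3];
+}
+
+static uint16_t unpack_ivte(uint64_t reg, int r)
+{
+	return (reg >> (48 - 16 * r)) & 0xffff;
+}
+
+int main(void)
+{
+	uint16_t offsets[4] = { 16, 32, 0, 0 };
+	uint64_t reg = pack_ivte(offsets);
+	int r;
+
+	printf("IVTE image: 0x%016llx\n", (unsigned long long)reg);
+	for (r = 0; r < 4; r++)
+		assert(unpack_ivte(reg, r) == offsets[r]);
+	return 0;
+}
+#endif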
+int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr) +{ + struct cxl_afu *afu = ctx->afu; + int result; + + /* fill the process element entry */ + result = process_element_entry_psl9(ctx, wed, amr); + if (result) + return result; + + if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes) + afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx); + + ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V); + /* + * Ideally we should do a wmb() here to make sure the changes to the + * PE are visible to the card before we call afu_enable. + * On ppc64 though all mmios are preceded by a 'sync' instruction, + * hence we don't need one here. + */ + + result = cxl_ops->afu_reset(afu); + if (result) + return result; + + return afu_enable(afu); +} + +int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr) +{ + struct cxl_afu *afu = ctx->afu; + u64 pid; + int rc; + + pid = (u64)current->pid << 32; + if (ctx->kernel) + pid = 0; + cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid); + + cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx)); + + if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1))) + return rc; + + cxl_prefault(ctx, wed); + + if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes) + afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx); + + cxl_p2n_write(afu, CXL_PSL_AMR_An, amr); + + /* master only context for dedicated */ + cxl_assign_psn_space(ctx); + + if ((rc = cxl_ops->afu_reset(afu))) + return rc; + + cxl_p2n_write(afu, CXL_PSL_WED_An, wed); + + return afu_enable(afu); +} + +static int deactivate_dedicated_process(struct cxl_afu *afu) +{ + dev_info(&afu->dev, "Deactivating dedicated process mode\n"); + + afu->current_mode = 0; + afu->num_procs = 0; + + cxl_chardev_afu_remove(afu); + + return 0; +} + +static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode) +{ + if (mode == CXL_MODE_DIRECTED) + return deactivate_afu_directed(afu); + if (mode == CXL_MODE_DEDICATED) + return deactivate_dedicated_process(afu); + return 0; +} + +static int native_afu_activate_mode(struct cxl_afu *afu, int mode) +{ + if (!mode) + return 0; + if (!(mode & afu->modes_supported)) + return -EINVAL; + + if (!cxl_ops->link_ok(afu->adapter, afu)) { + WARN(1, "Device link is down, refusing to activate!\n"); + return -EIO; + } + + if (mode == CXL_MODE_DIRECTED) + return activate_afu_directed(afu); + if ((mode == CXL_MODE_DEDICATED) && + (afu->adapter->native->sl_ops->activate_dedicated_process)) + return afu->adapter->native->sl_ops->activate_dedicated_process(afu); + + return -EINVAL; +} + +static int native_attach_process(struct cxl_context *ctx, bool kernel, + u64 wed, u64 amr) +{ + if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { + WARN(1, "Device link is down, refusing to attach process!\n"); + return -EIO; + } + + ctx->kernel = kernel; + if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) && + (ctx->afu->adapter->native->sl_ops->attach_afu_directed)) + return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr); + + if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) && + (ctx->afu->adapter->native->sl_ops->attach_dedicated_process)) + return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr); + + return -EINVAL; +} + +static inline int detach_process_native_dedicated(struct cxl_context *ctx) +{ + /* + * The CAIA section 2.1.1 indicates that we need to do an AFU reset to + * stop the AFU in dedicated mode (we therefore do not make that + * optional like we do in the afu directed path). It does not indicate + * that we need to do an explicit disable (which should occur + * implicitly as part of the reset) or purge, but we do these as well + * to be on the safe side. + * + * Notably we used to have some issues with the disable sequence + * (before the sequence was spelled out in the architecture) which is + * why we were so heavy weight in the first place, however a bug was + * discovered that had rendered the disable operation ineffective, so + * it is conceivable that was the sole explanation for those + * difficulties. Point is, we should be careful and do some regression + * testing if we ever attempt to remove any part of this procedure. 
+ */ + cxl_ops->afu_reset(ctx->afu); + cxl_afu_disable(ctx->afu); + cxl_psl_purge(ctx->afu); + return 0; +} + +static void native_update_ivtes(struct cxl_context *ctx) +{ + if (ctx->afu->current_mode == CXL_MODE_DIRECTED) + return update_ivtes_directed(ctx); + if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) && + (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)) + return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx); + WARN(1, "native_update_ivtes: Bad mode\n"); +} + +static inline int detach_process_native_afu_directed(struct cxl_context *ctx) +{ + if (!ctx->pe_inserted) + return 0; + if (terminate_process_element(ctx)) + return -1; + if (remove_process_element(ctx)) + return -1; + + return 0; +} + +static int native_detach_process(struct cxl_context *ctx) +{ + trace_cxl_detach(ctx); + + if (ctx->afu->current_mode == CXL_MODE_DEDICATED) + return detach_process_native_dedicated(ctx); + + return detach_process_native_afu_directed(ctx); +} + +static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info) +{ + /* If the adapter has gone away, we can't get any meaningful + * information. + */ + if (!cxl_ops->link_ok(afu->adapter, afu)) + return -EIO; + + info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); + info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An); + if (cxl_is_power8()) + info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An); + info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An); + info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); + info->proc_handle = 0; + + return 0; +} + +void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx) +{ + u64 fir1, serr; + + fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1); + + dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1); + if (ctx->afu->adapter->native->sl_ops->register_serr_irq) { + serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); + cxl_afu_decode_psl_serr(ctx->afu, serr); + } +} + +void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx) +{ + u64 fir1, fir2, fir_slice, serr, afu_debug; + + fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1); + fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2); + fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An); + afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An); + + dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1); + dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2); + if (ctx->afu->adapter->native->sl_ops->register_serr_irq) { + serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); + cxl_afu_decode_psl_serr(ctx->afu, serr); + } + dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); + dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); +} + +static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx, + u64 dsisr, u64 errstat) +{ + + dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat); + + if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers) + ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx); + + if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) { + dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n"); + ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter); + } + + return cxl_ops->ack_irq(ctx, 0, errstat); +} + +static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr) +{ + if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS)) + return true; + + if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF)) + return true; + + return false; +} + +irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info 
*irq_info) +{ + if (cxl_is_translation_fault(afu, irq_info->dsisr)) + cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); + else + cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); + + return IRQ_HANDLED; +} + +static irqreturn_t native_irq_multiplexed(int irq, void *data) +{ + struct cxl_afu *afu = data; + struct cxl_context *ctx; + struct cxl_irq_info irq_info; + u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An); + int ph, ret = IRQ_HANDLED, res; + + /* check if eeh kicked in while the interrupt was in flight */ + if (unlikely(phreg == ~0ULL)) { + dev_warn(&afu->dev, + "Ignoring slice interrupt(%d) due to fenced card", + irq); + return IRQ_HANDLED; + } + /* Mask the pe-handle from register value */ + ph = phreg & 0xffff; + if ((res = native_get_irq_info(afu, &irq_info))) { + WARN(1, "Unable to get CXL IRQ Info: %i\n", res); + if (afu->adapter->native->sl_ops->fail_irq) + return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info); + return ret; + } + + rcu_read_lock(); + ctx = idr_find(&afu->contexts_idr, ph); + if (ctx) { + if (afu->adapter->native->sl_ops->handle_interrupt) + ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info); + rcu_read_unlock(); + return ret; + } + rcu_read_unlock(); + + WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR" + " %016llx\n(Possible AFU HW issue - was a term/remove acked" + " with outstanding transactions?)\n", ph, irq_info.dsisr, + irq_info.dar); + if (afu->adapter->native->sl_ops->fail_irq) + ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info); + return ret; +} + +static void native_irq_wait(struct cxl_context *ctx) +{ + u64 dsisr; + int timeout = 1000; + int ph; + + /* + * Wait until no further interrupts are presented by the PSL + * for this context. + */ + while (timeout--) { + ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff; + if (ph != ctx->pe) + return; + dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An); + if (cxl_is_power8() && + ((dsisr & CXL_PSL_DSISR_PENDING) == 0)) + return; + if (cxl_is_power9() && + ((dsisr & CXL_PSL9_DSISR_PENDING) == 0)) + return; + /* + * We are waiting for the workqueue to process our + * irq, so need to let that run here. 
+ */ + msleep(1); + } + + dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i" + " DSISR %016llx!\n", ph, dsisr); + return; +} + +static irqreturn_t native_slice_irq_err(int irq, void *data) +{ + struct cxl_afu *afu = data; + u64 errstat, serr, afu_error, dsisr; + u64 fir_slice, afu_debug, irq_mask; + + /* + * slice err interrupt is only used with full PSL (no XSL) + */ + serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); + errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); + afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An); + dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); + cxl_afu_decode_psl_serr(afu, serr); + + if (cxl_is_power8()) { + fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An); + afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An); + dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); + dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); + } + dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat); + dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error); + dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr); + + /* mask off the IRQ so it won't retrigger until the AFU is reset */ + irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32; + serr |= irq_mask; + cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); + dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n"); + + return IRQ_HANDLED; +} + +void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter) +{ + u64 fir1; + + fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1); + dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1); +} + +void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter) +{ + u64 fir1, fir2; + + fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1); + fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2); + dev_crit(&adapter->dev, + "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", + fir1, fir2); +} + +static irqreturn_t native_irq_err(int irq, void *data) +{ + struct cxl *adapter = data; + u64 err_ivte; + + WARN(1, "CXL ERROR interrupt %i\n", irq); + + err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE); + dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte); + + if (adapter->native->sl_ops->debugfs_stop_trace) { + dev_crit(&adapter->dev, "STOPPING CXL TRACE\n"); + adapter->native->sl_ops->debugfs_stop_trace(adapter); + } + + if (adapter->native->sl_ops->err_irq_dump_registers) + adapter->native->sl_ops->err_irq_dump_registers(adapter); + + return IRQ_HANDLED; +} + +int cxl_native_register_psl_err_irq(struct cxl *adapter) +{ + int rc; + + adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", + dev_name(&adapter->dev)); + if (!adapter->irq_name) + return -ENOMEM; + + if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter, + &adapter->native->err_hwirq, + &adapter->native->err_virq, + adapter->irq_name))) { + kfree(adapter->irq_name); + adapter->irq_name = NULL; + return rc; + } + + cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff); + + return 0; +} + +void cxl_native_release_psl_err_irq(struct cxl *adapter) +{ + if (adapter->native->err_virq == 0 || + adapter->native->err_virq != + irq_find_mapping(NULL, adapter->native->err_hwirq)) + return; + + cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); + cxl_unmap_irq(adapter->native->err_virq, adapter); + cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq); + kfree(adapter->irq_name); + adapter->native->err_virq = 0; +} + +int cxl_native_register_serr_irq(struct cxl_afu *afu) +{ + u64 serr; + int rc; + + afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", + dev_name(&afu->dev)); 
+ if (!afu->err_irq_name) + return -ENOMEM; + + if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu, + &afu->serr_hwirq, + &afu->serr_virq, afu->err_irq_name))) { + kfree(afu->err_irq_name); + afu->err_irq_name = NULL; + return rc; + } + + serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); + if (cxl_is_power8()) + serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff); + if (cxl_is_power9()) { + /* + * By default, all errors are masked. So don't set all masks. + * Slice errors will be transferred. + */ + serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff); + } + cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); + + return 0; +} + +void cxl_native_release_serr_irq(struct cxl_afu *afu) +{ + if (afu->serr_virq == 0 || + afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) + return; + + cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); + cxl_unmap_irq(afu->serr_virq, afu); + cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); + kfree(afu->err_irq_name); + afu->serr_virq = 0; +} + +int cxl_native_register_psl_irq(struct cxl_afu *afu) +{ + int rc; + + afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s", + dev_name(&afu->dev)); + if (!afu->psl_irq_name) + return -ENOMEM; + + if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed, + afu, &afu->native->psl_hwirq, &afu->native->psl_virq, + afu->psl_irq_name))) { + kfree(afu->psl_irq_name); + afu->psl_irq_name = NULL; + } + return rc; +} + +void cxl_native_release_psl_irq(struct cxl_afu *afu) +{ + if (afu->native->psl_virq == 0 || + afu->native->psl_virq != + irq_find_mapping(NULL, afu->native->psl_hwirq)) + return; + + cxl_unmap_irq(afu->native->psl_virq, afu); + cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq); + kfree(afu->psl_irq_name); + afu->native->psl_virq = 0; +} + +static void recover_psl_err(struct cxl_afu *afu, u64 errstat) +{ + u64 dsisr; + + pr_devel("RECOVERING FROM PSL ERROR... 
(0x%016llx)\n", errstat); + + /* Clear PSL_DSISR[PE] */ + dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); + cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE); + + /* Write 1s to clear error status bits */ + cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat); +} + +static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) +{ + trace_cxl_psl_irq_ack(ctx, tfc); + if (tfc) + cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc); + if (psl_reset_mask) + recover_psl_err(ctx->afu, psl_reset_mask); + + return 0; +} + +int cxl_check_error(struct cxl_afu *afu) +{ + return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL); +} + +static bool native_support_attributes(const char *attr_name, + enum cxl_attrs type) +{ + return true; +} + +static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out) +{ + if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) + return -EIO; + if (unlikely(off >= afu->crs_len)) + return -ERANGE; + *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset + + (cr * afu->crs_len) + off); + return 0; +} + +static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out) +{ + if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) + return -EIO; + if (unlikely(off >= afu->crs_len)) + return -ERANGE; + *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset + + (cr * afu->crs_len) + off); + return 0; +} + +static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out) +{ + u64 aligned_off = off & ~0x3L; + u32 val; + int rc; + + rc = native_afu_cr_read32(afu, cr, aligned_off, &val); + if (!rc) + *out = (val >> ((off & 0x3) * 8)) & 0xffff; + return rc; +} + +static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out) +{ + u64 aligned_off = off & ~0x3L; + u32 val; + int rc; + + rc = native_afu_cr_read32(afu, cr, aligned_off, &val); + if (!rc) + *out = (val >> ((off & 0x3) * 8)) & 0xff; + return rc; +} + +static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in) +{ + if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) + return -EIO; + if (unlikely(off >= afu->crs_len)) + return -ERANGE; + out_le32(afu->native->afu_desc_mmio + afu->crs_offset + + (cr * afu->crs_len) + off, in); + return 0; +} + +static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in) +{ + u64 aligned_off = off & ~0x3L; + u32 val32, mask, shift; + int rc; + + rc = native_afu_cr_read32(afu, cr, aligned_off, &val32); + if (rc) + return rc; + shift = (off & 0x3) * 8; + WARN_ON(shift == 24); + mask = 0xffff << shift; + val32 = (val32 & ~mask) | (in << shift); + + rc = native_afu_cr_write32(afu, cr, aligned_off, val32); + return rc; +} + +static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) +{ + u64 aligned_off = off & ~0x3L; + u32 val32, mask, shift; + int rc; + + rc = native_afu_cr_read32(afu, cr, aligned_off, &val32); + if (rc) + return rc; + shift = (off & 0x3) * 8; + mask = 0xff << shift; + val32 = (val32 & ~mask) | (in << shift); + + rc = native_afu_cr_write32(afu, cr, aligned_off, val32); + return rc; +} + +const struct cxl_backend_ops cxl_native_ops = { + .module = THIS_MODULE, + .adapter_reset = cxl_pci_reset, + .alloc_one_irq = cxl_pci_alloc_one_irq, + .release_one_irq = cxl_pci_release_one_irq, + .alloc_irq_ranges = cxl_pci_alloc_irq_ranges, + .release_irq_ranges = cxl_pci_release_irq_ranges, + .setup_irq = cxl_pci_setup_irq, + .handle_psl_slice_error = native_handle_psl_slice_error, + .psl_interrupt = NULL, + .ack_irq = native_ack_irq, + .irq_wait = 
native_irq_wait, + .attach_process = native_attach_process, + .detach_process = native_detach_process, + .update_ivtes = native_update_ivtes, + .support_attributes = native_support_attributes, + .link_ok = cxl_adapter_link_ok, + .release_afu = cxl_pci_release_afu, + .afu_read_err_buffer = cxl_pci_afu_read_err_buffer, + .afu_check_and_enable = native_afu_check_and_enable, + .afu_activate_mode = native_afu_activate_mode, + .afu_deactivate_mode = native_afu_deactivate_mode, + .afu_reset = native_afu_reset, + .afu_cr_read8 = native_afu_cr_read8, + .afu_cr_read16 = native_afu_cr_read16, + .afu_cr_read32 = native_afu_cr_read32, + .afu_cr_read64 = native_afu_cr_read64, + .afu_cr_write8 = native_afu_cr_write8, + .afu_cr_write16 = native_afu_cr_write16, + .afu_cr_write32 = native_afu_cr_write32, + .read_adapter_vpd = cxl_pci_read_adapter_vpd, +}; diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c new file mode 100644 index 000000000..1cfecba42 --- /dev/null +++ b/drivers/misc/cxl/of.c @@ -0,0 +1,507 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2015 IBM Corp. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> + +#include "cxl.h" + + +static const __be32 *read_prop_string(const struct device_node *np, + const char *prop_name) +{ + const __be32 *prop; + + prop = of_get_property(np, prop_name, NULL); + if (cxl_verbose && prop) + pr_info("%s: %s\n", prop_name, (char *) prop); + return prop; +} + +static const __be32 *read_prop_dword(const struct device_node *np, + const char *prop_name, u32 *val) +{ + const __be32 *prop; + + prop = of_get_property(np, prop_name, NULL); + if (prop) + *val = be32_to_cpu(prop[0]); + if (cxl_verbose && prop) + pr_info("%s: %#x (%u)\n", prop_name, *val, *val); + return prop; +} + +static const __be64 *read_prop64_dword(const struct device_node *np, + const char *prop_name, u64 *val) +{ + const __be64 *prop; + + prop = of_get_property(np, prop_name, NULL); + if (prop) + *val = be64_to_cpu(prop[0]); + if (cxl_verbose && prop) + pr_info("%s: %#llx (%llu)\n", prop_name, *val, *val); + return prop; +} + + +static int read_handle(struct device_node *np, u64 *handle) +{ + const __be32 *prop; + u64 size; + + /* Get address and size of the node */ + prop = of_get_address(np, 0, &size, NULL); + if (size) + return -EINVAL; + + /* Helper to read a big number; size is in cells (not bytes) */ + *handle = of_read_number(prop, of_n_addr_cells(np)); + return 0; +} + +static int read_phys_addr(struct device_node *np, char *prop_name, + struct cxl_afu *afu) +{ + int i, len, entry_size, naddr, nsize, type; + u64 addr, size; + const __be32 *prop; + + naddr = of_n_addr_cells(np); + nsize = of_n_size_cells(np); + + prop = of_get_property(np, prop_name, &len); + if (prop) { + entry_size = naddr + nsize; + for (i = 0; i < (len / 4); i += entry_size, prop += entry_size) { + type = be32_to_cpu(prop[0]); + addr = of_read_number(prop, naddr); + size = of_read_number(&prop[naddr], nsize); + switch (type) { + case 0: /* unit address */ + afu->guest->handle = addr; + break; + case 1: /* p2 area */ + afu->guest->p2n_phys += addr; + afu->guest->p2n_size = size; + break; + case 2: /* problem state area */ + afu->psn_phys += addr; + afu->adapter->ps_size = size; + break; + default: + pr_err("Invalid address type %d found in %s property of AFU\n", + type, prop_name); + return -EINVAL; + } + if (cxl_verbose) + pr_info("%s: %#x %#llx (size %#llx)\n", + 
prop_name, type, addr, size); + } + } + return 0; +} + +static int read_vpd(struct cxl *adapter, struct cxl_afu *afu) +{ + char vpd[256]; + int rc; + size_t len = sizeof(vpd); + + memset(vpd, 0, len); + + if (adapter) + rc = cxl_guest_read_adapter_vpd(adapter, vpd, len); + else + rc = cxl_guest_read_afu_vpd(afu, vpd, len); + + if (rc > 0) { + cxl_dump_debug_buffer(vpd, rc); + rc = 0; + } + return rc; +} + +int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np) +{ + if (read_handle(afu_np, &afu->guest->handle)) + return -EINVAL; + pr_devel("AFU handle: 0x%.16llx\n", afu->guest->handle); + + return 0; +} + +int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np) +{ + int i, len, rc; + char *p; + const __be32 *prop; + u16 device_id, vendor_id; + u32 val = 0, class_code; + + /* Properties are read in the same order as listed in PAPR */ + + if (cxl_verbose) { + pr_info("Dump of the 'ibm,coherent-platform-function' node properties:\n"); + + prop = of_get_property(np, "compatible", &len); + i = 0; + while (i < len) { + p = (char *) prop + i; + pr_info("compatible: %s\n", p); + i += strlen(p) + 1; + } + read_prop_string(np, "name"); + } + + rc = read_phys_addr(np, "reg", afu); + if (rc) + return rc; + + rc = read_phys_addr(np, "assigned-addresses", afu); + if (rc) + return rc; + + if (afu->psn_phys == 0) + afu->psa = false; + else + afu->psa = true; + + if (cxl_verbose) { + read_prop_string(np, "ibm,loc-code"); + read_prop_string(np, "device_type"); + } + + read_prop_dword(np, "ibm,#processes", &afu->max_procs_virtualised); + + if (cxl_verbose) { + read_prop_dword(np, "ibm,scratchpad-size", &val); + read_prop_dword(np, "ibm,programmable", &val); + read_prop_string(np, "ibm,phandle"); + read_vpd(NULL, afu); + } + + read_prop_dword(np, "ibm,max-ints-per-process", &afu->guest->max_ints); + afu->irqs_max = afu->guest->max_ints; + + prop = read_prop_dword(np, "ibm,min-ints-per-process", &afu->pp_irqs); + if (prop) { + /* One extra interrupt for the PSL interrupt is already + * included. Remove it now to keep only AFU interrupts and + * match the native case. 
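+ * (e.g. a reported minimum of 4 becomes pp_irqs = 3 AFU interrupts)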
+ */ + afu->pp_irqs--; + } + + if (cxl_verbose) { + read_prop_dword(np, "ibm,max-ints", &val); + read_prop_dword(np, "ibm,vpd-size", &val); + } + + read_prop64_dword(np, "ibm,error-buffer-size", &afu->eb_len); + afu->eb_offset = 0; + + if (cxl_verbose) + read_prop_dword(np, "ibm,config-record-type", &val); + + read_prop64_dword(np, "ibm,config-record-size", &afu->crs_len); + afu->crs_offset = 0; + + read_prop_dword(np, "ibm,#config-records", &afu->crs_num); + + if (cxl_verbose) { + for (i = 0; i < afu->crs_num; i++) { + rc = cxl_ops->afu_cr_read16(afu, i, PCI_DEVICE_ID, + &device_id); + if (!rc) + pr_info("record %d - device-id: %#x\n", + i, device_id); + rc = cxl_ops->afu_cr_read16(afu, i, PCI_VENDOR_ID, + &vendor_id); + if (!rc) + pr_info("record %d - vendor-id: %#x\n", + i, vendor_id); + rc = cxl_ops->afu_cr_read32(afu, i, PCI_CLASS_REVISION, + &class_code); + if (!rc) { + class_code >>= 8; + pr_info("record %d - class-code: %#x\n", + i, class_code); + } + } + + read_prop_dword(np, "ibm,function-number", &val); + read_prop_dword(np, "ibm,privileged-function", &val); + read_prop_dword(np, "vendor-id", &val); + read_prop_dword(np, "device-id", &val); + read_prop_dword(np, "revision-id", &val); + read_prop_dword(np, "class-code", &val); + read_prop_dword(np, "subsystem-vendor-id", &val); + read_prop_dword(np, "subsystem-id", &val); + } + /* + * if "ibm,process-mmio" doesn't exist then per-process mmio is + * not supported + */ + val = 0; + prop = read_prop_dword(np, "ibm,process-mmio", &val); + if (prop && val == 1) + afu->pp_psa = true; + else + afu->pp_psa = false; + + if (cxl_verbose) { + read_prop_dword(np, "ibm,supports-aur", &val); + read_prop_dword(np, "ibm,supports-csrp", &val); + read_prop_dword(np, "ibm,supports-prr", &val); + } + + prop = read_prop_dword(np, "ibm,function-error-interrupt", &val); + if (prop) + afu->serr_hwirq = val; + + pr_devel("AFU handle: %#llx\n", afu->guest->handle); + pr_devel("p2n_phys: %#llx (size %#llx)\n", + afu->guest->p2n_phys, afu->guest->p2n_size); + pr_devel("psn_phys: %#llx (size %#llx)\n", + afu->psn_phys, afu->adapter->ps_size); + pr_devel("Max number of processes virtualised=%i\n", + afu->max_procs_virtualised); + pr_devel("Per-process irqs min=%i, max=%i\n", afu->pp_irqs, + afu->irqs_max); + pr_devel("Slice error interrupt=%#lx\n", afu->serr_hwirq); + + return 0; +} + +static int read_adapter_irq_config(struct cxl *adapter, struct device_node *np) +{ + const __be32 *ranges; + int len, nranges, i; + struct irq_avail *cur; + + ranges = of_get_property(np, "interrupt-ranges", &len); + if (ranges == NULL || len < (2 * sizeof(int))) + return -EINVAL; + + /* + * encoded array of two cells per entry, each cell encoded as + * with encode-int + */ + nranges = len / (2 * sizeof(int)); + if (nranges == 0 || (nranges * 2 * sizeof(int)) != len) + return -EINVAL; + + adapter->guest->irq_avail = kcalloc(nranges, sizeof(struct irq_avail), + GFP_KERNEL); + if (adapter->guest->irq_avail == NULL) + return -ENOMEM; + + adapter->guest->irq_base_offset = be32_to_cpu(ranges[0]); + for (i = 0; i < nranges; i++) { + cur = &adapter->guest->irq_avail[i]; + cur->offset = be32_to_cpu(ranges[i * 2]); + cur->range = be32_to_cpu(ranges[i * 2 + 1]); + cur->bitmap = kcalloc(BITS_TO_LONGS(cur->range), + sizeof(*cur->bitmap), GFP_KERNEL); + if (cur->bitmap == NULL) + goto err; + if (cur->offset < adapter->guest->irq_base_offset) + adapter->guest->irq_base_offset = cur->offset; + if (cxl_verbose) + pr_info("available IRQ range: %#lx-%#lx (%lu)\n", + cur->offset, cur->offset + 
cur->range - 1, + cur->range); + } + adapter->guest->irq_nranges = nranges; + spin_lock_init(&adapter->guest->irq_alloc_lock); + + return 0; +err: + for (i--; i >= 0; i--) { + cur = &adapter->guest->irq_avail[i]; + kfree(cur->bitmap); + } + kfree(adapter->guest->irq_avail); + adapter->guest->irq_avail = NULL; + return -ENOMEM; +} + +int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np) +{ + if (read_handle(np, &adapter->guest->handle)) + return -EINVAL; + pr_devel("Adapter handle: 0x%.16llx\n", adapter->guest->handle); + + return 0; +} + +int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np) +{ + int rc, len, naddr, i; + char *p; + const __be32 *prop; + u32 val = 0; + + /* Properties are read in the same order as listed in PAPR */ + + naddr = of_n_addr_cells(np); + + if (cxl_verbose) { + pr_info("Dump of the 'ibm,coherent-platform-facility' node properties:\n"); + + read_prop_dword(np, "#address-cells", &val); + read_prop_dword(np, "#size-cells", &val); + + prop = of_get_property(np, "compatible", &len); + i = 0; + while (i < len) { + p = (char *) prop + i; + pr_info("compatible: %s\n", p); + i += strlen(p) + 1; + } + read_prop_string(np, "name"); + read_prop_string(np, "model"); + + prop = of_get_property(np, "reg", NULL); + if (prop) { + pr_info("reg: addr:%#llx size:%#x\n", + of_read_number(prop, naddr), + be32_to_cpu(prop[naddr])); + } + + read_prop_string(np, "ibm,loc-code"); + } + + if ((rc = read_adapter_irq_config(adapter, np))) + return rc; + + if (cxl_verbose) { + read_prop_string(np, "device_type"); + read_prop_string(np, "ibm,phandle"); + } + + prop = read_prop_dword(np, "ibm,caia-version", &val); + if (prop) { + adapter->caia_major = (val & 0xFF00) >> 8; + adapter->caia_minor = val & 0xFF; + } + + prop = read_prop_dword(np, "ibm,psl-revision", &val); + if (prop) + adapter->psl_rev = val; + + prop = read_prop_string(np, "status"); + if (prop) { + adapter->guest->status = kasprintf(GFP_KERNEL, "%s", (char *) prop); + if (adapter->guest->status == NULL) + return -ENOMEM; + } + + prop = read_prop_dword(np, "vendor-id", &val); + if (prop) + adapter->guest->vendor = val; + + prop = read_prop_dword(np, "device-id", &val); + if (prop) + adapter->guest->device = val; + + if (cxl_verbose) { + read_prop_dword(np, "ibm,privileged-facility", &val); + read_prop_dword(np, "revision-id", &val); + read_prop_dword(np, "class-code", &val); + } + + prop = read_prop_dword(np, "subsystem-vendor-id", &val); + if (prop) + adapter->guest->subsystem_vendor = val; + + prop = read_prop_dword(np, "subsystem-id", &val); + if (prop) + adapter->guest->subsystem = val; + + if (cxl_verbose) + read_vpd(adapter, NULL); + + return 0; +} + +static int cxl_of_remove(struct platform_device *pdev) +{ + struct cxl *adapter; + int afu; + + adapter = dev_get_drvdata(&pdev->dev); + for (afu = 0; afu < adapter->slices; afu++) + cxl_guest_remove_afu(adapter->afu[afu]); + + cxl_guest_remove_adapter(adapter); + return 0; +} + +static void cxl_of_shutdown(struct platform_device *pdev) +{ + cxl_of_remove(pdev); +} + +int cxl_of_probe(struct platform_device *pdev) +{ + struct device_node *np = NULL; + struct device_node *afu_np = NULL; + struct cxl *adapter = NULL; + int ret; + int slice = 0, slice_ok = 0; + + pr_devel("in %s\n", __func__); + + np = pdev->dev.of_node; + if (np == NULL) + return -ENODEV; + + /* init adapter */ + adapter = cxl_guest_init_adapter(np, pdev); + if (IS_ERR(adapter)) { + dev_err(&pdev->dev, "guest_init_adapter failed: %li\n", PTR_ERR(adapter)); + return 
PTR_ERR(adapter); + } + + /* init afu */ + for_each_child_of_node(np, afu_np) { + if ((ret = cxl_guest_init_afu(adapter, slice, afu_np))) + dev_err(&pdev->dev, "AFU %i failed to initialise: %i\n", + slice, ret); + else + slice_ok++; + slice++; + } + + if (slice_ok == 0) { + dev_info(&pdev->dev, "No active AFU"); + adapter->slices = 0; + } + + return 0; +} + +static const struct of_device_id cxl_of_match[] = { + { .compatible = "ibm,coherent-platform-facility",}, + {}, +}; +MODULE_DEVICE_TABLE(of, cxl_of_match); + +struct platform_driver cxl_of_driver = { + .driver = { + .name = "cxl_of", + .of_match_table = cxl_of_match, + .owner = THIS_MODULE + }, + .probe = cxl_of_probe, + .remove = cxl_of_remove, + .shutdown = cxl_of_shutdown, +}; diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c new file mode 100644 index 000000000..d183836d8 --- /dev/null +++ b/drivers/misc/cxl/pci.c @@ -0,0 +1,2103 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2014 IBM Corp. + */ + +#include <linux/pci_regs.h> +#include <linux/pci_ids.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/sort.h> +#include <linux/pci.h> +#include <linux/of.h> +#include <linux/delay.h> +#include <asm/opal.h> +#include <asm/msi_bitmap.h> +#include <asm/pnv-pci.h> +#include <asm/io.h> +#include <asm/reg.h> + +#include "cxl.h" +#include <misc/cxl.h> + + +#define CXL_PCI_VSEC_ID 0x1280 +#define CXL_VSEC_MIN_SIZE 0x80 + +#define CXL_READ_VSEC_LENGTH(dev, vsec, dest) \ + { \ + pci_read_config_word(dev, vsec + 0x6, dest); \ + *dest >>= 4; \ + } +#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \ + pci_read_config_byte(dev, vsec + 0x8, dest) + +#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \ + pci_read_config_byte(dev, vsec + 0x9, dest) +#define CXL_STATUS_SECOND_PORT 0x80 +#define CXL_STATUS_MSI_X_FULL 0x40 +#define CXL_STATUS_MSI_X_SINGLE 0x20 +#define CXL_STATUS_FLASH_RW 0x08 +#define CXL_STATUS_FLASH_RO 0x04 +#define CXL_STATUS_LOADABLE_AFU 0x02 +#define CXL_STATUS_LOADABLE_PSL 0x01 +/* If we see these features we won't try to use the card */ +#define CXL_UNSUPPORTED_FEATURES \ + (CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE) + +#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \ + pci_read_config_byte(dev, vsec + 0xa, dest) +#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \ + pci_write_config_byte(dev, vsec + 0xa, val) +#define CXL_VSEC_PROTOCOL_MASK 0xe0 +#define CXL_VSEC_PROTOCOL_1024TB 0x80 +#define CXL_VSEC_PROTOCOL_512TB 0x40 +#define CXL_VSEC_PROTOCOL_256TB 0x20 /* Power 8/9 uses this */ +#define CXL_VSEC_PROTOCOL_ENABLE 0x01 + +#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \ + pci_read_config_word(dev, vsec + 0xc, dest) +#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \ + pci_read_config_byte(dev, vsec + 0xe, dest) +#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \ + pci_read_config_byte(dev, vsec + 0xf, dest) +#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \ + pci_read_config_word(dev, vsec + 0x10, dest) + +#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \ + pci_read_config_byte(dev, vsec + 0x13, dest) +#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \ + pci_write_config_byte(dev, vsec + 0x13, val) +#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */ +#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */ +#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */ + +#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \ + pci_read_config_dword(dev, vsec + 0x20, dest) +#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \ + 
pci_read_config_dword(dev, vsec + 0x24, dest) +#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \ + pci_read_config_dword(dev, vsec + 0x28, dest) +#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \ + pci_read_config_dword(dev, vsec + 0x2c, dest) + + +/* This works a little different than the p1/p2 register accesses to make it + * easier to pull out individual fields */ +#define AFUD_READ(afu, off) in_be64(afu->native->afu_desc_mmio + off) +#define AFUD_READ_LE(afu, off) in_le64(afu->native->afu_desc_mmio + off) +#define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit))) +#define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be)) + +#define AFUD_READ_INFO(afu) AFUD_READ(afu, 0x0) +#define AFUD_NUM_INTS_PER_PROC(val) EXTRACT_PPC_BITS(val, 0, 15) +#define AFUD_NUM_PROCS(val) EXTRACT_PPC_BITS(val, 16, 31) +#define AFUD_NUM_CRS(val) EXTRACT_PPC_BITS(val, 32, 47) +#define AFUD_MULTIMODE(val) EXTRACT_PPC_BIT(val, 48) +#define AFUD_PUSH_BLOCK_TRANSFER(val) EXTRACT_PPC_BIT(val, 55) +#define AFUD_DEDICATED_PROCESS(val) EXTRACT_PPC_BIT(val, 59) +#define AFUD_AFU_DIRECTED(val) EXTRACT_PPC_BIT(val, 61) +#define AFUD_TIME_SLICED(val) EXTRACT_PPC_BIT(val, 63) +#define AFUD_READ_CR(afu) AFUD_READ(afu, 0x20) +#define AFUD_CR_LEN(val) EXTRACT_PPC_BITS(val, 8, 63) +#define AFUD_READ_CR_OFF(afu) AFUD_READ(afu, 0x28) +#define AFUD_READ_PPPSA(afu) AFUD_READ(afu, 0x30) +#define AFUD_PPPSA_PP(val) EXTRACT_PPC_BIT(val, 6) +#define AFUD_PPPSA_PSA(val) EXTRACT_PPC_BIT(val, 7) +#define AFUD_PPPSA_LEN(val) EXTRACT_PPC_BITS(val, 8, 63) +#define AFUD_READ_PPPSA_OFF(afu) AFUD_READ(afu, 0x38) +#define AFUD_READ_EB(afu) AFUD_READ(afu, 0x40) +#define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63) +#define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48) + +static const struct pci_device_id cxl_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), }, + { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), }, + { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), }, + { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), }, + { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0623), }, + { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0628), }, + { } +}; +MODULE_DEVICE_TABLE(pci, cxl_pci_tbl); + + +/* + * Mostly using these wrappers to avoid confusion: + * priv 1 is BAR2, while priv 2 is BAR0 + */ +static inline resource_size_t p1_base(struct pci_dev *dev) +{ + return pci_resource_start(dev, 2); +} + +static inline resource_size_t p1_size(struct pci_dev *dev) +{ + return pci_resource_len(dev, 2); +} + +static inline resource_size_t p2_base(struct pci_dev *dev) +{ + return pci_resource_start(dev, 0); +} + +static inline resource_size_t p2_size(struct pci_dev *dev) +{ + return pci_resource_len(dev, 0); +} + +static int find_cxl_vsec(struct pci_dev *dev) +{ + int vsec = 0; + u16 val; + + while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) { + pci_read_config_word(dev, vsec + 0x4, &val); + if (val == CXL_PCI_VSEC_ID) + return vsec; + } + return 0; + +} + +static void dump_cxl_config_space(struct pci_dev *dev) +{ + int vsec; + u32 val; + + dev_info(&dev->dev, "dump_cxl_config_space\n"); + + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val); + dev_info(&dev->dev, "BAR0: %#.8x\n", val); + pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val); + dev_info(&dev->dev, "BAR1: %#.8x\n", val); + pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val); + dev_info(&dev->dev, "BAR2: %#.8x\n", val); + pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val); + dev_info(&dev->dev, "BAR3: %#.8x\n", val); + pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val); + 
dev_info(&dev->dev, "BAR4: %#.8x\n", val); + pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val); + dev_info(&dev->dev, "BAR5: %#.8x\n", val); + + dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n", + p1_base(dev), p1_size(dev)); + dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n", + p2_base(dev), p2_size(dev)); + dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n", + pci_resource_start(dev, 4), pci_resource_len(dev, 4)); + + if (!(vsec = find_cxl_vsec(dev))) + return; + +#define show_reg(name, what) \ + dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what) + + pci_read_config_dword(dev, vsec + 0x0, &val); + show_reg("Cap ID", (val >> 0) & 0xffff); + show_reg("Cap Ver", (val >> 16) & 0xf); + show_reg("Next Cap Ptr", (val >> 20) & 0xfff); + pci_read_config_dword(dev, vsec + 0x4, &val); + show_reg("VSEC ID", (val >> 0) & 0xffff); + show_reg("VSEC Rev", (val >> 16) & 0xf); + show_reg("VSEC Length", (val >> 20) & 0xfff); + pci_read_config_dword(dev, vsec + 0x8, &val); + show_reg("Num AFUs", (val >> 0) & 0xff); + show_reg("Status", (val >> 8) & 0xff); + show_reg("Mode Control", (val >> 16) & 0xff); + show_reg("Reserved", (val >> 24) & 0xff); + pci_read_config_dword(dev, vsec + 0xc, &val); + show_reg("PSL Rev", (val >> 0) & 0xffff); + show_reg("CAIA Ver", (val >> 16) & 0xffff); + pci_read_config_dword(dev, vsec + 0x10, &val); + show_reg("Base Image Rev", (val >> 0) & 0xffff); + show_reg("Reserved", (val >> 16) & 0x0fff); + show_reg("Image Control", (val >> 28) & 0x3); + show_reg("Reserved", (val >> 30) & 0x1); + show_reg("Image Loaded", (val >> 31) & 0x1); + + pci_read_config_dword(dev, vsec + 0x14, &val); + show_reg("Reserved", val); + pci_read_config_dword(dev, vsec + 0x18, &val); + show_reg("Reserved", val); + pci_read_config_dword(dev, vsec + 0x1c, &val); + show_reg("Reserved", val); + + pci_read_config_dword(dev, vsec + 0x20, &val); + show_reg("AFU Descriptor Offset", val); + pci_read_config_dword(dev, vsec + 0x24, &val); + show_reg("AFU Descriptor Size", val); + pci_read_config_dword(dev, vsec + 0x28, &val); + show_reg("Problem State Offset", val); + pci_read_config_dword(dev, vsec + 0x2c, &val); + show_reg("Problem State Size", val); + + pci_read_config_dword(dev, vsec + 0x30, &val); + show_reg("Reserved", val); + pci_read_config_dword(dev, vsec + 0x34, &val); + show_reg("Reserved", val); + pci_read_config_dword(dev, vsec + 0x38, &val); + show_reg("Reserved", val); + pci_read_config_dword(dev, vsec + 0x3c, &val); + show_reg("Reserved", val); + + pci_read_config_dword(dev, vsec + 0x40, &val); + show_reg("PSL Programming Port", val); + pci_read_config_dword(dev, vsec + 0x44, &val); + show_reg("PSL Programming Control", val); + + pci_read_config_dword(dev, vsec + 0x48, &val); + show_reg("Reserved", val); + pci_read_config_dword(dev, vsec + 0x4c, &val); + show_reg("Reserved", val); + + pci_read_config_dword(dev, vsec + 0x50, &val); + show_reg("Flash Address Register", val); + pci_read_config_dword(dev, vsec + 0x54, &val); + show_reg("Flash Size Register", val); + pci_read_config_dword(dev, vsec + 0x58, &val); + show_reg("Flash Status/Control Register", val); + pci_read_config_dword(dev, vsec + 0x58, &val); + show_reg("Flash Data Port", val); + +#undef show_reg +} + +static void dump_afu_descriptor(struct cxl_afu *afu) +{ + u64 val, afu_cr_num, afu_cr_off, afu_cr_len; + int i; + +#define show_reg(name, what) \ + dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what) + + val = AFUD_READ_INFO(afu); + show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val)); + 
show_reg("num_of_processes", AFUD_NUM_PROCS(val)); + show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val)); + show_reg("req_prog_mode", val & 0xffffULL); + afu_cr_num = AFUD_NUM_CRS(val); + + val = AFUD_READ(afu, 0x8); + show_reg("Reserved", val); + val = AFUD_READ(afu, 0x10); + show_reg("Reserved", val); + val = AFUD_READ(afu, 0x18); + show_reg("Reserved", val); + + val = AFUD_READ_CR(afu); + show_reg("Reserved", (val >> (63-7)) & 0xff); + show_reg("AFU_CR_len", AFUD_CR_LEN(val)); + afu_cr_len = AFUD_CR_LEN(val) * 256; + + val = AFUD_READ_CR_OFF(afu); + afu_cr_off = val; + show_reg("AFU_CR_offset", val); + + val = AFUD_READ_PPPSA(afu); + show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff); + show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val)); + + val = AFUD_READ_PPPSA_OFF(afu); + show_reg("PerProcessPSA_offset", val); + + val = AFUD_READ_EB(afu); + show_reg("Reserved", (val >> (63-7)) & 0xff); + show_reg("AFU_EB_len", AFUD_EB_LEN(val)); + + val = AFUD_READ_EB_OFF(afu); + show_reg("AFU_EB_offset", val); + + for (i = 0; i < afu_cr_num; i++) { + val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len); + show_reg("CR Vendor", val & 0xffff); + show_reg("CR Device", (val >> 16) & 0xffff); + } +#undef show_reg +} + +#define P8_CAPP_UNIT0_ID 0xBA +#define P8_CAPP_UNIT1_ID 0XBE +#define P9_CAPP_UNIT0_ID 0xC0 +#define P9_CAPP_UNIT1_ID 0xE0 + +static int get_phb_index(struct device_node *np, u32 *phb_index) +{ + if (of_property_read_u32(np, "ibm,phb-index", phb_index)) + return -ENODEV; + return 0; +} + +static u64 get_capp_unit_id(struct device_node *np, u32 phb_index) +{ + /* + * POWER 8: + * - For chips other than POWER8NVL, we only have CAPP 0, + * irrespective of which PHB is used. + * - For POWER8NVL, assume CAPP 0 is attached to PHB0 and + * CAPP 1 is attached to PHB1. + */ + if (cxl_is_power8()) { + if (!pvr_version_is(PVR_POWER8NVL)) + return P8_CAPP_UNIT0_ID; + + if (phb_index == 0) + return P8_CAPP_UNIT0_ID; + + if (phb_index == 1) + return P8_CAPP_UNIT1_ID; + } + + /* + * POWER 9: + * PEC0 (PHB0). Capp ID = CAPP0 (0b1100_0000) + * PEC1 (PHB1 - PHB2). No capi mode + * PEC2 (PHB3 - PHB4 - PHB5): Capi mode on PHB3 only. Capp ID = CAPP1 (0b1110_0000) + */ + if (cxl_is_power9()) { + if (phb_index == 0) + return P9_CAPP_UNIT0_ID; + + if (phb_index == 3) + return P9_CAPP_UNIT1_ID; + } + + return 0; +} + +int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid, + u32 *phb_index, u64 *capp_unit_id) +{ + int rc; + struct device_node *np; + const __be32 *prop; + + if (!(np = pnv_pci_get_phb_node(dev))) + return -ENODEV; + + while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL))) + np = of_get_next_parent(np); + if (!np) + return -ENODEV; + + *chipid = be32_to_cpup(prop); + + rc = get_phb_index(np, phb_index); + if (rc) { + pr_err("cxl: invalid phb index\n"); + of_node_put(np); + return rc; + } + + *capp_unit_id = get_capp_unit_id(np, *phb_index); + of_node_put(np); + if (!*capp_unit_id) { + pr_err("cxl: No capp unit found for PHB[%lld,%d]. 
Make sure the adapter is on a capi-compatible slot\n", + *chipid, *phb_index); + return -ENODEV; + } + + return 0; +} + +static DEFINE_MUTEX(indications_mutex); + +static int get_phb_indications(struct pci_dev *dev, u64 *capiind, u64 *asnind, + u64 *nbwind) +{ + static u64 nbw, asn, capi = 0; + struct device_node *np; + const __be32 *prop; + + mutex_lock(&indications_mutex); + if (!capi) { + if (!(np = pnv_pci_get_phb_node(dev))) { + mutex_unlock(&indications_mutex); + return -ENODEV; + } + + prop = of_get_property(np, "ibm,phb-indications", NULL); + if (!prop) { + nbw = 0x0300UL; /* legacy values */ + asn = 0x0400UL; + capi = 0x0200UL; + } else { + nbw = (u64)be32_to_cpu(prop[2]); + asn = (u64)be32_to_cpu(prop[1]); + capi = (u64)be32_to_cpu(prop[0]); + } + of_node_put(np); + } + *capiind = capi; + *asnind = asn; + *nbwind = nbw; + mutex_unlock(&indications_mutex); + return 0; +} + +int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg) +{ + u64 xsl_dsnctl; + u64 capiind, asnind, nbwind; + + /* + * CAPI Identifier bits [0:7] + * bit 61:60 MSI bits --> 0 + * bit 59 TVT selector --> 0 + */ + if (get_phb_indications(dev, &capiind, &asnind, &nbwind)) + return -ENODEV; + + /* + * Tell XSL where to route data to. + * The field chipid should match the PHB CAPI_CMPM register + */ + xsl_dsnctl = (capiind << (63-15)); /* Bit 57 */ + xsl_dsnctl |= (capp_unit_id << (63-15)); + + /* nMMU_ID Defaults to: b’000001001’*/ + xsl_dsnctl |= ((u64)0x09 << (63-28)); + + /* + * Used to identify CAPI packets which should be sorted into + * the Non-Blocking queues by the PHB. This field should match + * the PHB PBL_NBW_CMPM register + * nbwind=0x03, bits [57:58], must include capi indicator. + * Not supported on P9 DD1. + */ + xsl_dsnctl |= (nbwind << (63-55)); + + /* + * Upper 16b address bits of ASB_Notify messages sent to the + * system. Need to match the PHB’s ASN Compare/Mask Register. + * Not supported on P9 DD1. + */ + xsl_dsnctl |= asnind; + + *reg = xsl_dsnctl; + return 0; +} + +static int init_implementation_adapter_regs_psl9(struct cxl *adapter, + struct pci_dev *dev) +{ + u64 xsl_dsnctl, psl_fircntl; + u64 chipid; + u32 phb_index; + u64 capp_unit_id; + u64 psl_debug; + int rc; + + rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); + if (rc) + return rc; + + rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &xsl_dsnctl); + if (rc) + return rc; + + cxl_p1_write(adapter, CXL_XSL9_DSNCTL, xsl_dsnctl); + + /* Set fir_cntl to recommended value for production env */ + psl_fircntl = (0x2ULL << (63-3)); /* ce_report */ + psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */ + psl_fircntl |= 0x1ULL; /* ce_thresh */ + cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl); + + /* Setup the PSL to transmit packets on the PCIe before the + * CAPP is enabled. Make sure that CAPP virtual machines are disabled + */ + cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000012A10ULL); + + /* + * A response to an ASB_Notify request is returned by the + * system as an MMIO write to the address defined in + * the PSL_TNR_ADDR register. 
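+ * (the reset value below is also what cxl_configure_adapter() programs
+ * as the POWER9 tunnel BAR)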
+ * keep the Reset Value: 0x00020000E0000000
+ */
+
+ /* Enable XSL rty limit */
+ cxl_p1_write(adapter, CXL_XSL9_DEF, 0x51F8000000000005ULL);
+
+ /* Change XSL_INV dummy read threshold */
+ cxl_p1_write(adapter, CXL_XSL9_INV, 0x0000040007FFC200ULL);
+
+ if (phb_index == 3) {
+ /* disable machines 31-47 and 20-27 for DMA */
+ cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000FF3FFFF0000ULL);
+ }
+
+ /* Snoop machines */
+ cxl_p1_write(adapter, CXL_PSL9_APCDEDALLOC, 0x800F000200000000ULL);
+
+ /* Enable NORST and DD2 features */
+ cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL);
+
+ /*
+ * Check if the PSL has a data-cache. We need to flush the adapter
+ * data-cache when it is about to be removed.
+ */
+ psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG);
+ if (psl_debug & CXL_PSL_DEBUG_CDC) {
+ dev_dbg(&dev->dev, "No data-cache present\n");
+ adapter->native->no_data_cache = true;
+ }
+
+ return 0;
+}
+
+static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci_dev *dev)
+{
+ u64 psl_dsnctl, psl_fircntl;
+ u64 chipid;
+ u32 phb_index;
+ u64 capp_unit_id;
+ int rc;
+
+ rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
+ if (rc)
+ return rc;
+
+ psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */
+ psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */
+ /* Tell PSL where to route data to */
+ psl_dsnctl |= (chipid << (63-5));
+ psl_dsnctl |= (capp_unit_id << (63-13));
+
+ cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
+ cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
+ /* snoop write mask */
+ cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
+ /* set fir_cntl to recommended value for production env */
+ psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
+ psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
+ psl_fircntl |= 0x1ULL; /* ce_thresh */
+ cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl);
+ /* for debugging with trace arrays */
+ cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);
+
+ return 0;
+}
+
+/* PSL */
+#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
+#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
+/* For the PSL this is a multiple for 0 < n <= 7: */
+#define PSL_2048_250MHZ_CYCLES 1
+
+static void write_timebase_ctrl_psl8(struct cxl *adapter)
+{
+ cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
+ TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
+}
+
+static u64 timebase_read_psl9(struct cxl *adapter)
+{
+ return cxl_p1_read(adapter, CXL_PSL9_Timebase);
+}
+
+static u64 timebase_read_psl8(struct cxl *adapter)
+{
+ return cxl_p1_read(adapter, CXL_PSL_Timebase);
+}
+
+static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
+{
+ struct device_node *np;
+
+ adapter->psl_timebase_synced = false;
+
+ if (!(np = pnv_pci_get_phb_node(dev)))
+ return;
+
+ /* Do not fail when CAPP timebase sync is not supported by OPAL */
+ of_node_get(np);
+ if (!
of_get_property(np, "ibm,capp-timebase-sync", NULL)) { + of_node_put(np); + dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n"); + return; + } + of_node_put(np); + + /* + * Setup PSL Timebase Control and Status register + * with the recommended Timebase Sync Count value + */ + if (adapter->native->sl_ops->write_timebase_ctrl) + adapter->native->sl_ops->write_timebase_ctrl(adapter); + + /* Enable PSL Timebase */ + cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000); + cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb); + + return; +} + +static int init_implementation_afu_regs_psl9(struct cxl_afu *afu) +{ + return 0; +} + +static int init_implementation_afu_regs_psl8(struct cxl_afu *afu) +{ + /* read/write masks for this slice */ + cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL); + /* APC read/write masks for this slice */ + cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL); + /* for debugging with trace arrays */ + cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL); + cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S); + + return 0; +} + +int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq, + unsigned int virq) +{ + struct pci_dev *dev = to_pci_dev(adapter->dev.parent); + + return pnv_cxl_ioda_msi_setup(dev, hwirq, virq); +} + +int cxl_update_image_control(struct cxl *adapter) +{ + struct pci_dev *dev = to_pci_dev(adapter->dev.parent); + int rc; + int vsec; + u8 image_state; + + if (!(vsec = find_cxl_vsec(dev))) { + dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); + return -ENODEV; + } + + if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) { + dev_err(&dev->dev, "failed to read image state: %i\n", rc); + return rc; + } + + if (adapter->perst_loads_image) + image_state |= CXL_VSEC_PERST_LOADS_IMAGE; + else + image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE; + + if (adapter->perst_select_user) + image_state |= CXL_VSEC_PERST_SELECT_USER; + else + image_state &= ~CXL_VSEC_PERST_SELECT_USER; + + if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) { + dev_err(&dev->dev, "failed to update image control: %i\n", rc); + return rc; + } + + return 0; +} + +int cxl_pci_alloc_one_irq(struct cxl *adapter) +{ + struct pci_dev *dev = to_pci_dev(adapter->dev.parent); + + return pnv_cxl_alloc_hwirqs(dev, 1); +} + +void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq) +{ + struct pci_dev *dev = to_pci_dev(adapter->dev.parent); + + return pnv_cxl_release_hwirqs(dev, hwirq, 1); +} + +int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs, + struct cxl *adapter, unsigned int num) +{ + struct pci_dev *dev = to_pci_dev(adapter->dev.parent); + + return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num); +} + +void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs, + struct cxl *adapter) +{ + struct pci_dev *dev = to_pci_dev(adapter->dev.parent); + + pnv_cxl_release_hwirq_ranges(irqs, dev); +} + +static int setup_cxl_bars(struct pci_dev *dev) +{ + /* Safety check in case we get backported to < 3.17 without M64 */ + if ((p1_base(dev) < 0x100000000ULL) || + (p2_base(dev) < 0x100000000ULL)) { + dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n"); + return -ENODEV; + } + + /* + * BAR 4/5 has a special meaning for CXL and must be programmed with a + * special value corresponding to the CXL protocol address range. 
+ * For POWER 8/9 that means bits 48:49 must be set to 10 + */ + pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000); + pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000); + + return 0; +} + +/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */ +static int switch_card_to_cxl(struct pci_dev *dev) +{ + int vsec; + u8 val; + int rc; + + dev_info(&dev->dev, "switch card to CXL\n"); + + if (!(vsec = find_cxl_vsec(dev))) { + dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); + return -ENODEV; + } + + if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) { + dev_err(&dev->dev, "failed to read current mode control: %i", rc); + return rc; + } + val &= ~CXL_VSEC_PROTOCOL_MASK; + val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE; + if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) { + dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc); + return rc; + } + /* + * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states + * we must wait 100ms after this mode switch before touching + * PCIe config space. + */ + msleep(100); + + return 0; +} + +static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) +{ + u64 p1n_base, p2n_base, afu_desc; + const u64 p1n_size = 0x100; + const u64 p2n_size = 0x1000; + + p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size); + p2n_base = p2_base(dev) + (afu->slice * p2n_size); + afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size)); + afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size); + + if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size))) + goto err; + if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size))) + goto err1; + if (afu_desc) { + if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size))) + goto err2; + } + + return 0; +err2: + iounmap(afu->p2n_mmio); +err1: + iounmap(afu->native->p1n_mmio); +err: + dev_err(&afu->dev, "Error mapping AFU MMIO regions\n"); + return -ENOMEM; +} + +static void pci_unmap_slice_regs(struct cxl_afu *afu) +{ + if (afu->p2n_mmio) { + iounmap(afu->p2n_mmio); + afu->p2n_mmio = NULL; + } + if (afu->native->p1n_mmio) { + iounmap(afu->native->p1n_mmio); + afu->native->p1n_mmio = NULL; + } + if (afu->native->afu_desc_mmio) { + iounmap(afu->native->afu_desc_mmio); + afu->native->afu_desc_mmio = NULL; + } +} + +void cxl_pci_release_afu(struct device *dev) +{ + struct cxl_afu *afu = to_cxl_afu(dev); + + pr_devel("%s\n", __func__); + + idr_destroy(&afu->contexts_idr); + cxl_release_spa(afu); + + kfree(afu->native); + kfree(afu); +} + +/* Expects AFU struct to have recently been zeroed out */ +static int cxl_read_afu_descriptor(struct cxl_afu *afu) +{ + u64 val; + + val = AFUD_READ_INFO(afu); + afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val); + afu->max_procs_virtualised = AFUD_NUM_PROCS(val); + afu->crs_num = AFUD_NUM_CRS(val); + + if (AFUD_AFU_DIRECTED(val)) + afu->modes_supported |= CXL_MODE_DIRECTED; + if (AFUD_DEDICATED_PROCESS(val)) + afu->modes_supported |= CXL_MODE_DEDICATED; + if (AFUD_TIME_SLICED(val)) + afu->modes_supported |= CXL_MODE_TIME_SLICED; + + val = AFUD_READ_PPPSA(afu); + afu->pp_size = AFUD_PPPSA_LEN(val) * 4096; + afu->psa = AFUD_PPPSA_PSA(val); + if ((afu->pp_psa = AFUD_PPPSA_PP(val))) + afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu); + + val = AFUD_READ_CR(afu); + afu->crs_len = AFUD_CR_LEN(val) * 256; + afu->crs_offset = AFUD_READ_CR_OFF(afu); + + + /* eb_len is in multiple of 4K */ + afu->eb_len 
= AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096; + afu->eb_offset = AFUD_READ_EB_OFF(afu); + + /* eb_off is 4K aligned so lower 12 bits are always zero */ + if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) { + dev_warn(&afu->dev, + "Invalid AFU error buffer offset %Lx\n", + afu->eb_offset); + dev_info(&afu->dev, + "Ignoring AFU error buffer in the descriptor\n"); + /* indicate that no afu buffer exists */ + afu->eb_len = 0; + } + + return 0; +} + +static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu) +{ + int i, rc; + u32 val; + + if (afu->psa && afu->adapter->ps_size < + (afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) { + dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n"); + return -ENODEV; + } + + if (afu->pp_psa && (afu->pp_size < PAGE_SIZE)) + dev_warn(&afu->dev, "AFU uses pp_size(%#016llx) < PAGE_SIZE per-process PSA!\n", afu->pp_size); + + for (i = 0; i < afu->crs_num; i++) { + rc = cxl_ops->afu_cr_read32(afu, i, 0, &val); + if (rc || val == 0) { + dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i); + return -EINVAL; + } + } + + if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) { + /* + * We could also check this for the dedicated process model + * since the architecture indicates it should be set to 1, but + * in that case we ignore the value and I'd rather not risk + * breaking any existing dedicated process AFUs that left it as + * 0 (not that I'm aware of any). It is clearly an error for an + * AFU directed AFU to set this to 0, and would have previously + * triggered a bug resulting in the maximum not being enforced + * at all since idr_alloc treats 0 as no maximum. + */ + dev_err(&afu->dev, "AFU does not support any processes\n"); + return -EINVAL; + } + + return 0; +} + +static int sanitise_afu_regs_psl9(struct cxl_afu *afu) +{ + u64 reg; + + /* + * Clear out any regs that contain either an IVTE or address or may be + * waiting on an acknowledgment to try to be a bit safer as we bring + * it online + */ + reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An); + if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) { + dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg); + if (cxl_ops->afu_reset(afu)) + return -EIO; + if (cxl_afu_disable(afu)) + return -EIO; + if (cxl_psl_purge(afu)) + return -EIO; + } + cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000); + cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000); + reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An); + if (reg) { + dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg); + if (reg & CXL_PSL9_DSISR_An_TF) + cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); + else + cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); + } + if (afu->adapter->native->sl_ops->register_serr_irq) { + reg = cxl_p1n_read(afu, CXL_PSL_SERR_An); + if (reg) { + if (reg & ~0x000000007fffffff) + dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg); + cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff); + } + } + reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); + if (reg) { + dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg); + cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg); + } + + return 0; +} + +static int sanitise_afu_regs_psl8(struct cxl_afu *afu) +{ + u64 reg; + + /* + * Clear out any regs that contain either an IVTE or address or may be + * waiting on an acknowledgement to try to be a bit safer as we bring + * it online + */ + reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An); + if ((reg & 
CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
+ dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
+ if (cxl_ops->afu_reset(afu))
+ return -EIO;
+ if (cxl_afu_disable(afu))
+ return -EIO;
+ if (cxl_psl_purge(afu))
+ return -EIO;
+ }
+ cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
+ cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
+ cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
+ cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
+ cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
+ cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
+ cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
+ cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
+ cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
+ cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
+ cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
+ reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
+ if (reg) {
+ dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
+ if (reg & CXL_PSL_DSISR_TRANS)
+ cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
+ else
+ cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
+ }
+ if (afu->adapter->native->sl_ops->register_serr_irq) {
+ reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+ if (reg) {
+ if (reg & ~0xffff)
+ dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
+ }
+ }
+ reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
+ if (reg) {
+ dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
+ cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
+ }
+
+ return 0;
+}
+
+#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
+/*
+ * afu_eb_read:
+ * Called from sysfs and reads the afu error info buffer. The h/w only
+ * supports 4/8 byte aligned accesses, so if the requested offset/count
+ * aren't 8 byte aligned the function uses a bounce buffer of at most
+ * PAGE_SIZE.
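+ * For example, off = 3 with count = 10 reads the aligned window [0, 16)
+ * into the bounce buffer and copies back the 10 bytes starting at
+ * offset 3.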
+ */ +ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, + loff_t off, size_t count) +{ + loff_t aligned_start, aligned_end; + size_t aligned_length; + void *tbuf; + const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset; + + if (count == 0 || off < 0 || (size_t)off >= afu->eb_len) + return 0; + + /* calculate aligned read window */ + count = min((size_t)(afu->eb_len - off), count); + aligned_start = round_down(off, 8); + aligned_end = round_up(off + count, 8); + aligned_length = aligned_end - aligned_start; + + /* max we can copy in one read is PAGE_SIZE */ + if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) { + aligned_length = ERR_BUFF_MAX_COPY_SIZE; + count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); + } + + /* use bounce buffer for copy */ + tbuf = (void *)__get_free_page(GFP_KERNEL); + if (!tbuf) + return -ENOMEM; + + /* perform aligned read from the mmio region */ + memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length); + memcpy(buf, tbuf + (off & 0x7), count); + + free_page((unsigned long)tbuf); + + return count; +} + +static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) +{ + int rc; + + if ((rc = pci_map_slice_regs(afu, adapter, dev))) + return rc; + + if (adapter->native->sl_ops->sanitise_afu_regs) { + rc = adapter->native->sl_ops->sanitise_afu_regs(afu); + if (rc) + goto err1; + } + + /* We need to reset the AFU before we can read the AFU descriptor */ + if ((rc = cxl_ops->afu_reset(afu))) + goto err1; + + if (cxl_verbose) + dump_afu_descriptor(afu); + + if ((rc = cxl_read_afu_descriptor(afu))) + goto err1; + + if ((rc = cxl_afu_descriptor_looks_ok(afu))) + goto err1; + + if (adapter->native->sl_ops->afu_regs_init) + if ((rc = adapter->native->sl_ops->afu_regs_init(afu))) + goto err1; + + if (adapter->native->sl_ops->register_serr_irq) + if ((rc = adapter->native->sl_ops->register_serr_irq(afu))) + goto err1; + + if ((rc = cxl_native_register_psl_irq(afu))) + goto err2; + + atomic_set(&afu->configured_state, 0); + return 0; + +err2: + if (adapter->native->sl_ops->release_serr_irq) + adapter->native->sl_ops->release_serr_irq(afu); +err1: + pci_unmap_slice_regs(afu); + return rc; +} + +static void pci_deconfigure_afu(struct cxl_afu *afu) +{ + /* + * It's okay to deconfigure when AFU is already locked, otherwise wait + * until there are no readers + */ + if (atomic_read(&afu->configured_state) != -1) { + while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1) + schedule(); + } + cxl_native_release_psl_irq(afu); + if (afu->adapter->native->sl_ops->release_serr_irq) + afu->adapter->native->sl_ops->release_serr_irq(afu); + pci_unmap_slice_regs(afu); +} + +static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) +{ + struct cxl_afu *afu; + int rc = -ENOMEM; + + afu = cxl_alloc_afu(adapter, slice); + if (!afu) + return -ENOMEM; + + afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL); + if (!afu->native) + goto err_free_afu; + + mutex_init(&afu->native->spa_mutex); + + rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice); + if (rc) + goto err_free_native; + + rc = pci_configure_afu(afu, adapter, dev); + if (rc) + goto err_free_native; + + /* Don't care if this fails */ + cxl_debugfs_afu_add(afu); + + /* + * After we call this function we must not free the afu directly, even + * if it returns an error! 
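+ * The driver core owns the afu from here on: the error path below goes
+ * through put_device(), which ends up in cxl_pci_release_afu().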
+ */ + if ((rc = cxl_register_afu(afu))) + goto err_put_dev; + + if ((rc = cxl_sysfs_afu_add(afu))) + goto err_del_dev; + + adapter->afu[afu->slice] = afu; + + if ((rc = cxl_pci_vphb_add(afu))) + dev_info(&afu->dev, "Can't register vPHB\n"); + + return 0; + +err_del_dev: + device_del(&afu->dev); +err_put_dev: + pci_deconfigure_afu(afu); + cxl_debugfs_afu_remove(afu); + put_device(&afu->dev); + return rc; + +err_free_native: + kfree(afu->native); +err_free_afu: + kfree(afu); + return rc; + +} + +static void cxl_pci_remove_afu(struct cxl_afu *afu) +{ + pr_devel("%s\n", __func__); + + if (!afu) + return; + + cxl_pci_vphb_remove(afu); + cxl_sysfs_afu_remove(afu); + cxl_debugfs_afu_remove(afu); + + spin_lock(&afu->adapter->afu_list_lock); + afu->adapter->afu[afu->slice] = NULL; + spin_unlock(&afu->adapter->afu_list_lock); + + cxl_context_detach_all(afu); + cxl_ops->afu_deactivate_mode(afu, afu->current_mode); + + pci_deconfigure_afu(afu); + device_unregister(&afu->dev); +} + +int cxl_pci_reset(struct cxl *adapter) +{ + struct pci_dev *dev = to_pci_dev(adapter->dev.parent); + int rc; + + if (adapter->perst_same_image) { + dev_warn(&dev->dev, + "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n"); + return -EINVAL; + } + + dev_info(&dev->dev, "CXL reset\n"); + + /* + * The adapter is about to be reset, so ignore errors. + */ + cxl_data_cache_flush(adapter); + + /* pcie_warm_reset requests a fundamental pci reset which includes a + * PERST assert/deassert. PERST triggers a loading of the image + * if "user" or "factory" is selected in sysfs */ + if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) { + dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n"); + return rc; + } + + return rc; +} + +static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev) +{ + if (pci_request_region(dev, 2, "priv 2 regs")) + goto err1; + if (pci_request_region(dev, 0, "priv 1 regs")) + goto err2; + + pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx", + p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev)); + + if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev)))) + goto err3; + + if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev)))) + goto err4; + + return 0; + +err4: + iounmap(adapter->native->p1_mmio); + adapter->native->p1_mmio = NULL; +err3: + pci_release_region(dev, 0); +err2: + pci_release_region(dev, 2); +err1: + return -ENOMEM; +} + +static void cxl_unmap_adapter_regs(struct cxl *adapter) +{ + if (adapter->native->p1_mmio) { + iounmap(adapter->native->p1_mmio); + adapter->native->p1_mmio = NULL; + pci_release_region(to_pci_dev(adapter->dev.parent), 2); + } + if (adapter->native->p2_mmio) { + iounmap(adapter->native->p2_mmio); + adapter->native->p2_mmio = NULL; + pci_release_region(to_pci_dev(adapter->dev.parent), 0); + } +} + +static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev) +{ + int vsec; + u32 afu_desc_off, afu_desc_size; + u32 ps_off, ps_size; + u16 vseclen; + u8 image_state; + + if (!(vsec = find_cxl_vsec(dev))) { + dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); + return -ENODEV; + } + + CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen); + if (vseclen < CXL_VSEC_MIN_SIZE) { + dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n"); + return -EINVAL; + } + + CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status); + CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev); + CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major); + CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor); + 
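+ /* the image state read below also feeds the perst_* flags consumed by
+ * cxl_update_image_control() */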
CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image); + CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state); + adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED); + adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED); + adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE); + + CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices); + CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off); + CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size); + CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off); + CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size); + + /* Convert everything to bytes, because there is NO WAY I'd look at the + * code a month later and forget what units these are in ;-) */ + adapter->native->ps_off = ps_off * 64 * 1024; + adapter->ps_size = ps_size * 64 * 1024; + adapter->native->afu_desc_off = afu_desc_off * 64 * 1024; + adapter->native->afu_desc_size = afu_desc_size * 64 * 1024; + + /* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */ + adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices; + + return 0; +} + +/* + * Workaround a PCIe Host Bridge defect on some cards, that can cause + * malformed Transaction Layer Packet (TLP) errors to be erroneously + * reported. Mask this error in the Uncorrectable Error Mask Register. + * + * The upper nibble of the PSL revision is used to distinguish between + * different cards. The affected ones have it set to 0. + */ +static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev) +{ + int aer; + u32 data; + + if (adapter->psl_rev & 0xf000) + return; + if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))) + return; + pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data); + if (data & PCI_ERR_UNC_MALF_TLP) + if (data & PCI_ERR_UNC_INTN) + return; + data |= PCI_ERR_UNC_MALF_TLP; + data |= PCI_ERR_UNC_INTN; + pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data); +} + +static bool cxl_compatible_caia_version(struct cxl *adapter) +{ + if (cxl_is_power8() && (adapter->caia_major == 1)) + return true; + + if (cxl_is_power9() && (adapter->caia_major == 2)) + return true; + + return false; +} + +static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev) +{ + if (adapter->vsec_status & CXL_STATUS_SECOND_PORT) + return -EBUSY; + + if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) { + dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n"); + return -EINVAL; + } + + if (!cxl_compatible_caia_version(adapter)) { + dev_info(&dev->dev, "Ignoring card. 
PSL type is not supported (caia version: %d)\n", + adapter->caia_major); + return -ENODEV; + } + + if (!adapter->slices) { + /* Once we support dynamic reprogramming we can use the card if + * it supports loadable AFUs */ + dev_err(&dev->dev, "ABORTING: Device has no AFUs\n"); + return -EINVAL; + } + + if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) { + dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n"); + return -EINVAL; + } + + if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) { + dev_err(&dev->dev, "ABORTING: Problem state size larger than " + "available in BAR2: 0x%llx > 0x%llx\n", + adapter->ps_size, p2_size(dev) - adapter->native->ps_off); + return -EINVAL; + } + + return 0; +} + +ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len) +{ + return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf); +} + +static void cxl_release_adapter(struct device *dev) +{ + struct cxl *adapter = to_cxl_adapter(dev); + + pr_devel("cxl_release_adapter\n"); + + cxl_remove_adapter_nr(adapter); + + kfree(adapter->native); + kfree(adapter); +} + +#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31)) + +static int sanitise_adapter_regs(struct cxl *adapter) +{ + int rc = 0; + + /* Clear PSL tberror bit by writing 1 to it */ + cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror); + + if (adapter->native->sl_ops->invalidate_all) { + /* do not invalidate ERAT entries when not reloading on PERST */ + if (cxl_is_power9() && (adapter->perst_loads_image)) + return 0; + rc = adapter->native->sl_ops->invalidate_all(adapter); + } + + return rc; +} + +/* This should contain *only* operations that can safely be done in + * both creation and recovery. + */ +static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) +{ + int rc; + + adapter->dev.parent = &dev->dev; + adapter->dev.release = cxl_release_adapter; + pci_set_drvdata(dev, adapter); + + rc = pci_enable_device(dev); + if (rc) { + dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc); + return rc; + } + + if ((rc = cxl_read_vsec(adapter, dev))) + return rc; + + if ((rc = cxl_vsec_looks_ok(adapter, dev))) + return rc; + + cxl_fixup_malformed_tlp(adapter, dev); + + if ((rc = setup_cxl_bars(dev))) + return rc; + + if ((rc = switch_card_to_cxl(dev))) + return rc; + + if ((rc = cxl_update_image_control(adapter))) + return rc; + + if ((rc = cxl_map_adapter_regs(adapter, dev))) + return rc; + + if ((rc = sanitise_adapter_regs(adapter))) + goto err; + + if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev))) + goto err; + + /* Required for devices using CAPP DMA mode, harmless for others */ + pci_set_master(dev); + + adapter->tunneled_ops_supported = false; + + if (cxl_is_power9()) { + if (pnv_pci_set_tunnel_bar(dev, 0x00020000E0000000ull, 1)) + dev_info(&dev->dev, "Tunneled operations unsupported\n"); + else + adapter->tunneled_ops_supported = true; + } + + if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode))) + goto err; + + /* If recovery happened, the last step is to turn on snooping. 
+ * In the non-recovery case this has no effect */ + if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON))) + goto err; + + /* Ignore error, adapter init is not dependent on timebase sync */ + cxl_setup_psl_timebase(adapter, dev); + + if ((rc = cxl_native_register_psl_err_irq(adapter))) + goto err; + + return 0; + +err: + cxl_unmap_adapter_regs(adapter); + return rc; + +} + +static void cxl_deconfigure_adapter(struct cxl *adapter) +{ + struct pci_dev *pdev = to_pci_dev(adapter->dev.parent); + + if (cxl_is_power9()) + pnv_pci_set_tunnel_bar(pdev, 0x00020000E0000000ull, 0); + + cxl_native_release_psl_err_irq(adapter); + cxl_unmap_adapter_regs(adapter); + + pci_disable_device(pdev); +} + +static void cxl_stop_trace_psl9(struct cxl *adapter) +{ + int traceid; + u64 trace_state, trace_mask; + struct pci_dev *dev = to_pci_dev(adapter->dev.parent); + + /* read each trace array state and issue an mmio to stop it if needed */ + for (traceid = 0; traceid <= CXL_PSL9_TRACEID_MAX; ++traceid) { + trace_state = cxl_p1_read(adapter, CXL_PSL9_CTCCFG); + trace_mask = (0x3ULL << (62 - traceid * 2)); + trace_state = (trace_state & trace_mask) >> (62 - traceid * 2); + dev_dbg(&dev->dev, "cxl: Traceid-%d trace_state=0x%0llX\n", + traceid, trace_state); + + /* issue mmio if the trace array isn't in FIN state */ + if (trace_state != CXL_PSL9_TRACESTATE_FIN) + cxl_p1_write(adapter, CXL_PSL9_TRACECFG, + 0x8400000000000000ULL | traceid); + } +} + +static void cxl_stop_trace_psl8(struct cxl *adapter) +{ + int slice; + + /* Stop the trace */ + cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL); + + /* Stop the slice traces */ + spin_lock(&adapter->afu_list_lock); + for (slice = 0; slice < adapter->slices; slice++) { + if (adapter->afu[slice]) + cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE, + 0x8000000000000000LL); + } + spin_unlock(&adapter->afu_list_lock); +} + +static const struct cxl_service_layer_ops psl9_ops = { + .adapter_regs_init = init_implementation_adapter_regs_psl9, + .invalidate_all = cxl_invalidate_all_psl9, + .afu_regs_init = init_implementation_afu_regs_psl9, + .sanitise_afu_regs = sanitise_afu_regs_psl9, + .register_serr_irq = cxl_native_register_serr_irq, + .release_serr_irq = cxl_native_release_serr_irq, + .handle_interrupt = cxl_irq_psl9, + .fail_irq = cxl_fail_irq_psl, + .activate_dedicated_process = cxl_activate_dedicated_process_psl9, + .attach_afu_directed = cxl_attach_afu_directed_psl9, + .attach_dedicated_process = cxl_attach_dedicated_process_psl9, + .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl9, + .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9, + .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9, + .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9, + .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl9, + .debugfs_stop_trace = cxl_stop_trace_psl9, + .timebase_read = timebase_read_psl9, + .capi_mode = OPAL_PHB_CAPI_MODE_CAPI, + .needs_reset_before_disable = true, +}; + +static const struct cxl_service_layer_ops psl8_ops = { + .adapter_regs_init = init_implementation_adapter_regs_psl8, + .invalidate_all = cxl_invalidate_all_psl8, + .afu_regs_init = init_implementation_afu_regs_psl8, + .sanitise_afu_regs = sanitise_afu_regs_psl8, + .register_serr_irq = cxl_native_register_serr_irq, + .release_serr_irq = cxl_native_release_serr_irq, + .handle_interrupt = cxl_irq_psl8, + .fail_irq = cxl_fail_irq_psl, + .activate_dedicated_process = cxl_activate_dedicated_process_psl8, + .attach_afu_directed = cxl_attach_afu_directed_psl8, +
.attach_dedicated_process = cxl_attach_dedicated_process_psl8, + .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8, + .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl8, + .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl8, + .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl8, + .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl8, + .debugfs_stop_trace = cxl_stop_trace_psl8, + .write_timebase_ctrl = write_timebase_ctrl_psl8, + .timebase_read = timebase_read_psl8, + .capi_mode = OPAL_PHB_CAPI_MODE_CAPI, + .needs_reset_before_disable = true, +}; + +static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) +{ + if (cxl_is_power8()) { + dev_info(&dev->dev, "Device uses a PSL8\n"); + adapter->native->sl_ops = &psl8_ops; + } else { + dev_info(&dev->dev, "Device uses a PSL9\n"); + adapter->native->sl_ops = &psl9_ops; + } +} + + +static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev) +{ + struct cxl *adapter; + int rc; + + adapter = cxl_alloc_adapter(); + if (!adapter) + return ERR_PTR(-ENOMEM); + + adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL); + if (!adapter->native) { + rc = -ENOMEM; + goto err_release; + } + + set_sl_ops(adapter, dev); + + /* Set defaults for parameters which need to persist over + * configure/reconfigure + */ + adapter->perst_loads_image = true; + adapter->perst_same_image = false; + + rc = cxl_configure_adapter(adapter, dev); + if (rc) { + pci_disable_device(dev); + goto err_release; + } + + /* Don't care if this one fails: */ + cxl_debugfs_adapter_add(adapter); + + /* + * After we call this function we must not free the adapter directly, + * even if it returns an error! + */ + if ((rc = cxl_register_adapter(adapter))) + goto err_put_dev; + + if ((rc = cxl_sysfs_adapter_add(adapter))) + goto err_del_dev; + + /* Release the context lock as adapter is configured */ + cxl_adapter_context_unlock(adapter); + + return adapter; + +err_del_dev: + device_del(&adapter->dev); +err_put_dev: + /* This should mirror cxl_remove_adapter, except without the + * sysfs parts + */ + cxl_debugfs_adapter_remove(adapter); + cxl_deconfigure_adapter(adapter); + put_device(&adapter->dev); + return ERR_PTR(rc); + +err_release: + cxl_release_adapter(&adapter->dev); + return ERR_PTR(rc); +} + +static void cxl_pci_remove_adapter(struct cxl *adapter) +{ + pr_devel("cxl_remove_adapter\n"); + + cxl_sysfs_adapter_remove(adapter); + cxl_debugfs_adapter_remove(adapter); + + /* + * Flush adapter datacache as it's about to be removed.
+ */ + cxl_data_cache_flush(adapter); + + cxl_deconfigure_adapter(adapter); + + device_unregister(&adapter->dev); +} + +#define CXL_MAX_PCIEX_PARENT 2 + +int cxl_slot_is_switched(struct pci_dev *dev) +{ + struct device_node *np; + int depth = 0; + + if (!(np = pci_device_to_OF_node(dev))) { + pr_err("cxl: np = NULL\n"); + return -ENODEV; + } + of_node_get(np); + while (np) { + np = of_get_next_parent(np); + if (!of_node_is_type(np, "pciex")) + break; + depth++; + } + of_node_put(np); + return (depth > CXL_MAX_PCIEX_PARENT); +} + +static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct cxl *adapter; + int slice; + int rc; + + if (cxl_pci_is_vphb_device(dev)) { + dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n"); + return -ENODEV; + } + + if (cxl_slot_is_switched(dev)) { + dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n"); + return -ENODEV; + } + + if (cxl_is_power9() && !radix_enabled()) { + dev_info(&dev->dev, "Only Radix mode supported\n"); + return -ENODEV; + } + + if (cxl_verbose) + dump_cxl_config_space(dev); + + adapter = cxl_pci_init_adapter(dev); + if (IS_ERR(adapter)) { + dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter)); + return PTR_ERR(adapter); + } + + for (slice = 0; slice < adapter->slices; slice++) { + if ((rc = pci_init_afu(adapter, slice, dev))) { + dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc); + continue; + } + + rc = cxl_afu_select_best_mode(adapter->afu[slice]); + if (rc) + dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc); + } + + return 0; +} + +static void cxl_remove(struct pci_dev *dev) +{ + struct cxl *adapter = pci_get_drvdata(dev); + struct cxl_afu *afu; + int i; + + /* + * Lock to prevent someone grabbing a ref through the adapter list as + * we are removing it + */ + for (i = 0; i < adapter->slices; i++) { + afu = adapter->afu[i]; + cxl_pci_remove_afu(afu); + } + cxl_pci_remove_adapter(adapter); +} + +static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu, + pci_channel_state_t state) +{ + struct pci_dev *afu_dev; + pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET; + pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET; + + /* There should only be one entry, but go through the list + * anyway + */ + if (afu == NULL || afu->phb == NULL) + return result; + + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { + if (!afu_dev->driver) + continue; + + afu_dev->error_state = state; + + if (afu_dev->driver->err_handler) + afu_result = afu_dev->driver->err_handler->error_detected(afu_dev, + state); + /* Disconnect trumps all, NONE trumps NEED_RESET */ + if (afu_result == PCI_ERS_RESULT_DISCONNECT) + result = PCI_ERS_RESULT_DISCONNECT; + else if ((afu_result == PCI_ERS_RESULT_NONE) && + (result == PCI_ERS_RESULT_NEED_RESET)) + result = PCI_ERS_RESULT_NONE; + } + return result; +} + +static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct cxl *adapter = pci_get_drvdata(pdev); + struct cxl_afu *afu; + pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET; + pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET; + int i; + + /* At this point, we could still have an interrupt pending. + * Let's try to get them out of the way before they do + * anything we don't like. + */ + schedule(); + + /* If we're permanently dead, give up. 
*/ + if (state == pci_channel_io_perm_failure) { + spin_lock(&adapter->afu_list_lock); + for (i = 0; i < adapter->slices; i++) { + afu = adapter->afu[i]; + /* + * Tell the AFU drivers; but we don't care what they + * say, we're going away. + */ + cxl_vphb_error_detected(afu, state); + } + spin_unlock(&adapter->afu_list_lock); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* Are we reflashing? + * + * If we reflash, we could come back as something entirely + * different, including a non-CAPI card. As such, by default + * we don't participate in the process. We'll be unbound and + * the slot re-probed. (TODO: check EEH doesn't blindly rebind + * us!) + * + * However, this isn't the entire story: for reliability + * reasons, we usually want to reflash the FPGA on PERST in + * order to get back to a more reliable known-good state. + * + * This causes us a bit of a problem: if we reflash we can't + * trust that we'll come back the same - we could have a new + * image and been PERSTed in order to load that + * image. However, most of the time we actually *will* come + * back the same - for example a regular EEH event. + * + * Therefore, we allow the user to assert that the image is + * indeed the same and that we should continue on into EEH + * anyway. + */ + if (adapter->perst_loads_image && !adapter->perst_same_image) { + /* TODO take the PHB out of CXL mode */ + dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n"); + return PCI_ERS_RESULT_NONE; + } + + /* + * At this point, we want to try to recover. We'll always + * need a complete slot reset: we don't trust any other reset. + * + * Now, we go through each AFU: + * - We send the driver, if bound, an error_detected callback. + * We expect it to clean up, but it can also tell us to give + * up and permanently detach the card. To simplify things, if + * any bound AFU driver doesn't support EEH, we give up on EEH. + * + * - We detach all contexts associated with the AFU. This + * does not free them, but puts them into a CLOSED state + * which causes any of the associated files to return useful + * errors to userland. It also unmaps, but does not free, + * any IRQs. + * + * - We clean up our side: releasing and unmapping resources we hold + * so we can wire them up again when the hardware comes back up. + * + * Driver authors should note: + * + * - Any contexts you create in your kernel driver (except + * those associated with anonymous file descriptors) are + * your responsibility to free and recreate. Likewise with + * any attached resources. + * + * - We will take responsibility for re-initialising the + * device context (the one set up for you in + * cxl_pci_enable_device_hook and accessed through + * cxl_get_context). If you've attached IRQs or other + * resources to it, they remain yours to free. + * + * You can call the same functions to release resources as you + * normally would: we make sure that these functions continue + * to work when the hardware is down. + * + * Two examples: + * + * 1) If you normally free all your resources at the end of + * each request, or if you use anonymous FDs, your + * error_detected callback can simply set a flag to tell + * your driver not to start any new calls. You can then + * clear the flag in the resume callback; see the sketch below. + * + * 2) If you normally allocate your resources on startup: + * * Set a flag in error_detected as above. + * * Let CXL detach your contexts. + * * In slot_reset, free the old resources and allocate new ones. + * * In resume, clear the flag to allow things to start.
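 * + * A minimal sketch of pattern (1), purely illustrative - the my_drv + * structure and its halted flag are made-up names, not an API that + * this file provides: + * + *	static pci_ers_result_t my_error_detected(struct pci_dev *pdev, + *						  pci_channel_state_t state) + *	{ + *		struct my_drv *drv = pci_get_drvdata(pdev); + * + *		WRITE_ONCE(drv->halted, true); + *		return PCI_ERS_RESULT_NEED_RESET; + *	} + * + *	static void my_resume(struct pci_dev *pdev) + *	{ + *		struct my_drv *drv = pci_get_drvdata(pdev); + * + *		WRITE_ONCE(drv->halted, false); + *	}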
+ */ + + /* Make sure no one else changes the afu list */ + spin_lock(&adapter->afu_list_lock); + + for (i = 0; i < adapter->slices; i++) { + afu = adapter->afu[i]; + + if (afu == NULL) + continue; + + afu_result = cxl_vphb_error_detected(afu, state); + cxl_context_detach_all(afu); + cxl_ops->afu_deactivate_mode(afu, afu->current_mode); + pci_deconfigure_afu(afu); + + /* Disconnect trumps all, NONE trumps NEED_RESET */ + if (afu_result == PCI_ERS_RESULT_DISCONNECT) + result = PCI_ERS_RESULT_DISCONNECT; + else if ((afu_result == PCI_ERS_RESULT_NONE) && + (result == PCI_ERS_RESULT_NEED_RESET)) + result = PCI_ERS_RESULT_NONE; + } + spin_unlock(&adapter->afu_list_lock); + + /* should take the context lock here */ + if (cxl_adapter_context_lock(adapter) != 0) + dev_warn(&adapter->dev, + "Couldn't take context lock with %d active-contexts\n", + atomic_read(&adapter->contexts_num)); + + cxl_deconfigure_adapter(adapter); + + return result; +} + +static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) +{ + struct cxl *adapter = pci_get_drvdata(pdev); + struct cxl_afu *afu; + struct cxl_context *ctx; + struct pci_dev *afu_dev; + pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED; + pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED; + int i; + + if (cxl_configure_adapter(adapter, pdev)) + goto err; + + /* + * Unlock context activation for the adapter. Ideally this should be + * done in cxl_pci_resume but cxlflash module tries to activate the + * master context as part of slot_reset callback. + */ + cxl_adapter_context_unlock(adapter); + + spin_lock(&adapter->afu_list_lock); + for (i = 0; i < adapter->slices; i++) { + afu = adapter->afu[i]; + + if (afu == NULL) + continue; + + if (pci_configure_afu(afu, adapter, pdev)) + goto err_unlock; + + if (cxl_afu_select_best_mode(afu)) + goto err_unlock; + + if (afu->phb == NULL) + continue; + + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { + /* Reset the device context. + * TODO: make this less disruptive + */ + ctx = cxl_get_context(afu_dev); + + if (ctx && cxl_release_context(ctx)) + goto err_unlock; + + ctx = cxl_dev_context_init(afu_dev); + if (IS_ERR(ctx)) + goto err_unlock; + + afu_dev->dev.archdata.cxl_ctx = ctx; + + if (cxl_ops->afu_check_and_enable(afu)) + goto err_unlock; + + afu_dev->error_state = pci_channel_io_normal; + + /* If there's a driver attached, allow it to + * chime in on recovery. Drivers should check + * if everything has come back OK, but + * shouldn't start new work until we call + * their resume function. + */ + if (!afu_dev->driver) + continue; + + if (afu_dev->driver->err_handler && + afu_dev->driver->err_handler->slot_reset) + afu_result = afu_dev->driver->err_handler->slot_reset(afu_dev); + + if (afu_result == PCI_ERS_RESULT_DISCONNECT) + result = PCI_ERS_RESULT_DISCONNECT; + } + } + + spin_unlock(&adapter->afu_list_lock); + return result; + +err_unlock: + spin_unlock(&adapter->afu_list_lock); + +err: + /* All the bits that happen in both error_detected and cxl_remove + * should be idempotent, so we don't need to worry about leaving a mix + * of unconfigured and reconfigured resources. + */ + dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n"); + return PCI_ERS_RESULT_DISCONNECT; +} + +static void cxl_pci_resume(struct pci_dev *pdev) +{ + struct cxl *adapter = pci_get_drvdata(pdev); + struct cxl_afu *afu; + struct pci_dev *afu_dev; + int i; + + /* Everything is back now. Drivers should restart work now. 
+ * This is not the place to be checking if everything came back up + * properly, because there's no return value: do that in slot_reset. + */ + spin_lock(&adapter->afu_list_lock); + for (i = 0; i < adapter->slices; i++) { + afu = adapter->afu[i]; + + if (afu == NULL || afu->phb == NULL) + continue; + + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { + if (afu_dev->driver && afu_dev->driver->err_handler && + afu_dev->driver->err_handler->resume) + afu_dev->driver->err_handler->resume(afu_dev); + } + } + spin_unlock(&adapter->afu_list_lock); +} + +static const struct pci_error_handlers cxl_err_handler = { + .error_detected = cxl_pci_error_detected, + .slot_reset = cxl_pci_slot_reset, + .resume = cxl_pci_resume, +}; + +struct pci_driver cxl_pci_driver = { + .name = "cxl-pci", + .id_table = cxl_pci_tbl, + .probe = cxl_probe, + .remove = cxl_remove, + .shutdown = cxl_remove, + .err_handler = &cxl_err_handler, +}; diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c new file mode 100644 index 000000000..d97a243ad --- /dev/null +++ b/drivers/misc/cxl/sysfs.c @@ -0,0 +1,770 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2014 IBM Corp. + */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/sysfs.h> +#include <linux/pci_regs.h> + +#include "cxl.h" + +#define to_afu_chardev_m(d) dev_get_drvdata(d) + +/********* Adapter attributes **********************************************/ + +static ssize_t caia_version_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl *adapter = to_cxl_adapter(device); + + return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major, + adapter->caia_minor); +} + +static ssize_t psl_revision_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl *adapter = to_cxl_adapter(device); + + return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev); +} + +static ssize_t base_image_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl *adapter = to_cxl_adapter(device); + + return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image); +} + +static ssize_t image_loaded_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl *adapter = to_cxl_adapter(device); + + if (adapter->user_image_loaded) + return scnprintf(buf, PAGE_SIZE, "user\n"); + return scnprintf(buf, PAGE_SIZE, "factory\n"); +} + +static ssize_t psl_timebase_synced_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl *adapter = to_cxl_adapter(device); + u64 psl_tb, delta; + + /* Recompute the status only in native mode */ + if (cpu_has_feature(CPU_FTR_HVMODE)) { + psl_tb = adapter->native->sl_ops->timebase_read(adapter); + delta = abs(mftb() - psl_tb); + + /* CORE TB and PSL TB difference <= 16usecs ? */ + adapter->psl_timebase_synced = (tb_to_ns(delta) < 16000) ? true : false; + pr_devel("PSL timebase %s - delta: 0x%016llx\n", + (tb_to_ns(delta) < 16000) ? 
"synchronized" : + "not synchronized", tb_to_ns(delta)); + } + return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced); +} + +static ssize_t tunneled_ops_supported_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl *adapter = to_cxl_adapter(device); + + return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported); +} + +static ssize_t reset_adapter_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cxl *adapter = to_cxl_adapter(device); + int rc; + int val; + + rc = sscanf(buf, "%i", &val); + if ((rc != 1) || (val != 1 && val != -1)) + return -EINVAL; + + /* + * See if we can lock the context mapping that's only allowed + * when there are no contexts attached to the adapter. Once + * taken this will also prevent any context from getting activated. + */ + if (val == 1) { + rc = cxl_adapter_context_lock(adapter); + if (rc) + goto out; + + rc = cxl_ops->adapter_reset(adapter); + /* In case reset failed release context lock */ + if (rc) + cxl_adapter_context_unlock(adapter); + + } else if (val == -1) { + /* Perform a forced adapter reset */ + rc = cxl_ops->adapter_reset(adapter); + } + +out: + return rc ? rc : count; +} + +static ssize_t load_image_on_perst_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl *adapter = to_cxl_adapter(device); + + if (!adapter->perst_loads_image) + return scnprintf(buf, PAGE_SIZE, "none\n"); + + if (adapter->perst_select_user) + return scnprintf(buf, PAGE_SIZE, "user\n"); + return scnprintf(buf, PAGE_SIZE, "factory\n"); +} + +static ssize_t load_image_on_perst_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cxl *adapter = to_cxl_adapter(device); + int rc; + + if (!strncmp(buf, "none", 4)) + adapter->perst_loads_image = false; + else if (!strncmp(buf, "user", 4)) { + adapter->perst_select_user = true; + adapter->perst_loads_image = true; + } else if (!strncmp(buf, "factory", 7)) { + adapter->perst_select_user = false; + adapter->perst_loads_image = true; + } else + return -EINVAL; + + if ((rc = cxl_update_image_control(adapter))) + return rc; + + return count; +} + +static ssize_t perst_reloads_same_image_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl *adapter = to_cxl_adapter(device); + + return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image); +} + +static ssize_t perst_reloads_same_image_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cxl *adapter = to_cxl_adapter(device); + int rc; + int val; + + rc = sscanf(buf, "%i", &val); + if ((rc != 1) || !(val == 1 || val == 0)) + return -EINVAL; + + adapter->perst_same_image = (val == 1 ? 
true : false); + return count; +} + +static struct device_attribute adapter_attrs[] = { + __ATTR_RO(caia_version), + __ATTR_RO(psl_revision), + __ATTR_RO(base_image), + __ATTR_RO(image_loaded), + __ATTR_RO(psl_timebase_synced), + __ATTR_RO(tunneled_ops_supported), + __ATTR_RW(load_image_on_perst), + __ATTR_RW(perst_reloads_same_image), + __ATTR(reset, S_IWUSR, NULL, reset_adapter_store), +}; + + +/********* AFU master specific attributes **********************************/ + +static ssize_t mmio_size_show_master(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl_afu *afu = to_afu_chardev_m(device); + + return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size); +} + +static ssize_t pp_mmio_off_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl_afu *afu = to_afu_chardev_m(device); + + return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset); +} + +static ssize_t pp_mmio_len_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl_afu *afu = to_afu_chardev_m(device); + + return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size); +} + +static struct device_attribute afu_master_attrs[] = { + __ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL), + __ATTR_RO(pp_mmio_off), + __ATTR_RO(pp_mmio_len), +}; + + +/********* AFU attributes **************************************************/ + +static ssize_t mmio_size_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl_afu *afu = to_cxl_afu(device); + + if (afu->pp_size) + return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size); + return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size); +} + +static ssize_t reset_store_afu(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cxl_afu *afu = to_cxl_afu(device); + int rc; + + /* Not safe to reset if it is currently in use */ + mutex_lock(&afu->contexts_lock); + if (!idr_is_empty(&afu->contexts_idr)) { + rc = -EBUSY; + goto err; + } + + if ((rc = cxl_ops->afu_reset(afu))) + goto err; + + rc = count; +err: + mutex_unlock(&afu->contexts_lock); + return rc; +} + +static ssize_t irqs_min_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl_afu *afu = to_cxl_afu(device); + + return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs); +} + +static ssize_t irqs_max_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl_afu *afu = to_cxl_afu(device); + + return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max); +} + +static ssize_t irqs_max_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cxl_afu *afu = to_cxl_afu(device); + ssize_t ret; + int irqs_max; + + ret = sscanf(buf, "%i", &irqs_max); + if (ret != 1) + return -EINVAL; + + if (irqs_max < afu->pp_irqs) + return -EINVAL; + + if (cpu_has_feature(CPU_FTR_HVMODE)) { + if (irqs_max > afu->adapter->user_irqs) + return -EINVAL; + } else { + /* pHyp sets a per-AFU limit */ + if (irqs_max > afu->guest->max_ints) + return -EINVAL; + } + + afu->irqs_max = irqs_max; + return count; +} + +static ssize_t modes_supported_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct cxl_afu *afu = to_cxl_afu(device); + char *p = buf, *end = buf + PAGE_SIZE; + + if (afu->modes_supported & CXL_MODE_DEDICATED) + p += scnprintf(p, end - p, "dedicated_process\n"); + if (afu->modes_supported & CXL_MODE_DIRECTED) + p += 
scnprintf(p, end - p, "afu_directed\n"); + return (p - buf); +} + +static ssize_t prefault_mode_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl_afu *afu = to_cxl_afu(device); + + switch (afu->prefault_mode) { + case CXL_PREFAULT_WED: + return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n"); + case CXL_PREFAULT_ALL: + return scnprintf(buf, PAGE_SIZE, "all\n"); + default: + return scnprintf(buf, PAGE_SIZE, "none\n"); + } +} + +static ssize_t prefault_mode_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cxl_afu *afu = to_cxl_afu(device); + enum prefault_modes mode = -1; + + if (!strncmp(buf, "none", 4)) + mode = CXL_PREFAULT_NONE; + else { + if (!radix_enabled()) { + + /* only allowed when not in radix mode */ + if (!strncmp(buf, "work_element_descriptor", 23)) + mode = CXL_PREFAULT_WED; + if (!strncmp(buf, "all", 3)) + mode = CXL_PREFAULT_ALL; + } else { + dev_err(device, "Cannot prefault with radix enabled\n"); + } + } + + if (mode == -1) + return -EINVAL; + + afu->prefault_mode = mode; + return count; +} + +static ssize_t mode_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct cxl_afu *afu = to_cxl_afu(device); + + if (afu->current_mode == CXL_MODE_DEDICATED) + return scnprintf(buf, PAGE_SIZE, "dedicated_process\n"); + if (afu->current_mode == CXL_MODE_DIRECTED) + return scnprintf(buf, PAGE_SIZE, "afu_directed\n"); + return scnprintf(buf, PAGE_SIZE, "none\n"); +} + +static ssize_t mode_store(struct device *device, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cxl_afu *afu = to_cxl_afu(device); + int old_mode, mode = -1; + int rc = -EBUSY; + + /* can't change this if we have a user */ + mutex_lock(&afu->contexts_lock); + if (!idr_is_empty(&afu->contexts_idr)) + goto err; + + if (!strncmp(buf, "dedicated_process", 17)) + mode = CXL_MODE_DEDICATED; + if (!strncmp(buf, "afu_directed", 12)) + mode = CXL_MODE_DIRECTED; + if (!strncmp(buf, "none", 4)) + mode = 0; + + if (mode == -1) { + rc = -EINVAL; + goto err; + } + + /* + * afu_deactivate_mode needs to be done outside the lock, prevent + * other contexts coming in before we are ready: + */ + old_mode = afu->current_mode; + afu->current_mode = 0; + afu->num_procs = 0; + + mutex_unlock(&afu->contexts_lock); + + if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode))) + return rc; + if ((rc = cxl_ops->afu_activate_mode(afu, mode))) + return rc; + + return count; +err: + mutex_unlock(&afu->contexts_lock); + return rc; +} + +static ssize_t api_version_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION); +} + +static ssize_t api_version_compatible_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE); +} + +static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj)); + + return cxl_ops->afu_read_err_buffer(afu, buf, off, count); +} + +static struct device_attribute afu_attrs[] = { + __ATTR_RO(mmio_size), + __ATTR_RO(irqs_min), + __ATTR_RW(irqs_max), + __ATTR_RO(modes_supported), + __ATTR_RW(mode), + __ATTR_RW(prefault_mode), + __ATTR_RO(api_version), + __ATTR_RO(api_version_compatible), + __ATTR(reset, S_IWUSR, NULL, reset_store_afu), +}; + +int 
cxl_sysfs_adapter_add(struct cxl *adapter) +{ + struct device_attribute *dev_attr; + int i, rc; + + for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) { + dev_attr = &adapter_attrs[i]; + if (cxl_ops->support_attributes(dev_attr->attr.name, + CXL_ADAPTER_ATTRS)) { + if ((rc = device_create_file(&adapter->dev, dev_attr))) + goto err; + } + } + return 0; +err: + for (i--; i >= 0; i--) { + dev_attr = &adapter_attrs[i]; + if (cxl_ops->support_attributes(dev_attr->attr.name, + CXL_ADAPTER_ATTRS)) + device_remove_file(&adapter->dev, dev_attr); + } + return rc; +} + +void cxl_sysfs_adapter_remove(struct cxl *adapter) +{ + struct device_attribute *dev_attr; + int i; + + for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) { + dev_attr = &adapter_attrs[i]; + if (cxl_ops->support_attributes(dev_attr->attr.name, + CXL_ADAPTER_ATTRS)) + device_remove_file(&adapter->dev, dev_attr); + } +} + +struct afu_config_record { + struct kobject kobj; + struct bin_attribute config_attr; + struct list_head list; + int cr; + u16 device; + u16 vendor; + u32 class; +}; + +#define to_cr(obj) container_of(obj, struct afu_config_record, kobj) + +static ssize_t vendor_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct afu_config_record *cr = to_cr(kobj); + + return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor); +} + +static ssize_t device_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct afu_config_record *cr = to_cr(kobj); + + return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device); +} + +static ssize_t class_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct afu_config_record *cr = to_cr(kobj); + + return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class); +} + +static ssize_t afu_read_config(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct afu_config_record *cr = to_cr(kobj); + struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent)); + + u64 i, j, val, rc; + + for (i = 0; i < count;) { + rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val); + if (rc) + val = ~0ULL; + for (j = off & 0x7; j < 8 && i < count; i++, j++, off++) + buf[i] = (val >> (j * 8)) & 0xff; + } + + return count; +} + +static struct kobj_attribute vendor_attribute = + __ATTR_RO(vendor); +static struct kobj_attribute device_attribute = + __ATTR_RO(device); +static struct kobj_attribute class_attribute = + __ATTR_RO(class); + +static struct attribute *afu_cr_attrs[] = { + &vendor_attribute.attr, + &device_attribute.attr, + &class_attribute.attr, + NULL, +}; + +static void release_afu_config_record(struct kobject *kobj) +{ + struct afu_config_record *cr = to_cr(kobj); + + kfree(cr); +} + +static struct kobj_type afu_config_record_type = { + .sysfs_ops = &kobj_sysfs_ops, + .release = release_afu_config_record, + .default_attrs = afu_cr_attrs, +}; + +static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx) +{ + struct afu_config_record *cr; + int rc; + + cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL); + if (!cr) + return ERR_PTR(-ENOMEM); + + cr->cr = cr_idx; + + rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device); + if (rc) + goto err; + rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor); + if (rc) + goto err; + rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class); + if (rc) + goto err; + cr->class >>= 8; + + /* + * Export raw AFU PCIe like config record. 
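It can be + * read from userspace like any other sysfs binary attribute; a + * hypothetical sketch, assuming the afu0.0 naming used by the cxl + * class and the cr%i kobjects created below: + * + *	int fd = open("/sys/class/cxl/afu0.0/cr0/config", O_RDONLY); + *	unsigned char cfg[64]; + *	ssize_t n = read(fd, cfg, sizeof(cfg)); + *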
For now this is read-only by + * root - we can expand that later to be readable by non-root and maybe + * even writable provided we have a good use-case. Once we support + * exposing AFUs through a virtual PHB they will get that for free from + * Linux' PCI infrastructure, but until then it's not clear that we + * need it for anything since the main use case is just identifying + * AFUs, which can be done via the vendor, device and class attributes. + */ + sysfs_bin_attr_init(&cr->config_attr); + cr->config_attr.attr.name = "config"; + cr->config_attr.attr.mode = S_IRUSR; + cr->config_attr.size = afu->crs_len; + cr->config_attr.read = afu_read_config; + + rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type, + &afu->dev.kobj, "cr%i", cr->cr); + if (rc) + goto err1; + + rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr); + if (rc) + goto err1; + + rc = kobject_uevent(&cr->kobj, KOBJ_ADD); + if (rc) + goto err2; + + return cr; +err2: + sysfs_remove_bin_file(&cr->kobj, &cr->config_attr); +err1: + kobject_put(&cr->kobj); + return ERR_PTR(rc); +err: + kfree(cr); + return ERR_PTR(rc); +} + +void cxl_sysfs_afu_remove(struct cxl_afu *afu) +{ + struct device_attribute *dev_attr; + struct afu_config_record *cr, *tmp; + int i; + + /* remove the err buffer bin attribute */ + if (afu->eb_len) + device_remove_bin_file(&afu->dev, &afu->attr_eb); + + for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) { + dev_attr = &afu_attrs[i]; + if (cxl_ops->support_attributes(dev_attr->attr.name, + CXL_AFU_ATTRS)) + device_remove_file(&afu->dev, &afu_attrs[i]); + } + + list_for_each_entry_safe(cr, tmp, &afu->crs, list) { + sysfs_remove_bin_file(&cr->kobj, &cr->config_attr); + kobject_put(&cr->kobj); + } +} + +int cxl_sysfs_afu_add(struct cxl_afu *afu) +{ + struct device_attribute *dev_attr; + struct afu_config_record *cr; + int i, rc; + + INIT_LIST_HEAD(&afu->crs); + + for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) { + dev_attr = &afu_attrs[i]; + if (cxl_ops->support_attributes(dev_attr->attr.name, + CXL_AFU_ATTRS)) { + if ((rc = device_create_file(&afu->dev, &afu_attrs[i]))) + goto err; + } + } + + /* conditionally create the binary file for the error info buffer */ + if (afu->eb_len) { + sysfs_attr_init(&afu->attr_eb.attr); + + afu->attr_eb.attr.name = "afu_err_buff"; + afu->attr_eb.attr.mode = S_IRUGO; + afu->attr_eb.size = afu->eb_len; + afu->attr_eb.read = afu_eb_read; + + rc = device_create_bin_file(&afu->dev, &afu->attr_eb); + if (rc) { + dev_err(&afu->dev, + "Unable to create eb attr for the afu. Err(%d)\n", + rc); + goto err; + } + } + + for (i = 0; i < afu->crs_num; i++) { + cr = cxl_sysfs_afu_new_cr(afu, i); + if (IS_ERR(cr)) { + rc = PTR_ERR(cr); + goto err1; + } + list_add(&cr->list, &afu->crs); + } + + return 0; + +err1: + cxl_sysfs_afu_remove(afu); + return rc; +err: + /* reset the eb_len as we haven't created the bin attr */ + afu->eb_len = 0; + + for (i--; i >= 0; i--) { + dev_attr = &afu_attrs[i]; + if (cxl_ops->support_attributes(dev_attr->attr.name, + CXL_AFU_ATTRS)) + device_remove_file(&afu->dev, &afu_attrs[i]); + } + return rc; +} + +int cxl_sysfs_afu_m_add(struct cxl_afu *afu) +{ + struct device_attribute *dev_attr; + int i, rc; + + for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) { + dev_attr = &afu_master_attrs[i]; + if (cxl_ops->support_attributes(dev_attr->attr.name, + CXL_AFU_MASTER_ATTRS)) { + if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i]))) + goto err; + } + } + + return 0; + +err: + for (i--; i >= 0; i--) { + dev_attr = &afu_master_attrs[i]; + if (cxl_ops->support_attributes(dev_attr->attr.name, + CXL_AFU_MASTER_ATTRS)) + device_remove_file(afu->chardev_m, &afu_master_attrs[i]); + } + return rc; +} + +void cxl_sysfs_afu_m_remove(struct cxl_afu *afu) +{ + struct device_attribute *dev_attr; + int i; + + for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) { + dev_attr = &afu_master_attrs[i]; + if (cxl_ops->support_attributes(dev_attr->attr.name, + CXL_AFU_MASTER_ATTRS)) + device_remove_file(afu->chardev_m, &afu_master_attrs[i]); + } +} diff --git a/drivers/misc/cxl/trace.c b/drivers/misc/cxl/trace.c new file mode 100644 index 000000000..86f654b99 --- /dev/null +++ b/drivers/misc/cxl/trace.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2015 IBM Corp. + */ + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "trace.h" +#endif diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h new file mode 100644 index 000000000..c474157c6 --- /dev/null +++ b/drivers/misc/cxl/trace.h @@ -0,0 +1,691 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2015 IBM Corp.
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cxl + +#if !defined(_CXL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _CXL_TRACE_H + +#include <linux/tracepoint.h> + +#include "cxl.h" + +#define dsisr_psl9_flags(flags) \ + __print_flags(flags, "|", \ + { CXL_PSL9_DSISR_An_CO_MASK, "FR" }, \ + { CXL_PSL9_DSISR_An_TF, "TF" }, \ + { CXL_PSL9_DSISR_An_PE, "PE" }, \ + { CXL_PSL9_DSISR_An_AE, "AE" }, \ + { CXL_PSL9_DSISR_An_OC, "OC" }, \ + { CXL_PSL9_DSISR_An_S, "S" }) + +#define DSISR_FLAGS \ + { CXL_PSL_DSISR_An_DS, "DS" }, \ + { CXL_PSL_DSISR_An_DM, "DM" }, \ + { CXL_PSL_DSISR_An_ST, "ST" }, \ + { CXL_PSL_DSISR_An_UR, "UR" }, \ + { CXL_PSL_DSISR_An_PE, "PE" }, \ + { CXL_PSL_DSISR_An_AE, "AE" }, \ + { CXL_PSL_DSISR_An_OC, "OC" }, \ + { CXL_PSL_DSISR_An_M, "M" }, \ + { CXL_PSL_DSISR_An_P, "P" }, \ + { CXL_PSL_DSISR_An_A, "A" }, \ + { CXL_PSL_DSISR_An_S, "S" }, \ + { CXL_PSL_DSISR_An_K, "K" } + +#define TFC_FLAGS \ + { CXL_PSL_TFC_An_A, "A" }, \ + { CXL_PSL_TFC_An_C, "C" }, \ + { CXL_PSL_TFC_An_AE, "AE" }, \ + { CXL_PSL_TFC_An_R, "R" } + +#define LLCMD_NAMES \ + { CXL_SPA_SW_CMD_TERMINATE, "TERMINATE" }, \ + { CXL_SPA_SW_CMD_REMOVE, "REMOVE" }, \ + { CXL_SPA_SW_CMD_SUSPEND, "SUSPEND" }, \ + { CXL_SPA_SW_CMD_RESUME, "RESUME" }, \ + { CXL_SPA_SW_CMD_ADD, "ADD" }, \ + { CXL_SPA_SW_CMD_UPDATE, "UPDATE" } + +#define AFU_COMMANDS \ + { 0, "DISABLE" }, \ + { CXL_AFU_Cntl_An_E, "ENABLE" }, \ + { CXL_AFU_Cntl_An_RA, "RESET" } + +#define PSL_COMMANDS \ + { CXL_PSL_SCNTL_An_Pc, "PURGE" }, \ + { CXL_PSL_SCNTL_An_Sc, "SUSPEND" } + + +DECLARE_EVENT_CLASS(cxl_pe_class, + TP_PROTO(struct cxl_context *ctx), + + TP_ARGS(ctx), + + TP_STRUCT__entry( + __field(u8, card) + __field(u8, afu) + __field(u16, pe) + ), + + TP_fast_assign( + __entry->card = ctx->afu->adapter->adapter_num; + __entry->afu = ctx->afu->slice; + __entry->pe = ctx->pe; + ), + + TP_printk("afu%i.%i pe=%i", + __entry->card, + __entry->afu, + __entry->pe + ) +); + + +TRACE_EVENT(cxl_attach, + TP_PROTO(struct cxl_context *ctx, u64 wed, s16 num_interrupts, u64 amr), + + TP_ARGS(ctx, wed, num_interrupts, amr), + + TP_STRUCT__entry( + __field(u8, card) + __field(u8, afu) + __field(u16, pe) + __field(pid_t, pid) + __field(u64, wed) + __field(u64, amr) + __field(s16, num_interrupts) + ), + + TP_fast_assign( + __entry->card = ctx->afu->adapter->adapter_num; + __entry->afu = ctx->afu->slice; + __entry->pe = ctx->pe; + __entry->pid = pid_nr(ctx->pid); + __entry->wed = wed; + __entry->amr = amr; + __entry->num_interrupts = num_interrupts; + ), + + TP_printk("afu%i.%i pid=%i pe=%i wed=0x%016llx irqs=%i amr=0x%llx", + __entry->card, + __entry->afu, + __entry->pid, + __entry->pe, + __entry->wed, + __entry->num_interrupts, + __entry->amr + ) +); + +DEFINE_EVENT(cxl_pe_class, cxl_detach, + TP_PROTO(struct cxl_context *ctx), + TP_ARGS(ctx) +); + +TRACE_EVENT(cxl_afu_irq, + TP_PROTO(struct cxl_context *ctx, int afu_irq, int virq, irq_hw_number_t hwirq), + + TP_ARGS(ctx, afu_irq, virq, hwirq), + + TP_STRUCT__entry( + __field(u8, card) + __field(u8, afu) + __field(u16, pe) + __field(u16, afu_irq) + __field(int, virq) + __field(irq_hw_number_t, hwirq) + ), + + TP_fast_assign( + __entry->card = ctx->afu->adapter->adapter_num; + __entry->afu = ctx->afu->slice; + __entry->pe = ctx->pe; + __entry->afu_irq = afu_irq; + __entry->virq = virq; + __entry->hwirq = hwirq; + ), + + TP_printk("afu%i.%i pe=%i afu_irq=%i virq=%i hwirq=0x%lx", + __entry->card, + __entry->afu, + __entry->pe, + __entry->afu_irq, + __entry->virq, + __entry->hwirq + ) +); + 
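+/* + * Usage illustration, a sketch rather than part of this file: each + * TRACE_EVENT() above generates a trace_<name>() helper that the driver + * calls at the matching point in the code. For cxl_afu_irq, from a + * hypothetical interrupt handler: + * + *	static irqreturn_t example_afu_irq(int virq, void *data) + *	{ + *		struct cxl_context *ctx = data; + *		irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(virq)); + * + *		trace_cxl_afu_irq(ctx, 1, virq, hwirq); + *		return IRQ_HANDLED; + *	} + * + * The call compiles to a static-branch no-op unless the cxl:cxl_afu_irq + * event has been enabled through tracefs. + */ +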
+TRACE_EVENT(cxl_psl9_irq, + TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar), + + TP_ARGS(ctx, irq, dsisr, dar), + + TP_STRUCT__entry( + __field(u8, card) + __field(u8, afu) + __field(u16, pe) + __field(int, irq) + __field(u64, dsisr) + __field(u64, dar) + ), + + TP_fast_assign( + __entry->card = ctx->afu->adapter->adapter_num; + __entry->afu = ctx->afu->slice; + __entry->pe = ctx->pe; + __entry->irq = irq; + __entry->dsisr = dsisr; + __entry->dar = dar; + ), + + TP_printk("afu%i.%i pe=%i irq=%i dsisr=0x%016llx dsisr=%s dar=0x%016llx", + __entry->card, + __entry->afu, + __entry->pe, + __entry->irq, + __entry->dsisr, + dsisr_psl9_flags(__entry->dsisr), + __entry->dar + ) +); + +TRACE_EVENT(cxl_psl_irq, + TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar), + + TP_ARGS(ctx, irq, dsisr, dar), + + TP_STRUCT__entry( + __field(u8, card) + __field(u8, afu) + __field(u16, pe) + __field(int, irq) + __field(u64, dsisr) + __field(u64, dar) + ), + + TP_fast_assign( + __entry->card = ctx->afu->adapter->adapter_num; + __entry->afu = ctx->afu->slice; + __entry->pe = ctx->pe; + __entry->irq = irq; + __entry->dsisr = dsisr; + __entry->dar = dar; + ), + + TP_printk("afu%i.%i pe=%i irq=%i dsisr=%s dar=0x%016llx", + __entry->card, + __entry->afu, + __entry->pe, + __entry->irq, + __print_flags(__entry->dsisr, "|", DSISR_FLAGS), + __entry->dar + ) +); + +TRACE_EVENT(cxl_psl_irq_ack, + TP_PROTO(struct cxl_context *ctx, u64 tfc), + + TP_ARGS(ctx, tfc), + + TP_STRUCT__entry( + __field(u8, card) + __field(u8, afu) + __field(u16, pe) + __field(u64, tfc) + ), + + TP_fast_assign( + __entry->card = ctx->afu->adapter->adapter_num; + __entry->afu = ctx->afu->slice; + __entry->pe = ctx->pe; + __entry->tfc = tfc; + ), + + TP_printk("afu%i.%i pe=%i tfc=%s", + __entry->card, + __entry->afu, + __entry->pe, + __print_flags(__entry->tfc, "|", TFC_FLAGS) + ) +); + +TRACE_EVENT(cxl_ste_miss, + TP_PROTO(struct cxl_context *ctx, u64 dar), + + TP_ARGS(ctx, dar), + + TP_STRUCT__entry( + __field(u8, card) + __field(u8, afu) + __field(u16, pe) + __field(u64, dar) + ), + + TP_fast_assign( + __entry->card = ctx->afu->adapter->adapter_num; + __entry->afu = ctx->afu->slice; + __entry->pe = ctx->pe; + __entry->dar = dar; + ), + + TP_printk("afu%i.%i pe=%i dar=0x%016llx", + __entry->card, + __entry->afu, + __entry->pe, + __entry->dar + ) +); + +TRACE_EVENT(cxl_ste_write, + TP_PROTO(struct cxl_context *ctx, unsigned int idx, u64 e, u64 v), + + TP_ARGS(ctx, idx, e, v), + + TP_STRUCT__entry( + __field(u8, card) + __field(u8, afu) + __field(u16, pe) + __field(unsigned int, idx) + __field(u64, e) + __field(u64, v) + ), + + TP_fast_assign( + __entry->card = ctx->afu->adapter->adapter_num; + __entry->afu = ctx->afu->slice; + __entry->pe = ctx->pe; + __entry->idx = idx; + __entry->e = e; + __entry->v = v; + ), + + TP_printk("afu%i.%i pe=%i SSTE[%i] E=0x%016llx V=0x%016llx", + __entry->card, + __entry->afu, + __entry->pe, + __entry->idx, + __entry->e, + __entry->v + ) +); + +TRACE_EVENT(cxl_pte_miss, + TP_PROTO(struct cxl_context *ctx, u64 dsisr, u64 dar), + + TP_ARGS(ctx, dsisr, dar), + + TP_STRUCT__entry( + __field(u8, card) + __field(u8, afu) + __field(u16, pe) + __field(u64, dsisr) + __field(u64, dar) + ), + + TP_fast_assign( + __entry->card = ctx->afu->adapter->adapter_num; + __entry->afu = ctx->afu->slice; + __entry->pe = ctx->pe; + __entry->dsisr = dsisr; + __entry->dar = dar; + ), + + TP_printk("afu%i.%i pe=%i dsisr=%s dar=0x%016llx", + __entry->card, + __entry->afu, + __entry->pe, + __print_flags(__entry->dsisr, 
"|", DSISR_FLAGS), + __entry->dar + ) +); + +TRACE_EVENT(cxl_llcmd, + TP_PROTO(struct cxl_context *ctx, u64 cmd), + + TP_ARGS(ctx, cmd), + + TP_STRUCT__entry( + __field(u8, card) + __field(u8, afu) + __field(u16, pe) + __field(u64, cmd) + ), + + TP_fast_assign( + __entry->card = ctx->afu->adapter->adapter_num; + __entry->afu = ctx->afu->slice; + __entry->pe = ctx->pe; + __entry->cmd = cmd; + ), + + TP_printk("afu%i.%i pe=%i cmd=%s", + __entry->card, + __entry->afu, + __entry->pe, + __print_symbolic_u64(__entry->cmd, LLCMD_NAMES) + ) +); + +TRACE_EVENT(cxl_llcmd_done, + TP_PROTO(struct cxl_context *ctx, u64 cmd, int rc), + + TP_ARGS(ctx, cmd, rc), + + TP_STRUCT__entry( + __field(u8, card) + __field(u8, afu) + __field(u16, pe) + __field(u64, cmd) + __field(int, rc) + ), + + TP_fast_assign( + __entry->card = ctx->afu->adapter->adapter_num; + __entry->afu = ctx->afu->slice; + __entry->pe = ctx->pe; + __entry->rc = rc; + __entry->cmd = cmd; + ), + + TP_printk("afu%i.%i pe=%i cmd=%s rc=%i", + __entry->card, + __entry->afu, + __entry->pe, + __print_symbolic_u64(__entry->cmd, LLCMD_NAMES), + __entry->rc + ) +); + +DECLARE_EVENT_CLASS(cxl_afu_psl_ctrl, + TP_PROTO(struct cxl_afu *afu, u64 cmd), + + TP_ARGS(afu, cmd), + + TP_STRUCT__entry( + __field(u8, card) + __field(u8, afu) + __field(u64, cmd) + ), + + TP_fast_assign( + __entry->card = afu->adapter->adapter_num; + __entry->afu = afu->slice; + __entry->cmd = cmd; + ), + + TP_printk("afu%i.%i cmd=%s", + __entry->card, + __entry->afu, + __print_symbolic_u64(__entry->cmd, AFU_COMMANDS) + ) +); + +DECLARE_EVENT_CLASS(cxl_afu_psl_ctrl_done, + TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc), + + TP_ARGS(afu, cmd, rc), + + TP_STRUCT__entry( + __field(u8, card) + __field(u8, afu) + __field(u64, cmd) + __field(int, rc) + ), + + TP_fast_assign( + __entry->card = afu->adapter->adapter_num; + __entry->afu = afu->slice; + __entry->rc = rc; + __entry->cmd = cmd; + ), + + TP_printk("afu%i.%i cmd=%s rc=%i", + __entry->card, + __entry->afu, + __print_symbolic_u64(__entry->cmd, AFU_COMMANDS), + __entry->rc + ) +); + +DEFINE_EVENT(cxl_afu_psl_ctrl, cxl_afu_ctrl, + TP_PROTO(struct cxl_afu *afu, u64 cmd), + TP_ARGS(afu, cmd) +); + +DEFINE_EVENT(cxl_afu_psl_ctrl_done, cxl_afu_ctrl_done, + TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc), + TP_ARGS(afu, cmd, rc) +); + +DEFINE_EVENT_PRINT(cxl_afu_psl_ctrl, cxl_psl_ctrl, + TP_PROTO(struct cxl_afu *afu, u64 cmd), + TP_ARGS(afu, cmd), + + TP_printk("psl%i.%i cmd=%s", + __entry->card, + __entry->afu, + __print_symbolic_u64(__entry->cmd, PSL_COMMANDS) + ) +); + +DEFINE_EVENT_PRINT(cxl_afu_psl_ctrl_done, cxl_psl_ctrl_done, + TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc), + TP_ARGS(afu, cmd, rc), + + TP_printk("psl%i.%i cmd=%s rc=%i", + __entry->card, + __entry->afu, + __print_symbolic_u64(__entry->cmd, PSL_COMMANDS), + __entry->rc + ) +); + +DEFINE_EVENT(cxl_pe_class, cxl_slbia, + TP_PROTO(struct cxl_context *ctx), + TP_ARGS(ctx) +); + +TRACE_EVENT(cxl_hcall, + TP_PROTO(u64 unit_address, u64 process_token, long rc), + + TP_ARGS(unit_address, process_token, rc), + + TP_STRUCT__entry( + __field(u64, unit_address) + __field(u64, process_token) + __field(long, rc) + ), + + TP_fast_assign( + __entry->unit_address = unit_address; + __entry->process_token = process_token; + __entry->rc = rc; + ), + + TP_printk("unit_address=0x%016llx process_token=0x%016llx rc=%li", + __entry->unit_address, + __entry->process_token, + __entry->rc + ) +); + +TRACE_EVENT(cxl_hcall_control, + TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3, + 
u64 p4, unsigned long r4, long rc), + + TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc), + + TP_STRUCT__entry( + __field(u64, unit_address) + __field(char *, fct) + __field(u64, p1) + __field(u64, p2) + __field(u64, p3) + __field(u64, p4) + __field(unsigned long, r4) + __field(long, rc) + ), + + TP_fast_assign( + __entry->unit_address = unit_address; + __entry->fct = fct; + __entry->p1 = p1; + __entry->p2 = p2; + __entry->p3 = p3; + __entry->p4 = p4; + __entry->r4 = r4; + __entry->rc = rc; + ), + + TP_printk("unit_address=%#.16llx %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li", + __entry->unit_address, + __entry->fct, + __entry->p1, + __entry->p2, + __entry->p3, + __entry->p4, + __entry->r4, + __entry->rc + ) +); + +TRACE_EVENT(cxl_hcall_attach, + TP_PROTO(u64 unit_address, u64 phys_addr, unsigned long process_token, + unsigned long mmio_addr, unsigned long mmio_size, long rc), + + TP_ARGS(unit_address, phys_addr, process_token, + mmio_addr, mmio_size, rc), + + TP_STRUCT__entry( + __field(u64, unit_address) + __field(u64, phys_addr) + __field(unsigned long, process_token) + __field(unsigned long, mmio_addr) + __field(unsigned long, mmio_size) + __field(long, rc) + ), + + TP_fast_assign( + __entry->unit_address = unit_address; + __entry->phys_addr = phys_addr; + __entry->process_token = process_token; + __entry->mmio_addr = mmio_addr; + __entry->mmio_size = mmio_size; + __entry->rc = rc; + ), + + TP_printk("unit_address=0x%016llx phys_addr=0x%016llx " + "token=0x%.8lx mmio_addr=0x%lx mmio_size=0x%lx rc=%li", + __entry->unit_address, + __entry->phys_addr, + __entry->process_token, + __entry->mmio_addr, + __entry->mmio_size, + __entry->rc + ) +); + +DEFINE_EVENT(cxl_hcall, cxl_hcall_detach, + TP_PROTO(u64 unit_address, u64 process_token, long rc), + TP_ARGS(unit_address, process_token, rc) +); + +DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_function, + TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3, + u64 p4, unsigned long r4, long rc), + TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc) +); + +DEFINE_EVENT(cxl_hcall, cxl_hcall_collect_int_info, + TP_PROTO(u64 unit_address, u64 process_token, long rc), + TP_ARGS(unit_address, process_token, rc) +); + +TRACE_EVENT(cxl_hcall_control_faults, + TP_PROTO(u64 unit_address, u64 process_token, + u64 control_mask, u64 reset_mask, unsigned long r4, + long rc), + + TP_ARGS(unit_address, process_token, + control_mask, reset_mask, r4, rc), + + TP_STRUCT__entry( + __field(u64, unit_address) + __field(u64, process_token) + __field(u64, control_mask) + __field(u64, reset_mask) + __field(unsigned long, r4) + __field(long, rc) + ), + + TP_fast_assign( + __entry->unit_address = unit_address; + __entry->process_token = process_token; + __entry->control_mask = control_mask; + __entry->reset_mask = reset_mask; + __entry->r4 = r4; + __entry->rc = rc; + ), + + TP_printk("unit_address=0x%016llx process_token=0x%llx " + "control_mask=%#llx reset_mask=%#llx r4=%#lx rc=%li", + __entry->unit_address, + __entry->process_token, + __entry->control_mask, + __entry->reset_mask, + __entry->r4, + __entry->rc + ) +); + +DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_facility, + TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3, + u64 p4, unsigned long r4, long rc), + TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc) +); + +TRACE_EVENT(cxl_hcall_download_facility, + TP_PROTO(u64 unit_address, char *fct, u64 list_address, u64 num, + unsigned long r4, long rc), + + TP_ARGS(unit_address, fct, list_address, num, r4, rc), + + TP_STRUCT__entry( + 
__field(u64, unit_address) + __field(char *, fct) + __field(u64, list_address) + __field(u64, num) + __field(unsigned long, r4) + __field(long, rc) + ), + + TP_fast_assign( + __entry->unit_address = unit_address; + __entry->fct = fct; + __entry->list_address = list_address; + __entry->num = num; + __entry->r4 = r4; + __entry->rc = rc; + ), + + TP_printk("%#.16llx, %s(%#llx, %#llx), %#lx): %li", + __entry->unit_address, + __entry->fct, + __entry->list_address, + __entry->num, + __entry->r4, + __entry->rc + ) +); + +#endif /* _CXL_TRACE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE trace +#include <trace/define_trace.h> diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c new file mode 100644 index 000000000..1264253cc --- /dev/null +++ b/drivers/misc/cxl/vphb.c @@ -0,0 +1,316 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2014 IBM Corp. + */ + +#include <linux/pci.h> +#include <misc/cxl.h> +#include "cxl.h" + +static int cxl_pci_probe_mode(struct pci_bus *bus) +{ + return PCI_PROBE_NORMAL; +} + +static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + return -ENODEV; +} + +static void cxl_teardown_msi_irqs(struct pci_dev *pdev) +{ + /* + * MSI should never be set, but we still need to provide this + * callback. + */ +} + +static bool cxl_pci_enable_device_hook(struct pci_dev *dev) +{ + struct pci_controller *phb; + struct cxl_afu *afu; + struct cxl_context *ctx; + + phb = pci_bus_to_host(dev->bus); + afu = (struct cxl_afu *)phb->private_data; + + if (!cxl_ops->link_ok(afu->adapter, afu)) { + dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__); + return false; + } + + dev->dev.archdata.dma_offset = PAGE_OFFSET; + + /* + * Allocate a context to do cxl things to. If we eventually do real + * DMA ops, we'll need a default context to attach them to + */ + ctx = cxl_dev_context_init(dev); + if (IS_ERR(ctx)) + return false; + dev->dev.archdata.cxl_ctx = ctx; + + return (cxl_ops->afu_check_and_enable(afu) == 0); +} + +static void cxl_pci_disable_device(struct pci_dev *dev) +{ + struct cxl_context *ctx = cxl_get_context(dev); + + if (ctx) { + if (ctx->status == STARTED) { + dev_err(&dev->dev, "Default context started\n"); + return; + } + dev->dev.archdata.cxl_ctx = NULL; + cxl_release_context(ctx); + } +} + +static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus, + unsigned long type) +{ + return 1; +} + +static void cxl_pci_reset_secondary_bus(struct pci_dev *dev) +{ + /* Should we do an AFU reset here? */ +} + +static int cxl_pcie_cfg_record(u8 bus, u8 devfn) +{ + return (bus << 8) + devfn; +} + +static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus) +{ + struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL; + + return phb ?
phb->private_data : NULL; +} + +static void cxl_afu_configured_put(struct cxl_afu *afu) +{ + atomic_dec_if_positive(&afu->configured_state); +} + +static bool cxl_afu_configured_get(struct cxl_afu *afu) +{ + return atomic_inc_unless_negative(&afu->configured_state); +} + +static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn, + struct cxl_afu *afu, int *_record) +{ + int record; + + record = cxl_pcie_cfg_record(bus->number, devfn); + if (record > afu->crs_num) + return PCIBIOS_DEVICE_NOT_FOUND; + + *_record = record; + return 0; +} + +static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn, + int offset, int len, u32 *val) +{ + int rc, record; + struct cxl_afu *afu; + u8 val8; + u16 val16; + u32 val32; + + afu = pci_bus_to_afu(bus); + /* Grab a reader lock on afu. */ + if (afu == NULL || !cxl_afu_configured_get(afu)) + return PCIBIOS_DEVICE_NOT_FOUND; + + rc = cxl_pcie_config_info(bus, devfn, afu, &record); + if (rc) + goto out; + + switch (len) { + case 1: + rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8); + *val = val8; + break; + case 2: + rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16); + *val = val16; + break; + case 4: + rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32); + *val = val32; + break; + default: + WARN_ON(1); + } + +out: + cxl_afu_configured_put(afu); + return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0; +} + +static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn, + int offset, int len, u32 val) +{ + int rc, record; + struct cxl_afu *afu; + + afu = pci_bus_to_afu(bus); + /* Grab a reader lock on afu. */ + if (afu == NULL || !cxl_afu_configured_get(afu)) + return PCIBIOS_DEVICE_NOT_FOUND; + + rc = cxl_pcie_config_info(bus, devfn, afu, &record); + if (rc) + goto out; + + switch (len) { + case 1: + rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff); + break; + case 2: + rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff); + break; + case 4: + rc = cxl_ops->afu_cr_write32(afu, record, offset, val); + break; + default: + WARN_ON(1); + } + +out: + cxl_afu_configured_put(afu); + return rc ? PCIBIOS_SET_FAILED : 0; +} + +static struct pci_ops cxl_pcie_pci_ops = +{ + .read = cxl_pcie_read_config, + .write = cxl_pcie_write_config, +}; + + +static struct pci_controller_ops cxl_pci_controller_ops = +{ + .probe_mode = cxl_pci_probe_mode, + .enable_device_hook = cxl_pci_enable_device_hook, + .disable_device = cxl_pci_disable_device, + .release_device = cxl_pci_disable_device, + .window_alignment = cxl_pci_window_alignment, + .reset_secondary_bus = cxl_pci_reset_secondary_bus, + .setup_msi_irqs = cxl_setup_msi_irqs, + .teardown_msi_irqs = cxl_teardown_msi_irqs, +}; + +int cxl_pci_vphb_add(struct cxl_afu *afu) +{ + struct pci_controller *phb; + struct device_node *vphb_dn; + struct device *parent; + + /* + * If there are no AFU configuration records we won't have anything to + * expose under the vPHB, so skip creating one, returning success since + * this is still a valid case. This will also opt us out of EEH + * handling since we won't have anything special to do if there are no + * kernel drivers attached to the vPHB, and EEH handling is not yet + * supported in the peer model. + */ + if (!afu->crs_num) + return 0; + + /* The parent device is the adapter. Reuse the device node of + * the adapter. 
+ * We don't seem to care what device node is used for the vPHB, + * but tools such as lsvpd walk up the device parents looking + * for a valid location code, so we might as well show devices + * attached to the adapter as being located on that adapter. + */ + parent = afu->adapter->dev.parent; + vphb_dn = parent->of_node; + + /* Alloc and setup PHB data structure */ + phb = pcibios_alloc_controller(vphb_dn); + if (!phb) + return -ENODEV; + + /* Setup parent in sysfs */ + phb->parent = parent; + + /* Setup the PHB using arch provided callback */ + phb->ops = &cxl_pcie_pci_ops; + phb->cfg_addr = NULL; + phb->cfg_data = NULL; + phb->private_data = afu; + phb->controller_ops = cxl_pci_controller_ops; + + /* Scan the bus */ + pcibios_scan_phb(phb); + if (phb->bus == NULL) + return -ENXIO; + + /* Set release hook on root bus */ + pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge), + pcibios_free_controller_deferred, + (void *) phb); + + /* Claim resources. This might need some rework as well depending + * whether we are doing probe-only or not, like assigning unassigned + * resources etc... + */ + pcibios_claim_one_bus(phb->bus); + + /* Add probed PCI devices to the device model */ + pci_bus_add_devices(phb->bus); + + afu->phb = phb; + + return 0; +} + +void cxl_pci_vphb_remove(struct cxl_afu *afu) +{ + struct pci_controller *phb; + + /* If there is no configuration record we won't have one of these */ + if (!afu || !afu->phb) + return; + + phb = afu->phb; + afu->phb = NULL; + + pci_remove_root_bus(phb->bus); + /* + * We don't free phb here - that's handled by + * pcibios_free_controller_deferred() + */ +} + +bool cxl_pci_is_vphb_device(struct pci_dev *dev) +{ + struct pci_controller *phb; + + phb = pci_bus_to_host(dev->bus); + + return (phb->ops == &cxl_pcie_pci_ops); +} + +struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev) +{ + struct pci_controller *phb; + + phb = pci_bus_to_host(dev->bus); + + return (struct cxl_afu *)phb->private_data; +} +EXPORT_SYMBOL_GPL(cxl_pci_to_afu); + +unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev) +{ + return cxl_pcie_cfg_record(dev->bus->number, dev->devfn); +} +EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record); diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c new file mode 100644 index 000000000..42f316c2d --- /dev/null +++ b/drivers/misc/ds1682.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Dallas Semiconductor DS1682 Elapsed Time Recorder device driver + * + * Written by: Grant Likely <grant.likely@secretlab.ca> + * + * Copyright (C) 2007 Secret Lab Technologies Ltd. + */ + +/* + * The DS1682 elapsed timer recorder is a simple device that implements + * one elapsed time counter, one event counter, an alarm signal and 10 + * bytes of general purpose EEPROM. + * + * This driver provides access to the DS1682 counters and user data via + * the sysfs. The following attributes are added to the device node: + * elapsed_time (u32): Total elapsed event time in ms resolution + * alarm_time (u32): When elapsed time exceeds the value in alarm_time, + * then the alarm pin is asserted. + * event_count (u16): number of times the event pin has gone low. + * eeprom (u8[10]): general purpose EEPROM + * + * Counter registers and user data are both read/write unless the device + * has been write protected. This driver does not support turning off write + * protection. 
Once write protection is turned on, it is impossible to + * turn it off again, so I have left the feature out of this driver to avoid + * accidental enabling, but it is trivial to add write protect support. + * + */ + +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/string.h> +#include <linux/list.h> +#include <linux/sysfs.h> +#include <linux/ctype.h> +#include <linux/hwmon-sysfs.h> + +/* Device registers */ +#define DS1682_REG_CONFIG 0x00 +#define DS1682_REG_ALARM 0x01 +#define DS1682_REG_ELAPSED 0x05 +#define DS1682_REG_EVT_CNTR 0x09 +#define DS1682_REG_EEPROM 0x0b +#define DS1682_REG_RESET 0x1d +#define DS1682_REG_WRITE_DISABLE 0x1e +#define DS1682_REG_WRITE_MEM_DISABLE 0x1f + +#define DS1682_EEPROM_SIZE 10 + +/* + * Generic counter attributes + */ +static ssize_t ds1682_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); + struct i2c_client *client = to_i2c_client(dev); + unsigned long long val, check; + __le32 val_le = 0; + int rc; + + dev_dbg(dev, "ds1682_show() called on %s\n", attr->attr.name); + + /* Read the register */ + rc = i2c_smbus_read_i2c_block_data(client, sattr->index, sattr->nr, + (u8 *)&val_le); + if (rc < 0) + return -EIO; + + val = le32_to_cpu(val_le); + + if (sattr->index == DS1682_REG_ELAPSED) { + int retries = 5; + + /* Detect and retry when a tick occurs mid-read */ + do { + rc = i2c_smbus_read_i2c_block_data(client, sattr->index, + sattr->nr, + (u8 *)&val_le); + if (rc < 0 || retries <= 0) + return -EIO; + + check = val; + val = le32_to_cpu(val_le); + retries--; + } while (val != check && val != (check + 1)); + } + + /* Format the output string and return # of bytes + * Special case: the 32 bit regs are time values with 1/4s + * resolution, scale them up to milliseconds + */ + return sprintf(buf, "%llu\n", (sattr->nr == 4) ? 
(val * 250) : val); +} + +static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); + struct i2c_client *client = to_i2c_client(dev); + u64 val; + __le32 val_le; + int rc; + + dev_dbg(dev, "ds1682_store() called on %s\n", attr->attr.name); + + /* Decode input */ + rc = kstrtoull(buf, 0, &val); + if (rc < 0) { + dev_dbg(dev, "input string not a number\n"); + return -EINVAL; + } + + /* Special case: the 32 bit regs are time values with 1/4s + * resolution, scale input down to quarter-seconds */ + if (sattr->nr == 4) + do_div(val, 250); + + /* write out the value */ + val_le = cpu_to_le32(val); + rc = i2c_smbus_write_i2c_block_data(client, sattr->index, sattr->nr, + (u8 *) & val_le); + if (rc < 0) { + dev_err(dev, "register write failed; reg=0x%x, size=%i\n", + sattr->index, sattr->nr); + return -EIO; + } + + return count; +} + +/* + * Simple register attributes + */ +static SENSOR_DEVICE_ATTR_2(elapsed_time, S_IRUGO | S_IWUSR, ds1682_show, + ds1682_store, 4, DS1682_REG_ELAPSED); +static SENSOR_DEVICE_ATTR_2(alarm_time, S_IRUGO | S_IWUSR, ds1682_show, + ds1682_store, 4, DS1682_REG_ALARM); +static SENSOR_DEVICE_ATTR_2(event_count, S_IRUGO | S_IWUSR, ds1682_show, + ds1682_store, 2, DS1682_REG_EVT_CNTR); + +static const struct attribute_group ds1682_group = { + .attrs = (struct attribute *[]) { + &sensor_dev_attr_elapsed_time.dev_attr.attr, + &sensor_dev_attr_alarm_time.dev_attr.attr, + &sensor_dev_attr_event_count.dev_attr.attr, + NULL, + }, +}; + +/* + * User data attribute + */ +static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct i2c_client *client = kobj_to_i2c_client(kobj); + int rc; + + dev_dbg(&client->dev, "ds1682_eeprom_read(p=%p, off=%lli, c=%zi)\n", + buf, off, count); + + rc = i2c_smbus_read_i2c_block_data(client, DS1682_REG_EEPROM + off, + count, buf); + if (rc < 0) + return -EIO; + + return count; +} + +static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct i2c_client *client = kobj_to_i2c_client(kobj); + + dev_dbg(&client->dev, "ds1682_eeprom_write(p=%p, off=%lli, c=%zi)\n", + buf, off, count); + + /* Write out to the device */ + if (i2c_smbus_write_i2c_block_data(client, DS1682_REG_EEPROM + off, + count, buf) < 0) + return -EIO; + + return count; +} + +static const struct bin_attribute ds1682_eeprom_attr = { + .attr = { + .name = "eeprom", + .mode = S_IRUGO | S_IWUSR, + }, + .size = DS1682_EEPROM_SIZE, + .read = ds1682_eeprom_read, + .write = ds1682_eeprom_write, +}; + +/* + * Called when a ds1682 device is matched with this driver + */ +static int ds1682_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_I2C_BLOCK)) { + dev_err(&client->dev, "i2c bus does not support the ds1682\n"); + rc = -ENODEV; + goto exit; + } + + rc = sysfs_create_group(&client->dev.kobj, &ds1682_group); + if (rc) + goto exit; + + rc = sysfs_create_bin_file(&client->dev.kobj, &ds1682_eeprom_attr); + if (rc) + goto exit_bin_attr; + + return 0; + + exit_bin_attr: + sysfs_remove_group(&client->dev.kobj, &ds1682_group); + exit: + return rc; +} + +static int ds1682_remove(struct i2c_client *client) +{ + sysfs_remove_bin_file(&client->dev.kobj, &ds1682_eeprom_attr); + 
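+	/* also drop the attribute group registered in ds1682_probe() */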
sysfs_remove_group(&client->dev.kobj, &ds1682_group); + return 0; +} + +static const struct i2c_device_id ds1682_id[] = { + { "ds1682", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ds1682_id); + +static const struct of_device_id ds1682_of_match[] = { + { .compatible = "dallas,ds1682", }, + {} +}; +MODULE_DEVICE_TABLE(of, ds1682_of_match); + +static struct i2c_driver ds1682_driver = { + .driver = { + .name = "ds1682", + .of_match_table = ds1682_of_match, + }, + .probe = ds1682_probe, + .remove = ds1682_remove, + .id_table = ds1682_id, +}; + +module_i2c_driver(ds1682_driver); + +MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); +MODULE_DESCRIPTION("DS1682 Elapsed Time Indicator driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/dummy-irq.c b/drivers/misc/dummy-irq.c new file mode 100644 index 000000000..fe3bfcb31 --- /dev/null +++ b/drivers/misc/dummy-irq.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Dummy IRQ handler driver. + * + * This module only registers itself as a handler that is specified to it + * by the 'irq' parameter. + * + * The sole purpose of this module is to help with debugging of systems on + * which spurious IRQs would happen on disabled IRQ vector. + * + * Copyright (C) 2013 Jiri Kosina + */ + +#include <linux/module.h> +#include <linux/irq.h> +#include <linux/interrupt.h> + +static int irq = -1; + +static irqreturn_t dummy_interrupt(int irq, void *dev_id) +{ + static int count = 0; + + if (count == 0) { + printk(KERN_INFO "dummy-irq: interrupt occurred on IRQ %d\n", + irq); + count++; + } + + return IRQ_NONE; +} + +static int __init dummy_irq_init(void) +{ + if (irq < 0) { + printk(KERN_ERR "dummy-irq: no IRQ given. Use irq=N\n"); + return -EIO; + } + if (request_irq(irq, &dummy_interrupt, IRQF_SHARED, "dummy_irq", &irq)) { + printk(KERN_ERR "dummy-irq: cannot register IRQ %d\n", irq); + return -EIO; + } + printk(KERN_INFO "dummy-irq: registered for IRQ %d\n", irq); + return 0; +} + +static void __exit dummy_irq_exit(void) +{ + printk(KERN_INFO "dummy-irq unloaded\n"); + free_irq(irq, &irq); +} + +module_init(dummy_irq_init); +module_exit(dummy_irq_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jiri Kosina"); +module_param_hw(irq, uint, irq, 0444); +MODULE_PARM_DESC(irq, "The IRQ to register for"); +MODULE_DESCRIPTION("Dummy IRQ handler driver"); diff --git a/drivers/misc/echo/Kconfig b/drivers/misc/echo/Kconfig new file mode 100644 index 000000000..ce0a37a47 --- /dev/null +++ b/drivers/misc/echo/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +config ECHO + tristate "Line Echo Canceller support" + help + This driver provides line echo cancelling support for mISDN and + Zaptel drivers. + + To compile this driver as a module, choose M here. The module + will be called echo. diff --git a/drivers/misc/echo/Makefile b/drivers/misc/echo/Makefile new file mode 100644 index 000000000..5b97467ff --- /dev/null +++ b/drivers/misc/echo/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_ECHO) += echo.o diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c new file mode 100644 index 000000000..3c4eaba86 --- /dev/null +++ b/drivers/misc/echo/echo.c @@ -0,0 +1,589 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SpanDSP - a series of DSP components for telephony + * + * echo.c - A line echo canceller. This code is being developed + * against and partially complies with G168. 
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ * and David Rowe <david_at_rowetel_dot_com>
+ *
+ * Copyright (C) 2001, 2003 Steve Underwood, 2007 David Rowe
+ *
+ * Based on a bit from here, a bit from there, eye of toad, ear of
+ * bat, 15 years of failed attempts by David and a few fried brain
+ * cells.
+ *
+ * All rights reserved.
+ */
+
+/*! \file */
+
+/* Implementation Notes
+	David Rowe
+	April 2007
+
+   This code started life as Steve's NLMS algorithm with a tap
+   rotation algorithm to handle divergence during double talk.  I
+   added a Geigel Double Talk Detector (DTD) [2] and performed some
+   G168 tests.  However, I had trouble meeting the G168 requirements,
+   especially for double talk - there were always cases where my DTD
+   failed, for example where near end speech was under the 6dB
+   threshold required for declaring double talk.
+
+   So I tried a two path algorithm [1], which has so far given better
+   results.  The original tap rotation/Geigel algorithm is available
+   in SVN http://svn.rowetel.com/software/oslec/tags/before_16bit.
+   It's probably possible to make it work if someone wants to put some
+   serious work into it.
+
+   At present no special treatment is provided for tones, which
+   generally cause NLMS algorithms to diverge.  Initial runs of a
+   subset of the G168 tests for tones (e.g. ./echo_test 6) show the
+   current algorithm is passing OK, which is kind of surprising.  The
+   full set of tests needs to be performed to confirm this result.
+
+   One other interesting change is that I have managed to get the NLMS
+   code to work with 16 bit coefficients, rather than the original 32
+   bit coefficients.  This reduces the MIPs and storage required.
+   I evaluated the 16 bit port using g168_tests.sh and listening tests
+   on 4 real-world samples.
+
+   I also attempted the implementation of a block based NLMS update
+   [2] but although this passes g168_tests.sh it didn't converge well
+   on the real-world samples.  I have no idea why, perhaps a scaling
+   problem.  The block based code is also available in SVN
+   http://svn.rowetel.com/software/oslec/tags/before_16bit.  If this
+   code can be debugged, it will lead to a further reduction in MIPS,
+   as the block update code maps nicely onto DSP instruction sets
+   (it's a dot product) compared to the current sample-by-sample
+   update.
+
+   Steve also has some nice notes on echo cancellers in echo.h.
+
+   References:
+
+   [1] Ochiai, Areseki, and Ogihara, "Echo Canceller with Two Echo
+       Path Models", IEEE Transactions on Communications, COM-25,
+       No. 6, June 1977.
+       https://www.rowetel.com/images/echo/dual_path_paper.pdf
+
+   [2] The classic, very useful paper that tells you how to
+       actually build a real world echo canceller:
+       Messerschmitt, Hedberg, Cole, Haoui, Winship, "Digital Voice
+       Echo Canceller with a TMS320020",
+       https://www.rowetel.com/images/echo/spra129.pdf
+
+   [3] I have written a series of blog posts on this work, here is
+       Part 1: http://www.rowetel.com/blog/?p=18
+
+   [4] The source code http://svn.rowetel.com/software/oslec/
+
+   [5] A nice reference on LMS filters:
+       https://en.wikipedia.org/wiki/Least_mean_squares_filter
+
+   Credits:
+
+   Thanks to Steve Underwood, Jean-Marc Valin, and Ramakrishnan
+   Muthukrishnan for their suggestions and email discussions.  Thanks
+   also to those people who collected echo samples for me such as
+   Mark, Pawel, and Pavel.
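+
+   For reference (an illustrative summary, not part of the original
+   notes): the coefficient update performed by lms_adapt_bg() below is
+   the standard NLMS recursion
+
+       h(n+1) = h(n) + Beta * e(n) * x(n) / P
+
+   where e(n) is the background filter error (clean_bg), x(n) the tx
+   history held in the filter state, P the power in the filter states
+   (plus a small floor) and Beta = 0.25.  The fixed point scaling of
+   Beta * e(n) / P into the "shift" value passed to lms_adapt_bg() is
+   derived in the comments inside oslec_update().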
+*/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include "echo.h" + +#define MIN_TX_POWER_FOR_ADAPTION 64 +#define MIN_RX_POWER_FOR_ADAPTION 64 +#define DTD_HANGOVER 600 /* 600 samples, or 75ms */ +#define DC_LOG2BETA 3 /* log2() of DC filter Beta */ + +/* adapting coeffs using the traditional stochastic descent (N)LMS algorithm */ + +static inline void lms_adapt_bg(struct oslec_state *ec, int clean, int shift) +{ + int i; + + int offset1; + int offset2; + int factor; + int exp; + + if (shift > 0) + factor = clean << shift; + else + factor = clean >> -shift; + + /* Update the FIR taps */ + + offset2 = ec->curr_pos; + offset1 = ec->taps - offset2; + + for (i = ec->taps - 1; i >= offset1; i--) { + exp = (ec->fir_state_bg.history[i - offset1] * factor); + ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); + } + for (; i >= 0; i--) { + exp = (ec->fir_state_bg.history[i + offset2] * factor); + ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); + } +} + +static inline int top_bit(unsigned int bits) +{ + if (bits == 0) + return -1; + else + return (int)fls((int32_t) bits) - 1; +} + +struct oslec_state *oslec_create(int len, int adaption_mode) +{ + struct oslec_state *ec; + int i; + const int16_t *history; + + ec = kzalloc(sizeof(*ec), GFP_KERNEL); + if (!ec) + return NULL; + + ec->taps = len; + ec->log2taps = top_bit(len); + ec->curr_pos = ec->taps - 1; + + ec->fir_taps16[0] = + kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); + if (!ec->fir_taps16[0]) + goto error_oom_0; + + ec->fir_taps16[1] = + kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); + if (!ec->fir_taps16[1]) + goto error_oom_1; + + history = fir16_create(&ec->fir_state, ec->fir_taps16[0], ec->taps); + if (!history) + goto error_state; + history = fir16_create(&ec->fir_state_bg, ec->fir_taps16[1], ec->taps); + if (!history) + goto error_state_bg; + + for (i = 0; i < 5; i++) + ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0; + + ec->cng_level = 1000; + oslec_adaption_mode(ec, adaption_mode); + + ec->snapshot = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); + if (!ec->snapshot) + goto error_snap; + + ec->cond_met = 0; + ec->pstates = 0; + ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0; + ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0; + ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; + ec->lbgn = ec->lbgn_acc = 0; + ec->lbgn_upper = 200; + ec->lbgn_upper_acc = ec->lbgn_upper << 13; + + return ec; + +error_snap: + fir16_free(&ec->fir_state_bg); +error_state_bg: + fir16_free(&ec->fir_state); +error_state: + kfree(ec->fir_taps16[1]); +error_oom_1: + kfree(ec->fir_taps16[0]); +error_oom_0: + kfree(ec); + return NULL; +} +EXPORT_SYMBOL_GPL(oslec_create); + +void oslec_free(struct oslec_state *ec) +{ + int i; + + fir16_free(&ec->fir_state); + fir16_free(&ec->fir_state_bg); + for (i = 0; i < 2; i++) + kfree(ec->fir_taps16[i]); + kfree(ec->snapshot); + kfree(ec); +} +EXPORT_SYMBOL_GPL(oslec_free); + +void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode) +{ + ec->adaption_mode = adaption_mode; +} +EXPORT_SYMBOL_GPL(oslec_adaption_mode); + +void oslec_flush(struct oslec_state *ec) +{ + int i; + + ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0; + ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0; + ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; + + ec->lbgn = ec->lbgn_acc = 0; + ec->lbgn_upper = 200; + ec->lbgn_upper_acc = ec->lbgn_upper << 13; + + ec->nonupdate_dwell = 0; + + fir16_flush(&ec->fir_state); + 
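+	/* reset the background filter history too */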
fir16_flush(&ec->fir_state_bg); + ec->fir_state.curr_pos = ec->taps - 1; + ec->fir_state_bg.curr_pos = ec->taps - 1; + for (i = 0; i < 2; i++) + memset(ec->fir_taps16[i], 0, ec->taps * sizeof(int16_t)); + + ec->curr_pos = ec->taps - 1; + ec->pstates = 0; +} +EXPORT_SYMBOL_GPL(oslec_flush); + +void oslec_snapshot(struct oslec_state *ec) +{ + memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps * sizeof(int16_t)); +} +EXPORT_SYMBOL_GPL(oslec_snapshot); + +/* Dual Path Echo Canceller */ + +int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx) +{ + int32_t echo_value; + int clean_bg; + int tmp; + int tmp1; + + /* + * Input scaling was found be required to prevent problems when tx + * starts clipping. Another possible way to handle this would be the + * filter coefficent scaling. + */ + + ec->tx = tx; + ec->rx = rx; + tx >>= 1; + rx >>= 1; + + /* + * Filter DC, 3dB point is 160Hz (I think), note 32 bit precision + * required otherwise values do not track down to 0. Zero at DC, Pole + * at (1-Beta) on real axis. Some chip sets (like Si labs) don't + * need this, but something like a $10 X100P card does. Any DC really + * slows down convergence. + * + * Note: removes some low frequency from the signal, this reduces the + * speech quality when listening to samples through headphones but may + * not be obvious through a telephone handset. + * + * Note that the 3dB frequency in radians is approx Beta, e.g. for Beta + * = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz. + */ + + if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) { + tmp = rx << 15; + + /* + * Make sure the gain of the HPF is 1.0. This can still + * saturate a little under impulse conditions, and it might + * roll to 32768 and need clipping on sustained peak level + * signals. However, the scale of such clipping is small, and + * the error due to any saturation should not markedly affect + * the downstream processing. + */ + tmp -= (tmp >> 4); + + ec->rx_1 += -(ec->rx_1 >> DC_LOG2BETA) + tmp - ec->rx_2; + + /* + * hard limit filter to prevent clipping. Note that at this + * stage rx should be limited to +/- 16383 due to right shift + * above + */ + tmp1 = ec->rx_1 >> 15; + if (tmp1 > 16383) + tmp1 = 16383; + if (tmp1 < -16383) + tmp1 = -16383; + rx = tmp1; + ec->rx_2 = tmp; + } + + /* Block average of power in the filter states. Used for + adaption power calculation. */ + + { + int new, old; + + /* efficient "out with the old and in with the new" algorithm so + we don't have to recalculate over the whole block of + samples. 
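+
+		   In effect this maintains the running estimate
+
+		       pstates ~= (1/N) * sum(tx(n-i)^2, i = 0..N-1)
+
+		   incrementally, adding (new - old)/N each sample, with
+		   the divide by N done as a round-to-nearest shift by
+		   log2taps.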
*/ + new = (int)tx * (int)tx; + old = (int)ec->fir_state.history[ec->fir_state.curr_pos] * + (int)ec->fir_state.history[ec->fir_state.curr_pos]; + ec->pstates += + ((new - old) + (1 << (ec->log2taps - 1))) >> ec->log2taps; + if (ec->pstates < 0) + ec->pstates = 0; + } + + /* Calculate short term average levels using simple single pole IIRs */ + + ec->ltxacc += abs(tx) - ec->ltx; + ec->ltx = (ec->ltxacc + (1 << 4)) >> 5; + ec->lrxacc += abs(rx) - ec->lrx; + ec->lrx = (ec->lrxacc + (1 << 4)) >> 5; + + /* Foreground filter */ + + ec->fir_state.coeffs = ec->fir_taps16[0]; + echo_value = fir16(&ec->fir_state, tx); + ec->clean = rx - echo_value; + ec->lcleanacc += abs(ec->clean) - ec->lclean; + ec->lclean = (ec->lcleanacc + (1 << 4)) >> 5; + + /* Background filter */ + + echo_value = fir16(&ec->fir_state_bg, tx); + clean_bg = rx - echo_value; + ec->lclean_bgacc += abs(clean_bg) - ec->lclean_bg; + ec->lclean_bg = (ec->lclean_bgacc + (1 << 4)) >> 5; + + /* Background Filter adaption */ + + /* Almost always adap bg filter, just simple DT and energy + detection to minimise adaption in cases of strong double talk. + However this is not critical for the dual path algorithm. + */ + ec->factor = 0; + ec->shift = 0; + if (!ec->nonupdate_dwell) { + int p, logp, shift; + + /* Determine: + + f = Beta * clean_bg_rx/P ------ (1) + + where P is the total power in the filter states. + + The Boffins have shown that if we obey (1) we converge + quickly and avoid instability. + + The correct factor f must be in Q30, as this is the fixed + point format required by the lms_adapt_bg() function, + therefore the scaled version of (1) is: + + (2^30) * f = (2^30) * Beta * clean_bg_rx/P + factor = (2^30) * Beta * clean_bg_rx/P ----- (2) + + We have chosen Beta = 0.25 by experiment, so: + + factor = (2^30) * (2^-2) * clean_bg_rx/P + + (30 - 2 - log2(P)) + factor = clean_bg_rx 2 ----- (3) + + To avoid a divide we approximate log2(P) as top_bit(P), + which returns the position of the highest non-zero bit in + P. This approximation introduces an error as large as a + factor of 2, but the algorithm seems to handle it OK. + + Come to think of it a divide may not be a big deal on a + modern DSP, so its probably worth checking out the cycles + for a divide versus a top_bit() implementation. + */ + + p = MIN_TX_POWER_FOR_ADAPTION + ec->pstates; + logp = top_bit(p) + ec->log2taps; + shift = 30 - 2 - logp; + ec->shift = shift; + + lms_adapt_bg(ec, clean_bg, shift); + } + + /* very simple DTD to make sure we dont try and adapt with strong + near end speech */ + + ec->adapt = 0; + if ((ec->lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->lrx > ec->ltx)) + ec->nonupdate_dwell = DTD_HANGOVER; + if (ec->nonupdate_dwell) + ec->nonupdate_dwell--; + + /* Transfer logic */ + + /* These conditions are from the dual path paper [1], I messed with + them a bit to improve performance. 
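+
+	   In words: the background taps are only copied to the foreground
+	   filter when the background residual is at least 1/8th lower than
+	   the foreground residual (8 * lclean_bg < 7 * lclean) and shows at
+	   least 18 dB of echo reduction relative to tx (8 * lclean_bg < ltx),
+	   for 6 consecutive samples.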
*/ + + if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) && + (ec->nonupdate_dwell == 0) && + /* (ec->Lclean_bg < 0.875*ec->Lclean) */ + (8 * ec->lclean_bg < 7 * ec->lclean) && + /* (ec->Lclean_bg < 0.125*ec->Ltx) */ + (8 * ec->lclean_bg < ec->ltx)) { + if (ec->cond_met == 6) { + /* + * BG filter has had better results for 6 consecutive + * samples + */ + ec->adapt = 1; + memcpy(ec->fir_taps16[0], ec->fir_taps16[1], + ec->taps * sizeof(int16_t)); + } else + ec->cond_met++; + } else + ec->cond_met = 0; + + /* Non-Linear Processing */ + + ec->clean_nlp = ec->clean; + if (ec->adaption_mode & ECHO_CAN_USE_NLP) { + /* + * Non-linear processor - a fancy way to say "zap small + * signals, to avoid residual echo due to (uLaw/ALaw) + * non-linearity in the channel.". + */ + + if ((16 * ec->lclean < ec->ltx)) { + /* + * Our e/c has improved echo by at least 24 dB (each + * factor of 2 is 6dB, so 2*2*2*2=16 is the same as + * 6+6+6+6=24dB) + */ + if (ec->adaption_mode & ECHO_CAN_USE_CNG) { + ec->cng_level = ec->lbgn; + + /* + * Very elementary comfort noise generation. + * Just random numbers rolled off very vaguely + * Hoth-like. DR: This noise doesn't sound + * quite right to me - I suspect there are some + * overflow issues in the filtering as it's too + * "crackly". + * TODO: debug this, maybe just play noise at + * high level or look at spectrum. + */ + + ec->cng_rndnum = + 1664525U * ec->cng_rndnum + 1013904223U; + ec->cng_filter = + ((ec->cng_rndnum & 0xFFFF) - 32768 + + 5 * ec->cng_filter) >> 3; + ec->clean_nlp = + (ec->cng_filter * ec->cng_level * 8) >> 14; + + } else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) { + /* This sounds much better than CNG */ + if (ec->clean_nlp > ec->lbgn) + ec->clean_nlp = ec->lbgn; + if (ec->clean_nlp < -ec->lbgn) + ec->clean_nlp = -ec->lbgn; + } else { + /* + * just mute the residual, doesn't sound very + * good, used mainly in G168 tests + */ + ec->clean_nlp = 0; + } + } else { + /* + * Background noise estimator. I tried a few + * algorithms here without much luck. This very simple + * one seems to work best, we just average the level + * using a slow (1 sec time const) filter if the + * current level is less than a (experimentally + * derived) constant. This means we dont include high + * level signals like near end speech. When combined + * with CNG or especially CLIP seems to work OK. + */ + if (ec->lclean < 40) { + ec->lbgn_acc += abs(ec->clean) - ec->lbgn; + ec->lbgn = (ec->lbgn_acc + (1 << 11)) >> 12; + } + } + } + + /* Roll around the taps buffer */ + if (ec->curr_pos <= 0) + ec->curr_pos = ec->taps; + ec->curr_pos--; + + if (ec->adaption_mode & ECHO_CAN_DISABLE) + ec->clean_nlp = rx; + + /* Output scaled back up again to match input scaling */ + + return (int16_t) ec->clean_nlp << 1; +} +EXPORT_SYMBOL_GPL(oslec_update); + +/* This function is separated from the echo canceller is it is usually called + as part of the tx process. See rx HP (DC blocking) filter above, it's + the same design. + + Some soft phones send speech signals with a lot of low frequency + energy, e.g. down to 20Hz. This can make the hybrid non-linear + which causes the echo canceller to fall over. This filter can help + by removing any low frequency before it gets to the tx port of the + hybrid. + + It can also help by removing and DC in the tx signal. DC is bad + for LMS algorithms. + + This is one of the classic DC removal filters, adjusted to provide + sufficient bass rolloff to meet the above requirement to protect hybrids + from things that upset them. 
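+
+   In z-domain terms the filter below is approximately
+
+       H(z) = (1 - z^-1) / (1 - (1 - 2^-DC_LOG2BETA) * z^-1)
+
+   i.e. a zero at DC and a pole at (1 - Beta) on the real axis, as for
+   the rx filter above.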
The difference between successive samples + produces a lousy HPF, and then a suitably placed pole flattens things out. + The final result is a nicely rolled off bass end. The filtering is + implemented with extended fractional precision, which noise shapes things, + giving very clean DC removal. +*/ + +int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx) +{ + int tmp; + int tmp1; + + if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) { + tmp = tx << 15; + + /* + * Make sure the gain of the HPF is 1.0. The first can still + * saturate a little under impulse conditions, and it might + * roll to 32768 and need clipping on sustained peak level + * signals. However, the scale of such clipping is small, and + * the error due to any saturation should not markedly affect + * the downstream processing. + */ + tmp -= (tmp >> 4); + + ec->tx_1 += -(ec->tx_1 >> DC_LOG2BETA) + tmp - ec->tx_2; + tmp1 = ec->tx_1 >> 15; + if (tmp1 > 32767) + tmp1 = 32767; + if (tmp1 < -32767) + tmp1 = -32767; + tx = tmp1; + ec->tx_2 = tmp; + } + + return tx; +} +EXPORT_SYMBOL_GPL(oslec_hpf_tx); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("David Rowe"); +MODULE_DESCRIPTION("Open Source Line Echo Canceller"); +MODULE_VERSION("0.3.0"); diff --git a/drivers/misc/echo/echo.h b/drivers/misc/echo/echo.h new file mode 100644 index 000000000..56b4b95fd --- /dev/null +++ b/drivers/misc/echo/echo.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * SpanDSP - a series of DSP components for telephony + * + * echo.c - A line echo canceller. This code is being developed + * against and partially complies with G168. + * + * Written by Steve Underwood <steveu@coppice.org> + * and David Rowe <david_at_rowetel_dot_com> + * + * Copyright (C) 2001 Steve Underwood and 2007 David Rowe + * + * All rights reserved. + */ + +#ifndef __ECHO_H +#define __ECHO_H + +/* +Line echo cancellation for voice + +What does it do? + +This module aims to provide G.168-2002 compliant echo cancellation, to remove +electrical echoes (e.g. from 2-4 wire hybrids) from voice calls. + +How does it work? + +The heart of the echo cancellor is FIR filter. This is adapted to match the +echo impulse response of the telephone line. It must be long enough to +adequately cover the duration of that impulse response. The signal transmitted +to the telephone line is passed through the FIR filter. Once the FIR is +properly adapted, the resulting output is an estimate of the echo signal +received from the line. This is subtracted from the received signal. The result +is an estimate of the signal which originated at the far end of the line, free +from echos of our own transmitted signal. + +The least mean squares (LMS) algorithm is attributed to Widrow and Hoff, and +was introduced in 1960. It is the commonest form of filter adaption used in +things like modem line equalisers and line echo cancellers. There it works very +well. However, it only works well for signals of constant amplitude. It works +very poorly for things like speech echo cancellation, where the signal level +varies widely. This is quite easy to fix. If the signal level is normalised - +similar to applying AGC - LMS can work as well for a signal of varying +amplitude as it does for a modem signal. This normalised least mean squares +(NLMS) algorithm is the commonest one used for speech echo cancellation. Many +other algorithms exist - e.g. RLS (essentially the same as Kalman filtering), +FAP, etc. Some perform significantly better than NLMS. 
However, factors such
+as computational complexity and patents favour the use of NLMS.
+
+A simple refinement to NLMS can improve its performance with speech. NLMS tends
+to adapt best to the strongest parts of a signal. If the signal is white noise,
+the NLMS algorithm works very well. However, speech has more low frequency than
+high frequency content. Pre-whitening (i.e. filtering the signal to flatten its
+spectrum) the echo signal improves the adapt rate for speech, and ensures the
+final residual signal is not heavily biased towards high frequencies. A very
+low complexity filter is adequate for this, so pre-whitening adds little to the
+compute requirements of the echo canceller.
+
+An FIR filter adapted using pre-whitened NLMS performs well, provided certain
+conditions are met:
+
+    - The transmitted signal has poor self-correlation.
+    - There is no signal being generated within the environment being
+      cancelled.
+
+The difficulty is that neither of these can be guaranteed.
+
+If the adaption is performed while transmitting noise (or something fairly
+noise like, such as voice) the adaption works very well. If the adaption is
+performed while transmitting something highly correlative (typically narrow
+band energy such as signalling tones or DTMF), the adaption can go seriously
+wrong. The reason is that there is only one solution for the adaption on a
+near random signal - the impulse response of the line. For a repetitive
+signal, there are any number of solutions which converge the adaption, and
+nothing guides the adaption to choose the generalised one. Allowing an
+untrained canceller to converge on this kind of narrowband energy is probably
+a good thing, since at least it cancels the tones. Allowing a well converged
+canceller to continue converging on such energy is just a way to ruin its
+generalised adaption. A narrowband detector is needed, so adaption can be
+suspended at appropriate times.
+
+The adaption process is based on trying to eliminate the received signal. When
+there is any signal from within the environment being cancelled it may upset
+the adaption process. Similarly, if the signal we are transmitting is small,
+noise may dominate and disturb the adaption process. If we can ensure that the
+adaption is only performed when we are transmitting a significant signal level,
+and the environment is not, things will be OK. Clearly, it is easy to tell when
+we are sending a significant signal. Telling whether the environment is
+generating a significant signal, and doing so fast enough that the adaption has
+not diverged too far before we stop it, is a little harder.
+
+The key problem in detecting when the environment is sourcing significant
+energy is that we must do this very quickly. Given a reasonably long sample of
+the received signal, there are a number of strategies which may be used to
+assess whether that signal contains a strong far end component. However, by the
+time that assessment is complete the far end signal will have already caused
+major mis-convergence in the adaption process. An assessment algorithm is
+needed which produces a fairly accurate result from a very short burst of far
+end energy.
+
+How do I use it?
+
+The echo canceller processes both the transmit and receive streams sample by
+sample. The processing function is not declared inline. Unfortunately,
+cancellation requires many operations per sample, so the call overhead is only
+a minor burden.
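+
+A minimal usage sketch (illustrative only, not part of the original text;
+"more_samples" stands in for the caller's own loop condition):
+
+	struct oslec_state *ec;
+	int16_t clean;
+
+	ec = oslec_create(256, ECHO_CAN_USE_ADAPTION | ECHO_CAN_USE_NLP);
+	if (!ec)
+		return -ENOMEM;
+
+	while (more_samples) {
+		tx = oslec_hpf_tx(ec, tx);        /* optional tx DC blocking */
+		clean = oslec_update(ec, tx, rx); /* echo cancelled rx */
+	}
+
+	oslec_free(ec);
+
+At the 8kHz sample rate used for telephony, the 256 taps above cover a
+32 ms echo path.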
+*/ + +#include "fir.h" +#include "oslec.h" + +/* + G.168 echo canceller descriptor. This defines the working state for a line + echo canceller. +*/ +struct oslec_state { + int16_t tx; + int16_t rx; + int16_t clean; + int16_t clean_nlp; + + int nonupdate_dwell; + int curr_pos; + int taps; + int log2taps; + int adaption_mode; + + int cond_met; + int32_t pstates; + int16_t adapt; + int32_t factor; + int16_t shift; + + /* Average levels and averaging filter states */ + int ltxacc; + int lrxacc; + int lcleanacc; + int lclean_bgacc; + int ltx; + int lrx; + int lclean; + int lclean_bg; + int lbgn; + int lbgn_acc; + int lbgn_upper; + int lbgn_upper_acc; + + /* foreground and background filter states */ + struct fir16_state_t fir_state; + struct fir16_state_t fir_state_bg; + int16_t *fir_taps16[2]; + + /* DC blocking filter states */ + int tx_1; + int tx_2; + int rx_1; + int rx_2; + + /* optional High Pass Filter states */ + int32_t xvtx[5]; + int32_t yvtx[5]; + int32_t xvrx[5]; + int32_t yvrx[5]; + + /* Parameters for the optional Hoth noise generator */ + int cng_level; + int cng_rndnum; + int cng_filter; + + /* snapshot sample of coeffs used for development */ + int16_t *snapshot; +}; + +#endif /* __ECHO_H */ diff --git a/drivers/misc/echo/fir.h b/drivers/misc/echo/fir.h new file mode 100644 index 000000000..4d0821025 --- /dev/null +++ b/drivers/misc/echo/fir.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * SpanDSP - a series of DSP components for telephony + * + * fir.h - General telephony FIR routines + * + * Written by Steve Underwood <steveu@coppice.org> + * + * Copyright (C) 2002 Steve Underwood + * + * All rights reserved. + */ + +#if !defined(_FIR_H_) +#define _FIR_H_ + +/* + Ideas for improvement: + + 1/ Rewrite filter for dual MAC inner loop. The issue here is handling + history sample offsets that are 16 bit aligned - the dual MAC needs + 32 bit aligmnent. There are some good examples in libbfdsp. + + 2/ Use the hardware circular buffer facility tohalve memory usage. + + 3/ Consider using internal memory. + + Using less memory might also improve speed as cache misses will be + reduced. A drop in MIPs and memory approaching 50% should be + possible. + + The foreground and background filters currenlty use a total of + about 10 MIPs/ch as measured with speedtest.c on a 256 TAP echo + can. +*/ + +/* + * 16 bit integer FIR descriptor. This defines the working state for a single + * instance of an FIR filter using 16 bit integer coefficients. + */ +struct fir16_state_t { + int taps; + int curr_pos; + const int16_t *coeffs; + int16_t *history; +}; + +/* + * 32 bit integer FIR descriptor. This defines the working state for a single + * instance of an FIR filter using 32 bit integer coefficients, and filtering + * 16 bit integer data. + */ +struct fir32_state_t { + int taps; + int curr_pos; + const int32_t *coeffs; + int16_t *history; +}; + +/* + * Floating point FIR descriptor. This defines the working state for a single + * instance of an FIR filter using floating point coefficients and data. 
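+ *
+ * Note that, unlike the two integer descriptors above, this floating
+ * point variant has no create/flush/free helpers in this header.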
+ */ +struct fir_float_state_t { + int taps; + int curr_pos; + const float *coeffs; + float *history; +}; + +static inline const int16_t *fir16_create(struct fir16_state_t *fir, + const int16_t *coeffs, int taps) +{ + fir->taps = taps; + fir->curr_pos = taps - 1; + fir->coeffs = coeffs; + fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL); + return fir->history; +} + +static inline void fir16_flush(struct fir16_state_t *fir) +{ + memset(fir->history, 0, fir->taps * sizeof(int16_t)); +} + +static inline void fir16_free(struct fir16_state_t *fir) +{ + kfree(fir->history); +} + +static inline int16_t fir16(struct fir16_state_t *fir, int16_t sample) +{ + int32_t y; + int i; + int offset1; + int offset2; + + fir->history[fir->curr_pos] = sample; + + offset2 = fir->curr_pos; + offset1 = fir->taps - offset2; + y = 0; + for (i = fir->taps - 1; i >= offset1; i--) + y += fir->coeffs[i] * fir->history[i - offset1]; + for (; i >= 0; i--) + y += fir->coeffs[i] * fir->history[i + offset2]; + if (fir->curr_pos <= 0) + fir->curr_pos = fir->taps; + fir->curr_pos--; + return (int16_t) (y >> 15); +} + +static inline const int16_t *fir32_create(struct fir32_state_t *fir, + const int32_t *coeffs, int taps) +{ + fir->taps = taps; + fir->curr_pos = taps - 1; + fir->coeffs = coeffs; + fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL); + return fir->history; +} + +static inline void fir32_flush(struct fir32_state_t *fir) +{ + memset(fir->history, 0, fir->taps * sizeof(int16_t)); +} + +static inline void fir32_free(struct fir32_state_t *fir) +{ + kfree(fir->history); +} + +static inline int16_t fir32(struct fir32_state_t *fir, int16_t sample) +{ + int i; + int32_t y; + int offset1; + int offset2; + + fir->history[fir->curr_pos] = sample; + offset2 = fir->curr_pos; + offset1 = fir->taps - offset2; + y = 0; + for (i = fir->taps - 1; i >= offset1; i--) + y += fir->coeffs[i] * fir->history[i - offset1]; + for (; i >= 0; i--) + y += fir->coeffs[i] * fir->history[i + offset2]; + if (fir->curr_pos <= 0) + fir->curr_pos = fir->taps; + fir->curr_pos--; + return (int16_t) (y >> 15); +} + +#endif diff --git a/drivers/misc/echo/oslec.h b/drivers/misc/echo/oslec.h new file mode 100644 index 000000000..f1adac143 --- /dev/null +++ b/drivers/misc/echo/oslec.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * OSLEC - A line echo canceller. This code is being developed + * against and partially complies with G168. Using code from SpanDSP + * + * Written by Steve Underwood <steveu@coppice.org> + * and David Rowe <david_at_rowetel_dot_com> + * + * Copyright (C) 2001 Steve Underwood and 2007-2008 David Rowe + * + * All rights reserved. + */ + +#ifndef __OSLEC_H +#define __OSLEC_H + +/* Mask bits for the adaption mode */ +#define ECHO_CAN_USE_ADAPTION 0x01 +#define ECHO_CAN_USE_NLP 0x02 +#define ECHO_CAN_USE_CNG 0x04 +#define ECHO_CAN_USE_CLIP 0x08 +#define ECHO_CAN_USE_TX_HPF 0x10 +#define ECHO_CAN_USE_RX_HPF 0x20 +#define ECHO_CAN_DISABLE 0x40 + +/** + * oslec_state: G.168 echo canceller descriptor. + * + * This defines the working state for a line echo canceller. + */ +struct oslec_state; + +/** + * oslec_create - Create a voice echo canceller context. + * @len: The length of the canceller, in samples. + * @return: The new canceller context, or NULL if the canceller could not be + * created. + */ +struct oslec_state *oslec_create(int len, int adaption_mode); + +/** + * oslec_free - Free a voice echo canceller context. + * @ec: The echo canceller context. 
+ */
+void oslec_free(struct oslec_state *ec);
+
+/**
+ * oslec_flush - Flush (reinitialise) a voice echo canceller context.
+ * @ec: The echo canceller context.
+ */
+void oslec_flush(struct oslec_state *ec);
+
+/**
+ * oslec_adaption_mode - Set the adaption mode of a voice echo canceller context.
+ * @ec: The echo canceller context.
+ * @adaption_mode: The mode.
+ */
+void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode);
+
+/**
+ * oslec_snapshot - Snapshot the foreground filter coefficients.
+ * @ec: The echo canceller context.
+ */
+void oslec_snapshot(struct oslec_state *ec);
+
+/**
+ * oslec_update - Process a sample through a voice echo canceller.
+ * @ec: The echo canceller context.
+ * @tx: The transmitted audio sample.
+ * @rx: The received audio sample.
+ *
+ * The return value is the clean (echo cancelled) received sample.
+ */
+int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx);
+
+/**
+ * oslec_hpf_tx - High pass filter the tx signal.
+ * @ec: The echo canceller context.
+ * @tx: The transmitted audio sample.
+ *
+ * The return value is the HP filtered transmit sample; send this to your D/A.
+ */
+int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx);
+
+#endif /* __OSLEC_H */
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
new file mode 100644
index 000000000..c92f2cdf4
--- /dev/null
+++ b/drivers/misc/eeprom/Kconfig
@@ -0,0 +1,133 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "EEPROM support"
+
+config EEPROM_AT24
+	tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
+	depends on I2C && SYSFS
+	select NVMEM
+	select NVMEM_SYSFS
+	select REGMAP
+	select REGMAP_I2C
+	help
+	  Enable this driver to get read/write support for most I2C EEPROMs
+	  and compatible devices like FRAMs, SRAMs, ROMs etc., after you
+	  configure the driver to know about each chip on your target
+	  board. Use these generic chip names, instead of vendor-specific
+	  ones like at24c64, 24lc02 or fm24c04:
+
+	     24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
+	     24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048
+
+	  Unless you like data loss puzzles, always be sure that any chip
+	  you configure as a 24c32 (32 kbit) or larger is NOT really a
+	  24c16 (16 kbit) or smaller, and vice versa. Marking the chip
+	  as read-only won't help recover from this. Also, if your chip
+	  has any software write-protect mechanism you may want to review the
+	  code to make sure this driver won't turn it on by accident.
+
+	  If you use this with an SMBus adapter instead of an I2C adapter,
+	  full functionality is not available. Only smaller devices are
+	  supported (24c16 and below, max 4 kByte).
+
+	  This driver can also be built as a module. If so, the module
+	  will be called at24.
+
+config EEPROM_AT25
+	tristate "SPI EEPROMs from most vendors"
+	depends on SPI && SYSFS
+	select NVMEM
+	select NVMEM_SYSFS
+	help
+	  Enable this driver to get read/write support for most SPI EEPROMs,
+	  after you configure the board init code to know about each eeprom
+	  on your target board.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called at25.
+
+config EEPROM_LEGACY
+	tristate "Old I2C EEPROM reader (DEPRECATED)"
+	depends on I2C && SYSFS
+	help
+	  If you say yes here you get read-only access to the EEPROM data
+	  available on modern memory DIMMs and Sony Vaio laptops via I2C. Such
+	  EEPROMs could theoretically be available on other devices as well.
+
+	  This driver is deprecated and will be removed soon; please use the
+	  better at24 driver instead.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called eeprom.
+
+config EEPROM_MAX6875
+	tristate "Maxim MAX6874/5 power supply supervisor"
+	depends on I2C
+	help
+	  If you say yes here you get read-only support for the user EEPROM of
+	  the Maxim MAX6874/5 EEPROM-programmable, quad power-supply
+	  sequencer/supervisor.
+
+	  All other features of this chip should be accessed via i2c-dev.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called max6875.
+
+
+config EEPROM_93CX6
+	tristate "EEPROM 93CX6 support"
+	help
+	  This is a driver for the EEPROM chipsets 93c46 and 93c66.
+	  The driver supports both read and write commands.
+
+	  If unsure, say N.
+
+config EEPROM_93XX46
+	tristate "Microwire EEPROM 93XX46 support"
+	depends on SPI && SYSFS
+	select REGMAP
+	select NVMEM
+	select NVMEM_SYSFS
+	help
+	  Driver for the microwire EEPROM chipsets 93xx46x. The driver
+	  supports both read and write commands and also the command to
+	  erase the whole EEPROM.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called eeprom_93xx46.
+
+	  If unsure, say N.
+
+config EEPROM_DIGSY_MTC_CFG
+	bool "DigsyMTC display configuration EEPROMs device"
+	depends on GPIO_MPC5200 && SPI_GPIO
+	help
+	  This option enables access to the display configuration EEPROMs
+	  on the digsy_mtc board. You have to additionally select the
+	  Microwire EEPROM 93XX46 driver. sysfs entries will be created for
+	  that EEPROM, allowing you to read/write the configuration data or
+	  to erase the whole EEPROM.
+
+	  If unsure, say N.
+
+config EEPROM_IDT_89HPESX
+	tristate "IDT 89HPESx PCIe-switches EEPROM / CSR support"
+	depends on I2C && SYSFS
+	help
+	  Enable this driver to get read/write access to EEPROM / CSRs
+	  over the IDT PCIe-switch i2c-slave interface.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called idt_89hpesx.
+
+config EEPROM_EE1004
+	tristate "SPD EEPROMs on DDR4 memory modules"
+	depends on I2C && SYSFS
+	help
+	  Enable this driver to get read support for SPD EEPROMs following
+	  the JEDEC EE1004 standard. These are typically found on DDR4
+	  SDRAM memory modules.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called ee1004.
+ +endmenu diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile new file mode 100644 index 000000000..a9b4b6579 --- /dev/null +++ b/drivers/misc/eeprom/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_EEPROM_AT24) += at24.o +obj-$(CONFIG_EEPROM_AT25) += at25.o +obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o +obj-$(CONFIG_EEPROM_MAX6875) += max6875.o +obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o +obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o +obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o +obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o +obj-$(CONFIG_EEPROM_EE1004) += ee1004.o diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c new file mode 100644 index 000000000..305ffad13 --- /dev/null +++ b/drivers/misc/eeprom/at24.c @@ -0,0 +1,858 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * at24.c - handle most I2C EEPROMs + * + * Copyright (C) 2005-2007 David Brownell + * Copyright (C) 2008 Wolfram Sang, Pengutronix + */ + +#include <linux/acpi.h> +#include <linux/bitops.h> +#include <linux/capability.h> +#include <linux/delay.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/nvmem-provider.h> +#include <linux/of_device.h> +#include <linux/pm_runtime.h> +#include <linux/property.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> + +/* Address pointer is 16 bit. */ +#define AT24_FLAG_ADDR16 BIT(7) +/* sysfs-entry will be read-only. */ +#define AT24_FLAG_READONLY BIT(6) +/* sysfs-entry will be world-readable. */ +#define AT24_FLAG_IRUGO BIT(5) +/* Take always 8 addresses (24c00). */ +#define AT24_FLAG_TAKE8ADDR BIT(4) +/* Factory-programmed serial number. */ +#define AT24_FLAG_SERIAL BIT(3) +/* Factory-programmed mac address. */ +#define AT24_FLAG_MAC BIT(2) +/* Does not auto-rollover reads to the next slave address. */ +#define AT24_FLAG_NO_RDROL BIT(1) + +/* + * I2C EEPROMs from most vendors are inexpensive and mostly interchangeable. + * Differences between different vendor product lines (like Atmel AT24C or + * MicroChip 24LC, etc) won't much matter for typical read/write access. + * There are also I2C RAM chips, likewise interchangeable. One example + * would be the PCF8570, which acts like a 24c02 EEPROM (256 bytes). + * + * However, misconfiguration can lose data. "Set 16-bit memory address" + * to a part with 8-bit addressing will overwrite data. Writing with too + * big a page size also loses data. And it's not safe to assume that the + * conventional addresses 0x50..0x57 only hold eeproms; a PCF8563 RTC + * uses 0x51, for just one example. + * + * Accordingly, explicit board-specific configuration data should be used + * in almost all cases. (One partial exception is an SMBus used to access + * "SPD" data for DRAM sticks. Those only use 24c02 EEPROMs.) + * + * So this driver uses "new style" I2C driver binding, expecting to be + * told what devices exist. That may be in arch/X/mach-Y/board-Z.c or + * similar kernel-resident tables; or, configuration data coming from + * a bootloader. + * + * Other than binding model, current differences from "eeprom" driver are + * that this one handles write access and isn't restricted to 24c02 devices. + * It also handles larger devices (32 kbit and up) with two-byte addresses, + * which won't work on pure SMBus systems. 
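+ *
+ * As an illustration (hypothetical board and device tree fragments,
+ * not part of this file), a 24c64 at address 0x50 could be declared
+ * from board code as
+ *
+ *	static struct i2c_board_info eeprom_info __initdata = {
+ *		I2C_BOARD_INFO("24c64", 0x50),
+ *	};
+ *
+ * or from a device tree as
+ *
+ *	eeprom@50 {
+ *		compatible = "atmel,24c64";
+ *		reg = <0x50>;
+ *	};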
+ */ + +struct at24_client { + struct i2c_client *client; + struct regmap *regmap; +}; + +struct at24_data { + /* + * Lock protects against activities from other Linux tasks, + * but not from changes by other I2C masters. + */ + struct mutex lock; + + unsigned int write_max; + unsigned int num_addresses; + unsigned int offset_adj; + + u32 byte_len; + u16 page_size; + u8 flags; + + struct nvmem_device *nvmem; + struct regulator *vcc_reg; + void (*read_post)(unsigned int off, char *buf, size_t count); + + /* + * Some chips tie up multiple I2C addresses; dummy devices reserve + * them for us, and we'll use them with SMBus calls. + */ + struct at24_client client[]; +}; + +/* + * This parameter is to help this driver avoid blocking other drivers out + * of I2C for potentially troublesome amounts of time. With a 100 kHz I2C + * clock, one 256 byte read takes about 1/43 second which is excessive; + * but the 1/170 second it takes at 400 kHz may be quite reasonable; and + * at 1 MHz (Fm+) a 1/430 second delay could easily be invisible. + * + * This value is forced to be a power of two so that writes align on pages. + */ +static unsigned int at24_io_limit = 128; +module_param_named(io_limit, at24_io_limit, uint, 0); +MODULE_PARM_DESC(at24_io_limit, "Maximum bytes per I/O (default 128)"); + +/* + * Specs often allow 5 msec for a page write, sometimes 20 msec; + * it's important to recover from write timeouts. + */ +static unsigned int at24_write_timeout = 25; +module_param_named(write_timeout, at24_write_timeout, uint, 0); +MODULE_PARM_DESC(at24_write_timeout, "Time (in ms) to try writes (default 25)"); + +struct at24_chip_data { + u32 byte_len; + u8 flags; + void (*read_post)(unsigned int off, char *buf, size_t count); +}; + +#define AT24_CHIP_DATA(_name, _len, _flags) \ + static const struct at24_chip_data _name = { \ + .byte_len = _len, .flags = _flags, \ + } + +#define AT24_CHIP_DATA_CB(_name, _len, _flags, _read_post) \ + static const struct at24_chip_data _name = { \ + .byte_len = _len, .flags = _flags, \ + .read_post = _read_post, \ + } + +static void at24_read_post_vaio(unsigned int off, char *buf, size_t count) +{ + int i; + + if (capable(CAP_SYS_ADMIN)) + return; + + /* + * Hide VAIO private settings to regular users: + * - BIOS passwords: bytes 0x00 to 0x0f + * - UUID: bytes 0x10 to 0x1f + * - Serial number: 0xc0 to 0xdf + */ + for (i = 0; i < count; i++) { + if ((off + i <= 0x1f) || + (off + i >= 0xc0 && off + i <= 0xdf)) + buf[i] = 0; + } +} + +/* needs 8 addresses as A0-A2 are ignored */ +AT24_CHIP_DATA(at24_data_24c00, 128 / 8, AT24_FLAG_TAKE8ADDR); +/* old variants can't be handled with this generic entry! 
*/ +AT24_CHIP_DATA(at24_data_24c01, 1024 / 8, 0); +AT24_CHIP_DATA(at24_data_24cs01, 16, + AT24_FLAG_SERIAL | AT24_FLAG_READONLY); +AT24_CHIP_DATA(at24_data_24c02, 2048 / 8, 0); +AT24_CHIP_DATA(at24_data_24cs02, 16, + AT24_FLAG_SERIAL | AT24_FLAG_READONLY); +AT24_CHIP_DATA(at24_data_24mac402, 48 / 8, + AT24_FLAG_MAC | AT24_FLAG_READONLY); +AT24_CHIP_DATA(at24_data_24mac602, 64 / 8, + AT24_FLAG_MAC | AT24_FLAG_READONLY); +/* spd is a 24c02 in memory DIMMs */ +AT24_CHIP_DATA(at24_data_spd, 2048 / 8, + AT24_FLAG_READONLY | AT24_FLAG_IRUGO); +/* 24c02_vaio is a 24c02 on some Sony laptops */ +AT24_CHIP_DATA_CB(at24_data_24c02_vaio, 2048 / 8, + AT24_FLAG_READONLY | AT24_FLAG_IRUGO, + at24_read_post_vaio); +AT24_CHIP_DATA(at24_data_24c04, 4096 / 8, 0); +AT24_CHIP_DATA(at24_data_24cs04, 16, + AT24_FLAG_SERIAL | AT24_FLAG_READONLY); +/* 24rf08 quirk is handled at i2c-core */ +AT24_CHIP_DATA(at24_data_24c08, 8192 / 8, 0); +AT24_CHIP_DATA(at24_data_24cs08, 16, + AT24_FLAG_SERIAL | AT24_FLAG_READONLY); +AT24_CHIP_DATA(at24_data_24c16, 16384 / 8, 0); +AT24_CHIP_DATA(at24_data_24cs16, 16, + AT24_FLAG_SERIAL | AT24_FLAG_READONLY); +AT24_CHIP_DATA(at24_data_24c32, 32768 / 8, AT24_FLAG_ADDR16); +AT24_CHIP_DATA(at24_data_24cs32, 16, + AT24_FLAG_ADDR16 | AT24_FLAG_SERIAL | AT24_FLAG_READONLY); +AT24_CHIP_DATA(at24_data_24c64, 65536 / 8, AT24_FLAG_ADDR16); +AT24_CHIP_DATA(at24_data_24cs64, 16, + AT24_FLAG_ADDR16 | AT24_FLAG_SERIAL | AT24_FLAG_READONLY); +AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16); +AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16); +AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16); +AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16); +AT24_CHIP_DATA(at24_data_24c2048, 2097152 / 8, AT24_FLAG_ADDR16); +/* identical to 24c08 ? 
*/ +AT24_CHIP_DATA(at24_data_INT3499, 8192 / 8, 0); + +static const struct i2c_device_id at24_ids[] = { + { "24c00", (kernel_ulong_t)&at24_data_24c00 }, + { "24c01", (kernel_ulong_t)&at24_data_24c01 }, + { "24cs01", (kernel_ulong_t)&at24_data_24cs01 }, + { "24c02", (kernel_ulong_t)&at24_data_24c02 }, + { "24cs02", (kernel_ulong_t)&at24_data_24cs02 }, + { "24mac402", (kernel_ulong_t)&at24_data_24mac402 }, + { "24mac602", (kernel_ulong_t)&at24_data_24mac602 }, + { "spd", (kernel_ulong_t)&at24_data_spd }, + { "24c02-vaio", (kernel_ulong_t)&at24_data_24c02_vaio }, + { "24c04", (kernel_ulong_t)&at24_data_24c04 }, + { "24cs04", (kernel_ulong_t)&at24_data_24cs04 }, + { "24c08", (kernel_ulong_t)&at24_data_24c08 }, + { "24cs08", (kernel_ulong_t)&at24_data_24cs08 }, + { "24c16", (kernel_ulong_t)&at24_data_24c16 }, + { "24cs16", (kernel_ulong_t)&at24_data_24cs16 }, + { "24c32", (kernel_ulong_t)&at24_data_24c32 }, + { "24cs32", (kernel_ulong_t)&at24_data_24cs32 }, + { "24c64", (kernel_ulong_t)&at24_data_24c64 }, + { "24cs64", (kernel_ulong_t)&at24_data_24cs64 }, + { "24c128", (kernel_ulong_t)&at24_data_24c128 }, + { "24c256", (kernel_ulong_t)&at24_data_24c256 }, + { "24c512", (kernel_ulong_t)&at24_data_24c512 }, + { "24c1024", (kernel_ulong_t)&at24_data_24c1024 }, + { "24c2048", (kernel_ulong_t)&at24_data_24c2048 }, + { "at24", 0 }, + { /* END OF LIST */ } +}; +MODULE_DEVICE_TABLE(i2c, at24_ids); + +static const struct of_device_id at24_of_match[] = { + { .compatible = "atmel,24c00", .data = &at24_data_24c00 }, + { .compatible = "atmel,24c01", .data = &at24_data_24c01 }, + { .compatible = "atmel,24cs01", .data = &at24_data_24cs01 }, + { .compatible = "atmel,24c02", .data = &at24_data_24c02 }, + { .compatible = "atmel,24cs02", .data = &at24_data_24cs02 }, + { .compatible = "atmel,24mac402", .data = &at24_data_24mac402 }, + { .compatible = "atmel,24mac602", .data = &at24_data_24mac602 }, + { .compatible = "atmel,spd", .data = &at24_data_spd }, + { .compatible = "atmel,24c04", .data = &at24_data_24c04 }, + { .compatible = "atmel,24cs04", .data = &at24_data_24cs04 }, + { .compatible = "atmel,24c08", .data = &at24_data_24c08 }, + { .compatible = "atmel,24cs08", .data = &at24_data_24cs08 }, + { .compatible = "atmel,24c16", .data = &at24_data_24c16 }, + { .compatible = "atmel,24cs16", .data = &at24_data_24cs16 }, + { .compatible = "atmel,24c32", .data = &at24_data_24c32 }, + { .compatible = "atmel,24cs32", .data = &at24_data_24cs32 }, + { .compatible = "atmel,24c64", .data = &at24_data_24c64 }, + { .compatible = "atmel,24cs64", .data = &at24_data_24cs64 }, + { .compatible = "atmel,24c128", .data = &at24_data_24c128 }, + { .compatible = "atmel,24c256", .data = &at24_data_24c256 }, + { .compatible = "atmel,24c512", .data = &at24_data_24c512 }, + { .compatible = "atmel,24c1024", .data = &at24_data_24c1024 }, + { .compatible = "atmel,24c2048", .data = &at24_data_24c2048 }, + { /* END OF LIST */ }, +}; +MODULE_DEVICE_TABLE(of, at24_of_match); + +static const struct acpi_device_id __maybe_unused at24_acpi_ids[] = { + { "INT3499", (kernel_ulong_t)&at24_data_INT3499 }, + { "TPF0001", (kernel_ulong_t)&at24_data_24c1024 }, + { /* END OF LIST */ } +}; +MODULE_DEVICE_TABLE(acpi, at24_acpi_ids); + +/* + * This routine supports chips which consume multiple I2C addresses. It + * computes the addressing information to be used for a given r/w request. + * Assumes that sanity checks for offset happened at sysfs-layer. + * + * Slave address and byte offset derive from the offset. 
Always + * set the byte address; on a multi-master board, another master + * may have changed the chip's "current" address pointer. + */ +static struct at24_client *at24_translate_offset(struct at24_data *at24, + unsigned int *offset) +{ + unsigned int i; + + if (at24->flags & AT24_FLAG_ADDR16) { + i = *offset >> 16; + *offset &= 0xffff; + } else { + i = *offset >> 8; + *offset &= 0xff; + } + + return &at24->client[i]; +} + +static struct device *at24_base_client_dev(struct at24_data *at24) +{ + return &at24->client[0].client->dev; +} + +static size_t at24_adjust_read_count(struct at24_data *at24, + unsigned int offset, size_t count) +{ + unsigned int bits; + size_t remainder; + + /* + * In case of multi-address chips that don't rollover reads to + * the next slave address: truncate the count to the slave boundary, + * so that the read never straddles slaves. + */ + if (at24->flags & AT24_FLAG_NO_RDROL) { + bits = (at24->flags & AT24_FLAG_ADDR16) ? 16 : 8; + remainder = BIT(bits) - offset; + if (count > remainder) + count = remainder; + } + + if (count > at24_io_limit) + count = at24_io_limit; + + return count; +} + +static ssize_t at24_regmap_read(struct at24_data *at24, char *buf, + unsigned int offset, size_t count) +{ + unsigned long timeout, read_time; + struct at24_client *at24_client; + struct i2c_client *client; + struct regmap *regmap; + int ret; + + at24_client = at24_translate_offset(at24, &offset); + regmap = at24_client->regmap; + client = at24_client->client; + count = at24_adjust_read_count(at24, offset, count); + + /* adjust offset for mac and serial read ops */ + offset += at24->offset_adj; + + timeout = jiffies + msecs_to_jiffies(at24_write_timeout); + do { + /* + * The timestamp shall be taken before the actual operation + * to avoid a premature timeout in case of high CPU load. + */ + read_time = jiffies; + + ret = regmap_bulk_read(regmap, offset, buf, count); + dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n", + count, offset, ret, jiffies); + if (!ret) + return count; + + usleep_range(1000, 1500); + } while (time_before(read_time, timeout)); + + return -ETIMEDOUT; +} + +/* + * Note that if the hardware write-protect pin is pulled high, the whole + * chip is normally write protected. But there are plenty of product + * variants here, including OTP fuses and partial chip protect. + * + * We only use page mode writes; the alternative is sloooow. These routines + * write at most one page. + */ + +static size_t at24_adjust_write_count(struct at24_data *at24, + unsigned int offset, size_t count) +{ + unsigned int next_page; + + /* write_max is at most a page */ + if (count > at24->write_max) + count = at24->write_max; + + /* Never roll over backwards, to the start of this page */ + next_page = roundup(offset + 1, at24->page_size); + if (offset + count > next_page) + count = next_page - offset; + + return count; +} + +static ssize_t at24_regmap_write(struct at24_data *at24, const char *buf, + unsigned int offset, size_t count) +{ + unsigned long timeout, write_time; + struct at24_client *at24_client; + struct i2c_client *client; + struct regmap *regmap; + int ret; + + at24_client = at24_translate_offset(at24, &offset); + regmap = at24_client->regmap; + client = at24_client->client; + count = at24_adjust_write_count(at24, offset, count); + timeout = jiffies + msecs_to_jiffies(at24_write_timeout); + + do { + /* + * The timestamp shall be taken before the actual operation + * to avoid a premature timeout in case of high CPU load. 
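+		 *
+		 * While an internal write cycle is in progress the chip
+		 * typically does not ack its address, so regmap_bulk_write()
+		 * fails; this loop simply retries (acknowledge polling)
+		 * until the chip responds again or at24_write_timeout
+		 * expires.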
+ */ + write_time = jiffies; + + ret = regmap_bulk_write(regmap, offset, buf, count); + dev_dbg(&client->dev, "write %zu@%d --> %d (%ld)\n", + count, offset, ret, jiffies); + if (!ret) + return count; + + usleep_range(1000, 1500); + } while (time_before(write_time, timeout)); + + return -ETIMEDOUT; +} + +static int at24_read(void *priv, unsigned int off, void *val, size_t count) +{ + struct at24_data *at24; + struct device *dev; + char *buf = val; + int i, ret; + + at24 = priv; + dev = at24_base_client_dev(at24); + + if (unlikely(!count)) + return count; + + if (off + count > at24->byte_len) + return -EINVAL; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + return ret; + } + + /* + * Read data from chip, protecting against concurrent updates + * from this host, but not from other I2C masters. + */ + mutex_lock(&at24->lock); + + for (i = 0; count; i += ret, count -= ret) { + ret = at24_regmap_read(at24, buf + i, off + i, count); + if (ret < 0) { + mutex_unlock(&at24->lock); + pm_runtime_put(dev); + return ret; + } + } + + mutex_unlock(&at24->lock); + + pm_runtime_put(dev); + + if (unlikely(at24->read_post)) + at24->read_post(off, buf, i); + + return 0; +} + +static int at24_write(void *priv, unsigned int off, void *val, size_t count) +{ + struct at24_data *at24; + struct device *dev; + char *buf = val; + int ret; + + at24 = priv; + dev = at24_base_client_dev(at24); + + if (unlikely(!count)) + return -EINVAL; + + if (off + count > at24->byte_len) + return -EINVAL; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + return ret; + } + + /* + * Write data to chip, protecting against concurrent updates + * from this host, but not from other I2C masters. + */ + mutex_lock(&at24->lock); + + while (count) { + ret = at24_regmap_write(at24, buf, off, count); + if (ret < 0) { + mutex_unlock(&at24->lock); + pm_runtime_put(dev); + return ret; + } + buf += ret; + off += ret; + count -= ret; + } + + mutex_unlock(&at24->lock); + + pm_runtime_put(dev); + + return 0; +} + +static const struct at24_chip_data *at24_get_chip_data(struct device *dev) +{ + struct device_node *of_node = dev->of_node; + const struct at24_chip_data *cdata; + const struct i2c_device_id *id; + + id = i2c_match_id(at24_ids, to_i2c_client(dev)); + + /* + * The I2C core allows OF nodes compatibles to match against the + * I2C device ID table as a fallback, so check not only if an OF + * node is present but also if it matches an OF device ID entry. 
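+	 *
+	 * For instance, a node declared as (illustrative snippet only):
+	 *
+	 *	eeprom@50 {
+	 *		compatible = "atmel,24c02";
+	 *		reg = <0x50>;
+	 *	};
+	 *
+	 * resolves through at24_of_match above, while a legacy board file
+	 * using an I2C device ID of "24c02" takes the i2c_match_id() path.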
+ */ + if (of_node && of_match_device(at24_of_match, dev)) + cdata = of_device_get_match_data(dev); + else if (id) + cdata = (void *)id->driver_data; + else + cdata = acpi_device_get_match_data(dev); + + if (!cdata) + return ERR_PTR(-ENODEV); + + return cdata; +} + +static int at24_make_dummy_client(struct at24_data *at24, unsigned int index, + struct regmap_config *regmap_config) +{ + struct i2c_client *base_client, *dummy_client; + struct regmap *regmap; + struct device *dev; + + base_client = at24->client[0].client; + dev = &base_client->dev; + + dummy_client = devm_i2c_new_dummy_device(dev, base_client->adapter, + base_client->addr + index); + if (IS_ERR(dummy_client)) + return PTR_ERR(dummy_client); + + regmap = devm_regmap_init_i2c(dummy_client, regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + at24->client[index].client = dummy_client; + at24->client[index].regmap = regmap; + + return 0; +} + +static unsigned int at24_get_offset_adj(u8 flags, unsigned int byte_len) +{ + if (flags & AT24_FLAG_MAC) { + /* EUI-48 starts from 0x9a, EUI-64 from 0x98 */ + return 0xa0 - byte_len; + } else if (flags & AT24_FLAG_SERIAL && flags & AT24_FLAG_ADDR16) { + /* + * For 16 bit address pointers, the word address must contain + * a '10' sequence in bits 11 and 10 regardless of the + * intended position of the address pointer. + */ + return 0x0800; + } else if (flags & AT24_FLAG_SERIAL) { + /* + * Otherwise the word address must begin with a '10' sequence, + * regardless of the intended address. + */ + return 0x0080; + } else { + return 0; + } +} + +static int at24_probe(struct i2c_client *client) +{ + struct regmap_config regmap_config = { }; + struct nvmem_config nvmem_config = { }; + u32 byte_len, page_size, flags, addrw; + const struct at24_chip_data *cdata; + struct device *dev = &client->dev; + bool i2c_fn_i2c, i2c_fn_block; + unsigned int i, num_addresses; + struct at24_data *at24; + struct regmap *regmap; + bool writable; + u8 test_byte; + int err; + + i2c_fn_i2c = i2c_check_functionality(client->adapter, I2C_FUNC_I2C); + i2c_fn_block = i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_WRITE_I2C_BLOCK); + + cdata = at24_get_chip_data(dev); + if (IS_ERR(cdata)) + return PTR_ERR(cdata); + + err = device_property_read_u32(dev, "pagesize", &page_size); + if (err) + /* + * This is slow, but we can't know all eeproms, so we better + * play safe. Specifying custom eeprom-types via device tree + * or properties is recommended anyhow. 
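+		 *
+		 * A node for a chip with 16-byte pages would state, for
+		 * example (illustrative only):
+		 *
+		 *	pagesize = <16>;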
+ */
+		page_size = 1;
+
+	flags = cdata->flags;
+	if (device_property_present(dev, "read-only"))
+		flags |= AT24_FLAG_READONLY;
+	if (device_property_present(dev, "no-read-rollover"))
+		flags |= AT24_FLAG_NO_RDROL;
+
+	err = device_property_read_u32(dev, "address-width", &addrw);
+	if (!err) {
+		switch (addrw) {
+		case 8:
+			if (flags & AT24_FLAG_ADDR16)
+				dev_warn(dev,
+					 "overriding address width to 8, chip default is 16\n");
+			flags &= ~AT24_FLAG_ADDR16;
+			break;
+		case 16:
+			flags |= AT24_FLAG_ADDR16;
+			break;
+		default:
+			dev_warn(dev, "Bad \"address-width\" property: %u\n",
+				 addrw);
+		}
+	}
+
+	err = device_property_read_u32(dev, "size", &byte_len);
+	if (err)
+		byte_len = cdata->byte_len;
+
+	if (!i2c_fn_i2c && !i2c_fn_block)
+		page_size = 1;
+
+	if (!page_size) {
+		dev_err(dev, "page_size must not be 0!\n");
+		return -EINVAL;
+	}
+
+	if (!is_power_of_2(page_size))
+		dev_warn(dev, "page_size looks suspicious (not a power of 2)!\n");
+
+	err = device_property_read_u32(dev, "num-addresses", &num_addresses);
+	if (err) {
+		if (flags & AT24_FLAG_TAKE8ADDR)
+			num_addresses = 8;
+		else
+			num_addresses = DIV_ROUND_UP(byte_len,
+				(flags & AT24_FLAG_ADDR16) ? 65536 : 256);
+	}
+
+	if ((flags & AT24_FLAG_SERIAL) && (flags & AT24_FLAG_MAC)) {
+		dev_err(dev,
+			"invalid device data - cannot have both AT24_FLAG_SERIAL & AT24_FLAG_MAC.\n");
+		return -EINVAL;
+	}
+
+	regmap_config.val_bits = 8;
+	regmap_config.reg_bits = (flags & AT24_FLAG_ADDR16) ? 16 : 8;
+	regmap_config.disable_locking = true;
+
+	regmap = devm_regmap_init_i2c(client, &regmap_config);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	at24 = devm_kzalloc(dev, struct_size(at24, client, num_addresses),
+			    GFP_KERNEL);
+	if (!at24)
+		return -ENOMEM;
+
+	mutex_init(&at24->lock);
+	at24->byte_len = byte_len;
+	at24->page_size = page_size;
+	at24->flags = flags;
+	at24->read_post = cdata->read_post;
+	at24->num_addresses = num_addresses;
+	at24->offset_adj = at24_get_offset_adj(flags, byte_len);
+	at24->client[0].client = client;
+	at24->client[0].regmap = regmap;
+
+	at24->vcc_reg = devm_regulator_get(dev, "vcc");
+	if (IS_ERR(at24->vcc_reg))
+		return PTR_ERR(at24->vcc_reg);
+
+	writable = !(flags & AT24_FLAG_READONLY);
+	if (writable) {
+		at24->write_max = min_t(unsigned int,
+					page_size, at24_io_limit);
+		if (!i2c_fn_i2c && at24->write_max > I2C_SMBUS_BLOCK_MAX)
+			at24->write_max = I2C_SMBUS_BLOCK_MAX;
+	}
+
+	/* use dummy devices for multiple-address chips */
+	for (i = 1; i < num_addresses; i++) {
+		err = at24_make_dummy_client(at24, i, &regmap_config);
+		if (err)
+			return err;
+	}
+
+	/*
+	 * We initialize nvmem_config.id to NVMEM_DEVID_AUTO even if the
+	 * label property is set, because some platforms carry multiple
+	 * eeproms with the same label, and registering each of them under
+	 * that one name would fail and trigger a cascade of failures on
+	 * such platforms.
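+	 *
+	 * With NVMEM_DEVID_AUTO the nvmem core appends a unique id to the
+	 * name, so two eeproms labelled e.g. "calib" are expected to show
+	 * up as "calib0" and "calib1" instead of colliding.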
+ */ + nvmem_config.id = NVMEM_DEVID_AUTO; + + if (device_property_present(dev, "label")) { + err = device_property_read_string(dev, "label", + &nvmem_config.name); + if (err) + return err; + } else { + nvmem_config.name = dev_name(dev); + } + + nvmem_config.type = NVMEM_TYPE_EEPROM; + nvmem_config.dev = dev; + nvmem_config.read_only = !writable; + nvmem_config.root_only = !(flags & AT24_FLAG_IRUGO); + nvmem_config.owner = THIS_MODULE; + nvmem_config.compat = true; + nvmem_config.base_dev = dev; + nvmem_config.reg_read = at24_read; + nvmem_config.reg_write = at24_write; + nvmem_config.priv = at24; + nvmem_config.stride = 1; + nvmem_config.word_size = 1; + nvmem_config.size = byte_len; + + i2c_set_clientdata(client, at24); + + err = regulator_enable(at24->vcc_reg); + if (err) { + dev_err(dev, "Failed to enable vcc regulator\n"); + return err; + } + + /* enable runtime pm */ + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + at24->nvmem = devm_nvmem_register(dev, &nvmem_config); + if (IS_ERR(at24->nvmem)) { + pm_runtime_disable(dev); + if (!pm_runtime_status_suspended(dev)) + regulator_disable(at24->vcc_reg); + return PTR_ERR(at24->nvmem); + } + + /* + * Perform a one-byte test read to verify that the + * chip is functional. + */ + err = at24_read(at24, 0, &test_byte, 1); + if (err) { + pm_runtime_disable(dev); + if (!pm_runtime_status_suspended(dev)) + regulator_disable(at24->vcc_reg); + return -ENODEV; + } + + pm_runtime_idle(dev); + + if (writable) + dev_info(dev, "%u byte %s EEPROM, writable, %u bytes/write\n", + byte_len, client->name, at24->write_max); + else + dev_info(dev, "%u byte %s EEPROM, read-only\n", + byte_len, client->name); + + return 0; +} + +static int at24_remove(struct i2c_client *client) +{ + struct at24_data *at24 = i2c_get_clientdata(client); + + pm_runtime_disable(&client->dev); + if (!pm_runtime_status_suspended(&client->dev)) + regulator_disable(at24->vcc_reg); + pm_runtime_set_suspended(&client->dev); + + return 0; +} + +static int __maybe_unused at24_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct at24_data *at24 = i2c_get_clientdata(client); + + return regulator_disable(at24->vcc_reg); +} + +static int __maybe_unused at24_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct at24_data *at24 = i2c_get_clientdata(client); + + return regulator_enable(at24->vcc_reg); +} + +static const struct dev_pm_ops at24_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(at24_suspend, at24_resume, NULL) +}; + +static struct i2c_driver at24_driver = { + .driver = { + .name = "at24", + .pm = &at24_pm_ops, + .of_match_table = at24_of_match, + .acpi_match_table = ACPI_PTR(at24_acpi_ids), + }, + .probe_new = at24_probe, + .remove = at24_remove, + .id_table = at24_ids, +}; + +static int __init at24_init(void) +{ + if (!at24_io_limit) { + pr_err("at24: at24_io_limit must not be 0!\n"); + return -EINVAL; + } + + at24_io_limit = rounddown_pow_of_two(at24_io_limit); + return i2c_add_driver(&at24_driver); +} +module_init(at24_init); + +static void __exit at24_exit(void) +{ + i2c_del_driver(&at24_driver); +} +module_exit(at24_exit); + +MODULE_DESCRIPTION("Driver for most I2C EEPROMs"); +MODULE_AUTHOR("David Brownell and Wolfram Sang"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c new file mode 100644 index 000000000..3b7d8b758 --- /dev/null +++ b/drivers/misc/eeprom/at25.c @@ -0,0 +1,400 @@ +// 
SPDX-License-Identifier: GPL-2.0-or-later +/* + * at25.c -- support most SPI EEPROMs, such as Atmel AT25 models + * + * Copyright (C) 2006 David Brownell + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/sched.h> + +#include <linux/nvmem-provider.h> +#include <linux/spi/spi.h> +#include <linux/spi/eeprom.h> +#include <linux/property.h> + +/* + * NOTE: this is an *EEPROM* driver. The vagaries of product naming + * mean that some AT25 products are EEPROMs, and others are FLASH. + * Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver, + * not this one! + */ + +struct at25_data { + struct spi_device *spi; + struct mutex lock; + struct spi_eeprom chip; + unsigned addrlen; + struct nvmem_config nvmem_config; + struct nvmem_device *nvmem; +}; + +#define AT25_WREN 0x06 /* latch the write enable */ +#define AT25_WRDI 0x04 /* reset the write enable */ +#define AT25_RDSR 0x05 /* read status register */ +#define AT25_WRSR 0x01 /* write status register */ +#define AT25_READ 0x03 /* read byte(s) */ +#define AT25_WRITE 0x02 /* write byte(s)/sector */ + +#define AT25_SR_nRDY 0x01 /* nRDY = write-in-progress */ +#define AT25_SR_WEN 0x02 /* write enable (latched) */ +#define AT25_SR_BP0 0x04 /* BP for software writeprotect */ +#define AT25_SR_BP1 0x08 +#define AT25_SR_WPEN 0x80 /* writeprotect enable */ + +#define AT25_INSTR_BIT3 0x08 /* Additional address bit in instr */ + +#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */ + +/* Specs often allow 5 msec for a page write, sometimes 20 msec; + * it's important to recover from write timeouts. + */ +#define EE_TIMEOUT 25 + +/*-------------------------------------------------------------------------*/ + +#define io_limit PAGE_SIZE /* bytes */ + +static int at25_ee_read(void *priv, unsigned int offset, + void *val, size_t count) +{ + struct at25_data *at25 = priv; + char *buf = val; + u8 command[EE_MAXADDRLEN + 1]; + u8 *cp; + ssize_t status; + struct spi_transfer t[2]; + struct spi_message m; + u8 instr; + + if (unlikely(offset >= at25->chip.byte_len)) + return -EINVAL; + if ((offset + count) > at25->chip.byte_len) + count = at25->chip.byte_len - offset; + if (unlikely(!count)) + return -EINVAL; + + cp = command; + + instr = AT25_READ; + if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) + if (offset >= (1U << (at25->addrlen * 8))) + instr |= AT25_INSTR_BIT3; + *cp++ = instr; + + /* 8/16/24-bit address is written MSB first */ + switch (at25->addrlen) { + default: /* case 3 */ + *cp++ = offset >> 16; + fallthrough; + case 2: + *cp++ = offset >> 8; + fallthrough; + case 1: + case 0: /* can't happen: for better codegen */ + *cp++ = offset >> 0; + } + + spi_message_init(&m); + memset(t, 0, sizeof(t)); + + t[0].tx_buf = command; + t[0].len = at25->addrlen + 1; + spi_message_add_tail(&t[0], &m); + + t[1].rx_buf = buf; + t[1].len = count; + spi_message_add_tail(&t[1], &m); + + mutex_lock(&at25->lock); + + /* Read it all at once. 
+ * + * REVISIT that's potentially a problem with large chips, if + * other devices on the bus need to be accessed regularly or + * this chip is clocked very slowly + */ + status = spi_sync(at25->spi, &m); + dev_dbg(&at25->spi->dev, "read %zu bytes at %d --> %zd\n", + count, offset, status); + + mutex_unlock(&at25->lock); + return status; +} + +static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) +{ + struct at25_data *at25 = priv; + const char *buf = val; + int status = 0; + unsigned buf_size; + u8 *bounce; + + if (unlikely(off >= at25->chip.byte_len)) + return -EFBIG; + if ((off + count) > at25->chip.byte_len) + count = at25->chip.byte_len - off; + if (unlikely(!count)) + return -EINVAL; + + /* Temp buffer starts with command and address */ + buf_size = at25->chip.page_size; + if (buf_size > io_limit) + buf_size = io_limit; + bounce = kmalloc(buf_size + at25->addrlen + 1, GFP_KERNEL); + if (!bounce) + return -ENOMEM; + + /* For write, rollover is within the page ... so we write at + * most one page, then manually roll over to the next page. + */ + mutex_lock(&at25->lock); + do { + unsigned long timeout, retries; + unsigned segment; + unsigned offset = (unsigned) off; + u8 *cp = bounce; + int sr; + u8 instr; + + *cp = AT25_WREN; + status = spi_write(at25->spi, cp, 1); + if (status < 0) { + dev_dbg(&at25->spi->dev, "WREN --> %d\n", status); + break; + } + + instr = AT25_WRITE; + if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) + if (offset >= (1U << (at25->addrlen * 8))) + instr |= AT25_INSTR_BIT3; + *cp++ = instr; + + /* 8/16/24-bit address is written MSB first */ + switch (at25->addrlen) { + default: /* case 3 */ + *cp++ = offset >> 16; + fallthrough; + case 2: + *cp++ = offset >> 8; + fallthrough; + case 1: + case 0: /* can't happen: for better codegen */ + *cp++ = offset >> 0; + } + + /* Write as much of a page as we can */ + segment = buf_size - (offset % buf_size); + if (segment > count) + segment = count; + memcpy(cp, buf, segment); + status = spi_write(at25->spi, bounce, + segment + at25->addrlen + 1); + dev_dbg(&at25->spi->dev, "write %u bytes at %u --> %d\n", + segment, offset, status); + if (status < 0) + break; + + /* REVISIT this should detect (or prevent) failed writes + * to readonly sections of the EEPROM... 
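+		 * (the AT25_SR_BP0/AT25_SR_BP1 bits read back through
+		 * AT25_RDSR describe which part of the array is currently
+		 * software write-protected).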
+ */ + + /* Wait for non-busy status */ + timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT); + retries = 0; + do { + + sr = spi_w8r8(at25->spi, AT25_RDSR); + if (sr < 0 || (sr & AT25_SR_nRDY)) { + dev_dbg(&at25->spi->dev, + "rdsr --> %d (%02x)\n", sr, sr); + /* at HZ=100, this is sloooow */ + msleep(1); + continue; + } + if (!(sr & AT25_SR_nRDY)) + break; + } while (retries++ < 3 || time_before_eq(jiffies, timeout)); + + if ((sr < 0) || (sr & AT25_SR_nRDY)) { + dev_err(&at25->spi->dev, + "write %u bytes offset %u, timeout after %u msecs\n", + segment, offset, + jiffies_to_msecs(jiffies - + (timeout - EE_TIMEOUT))); + status = -ETIMEDOUT; + break; + } + + off += segment; + buf += segment; + count -= segment; + + } while (count > 0); + + mutex_unlock(&at25->lock); + + kfree(bounce); + return status; +} + +/*-------------------------------------------------------------------------*/ + +static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip) +{ + u32 val; + + memset(chip, 0, sizeof(*chip)); + strncpy(chip->name, "at25", sizeof(chip->name)); + + if (device_property_read_u32(dev, "size", &val) == 0 || + device_property_read_u32(dev, "at25,byte-len", &val) == 0) { + chip->byte_len = val; + } else { + dev_err(dev, "Error: missing \"size\" property\n"); + return -ENODEV; + } + + if (device_property_read_u32(dev, "pagesize", &val) == 0 || + device_property_read_u32(dev, "at25,page-size", &val) == 0) { + chip->page_size = val; + } else { + dev_err(dev, "Error: missing \"pagesize\" property\n"); + return -ENODEV; + } + + if (device_property_read_u32(dev, "at25,addr-mode", &val) == 0) { + chip->flags = (u16)val; + } else { + if (device_property_read_u32(dev, "address-width", &val)) { + dev_err(dev, + "Error: missing \"address-width\" property\n"); + return -ENODEV; + } + switch (val) { + case 9: + chip->flags |= EE_INSTR_BIT3_IS_ADDR; + fallthrough; + case 8: + chip->flags |= EE_ADDR1; + break; + case 16: + chip->flags |= EE_ADDR2; + break; + case 24: + chip->flags |= EE_ADDR3; + break; + default: + dev_err(dev, + "Error: bad \"address-width\" property: %u\n", + val); + return -ENODEV; + } + if (device_property_present(dev, "read-only")) + chip->flags |= EE_READONLY; + } + return 0; +} + +static int at25_probe(struct spi_device *spi) +{ + struct at25_data *at25 = NULL; + struct spi_eeprom chip; + int err; + int sr; + int addrlen; + + /* Chip description */ + if (!spi->dev.platform_data) { + err = at25_fw_to_chip(&spi->dev, &chip); + if (err) + return err; + } else + chip = *(struct spi_eeprom *)spi->dev.platform_data; + + /* For now we only support 8/16/24 bit addressing */ + if (chip.flags & EE_ADDR1) + addrlen = 1; + else if (chip.flags & EE_ADDR2) + addrlen = 2; + else if (chip.flags & EE_ADDR3) + addrlen = 3; + else { + dev_dbg(&spi->dev, "unsupported address type\n"); + return -EINVAL; + } + + /* Ping the chip ... the status register is pretty portable, + * unlike probing manufacturer IDs. We do expect that system + * firmware didn't write it in the past few milliseconds! 
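+	 *
+	 * spi_w8r8() below writes the single RDSR command byte and clocks
+	 * one status byte back; a ready chip answers with AT25_SR_nRDY
+	 * clear.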
+ */ + sr = spi_w8r8(spi, AT25_RDSR); + if (sr < 0 || sr & AT25_SR_nRDY) { + dev_dbg(&spi->dev, "rdsr --> %d (%02x)\n", sr, sr); + return -ENXIO; + } + + at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL); + if (!at25) + return -ENOMEM; + + mutex_init(&at25->lock); + at25->chip = chip; + at25->spi = spi; + spi_set_drvdata(spi, at25); + at25->addrlen = addrlen; + + at25->nvmem_config.type = NVMEM_TYPE_EEPROM; + at25->nvmem_config.name = dev_name(&spi->dev); + at25->nvmem_config.dev = &spi->dev; + at25->nvmem_config.read_only = chip.flags & EE_READONLY; + at25->nvmem_config.root_only = true; + at25->nvmem_config.owner = THIS_MODULE; + at25->nvmem_config.compat = true; + at25->nvmem_config.base_dev = &spi->dev; + at25->nvmem_config.reg_read = at25_ee_read; + at25->nvmem_config.reg_write = at25_ee_write; + at25->nvmem_config.priv = at25; + at25->nvmem_config.stride = 1; + at25->nvmem_config.word_size = 1; + at25->nvmem_config.size = chip.byte_len; + + at25->nvmem = devm_nvmem_register(&spi->dev, &at25->nvmem_config); + if (IS_ERR(at25->nvmem)) + return PTR_ERR(at25->nvmem); + + dev_info(&spi->dev, "%d %s %s eeprom%s, pagesize %u\n", + (chip.byte_len < 1024) ? chip.byte_len : (chip.byte_len / 1024), + (chip.byte_len < 1024) ? "Byte" : "KByte", + at25->chip.name, + (chip.flags & EE_READONLY) ? " (readonly)" : "", + at25->chip.page_size); + return 0; +} + +/*-------------------------------------------------------------------------*/ + +static const struct of_device_id at25_of_match[] = { + { .compatible = "atmel,at25", }, + { } +}; +MODULE_DEVICE_TABLE(of, at25_of_match); + +static struct spi_driver at25_driver = { + .driver = { + .name = "at25", + .of_match_table = at25_of_match, + }, + .probe = at25_probe, +}; + +module_spi_driver(at25_driver); + +MODULE_DESCRIPTION("Driver for most SPI EEPROMs"); +MODULE_AUTHOR("David Brownell"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:at25"); diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c new file mode 100644 index 000000000..f1f766b70 --- /dev/null +++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * EEPROMs access control driver for display configuration EEPROMs + * on DigsyMTC board. + * + * (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de> + * + * FIXME: this driver is used on a device-tree probed platform: it + * should be defined as a bit-banged SPI device and probed from the device + * tree and not like this with static grabbing of a few numbered GPIO + * lines at random. + * + * Add proper SPI and EEPROM in arch/powerpc/boot/dts/digsy_mtc.dts + * and delete this driver. 
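+ *
+ * A devicetree replacement would look roughly like the sketch below
+ * (node names and GPIO specifiers are illustrative, not taken from the
+ * real digsy_mtc.dts; the GPIO numbers mirror the defines below):
+ *
+ *	spi {
+ *		compatible = "spi-gpio";
+ *		#address-cells = <1>;
+ *		#size-cells = <0>;
+ *		sck-gpios = <&gpio 216 0>;
+ *		mosi-gpios = <&gpio 217 0>;
+ *		miso-gpios = <&gpio 249 0>;
+ *		cs-gpios = <&gpio 210 0>;
+ *
+ *		eeprom@0 {
+ *			compatible = "eeprom-93xx46";
+ *			reg = <0>;
+ *		};
+ *	};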
+ */
+
+#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_gpio.h>
+#include <linux/eeprom_93xx46.h>
+
+#define GPIO_EEPROM_CLK		216
+#define GPIO_EEPROM_CS		210
+#define GPIO_EEPROM_DI		217
+#define GPIO_EEPROM_DO		249
+#define GPIO_EEPROM_OE		255
+#define EE_SPI_BUS_NUM		1
+
+static void digsy_mtc_op_prepare(void *p)
+{
+	/* enable */
+	gpio_set_value(GPIO_EEPROM_OE, 0);
+}
+
+static void digsy_mtc_op_finish(void *p)
+{
+	/* disable */
+	gpio_set_value(GPIO_EEPROM_OE, 1);
+}
+
+struct eeprom_93xx46_platform_data digsy_mtc_eeprom_data = {
+	.flags		= EE_ADDR8,
+	.prepare	= digsy_mtc_op_prepare,
+	.finish		= digsy_mtc_op_finish,
+};
+
+static struct spi_gpio_platform_data eeprom_spi_gpio_data = {
+	.num_chipselect	= 1,
+};
+
+static struct platform_device digsy_mtc_eeprom = {
+	.name	= "spi_gpio",
+	.id	= EE_SPI_BUS_NUM,
+	.dev	= {
+		.platform_data	= &eeprom_spi_gpio_data,
+	},
+};
+
+static struct gpiod_lookup_table eeprom_spi_gpiod_table = {
+	.dev_id		= "spi_gpio",
+	.table		= {
+		GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CLK,
+			    "sck", GPIO_ACTIVE_HIGH),
+		GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_DI,
+			    "mosi", GPIO_ACTIVE_HIGH),
+		GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_DO,
+			    "miso", GPIO_ACTIVE_HIGH),
+		GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CS,
+			    "cs", GPIO_ACTIVE_HIGH),
+		{ },
+	},
+};
+
+static struct spi_board_info digsy_mtc_eeprom_info[] __initdata = {
+	{
+		.modalias	= "93xx46",
+		.max_speed_hz	= 1000000,
+		.bus_num	= EE_SPI_BUS_NUM,
+		.chip_select	= 0,
+		.mode		= SPI_MODE_0,
+		.platform_data	= &digsy_mtc_eeprom_data,
+	},
+};
+
+static int __init digsy_mtc_eeprom_devices_init(void)
+{
+	int ret;
+
+	ret = gpio_request_one(GPIO_EEPROM_OE, GPIOF_OUT_INIT_HIGH,
+			       "93xx46 EEPROMs OE");
+	if (ret) {
+		pr_err("can't request gpio %d\n", GPIO_EEPROM_OE);
+		return ret;
+	}
+	gpiod_add_lookup_table(&eeprom_spi_gpiod_table);
+	spi_register_board_info(digsy_mtc_eeprom_info,
+				ARRAY_SIZE(digsy_mtc_eeprom_info));
+	return platform_device_register(&digsy_mtc_eeprom);
+}
+device_initcall(digsy_mtc_eeprom_devices_init);
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
new file mode 100644
index 000000000..d9f90332a
--- /dev/null
+++ b/drivers/misc/eeprom/ee1004.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ee1004 - driver for DDR4 SPD EEPROMs
+ *
+ * Copyright (C) 2017-2019 Jean Delvare
+ *
+ * Based on the at24 driver:
+ * Copyright (C) 2005-2007 David Brownell
+ * Copyright (C) 2008 Wolfram Sang, Pengutronix
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+/*
+ * DDR4 memory modules use special EEPROMs following the Jedec EE1004
+ * specification. These are 512-byte EEPROMs using a single I2C address
+ * in the 0x50-0x57 range for data. One of two 256-byte pages is selected
+ * by writing a command to I2C address 0x36 or 0x37 on the same I2C bus.
+ *
+ * Therefore we need to request these 2 additional addresses, and serialize
+ * access to all such EEPROMs with a single mutex.
+ *
+ * We assume it is safe to read up to 32 bytes at once from these EEPROMs.
+ * We use SMBus access even if I2C is available: these EEPROMs are small
+ * enough, and reads from them are infrequent enough, that we favor
+ * simplicity over performance.
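+ *
+ * For example, a read at byte offset 300 first selects page 1
+ * (300 >> 8) by writing to address 0x37, then reads from in-page
+ * offset 44 (300 & 0xff) through the 0x50-0x57 data address.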
+ */
+
+#define EE1004_ADDR_SET_PAGE	0x36
+#define EE1004_EEPROM_SIZE	512
+#define EE1004_PAGE_SIZE	256
+#define EE1004_PAGE_SHIFT	8
+
+/*
+ * Mutex protects ee1004_set_page and ee1004_dev_count, and must be held
+ * from page selection to end of read.
+ */
+static DEFINE_MUTEX(ee1004_bus_lock);
+static struct i2c_client *ee1004_set_page[2];
+static unsigned int ee1004_dev_count;
+static int ee1004_current_page;
+
+static const struct i2c_device_id ee1004_ids[] = {
+	{ "ee1004", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, ee1004_ids);
+
+/*-------------------------------------------------------------------------*/
+
+static int ee1004_get_current_page(void)
+{
+	int err;
+
+	err = i2c_smbus_read_byte(ee1004_set_page[0]);
+	if (err == -ENXIO) {
+		/* Nack means page 1 is selected */
+		return 1;
+	}
+	if (err < 0) {
+		/* Anything else is a real error, bail out */
+		return err;
+	}
+
+	/* Ack means page 0 is selected, returned value meaningless */
+	return 0;
+}
+
+static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
+				  unsigned int offset, size_t count)
+{
+	int status;
+
+	if (count > I2C_SMBUS_BLOCK_MAX)
+		count = I2C_SMBUS_BLOCK_MAX;
+	/* Can't cross page boundaries */
+	if (unlikely(offset + count > EE1004_PAGE_SIZE))
+		count = EE1004_PAGE_SIZE - offset;
+
+	status = i2c_smbus_read_i2c_block_data_or_emulated(client, offset,
+							   count, buf);
+	dev_dbg(&client->dev, "read %zu@%d --> %d\n", count, offset, status);
+
+	return status;
+}
+
+static ssize_t ee1004_read(struct file *filp, struct kobject *kobj,
+			   struct bin_attribute *bin_attr,
+			   char *buf, loff_t off, size_t count)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct i2c_client *client = to_i2c_client(dev);
+	size_t requested = count;
+	int page;
+
+	if (unlikely(!count))
+		return count;
+
+	page = off >> EE1004_PAGE_SHIFT;
+	if (unlikely(page > 1))
+		return 0;
+	off &= (1 << EE1004_PAGE_SHIFT) - 1;
+
+	/*
+	 * Read data from chip, protecting against concurrent access to
+	 * other EE1004 SPD EEPROMs on the same adapter.
+	 */
+	mutex_lock(&ee1004_bus_lock);
+
+	while (count) {
+		int status;
+
+		/* Select page */
+		if (page != ee1004_current_page) {
+			/* Data is ignored */
+			status = i2c_smbus_write_byte(ee1004_set_page[page],
+						      0x00);
+			if (status == -ENXIO) {
+				/*
+				 * Don't give up just yet. Some memory
+				 * modules will select the page but not
+				 * ack the command. Check which page is
+ */ + if (ee1004_get_current_page() == page) + status = 0; + } + if (status < 0) { + dev_err(dev, "Failed to select page %d (%d)\n", + page, status); + mutex_unlock(&ee1004_bus_lock); + return status; + } + dev_dbg(dev, "Selected page %d\n", page); + ee1004_current_page = page; + } + + status = ee1004_eeprom_read(client, buf, off, count); + if (status < 0) { + mutex_unlock(&ee1004_bus_lock); + return status; + } + buf += status; + off += status; + count -= status; + + if (off == EE1004_PAGE_SIZE) { + page++; + off = 0; + } + } + + mutex_unlock(&ee1004_bus_lock); + + return requested; +} + +static const struct bin_attribute eeprom_attr = { + .attr = { + .name = "eeprom", + .mode = 0444, + }, + .size = EE1004_EEPROM_SIZE, + .read = ee1004_read, +}; + +static int ee1004_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int err, cnr = 0; + const char *slow = NULL; + + /* Make sure we can operate on this adapter */ + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_BYTE | + I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { + if (i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_BYTE | + I2C_FUNC_SMBUS_READ_WORD_DATA)) + slow = "word"; + else if (i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_BYTE | + I2C_FUNC_SMBUS_READ_BYTE_DATA)) + slow = "byte"; + else + return -EPFNOSUPPORT; + } + + /* Use 2 dummy devices for page select command */ + mutex_lock(&ee1004_bus_lock); + if (++ee1004_dev_count == 1) { + for (cnr = 0; cnr < 2; cnr++) { + ee1004_set_page[cnr] = i2c_new_dummy_device(client->adapter, + EE1004_ADDR_SET_PAGE + cnr); + if (IS_ERR(ee1004_set_page[cnr])) { + dev_err(&client->dev, + "address 0x%02x unavailable\n", + EE1004_ADDR_SET_PAGE + cnr); + err = PTR_ERR(ee1004_set_page[cnr]); + goto err_clients; + } + } + } else if (i2c_adapter_id(client->adapter) != + i2c_adapter_id(ee1004_set_page[0]->adapter)) { + dev_err(&client->dev, + "Driver only supports devices on a single I2C bus\n"); + err = -EOPNOTSUPP; + goto err_clients; + } + + /* Remember current page to avoid unneeded page select */ + err = ee1004_get_current_page(); + if (err < 0) + goto err_clients; + ee1004_current_page = err; + dev_dbg(&client->dev, "Currently selected page: %d\n", + ee1004_current_page); + mutex_unlock(&ee1004_bus_lock); + + /* Create the sysfs eeprom file */ + err = sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr); + if (err) + goto err_clients_lock; + + dev_info(&client->dev, + "%u byte EE1004-compliant SPD EEPROM, read-only\n", + EE1004_EEPROM_SIZE); + if (slow) + dev_notice(&client->dev, + "Falling back to %s reads, performance will suffer\n", + slow); + + return 0; + + err_clients_lock: + mutex_lock(&ee1004_bus_lock); + err_clients: + if (--ee1004_dev_count == 0) { + for (cnr--; cnr >= 0; cnr--) { + i2c_unregister_device(ee1004_set_page[cnr]); + ee1004_set_page[cnr] = NULL; + } + } + mutex_unlock(&ee1004_bus_lock); + + return err; +} + +static int ee1004_remove(struct i2c_client *client) +{ + int i; + + sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr); + + /* Remove page select clients if this is the last device */ + mutex_lock(&ee1004_bus_lock); + if (--ee1004_dev_count == 0) { + for (i = 0; i < 2; i++) { + i2c_unregister_device(ee1004_set_page[i]); + ee1004_set_page[i] = NULL; + } + } + mutex_unlock(&ee1004_bus_lock); + + return 0; +} + +/*-------------------------------------------------------------------------*/ + +static struct i2c_driver ee1004_driver = { + .driver = { + .name = "ee1004", + }, + .probe = ee1004_probe, + .remove = 
ee1004_remove, + .id_table = ee1004_ids, +}; +module_i2c_driver(ee1004_driver); + +MODULE_DESCRIPTION("Driver for EE1004-compliant DDR4 SPD EEPROMs"); +MODULE_AUTHOR("Jean Delvare"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c new file mode 100644 index 000000000..34fa385df --- /dev/null +++ b/drivers/misc/eeprom/eeprom.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and + * Philip Edelbrock <phil@netroedge.com> + * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com> + * Copyright (C) 2003 IBM Corp. + * Copyright (C) 2004 Jean Delvare <jdelvare@suse.de> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/capability.h> +#include <linux/jiffies.h> +#include <linux/i2c.h> +#include <linux/mutex.h> + +/* Addresses to scan */ +static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54, + 0x55, 0x56, 0x57, I2C_CLIENT_END }; + + +/* Size of EEPROM in bytes */ +#define EEPROM_SIZE 256 + +/* possible types of eeprom devices */ +enum eeprom_nature { + UNKNOWN, + VAIO, +}; + +/* Each client has this additional data */ +struct eeprom_data { + struct mutex update_lock; + u8 valid; /* bitfield, bit!=0 if slice is valid */ + unsigned long last_updated[8]; /* In jiffies, 8 slices */ + u8 data[EEPROM_SIZE]; /* Register values */ + enum eeprom_nature nature; +}; + + +static void eeprom_update_client(struct i2c_client *client, u8 slice) +{ + struct eeprom_data *data = i2c_get_clientdata(client); + int i; + + mutex_lock(&data->update_lock); + + if (!(data->valid & (1 << slice)) || + time_after(jiffies, data->last_updated[slice] + 300 * HZ)) { + dev_dbg(&client->dev, "Starting eeprom update, slice %u\n", slice); + + if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { + for (i = slice << 5; i < (slice + 1) << 5; i += 32) + if (i2c_smbus_read_i2c_block_data(client, i, + 32, data->data + i) + != 32) + goto exit; + } else { + for (i = slice << 5; i < (slice + 1) << 5; i += 2) { + int word = i2c_smbus_read_word_data(client, i); + if (word < 0) + goto exit; + data->data[i] = word & 0xff; + data->data[i + 1] = word >> 8; + } + } + data->last_updated[slice] = jiffies; + data->valid |= (1 << slice); + } +exit: + mutex_unlock(&data->update_lock); +} + +static ssize_t eeprom_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct i2c_client *client = kobj_to_i2c_client(kobj); + struct eeprom_data *data = i2c_get_clientdata(client); + u8 slice; + + /* Only refresh slices which contain requested bytes */ + for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++) + eeprom_update_client(client, slice); + + /* Hide Vaio private settings to regular users: + - BIOS passwords: bytes 0x00 to 0x0f + - UUID: bytes 0x10 to 0x1f + - Serial number: 0xc0 to 0xdf */ + if (data->nature == VAIO && !capable(CAP_SYS_ADMIN)) { + int i; + + for (i = 0; i < count; i++) { + if ((off + i <= 0x1f) || + (off + i >= 0xc0 && off + i <= 0xdf)) + buf[i] = 0; + else + buf[i] = data->data[off + i]; + } + } else { + memcpy(buf, &data->data[off], count); + } + + return count; +} + +static const struct bin_attribute eeprom_attr = { + .attr = { + .name = "eeprom", + .mode = S_IRUGO, + }, + .size = EEPROM_SIZE, + .read = eeprom_read, +}; + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int eeprom_detect(struct i2c_client 
*client, struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = client->adapter; + + /* EDID EEPROMs are often 24C00 EEPROMs, which answer to all + addresses 0x50-0x57, but we only care about 0x50. So decline + attaching to addresses >= 0x51 on DDC buses */ + if (!(adapter->class & I2C_CLASS_SPD) && client->addr >= 0x51) + return -ENODEV; + + /* There are four ways we can read the EEPROM data: + (1) I2C block reads (faster, but unsupported by most adapters) + (2) Word reads (128% overhead) + (3) Consecutive byte reads (88% overhead, unsafe) + (4) Regular byte data reads (265% overhead) + The third and fourth methods are not implemented by this driver + because all known adapters support one of the first two. */ + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_WORD_DATA) + && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) + return -ENODEV; + + strlcpy(info->type, "eeprom", I2C_NAME_SIZE); + + return 0; +} + +static int eeprom_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = client->adapter; + struct eeprom_data *data; + + data = devm_kzalloc(&client->dev, sizeof(struct eeprom_data), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + memset(data->data, 0xff, EEPROM_SIZE); + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + data->nature = UNKNOWN; + + /* Detect the Vaio nature of EEPROMs. + We use the "PCG-" or "VGN-" prefix as the signature. */ + if (client->addr == 0x57 + && i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA)) { + char name[4]; + + name[0] = i2c_smbus_read_byte_data(client, 0x80); + name[1] = i2c_smbus_read_byte_data(client, 0x81); + name[2] = i2c_smbus_read_byte_data(client, 0x82); + name[3] = i2c_smbus_read_byte_data(client, 0x83); + + if (!memcmp(name, "PCG-", 4) || !memcmp(name, "VGN-", 4)) { + dev_info(&client->dev, "Vaio EEPROM detected, " + "enabling privacy protection\n"); + data->nature = VAIO; + } + } + + /* Let the users know they are using deprecated driver */ + dev_notice(&client->dev, + "eeprom driver is deprecated, please use at24 instead\n"); + + /* create the sysfs eeprom file */ + return sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr); +} + +static int eeprom_remove(struct i2c_client *client) +{ + sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr); + + return 0; +} + +static const struct i2c_device_id eeprom_id[] = { + { "eeprom", 0 }, + { } +}; + +static struct i2c_driver eeprom_driver = { + .driver = { + .name = "eeprom", + }, + .probe = eeprom_probe, + .remove = eeprom_remove, + .id_table = eeprom_id, + + .class = I2C_CLASS_DDC | I2C_CLASS_SPD, + .detect = eeprom_detect, + .address_list = normal_i2c, +}; + +module_i2c_driver(eeprom_driver); + +MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " + "Philip Edelbrock <phil@netroedge.com> and " + "Greg Kroah-Hartman <greg@kroah.com>"); +MODULE_DESCRIPTION("I2C EEPROM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c new file mode 100644 index 000000000..9627294fe --- /dev/null +++ b/drivers/misc/eeprom/eeprom_93cx6.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2004 - 2006 rt2x00 SourceForge Project + * <http://rt2x00.serialmonkey.com> + * + * Module: eeprom_93cx6 + * Abstract: EEPROM reader routines for 93cx6 chipsets. + * Supported chipsets: 93c46 & 93c66. 
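+ *
+ * A typical caller fills in the bus width and the register accessor
+ * callbacks before reading; a minimal sketch (my_read/my_write are
+ * hypothetical driver callbacks, buf is a __le16 array):
+ *
+ *	struct eeprom_93cx6 ee = {
+ *		.data		= priv,
+ *		.register_read	= my_read,
+ *		.register_write	= my_write,
+ *		.width		= PCI_EEPROM_WIDTH_93C46,
+ *	};
+ *	eeprom_93cx6_multiread(&ee, 0, buf, 8);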
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/eeprom_93cx6.h> + +MODULE_AUTHOR("http://rt2x00.serialmonkey.com"); +MODULE_VERSION("1.0"); +MODULE_DESCRIPTION("EEPROM 93cx6 chip driver"); +MODULE_LICENSE("GPL"); + +static inline void eeprom_93cx6_pulse_high(struct eeprom_93cx6 *eeprom) +{ + eeprom->reg_data_clock = 1; + eeprom->register_write(eeprom); + + /* + * Add a short delay for the pulse to work. + * According to the specifications the "maximum minimum" + * time should be 450ns. + */ + ndelay(450); +} + +static inline void eeprom_93cx6_pulse_low(struct eeprom_93cx6 *eeprom) +{ + eeprom->reg_data_clock = 0; + eeprom->register_write(eeprom); + + /* + * Add a short delay for the pulse to work. + * According to the specifications the "maximum minimum" + * time should be 450ns. + */ + ndelay(450); +} + +static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom) +{ + /* + * Clear all flags, and enable chip select. + */ + eeprom->register_read(eeprom); + eeprom->reg_data_in = 0; + eeprom->reg_data_out = 0; + eeprom->reg_data_clock = 0; + eeprom->reg_chip_select = 1; + eeprom->drive_data = 1; + eeprom->register_write(eeprom); + + /* + * kick a pulse. + */ + eeprom_93cx6_pulse_high(eeprom); + eeprom_93cx6_pulse_low(eeprom); +} + +static void eeprom_93cx6_cleanup(struct eeprom_93cx6 *eeprom) +{ + /* + * Clear chip_select and data_in flags. + */ + eeprom->register_read(eeprom); + eeprom->reg_data_in = 0; + eeprom->reg_chip_select = 0; + eeprom->register_write(eeprom); + + /* + * kick a pulse. + */ + eeprom_93cx6_pulse_high(eeprom); + eeprom_93cx6_pulse_low(eeprom); +} + +static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom, + const u16 data, const u16 count) +{ + unsigned int i; + + eeprom->register_read(eeprom); + + /* + * Clear data flags. + */ + eeprom->reg_data_in = 0; + eeprom->reg_data_out = 0; + eeprom->drive_data = 1; + + /* + * Start writing all bits. + */ + for (i = count; i > 0; i--) { + /* + * Check if this bit needs to be set. + */ + eeprom->reg_data_in = !!(data & (1 << (i - 1))); + + /* + * Write the bit to the eeprom register. + */ + eeprom->register_write(eeprom); + + /* + * Kick a pulse. + */ + eeprom_93cx6_pulse_high(eeprom); + eeprom_93cx6_pulse_low(eeprom); + } + + eeprom->reg_data_in = 0; + eeprom->register_write(eeprom); +} + +static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom, + u16 *data, const u16 count) +{ + unsigned int i; + u16 buf = 0; + + eeprom->register_read(eeprom); + + /* + * Clear data flags. + */ + eeprom->reg_data_in = 0; + eeprom->reg_data_out = 0; + eeprom->drive_data = 0; + + /* + * Start reading all bits. + */ + for (i = count; i > 0; i--) { + eeprom_93cx6_pulse_high(eeprom); + + eeprom->register_read(eeprom); + + /* + * Clear data_in flag. + */ + eeprom->reg_data_in = 0; + + /* + * Read if the bit has been set. + */ + if (eeprom->reg_data_out) + buf |= (1 << (i - 1)); + + eeprom_93cx6_pulse_low(eeprom); + } + + *data = buf; +} + +/** + * eeprom_93cx6_read - Read a word from eeprom + * @eeprom: Pointer to eeprom structure + * @word: Word index from where we should start reading + * @data: target pointer where the information will have to be stored + * + * This function will read the eeprom data as host-endian word + * into the given data pointer. 
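+ *
+ * For a 93c46 (width 6), reading word 3 for instance sends
+ * (PCI_EEPROM_READ_OPCODE << 6) | 3 as a 9-bit command
+ * (PCI_EEPROM_WIDTH_OPCODE + width bits) before clocking the
+ * 16 data bits back in.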
+ */ +void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word, + u16 *data) +{ + u16 command; + + /* + * Initialize the eeprom register + */ + eeprom_93cx6_startup(eeprom); + + /* + * Select the read opcode and the word to be read. + */ + command = (PCI_EEPROM_READ_OPCODE << eeprom->width) | word; + eeprom_93cx6_write_bits(eeprom, command, + PCI_EEPROM_WIDTH_OPCODE + eeprom->width); + + /* + * Read the requested 16 bits. + */ + eeprom_93cx6_read_bits(eeprom, data, 16); + + /* + * Cleanup eeprom register. + */ + eeprom_93cx6_cleanup(eeprom); +} +EXPORT_SYMBOL_GPL(eeprom_93cx6_read); + +/** + * eeprom_93cx6_multiread - Read multiple words from eeprom + * @eeprom: Pointer to eeprom structure + * @word: Word index from where we should start reading + * @data: target pointer where the information will have to be stored + * @words: Number of words that should be read. + * + * This function will read all requested words from the eeprom, + * this is done by calling eeprom_93cx6_read() multiple times. + * But with the additional change that while the eeprom_93cx6_read + * will return host ordered bytes, this method will return little + * endian words. + */ +void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word, + __le16 *data, const u16 words) +{ + unsigned int i; + u16 tmp; + + for (i = 0; i < words; i++) { + tmp = 0; + eeprom_93cx6_read(eeprom, word + i, &tmp); + data[i] = cpu_to_le16(tmp); + } +} +EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread); + +/** + * eeprom_93cx6_readb - Read a byte from eeprom + * @eeprom: Pointer to eeprom structure + * @byte: Byte index from where we should start reading + * @data: target pointer where the information will have to be stored + * + * This function will read a byte of the eeprom data + * into the given data pointer. + */ +void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, const u8 byte, + u8 *data) +{ + u16 command; + u16 tmp; + + /* + * Initialize the eeprom register + */ + eeprom_93cx6_startup(eeprom); + + /* + * Select the read opcode and the byte to be read. + */ + command = (PCI_EEPROM_READ_OPCODE << (eeprom->width + 1)) | byte; + eeprom_93cx6_write_bits(eeprom, command, + PCI_EEPROM_WIDTH_OPCODE + eeprom->width + 1); + + /* + * Read the requested 8 bits. + */ + eeprom_93cx6_read_bits(eeprom, &tmp, 8); + *data = tmp & 0xff; + + /* + * Cleanup eeprom register. + */ + eeprom_93cx6_cleanup(eeprom); +} +EXPORT_SYMBOL_GPL(eeprom_93cx6_readb); + +/** + * eeprom_93cx6_multireadb - Read multiple bytes from eeprom + * @eeprom: Pointer to eeprom structure + * @byte: Index from where we should start reading + * @data: target pointer where the information will have to be stored + * @bytes: Number of bytes that should be read. + * + * This function will read all requested bytes from the eeprom, + * this is done by calling eeprom_93cx6_readb() multiple times. + */ +void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom, const u8 byte, + u8 *data, const u16 bytes) +{ + unsigned int i; + + for (i = 0; i < bytes; i++) + eeprom_93cx6_readb(eeprom, byte + i, &data[i]); +} +EXPORT_SYMBOL_GPL(eeprom_93cx6_multireadb); + +/** + * eeprom_93cx6_wren - set the write enable state + * @eeprom: Pointer to eeprom structure + * @enable: true to enable writes, otherwise disable writes + * + * Set the EEPROM write enable state to either allow or deny + * writes depending on the @enable value. 
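+ *
+ * Typical usage (sketch) brackets a write with enable/disable:
+ *
+ *	eeprom_93cx6_wren(&ee, true);
+ *	eeprom_93cx6_write(&ee, addr, value);
+ *	eeprom_93cx6_wren(&ee, false);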
+ */
+void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable)
+{
+	u16 command;
+
+	/* start the command */
+	eeprom_93cx6_startup(eeprom);
+
+	/* create command to enable/disable */
+
+	command = enable ? PCI_EEPROM_EWEN_OPCODE : PCI_EEPROM_EWDS_OPCODE;
+	command <<= (eeprom->width - 2);
+
+	eeprom_93cx6_write_bits(eeprom, command,
+				PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+	eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_wren);
+
+/**
+ * eeprom_93cx6_write - write data to the EEPROM
+ * @eeprom: Pointer to eeprom structure
+ * @addr: Address to write data to.
+ * @data: The data to write to address @addr.
+ *
+ * Write the @data to the specified @addr in the EEPROM and
+ * wait for the device to finish writing.
+ *
+ * Note, since we do not expect a large number of write operations
+ * we delay between parts of the operation to avoid using excessive
+ * amounts of CPU time busy waiting.
+ */
+void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data)
+{
+	int timeout = 100;
+	u16 command;
+
+	/* start the command */
+	eeprom_93cx6_startup(eeprom);
+
+	command = PCI_EEPROM_WRITE_OPCODE << eeprom->width;
+	command |= addr;
+
+	/* send write command */
+	eeprom_93cx6_write_bits(eeprom, command,
+				PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+	/* send data */
+	eeprom_93cx6_write_bits(eeprom, data, 16);
+
+	/* get ready to check for busy */
+	eeprom->drive_data = 0;
+	eeprom->reg_chip_select = 1;
+	eeprom->register_write(eeprom);
+
+	/* wait at least 250ns to get DO to be the busy signal */
+	usleep_range(1000, 2000);
+
+	/* wait for DO to go high to signify finish */
+
+	while (true) {
+		eeprom->register_read(eeprom);
+
+		if (eeprom->reg_data_out)
+			break;
+
+		usleep_range(1000, 2000);
+
+		if (--timeout <= 0) {
+			printk(KERN_ERR "%s: timeout\n", __func__);
+			break;
+		}
+	}
+
+	eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_write);
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
new file mode 100644
index 000000000..6e5f544c9
--- /dev/null
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for 93xx46 EEPROMs
+ *
+ * (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/nvmem-provider.h>
+#include <linux/eeprom_93xx46.h>
+
+#define OP_START	0x4
+#define OP_WRITE	(OP_START | 0x1)
+#define OP_READ		(OP_START | 0x2)
+#define ADDR_EWDS	0x00
+#define ADDR_ERAL	0x20
+#define ADDR_EWEN	0x30
+
+struct eeprom_93xx46_devtype_data {
+	unsigned int quirks;
+};
+
+static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
+	.quirks = EEPROM_93XX46_QUIRK_SINGLE_WORD_READ |
+		  EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
+};
+
+static const struct eeprom_93xx46_devtype_data microchip_93lc46b_data = {
+	.quirks = EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE,
+};
+
+struct eeprom_93xx46_dev {
+	struct spi_device *spi;
+	struct eeprom_93xx46_platform_data *pdata;
+	struct mutex lock;
+	struct nvmem_config nvmem_config;
+	struct nvmem_device *nvmem;
+	int addrlen;
+	int size;
+};
+
+static inline bool has_quirk_single_word_read(struct eeprom_93xx46_dev *edev)
+{
+	return edev->pdata->quirks &
+		EEPROM_93XX46_QUIRK_SINGLE_WORD_READ;
+}
+
+static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
+{
+	return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
+}
+
+static inline bool has_quirk_extra_read_cycle(struct eeprom_93xx46_dev *edev)
+{
+	return edev->pdata->quirks & EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE;
+}
+
+static int eeprom_93xx46_read(void *priv, unsigned int off,
+			      void *val, size_t count)
+{
+	struct eeprom_93xx46_dev *edev = priv;
+	char *buf = val;
+	int err = 0;
+
+	if (unlikely(off >= edev->size))
+		return 0;
+	if ((off + count) > edev->size)
+		count = edev->size - off;
+	if (unlikely(!count))
+		return count;
+
+	mutex_lock(&edev->lock);
+
+	if (edev->pdata->prepare)
+		edev->pdata->prepare(edev);
+
+	while (count) {
+		struct spi_message m;
+		struct spi_transfer t[2] = { { 0 } };
+		u16 cmd_addr = OP_READ << edev->addrlen;
+		size_t nbytes = count;
+		int bits;
+
+		if (edev->addrlen == 7) {
+			cmd_addr |= off & 0x7f;
+			bits = 10;
+			if (has_quirk_single_word_read(edev))
+				nbytes = 1;
+		} else {
+			cmd_addr |= (off >> 1) & 0x3f;
+			bits = 9;
+			if (has_quirk_single_word_read(edev))
+				nbytes = 2;
+		}
+
+		dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
+			cmd_addr, edev->spi->max_speed_hz);
+
+		if (has_quirk_extra_read_cycle(edev)) {
+			cmd_addr <<= 1;
+			bits += 1;
+		}
+
+		spi_message_init(&m);
+
+		t[0].tx_buf = (char *)&cmd_addr;
+		t[0].len = 2;
+		t[0].bits_per_word = bits;
+		spi_message_add_tail(&t[0], &m);
+
+		t[1].rx_buf = buf;
+		t[1].len = nbytes;
+		t[1].bits_per_word = 8;
+		spi_message_add_tail(&t[1], &m);
+
+		err = spi_sync(edev->spi, &m);
+		/* have to wait at least Tcsl ns */
+		ndelay(250);
+
+		if (err) {
+			dev_err(&edev->spi->dev, "read %zu bytes at %d: error %d\n",
+				nbytes, (int)off, err);
+			break;
+		}
+
+		buf += nbytes;
+		off += nbytes;
+		count -= nbytes;
+	}
+
+	if (edev->pdata->finish)
+		edev->pdata->finish(edev);
+
+	mutex_unlock(&edev->lock);
+
+	return err;
+}
+
+static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
+{
+	struct spi_message m;
+	struct spi_transfer t;
+	int bits, ret;
+	u16 cmd_addr;
+
+	cmd_addr = OP_START << edev->addrlen;
+	if (edev->addrlen == 7) {
+		cmd_addr |= (is_on ? ADDR_EWEN : ADDR_EWDS) << 1;
+		bits = 10;
+	} else {
+		cmd_addr |= (is_on ? ADDR_EWEN : ADDR_EWDS);
+		bits = 9;
+	}
+
+	if (has_quirk_instruction_length(edev)) {
+		cmd_addr <<= 2;
+		bits += 2;
+	}
+
+	dev_dbg(&edev->spi->dev, "ew%s cmd 0x%04x, %d bits\n",
+		is_on ? "en" : "ds", cmd_addr, bits);
+
+	spi_message_init(&m);
+	memset(&t, 0, sizeof(t));
+
+	t.tx_buf = &cmd_addr;
+	t.len = 2;
+	t.bits_per_word = bits;
+	spi_message_add_tail(&t, &m);
+
+	mutex_lock(&edev->lock);
+
+	if (edev->pdata->prepare)
+		edev->pdata->prepare(edev);
+
+	ret = spi_sync(edev->spi, &m);
+	/* have to wait at least Tcsl ns */
+	ndelay(250);
+	if (ret)
+		dev_err(&edev->spi->dev, "erase/write %sable error %d\n",
+			is_on ?
"en" : "dis", ret); + + if (edev->pdata->finish) + edev->pdata->finish(edev); + + mutex_unlock(&edev->lock); + return ret; +} + +static ssize_t +eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev, + const char *buf, unsigned off) +{ + struct spi_message m; + struct spi_transfer t[2]; + int bits, data_len, ret; + u16 cmd_addr; + + cmd_addr = OP_WRITE << edev->addrlen; + + if (edev->addrlen == 7) { + cmd_addr |= off & 0x7f; + bits = 10; + data_len = 1; + } else { + cmd_addr |= (off >> 1) & 0x3f; + bits = 9; + data_len = 2; + } + + dev_dbg(&edev->spi->dev, "write cmd 0x%x\n", cmd_addr); + + spi_message_init(&m); + memset(t, 0, sizeof(t)); + + t[0].tx_buf = (char *)&cmd_addr; + t[0].len = 2; + t[0].bits_per_word = bits; + spi_message_add_tail(&t[0], &m); + + t[1].tx_buf = buf; + t[1].len = data_len; + t[1].bits_per_word = 8; + spi_message_add_tail(&t[1], &m); + + ret = spi_sync(edev->spi, &m); + /* have to wait program cycle time Twc ms */ + mdelay(6); + return ret; +} + +static int eeprom_93xx46_write(void *priv, unsigned int off, + void *val, size_t count) +{ + struct eeprom_93xx46_dev *edev = priv; + char *buf = val; + int i, ret, step = 1; + + if (unlikely(off >= edev->size)) + return -EFBIG; + if ((off + count) > edev->size) + count = edev->size - off; + if (unlikely(!count)) + return count; + + /* only write even number of bytes on 16-bit devices */ + if (edev->addrlen == 6) { + step = 2; + count &= ~1; + } + + /* erase/write enable */ + ret = eeprom_93xx46_ew(edev, 1); + if (ret) + return ret; + + mutex_lock(&edev->lock); + + if (edev->pdata->prepare) + edev->pdata->prepare(edev); + + for (i = 0; i < count; i += step) { + ret = eeprom_93xx46_write_word(edev, &buf[i], off + i); + if (ret) { + dev_err(&edev->spi->dev, "write failed at %d: %d\n", + (int)off + i, ret); + break; + } + } + + if (edev->pdata->finish) + edev->pdata->finish(edev); + + mutex_unlock(&edev->lock); + + /* erase/write disable */ + eeprom_93xx46_ew(edev, 0); + return ret; +} + +static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev) +{ + struct eeprom_93xx46_platform_data *pd = edev->pdata; + struct spi_message m; + struct spi_transfer t; + int bits, ret; + u16 cmd_addr; + + cmd_addr = OP_START << edev->addrlen; + if (edev->addrlen == 7) { + cmd_addr |= ADDR_ERAL << 1; + bits = 10; + } else { + cmd_addr |= ADDR_ERAL; + bits = 9; + } + + if (has_quirk_instruction_length(edev)) { + cmd_addr <<= 2; + bits += 2; + } + + dev_dbg(&edev->spi->dev, "eral cmd 0x%04x, %d bits\n", cmd_addr, bits); + + spi_message_init(&m); + memset(&t, 0, sizeof(t)); + + t.tx_buf = &cmd_addr; + t.len = 2; + t.bits_per_word = bits; + spi_message_add_tail(&t, &m); + + mutex_lock(&edev->lock); + + if (edev->pdata->prepare) + edev->pdata->prepare(edev); + + ret = spi_sync(edev->spi, &m); + if (ret) + dev_err(&edev->spi->dev, "erase error %d\n", ret); + /* have to wait erase cycle time Tec ms */ + mdelay(6); + + if (pd->finish) + pd->finish(edev); + + mutex_unlock(&edev->lock); + return ret; +} + +static ssize_t eeprom_93xx46_store_erase(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct eeprom_93xx46_dev *edev = dev_get_drvdata(dev); + int erase = 0, ret; + + sscanf(buf, "%d", &erase); + if (erase) { + ret = eeprom_93xx46_ew(edev, 1); + if (ret) + return ret; + ret = eeprom_93xx46_eral(edev); + if (ret) + return ret; + ret = eeprom_93xx46_ew(edev, 0); + if (ret) + return ret; + } + return count; +} +static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase); + +static void 
select_assert(void *context) +{ + struct eeprom_93xx46_dev *edev = context; + + gpiod_set_value_cansleep(edev->pdata->select, 1); +} + +static void select_deassert(void *context) +{ + struct eeprom_93xx46_dev *edev = context; + + gpiod_set_value_cansleep(edev->pdata->select, 0); +} + +static const struct of_device_id eeprom_93xx46_of_table[] = { + { .compatible = "eeprom-93xx46", }, + { .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, }, + { .compatible = "microchip,93lc46b", .data = µchip_93lc46b_data, }, + {} +}; +MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table); + +static int eeprom_93xx46_probe_dt(struct spi_device *spi) +{ + const struct of_device_id *of_id = + of_match_device(eeprom_93xx46_of_table, &spi->dev); + struct device_node *np = spi->dev.of_node; + struct eeprom_93xx46_platform_data *pd; + u32 tmp; + int ret; + + pd = devm_kzalloc(&spi->dev, sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + + ret = of_property_read_u32(np, "data-size", &tmp); + if (ret < 0) { + dev_err(&spi->dev, "data-size property not found\n"); + return ret; + } + + if (tmp == 8) { + pd->flags |= EE_ADDR8; + } else if (tmp == 16) { + pd->flags |= EE_ADDR16; + } else { + dev_err(&spi->dev, "invalid data-size (%d)\n", tmp); + return -EINVAL; + } + + if (of_property_read_bool(np, "read-only")) + pd->flags |= EE_READONLY; + + pd->select = devm_gpiod_get_optional(&spi->dev, "select", + GPIOD_OUT_LOW); + if (IS_ERR(pd->select)) + return PTR_ERR(pd->select); + + pd->prepare = select_assert; + pd->finish = select_deassert; + gpiod_direction_output(pd->select, 0); + + if (of_id->data) { + const struct eeprom_93xx46_devtype_data *data = of_id->data; + + pd->quirks = data->quirks; + } + + spi->dev.platform_data = pd; + + return 0; +} + +static int eeprom_93xx46_probe(struct spi_device *spi) +{ + struct eeprom_93xx46_platform_data *pd; + struct eeprom_93xx46_dev *edev; + int err; + + if (spi->dev.of_node) { + err = eeprom_93xx46_probe_dt(spi); + if (err < 0) + return err; + } + + pd = spi->dev.platform_data; + if (!pd) { + dev_err(&spi->dev, "missing platform data\n"); + return -ENODEV; + } + + edev = devm_kzalloc(&spi->dev, sizeof(*edev), GFP_KERNEL); + if (!edev) + return -ENOMEM; + + if (pd->flags & EE_ADDR8) + edev->addrlen = 7; + else if (pd->flags & EE_ADDR16) + edev->addrlen = 6; + else { + dev_err(&spi->dev, "unspecified address type\n"); + return -EINVAL; + } + + mutex_init(&edev->lock); + + edev->spi = spi; + edev->pdata = pd; + + edev->size = 128; + edev->nvmem_config.type = NVMEM_TYPE_EEPROM; + edev->nvmem_config.name = dev_name(&spi->dev); + edev->nvmem_config.dev = &spi->dev; + edev->nvmem_config.read_only = pd->flags & EE_READONLY; + edev->nvmem_config.root_only = true; + edev->nvmem_config.owner = THIS_MODULE; + edev->nvmem_config.compat = true; + edev->nvmem_config.base_dev = &spi->dev; + edev->nvmem_config.reg_read = eeprom_93xx46_read; + edev->nvmem_config.reg_write = eeprom_93xx46_write; + edev->nvmem_config.priv = edev; + edev->nvmem_config.stride = 4; + edev->nvmem_config.word_size = 1; + edev->nvmem_config.size = edev->size; + + edev->nvmem = devm_nvmem_register(&spi->dev, &edev->nvmem_config); + if (IS_ERR(edev->nvmem)) + return PTR_ERR(edev->nvmem); + + dev_info(&spi->dev, "%d-bit eeprom %s\n", + (pd->flags & EE_ADDR8) ? 8 : 16, + (pd->flags & EE_READONLY) ? 
"(readonly)" : ""); + + if (!(pd->flags & EE_READONLY)) { + if (device_create_file(&spi->dev, &dev_attr_erase)) + dev_err(&spi->dev, "can't create erase interface\n"); + } + + spi_set_drvdata(spi, edev); + return 0; +} + +static int eeprom_93xx46_remove(struct spi_device *spi) +{ + struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi); + + if (!(edev->pdata->flags & EE_READONLY)) + device_remove_file(&spi->dev, &dev_attr_erase); + + return 0; +} + +static struct spi_driver eeprom_93xx46_driver = { + .driver = { + .name = "93xx46", + .of_match_table = of_match_ptr(eeprom_93xx46_of_table), + }, + .probe = eeprom_93xx46_probe, + .remove = eeprom_93xx46_remove, +}; + +module_spi_driver(eeprom_93xx46_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs"); +MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>"); +MODULE_ALIAS("spi:93xx46"); +MODULE_ALIAS("spi:eeprom-93xx46"); diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c new file mode 100644 index 000000000..6a456645e --- /dev/null +++ b/drivers/misc/eeprom/idt_89hpesx.c @@ -0,0 +1,1622 @@ +/* + * This file is provided under a GPLv2 license. When using or + * redistributing this file, you may do so under that license. + * + * GPL LICENSE SUMMARY + * + * Copyright (C) 2016 T-Platforms. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, it can be found <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * IDT PCIe-switch NTB Linux driver + * + * Contact Information: + * Serge Semin <fancer.lancer@gmail.com>, <Sergey.Semin@t-platforms.ru> + */ +/* + * NOTE of the IDT 89HPESx SMBus-slave interface driver + * This driver primarily is developed to have an access to EEPROM device of + * IDT PCIe-switches. IDT provides a simple SMBus interface to perform IO- + * operations from/to EEPROM, which is located at private (so called Master) + * SMBus of switches. 
Using that interface, the driver creates a simple
+ * binary sysfs-file in the device directory:
+ * /sys/bus/i2c/devices/<bus>-<devaddr>/eeprom
+ * If the read-only flag is specified in the dts-node of the device
+ * description, user-space applications won't be able to write to the
+ * EEPROM sysfs-node.
+ * Additionally, the IDT 89HPESx SMBus interface has the ability to read and
+ * write data of the device CSRs. This driver exposes a debugfs file to
+ * perform simple IO operations using that ability, for basic debugging
+ * purposes. In particular, the following file is created in the
+ * driver-specific debugfs directory:
+ * /sys/kernel/debug/idt_csr/
+ * The format of the debugfs-node is:
+ * $ cat /sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>;
+ * <CSR address>:<CSR value>
+ * So reading the content of the file gives the current CSR address and its
+ * value. If a user-space application wishes to change the current CSR
+ * address, it can just write a proper value to the debugfs file:
+ * $ echo "<CSR address>" > /sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>
+ * If it wants to change the CSR value as well, the format of the write
+ * operation is:
+ * $ echo "<CSR address>:<CSR value>" > \
+ *	/sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>;
+ * The CSR address and value can be in hexadecimal, decimal or octal format.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/debugfs.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/i2c.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+
+#define IDT_NAME		"89hpesx"
+#define IDT_89HPESX_DESC	"IDT 89HPESx SMBus-slave interface driver"
+#define IDT_89HPESX_VER		"1.0"
+
+MODULE_DESCRIPTION(IDT_89HPESX_DESC);
+MODULE_VERSION(IDT_89HPESX_VER);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("T-platforms");
+
+/*
+ * csr_dbgdir - CSR read/write operations Debugfs directory
+ */
+static struct dentry *csr_dbgdir;
+
+/*
+ * struct idt_89hpesx_dev - IDT 89HPESx device data structure
+ * @eesize:	Size of EEPROM in bytes (calculated from "idt,eecompatible")
+ * @eero:	EEPROM Read-only flag
+ * @eeaddr:	EEPROM custom address
+ *
+ * @inieecmd:	Initial cmd value for EEPROM read/write operations
+ * @inicsrcmd:	Initial cmd value for CSR read/write operations
+ * @iniccode:	Initial command code value for IO-operations
+ *
+ * @csr:	CSR address to perform read operation
+ *
+ * @smb_write:	SMBus write method
+ * @smb_read:	SMBus read method
+ * @smb_mtx:	SMBus mutex
+ *
+ * @client:	i2c client used to perform IO operations
+ *
+ * @ee_file:	EEPROM read/write sysfs-file
+ * @csr_dir:	CSR read/write operations debugfs-directory
+ */
+struct idt_smb_seq;
+struct idt_89hpesx_dev {
+	u32 eesize;
+	bool eero;
+	u8 eeaddr;
+
+	u8 inieecmd;
+	u8 inicsrcmd;
+	u8 iniccode;
+
+	u16 csr;
+
+	int (*smb_write)(struct idt_89hpesx_dev *, const struct idt_smb_seq *);
+	int (*smb_read)(struct idt_89hpesx_dev *, struct idt_smb_seq *);
+	struct mutex smb_mtx;
+
+	struct i2c_client *client;
+
+	struct bin_attribute *ee_file;
+	struct dentry *csr_dir;
+};
+
+/*
+ * struct idt_smb_seq - sequence of data to be read/written from/to IDT 89HPESx
+ * @ccode:	SMBus command code
+ * @bytecnt:	Byte count of operation
+ * @data:	Data to be written
+ */
+struct idt_smb_seq {
+	u8 ccode;
+	u8 bytecnt;
+	u8 *data;
+};
+
+/*
+ * struct idt_eeprom_seq - sequence of data to be read/written from/to EEPROM
+ * @cmd:	Transaction CMD
+ * @eeaddr:	EEPROM custom address
+ * @memaddr:	Internal memory address of EEPROM
+ * @data:	Data to be written at the memory address
+ */
+struct idt_eeprom_seq {
+	u8 cmd;
+	u8 eeaddr;
+	u16 memaddr;
+	u8 data;
+} __packed;
+
+/*
+ * struct idt_csr_seq - sequence of data to be read/written from/to CSR
+ * @cmd:	Transaction CMD
+ * @csraddr:	Internal IDT device CSR address
+ * @data:	Data to be read/written from/to the CSR address
+ */
+struct idt_csr_seq {
+	u8 cmd;
+	u16 csraddr;
+	u32 data;
+} __packed;
+
+/*
+ * SMBus command code macros
+ * @CCODE_END:		Indicates the end of a transaction
+ * @CCODE_START:	Indicates the start of a transaction
+ * @CCODE_CSR:		CSR read/write transaction
+ * @CCODE_EEPROM:	EEPROM read/write transaction
+ * @CCODE_BYTE:		Supplied data has BYTE length
+ * @CCODE_WORD:		Supplied data has WORD length
+ * @CCODE_BLOCK:	Supplied data has variable length, passed in the
+ *			bytecnt byte right after the CCODE byte
+ * @CCODE_PEC:		Add a Packet Error Checking byte to the transaction
+ */
+#define CCODE_END	((u8)0x01)
+#define CCODE_START	((u8)0x02)
+#define CCODE_CSR	((u8)0x00)
+#define CCODE_EEPROM	((u8)0x04)
+#define CCODE_BYTE	((u8)0x00)
+#define CCODE_WORD	((u8)0x20)
+#define CCODE_BLOCK	((u8)0x40)
+#define CCODE_PEC	((u8)0x80)
+
+/*
+ * EEPROM command macros
+ * @EEPROM_OP_WRITE:	EEPROM write operation
+ * @EEPROM_OP_READ:	EEPROM read operation
+ * @EEPROM_USA:		Use the specified address of the EEPROM
+ * @EEPROM_NAERR:	EEPROM device is not ready to respond
+ * @EEPROM_LAERR:	EEPROM arbitration loss error
+ * @EEPROM_MSS:		EEPROM misplaced start & stop bits error
+ * @EEPROM_WR_CNT:	Byte count of a write operation
+ * @EEPROM_WRRD_CNT:	Byte count to write before reading
+ * @EEPROM_RD_CNT:	Byte count of a read operation
+ * @EEPROM_DEF_SIZE:	Fallback size of EEPROM
+ * @EEPROM_DEF_ADDR:	Default EEPROM address
+ * @EEPROM_TOUT:	Timeout before retrying a read operation if the
+ *			EEPROM is busy
+ */
+#define EEPROM_OP_WRITE	((u8)0x00)
+#define EEPROM_OP_READ	((u8)0x01)
+#define EEPROM_USA	((u8)0x02)
+#define EEPROM_NAERR	((u8)0x08)
+#define EEPROM_LAERR	((u8)0x10)
+#define EEPROM_MSS	((u8)0x20)
+#define EEPROM_WR_CNT	((u8)5)
+#define EEPROM_WRRD_CNT	((u8)4)
+#define EEPROM_RD_CNT	((u8)5)
+#define EEPROM_DEF_SIZE	((u16)4096)
+#define EEPROM_DEF_ADDR	((u8)0x50)
+#define EEPROM_TOUT	(100)
+
+/*
+ * CSR command macros
+ * @CSR_DWE:		Enable all four bytes of the operation
+ * @CSR_OP_WRITE:	CSR write operation
+ * @CSR_OP_READ:	CSR read operation
+ * @CSR_RERR:		Read operation error
+ * @CSR_WERR:		Write operation error
+ * @CSR_WR_CNT:		Byte count of a write operation
+ * @CSR_WRRD_CNT:	Byte count to write before reading
+ * @CSR_RD_CNT:		Byte count of a read operation
+ * @CSR_MAX:		Maximum CSR address
+ * @CSR_DEF:		Default CSR address
+ * @CSR_REAL_ADDR:	CSR real unshifted address
+ */
+#define CSR_DWE			((u8)0x0F)
+#define CSR_OP_WRITE		((u8)0x00)
+#define CSR_OP_READ		((u8)0x10)
+#define CSR_RERR		((u8)0x40)
+#define CSR_WERR		((u8)0x80)
+#define CSR_WR_CNT		((u8)7)
+#define CSR_WRRD_CNT		((u8)3)
+#define CSR_RD_CNT		((u8)7)
+#define CSR_MAX			((u32)0x3FFFF)
+#define CSR_DEF			((u16)0x0000)
+#define CSR_REAL_ADDR(val)	((unsigned int)val << 2)
+
+/*
+ * IDT 89HPESx basic registers
+ * @IDT_VIDDID_CSR:	PCIe VID and DID of IDT 89HPESx
+ * @IDT_VID_MASK:	Mask of VID
+ */
+#define IDT_VIDDID_CSR	((u32)0x0000)
+#define IDT_VID_MASK	((u32)0xFFFF)
+
+/*
+ * IDT 89HPESx can send a NACK when a new command is sent before the previous
+ * one has finished executing. In this case the driver retries the operation
+ * a certain number of times.
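+ * For example, a retry-guarded SMBus byte write, exactly as issued by
+ * idt_smb_write_byte() below, expands to a loop that reissues the transfer
+ * until it succeeds or the retry budget is spent:
+ *   sts = idt_smb_safe(write_byte, pdev->client, ccode, seq->data[idx]);
+ *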
+ * @RETRY_CNT: Number of retries before giving up and fail + * @idt_smb_safe: Generate a retry loop on corresponding SMBus method + */ +#define RETRY_CNT (128) +#define idt_smb_safe(ops, args...) ({ \ + int __retry = RETRY_CNT; \ + s32 __sts; \ + do { \ + __sts = i2c_smbus_ ## ops ## _data(args); \ + } while (__retry-- && __sts < 0); \ + __sts; \ +}) + +/*=========================================================================== + * i2c bus level IO-operations + *=========================================================================== + */ + +/* + * idt_smb_write_byte() - SMBus write method when I2C_SMBUS_BYTE_DATA operation + * is only available + * @pdev: Pointer to the driver data + * @seq: Sequence of data to be written + */ +static int idt_smb_write_byte(struct idt_89hpesx_dev *pdev, + const struct idt_smb_seq *seq) +{ + s32 sts; + u8 ccode; + int idx; + + /* Loop over the supplied data sending byte one-by-one */ + for (idx = 0; idx < seq->bytecnt; idx++) { + /* Collect the command code byte */ + ccode = seq->ccode | CCODE_BYTE; + if (idx == 0) + ccode |= CCODE_START; + if (idx == seq->bytecnt - 1) + ccode |= CCODE_END; + + /* Send data to the device */ + sts = idt_smb_safe(write_byte, pdev->client, ccode, + seq->data[idx]); + if (sts != 0) + return (int)sts; + } + + return 0; +} + +/* + * idt_smb_read_byte() - SMBus read method when I2C_SMBUS_BYTE_DATA operation + * is only available + * @pdev: Pointer to the driver data + * @seq: Buffer to read data to + */ +static int idt_smb_read_byte(struct idt_89hpesx_dev *pdev, + struct idt_smb_seq *seq) +{ + s32 sts; + u8 ccode; + int idx; + + /* Loop over the supplied buffer receiving byte one-by-one */ + for (idx = 0; idx < seq->bytecnt; idx++) { + /* Collect the command code byte */ + ccode = seq->ccode | CCODE_BYTE; + if (idx == 0) + ccode |= CCODE_START; + if (idx == seq->bytecnt - 1) + ccode |= CCODE_END; + + /* Read data from the device */ + sts = idt_smb_safe(read_byte, pdev->client, ccode); + if (sts < 0) + return (int)sts; + + seq->data[idx] = (u8)sts; + } + + return 0; +} + +/* + * idt_smb_write_word() - SMBus write method when I2C_SMBUS_BYTE_DATA and + * I2C_FUNC_SMBUS_WORD_DATA operations are available + * @pdev: Pointer to the driver data + * @seq: Sequence of data to be written + */ +static int idt_smb_write_word(struct idt_89hpesx_dev *pdev, + const struct idt_smb_seq *seq) +{ + s32 sts; + u8 ccode; + int idx, evencnt; + + /* Calculate the even count of data to send */ + evencnt = seq->bytecnt - (seq->bytecnt % 2); + + /* Loop over the supplied data sending two bytes at a time */ + for (idx = 0; idx < evencnt; idx += 2) { + /* Collect the command code byte */ + ccode = seq->ccode | CCODE_WORD; + if (idx == 0) + ccode |= CCODE_START; + if (idx == evencnt - 2) + ccode |= CCODE_END; + + /* Send word data to the device */ + sts = idt_smb_safe(write_word, pdev->client, ccode, + *(u16 *)&seq->data[idx]); + if (sts != 0) + return (int)sts; + } + + /* If there is odd number of bytes then send just one last byte */ + if (seq->bytecnt != evencnt) { + /* Collect the command code byte */ + ccode = seq->ccode | CCODE_BYTE | CCODE_END; + if (idx == 0) + ccode |= CCODE_START; + + /* Send byte data to the device */ + sts = idt_smb_safe(write_byte, pdev->client, ccode, + seq->data[idx]); + if (sts != 0) + return (int)sts; + } + + return 0; +} + +/* + * idt_smb_read_word() - SMBus read method when I2C_SMBUS_BYTE_DATA and + * I2C_FUNC_SMBUS_WORD_DATA operations are available + * @pdev: Pointer to the driver data + * @seq: Buffer to read data 
to + */ +static int idt_smb_read_word(struct idt_89hpesx_dev *pdev, + struct idt_smb_seq *seq) +{ + s32 sts; + u8 ccode; + int idx, evencnt; + + /* Calculate the even count of data to send */ + evencnt = seq->bytecnt - (seq->bytecnt % 2); + + /* Loop over the supplied data reading two bytes at a time */ + for (idx = 0; idx < evencnt; idx += 2) { + /* Collect the command code byte */ + ccode = seq->ccode | CCODE_WORD; + if (idx == 0) + ccode |= CCODE_START; + if (idx == evencnt - 2) + ccode |= CCODE_END; + + /* Read word data from the device */ + sts = idt_smb_safe(read_word, pdev->client, ccode); + if (sts < 0) + return (int)sts; + + *(u16 *)&seq->data[idx] = (u16)sts; + } + + /* If there is odd number of bytes then receive just one last byte */ + if (seq->bytecnt != evencnt) { + /* Collect the command code byte */ + ccode = seq->ccode | CCODE_BYTE | CCODE_END; + if (idx == 0) + ccode |= CCODE_START; + + /* Read last data byte from the device */ + sts = idt_smb_safe(read_byte, pdev->client, ccode); + if (sts < 0) + return (int)sts; + + seq->data[idx] = (u8)sts; + } + + return 0; +} + +/* + * idt_smb_write_block() - SMBus write method when I2C_SMBUS_BLOCK_DATA + * operation is available + * @pdev: Pointer to the driver data + * @seq: Sequence of data to be written + */ +static int idt_smb_write_block(struct idt_89hpesx_dev *pdev, + const struct idt_smb_seq *seq) +{ + u8 ccode; + + /* Return error if too much data passed to send */ + if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX) + return -EINVAL; + + /* Collect the command code byte */ + ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END; + + /* Send block of data to the device */ + return idt_smb_safe(write_block, pdev->client, ccode, seq->bytecnt, + seq->data); +} + +/* + * idt_smb_read_block() - SMBus read method when I2C_SMBUS_BLOCK_DATA + * operation is available + * @pdev: Pointer to the driver data + * @seq: Buffer to read data to + */ +static int idt_smb_read_block(struct idt_89hpesx_dev *pdev, + struct idt_smb_seq *seq) +{ + s32 sts; + u8 ccode; + + /* Return error if too much data passed to send */ + if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX) + return -EINVAL; + + /* Collect the command code byte */ + ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END; + + /* Read block of data from the device */ + sts = idt_smb_safe(read_block, pdev->client, ccode, seq->data); + if (sts != seq->bytecnt) + return (sts < 0 ? sts : -ENODATA); + + return 0; +} + +/* + * idt_smb_write_i2c_block() - SMBus write method when I2C_SMBUS_I2C_BLOCK_DATA + * operation is available + * @pdev: Pointer to the driver data + * @seq: Sequence of data to be written + * + * NOTE It's usual SMBus write block operation, except the actual data length is + * sent as first byte of data + */ +static int idt_smb_write_i2c_block(struct idt_89hpesx_dev *pdev, + const struct idt_smb_seq *seq) +{ + u8 ccode, buf[I2C_SMBUS_BLOCK_MAX + 1]; + + /* Return error if too much data passed to send */ + if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX) + return -EINVAL; + + /* Collect the data to send. 
Length byte must be added prior the data */ + buf[0] = seq->bytecnt; + memcpy(&buf[1], seq->data, seq->bytecnt); + + /* Collect the command code byte */ + ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END; + + /* Send length and block of data to the device */ + return idt_smb_safe(write_i2c_block, pdev->client, ccode, + seq->bytecnt + 1, buf); +} + +/* + * idt_smb_read_i2c_block() - SMBus read method when I2C_SMBUS_I2C_BLOCK_DATA + * operation is available + * @pdev: Pointer to the driver data + * @seq: Buffer to read data to + * + * NOTE It's usual SMBus read block operation, except the actual data length is + * retrieved as first byte of data + */ +static int idt_smb_read_i2c_block(struct idt_89hpesx_dev *pdev, + struct idt_smb_seq *seq) +{ + u8 ccode, buf[I2C_SMBUS_BLOCK_MAX + 1]; + s32 sts; + + /* Return error if too much data passed to send */ + if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX) + return -EINVAL; + + /* Collect the command code byte */ + ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END; + + /* Read length and block of data from the device */ + sts = idt_smb_safe(read_i2c_block, pdev->client, ccode, + seq->bytecnt + 1, buf); + if (sts != seq->bytecnt + 1) + return (sts < 0 ? sts : -ENODATA); + if (buf[0] != seq->bytecnt) + return -ENODATA; + + /* Copy retrieved data to the output data buffer */ + memcpy(seq->data, &buf[1], seq->bytecnt); + + return 0; +} + +/*=========================================================================== + * EEPROM IO-operations + *=========================================================================== + */ + +/* + * idt_eeprom_read_byte() - read just one byte from EEPROM + * @pdev: Pointer to the driver data + * @memaddr: Start EEPROM memory address + * @data: Data to be written to EEPROM + */ +static int idt_eeprom_read_byte(struct idt_89hpesx_dev *pdev, u16 memaddr, + u8 *data) +{ + struct device *dev = &pdev->client->dev; + struct idt_eeprom_seq eeseq; + struct idt_smb_seq smbseq; + int ret, retry; + + /* Initialize SMBus sequence fields */ + smbseq.ccode = pdev->iniccode | CCODE_EEPROM; + smbseq.data = (u8 *)&eeseq; + + /* + * Sometimes EEPROM may respond with NACK if it's busy with previous + * operation, so we need to perform a few attempts of read cycle + */ + retry = RETRY_CNT; + do { + /* Send EEPROM memory address to read data from */ + smbseq.bytecnt = EEPROM_WRRD_CNT; + eeseq.cmd = pdev->inieecmd | EEPROM_OP_READ; + eeseq.eeaddr = pdev->eeaddr; + eeseq.memaddr = cpu_to_le16(memaddr); + ret = pdev->smb_write(pdev, &smbseq); + if (ret != 0) { + dev_err(dev, "Failed to init eeprom addr 0x%02hhx", + memaddr); + break; + } + + /* Perform read operation */ + smbseq.bytecnt = EEPROM_RD_CNT; + ret = pdev->smb_read(pdev, &smbseq); + if (ret != 0) { + dev_err(dev, "Failed to read eeprom data 0x%02hhx", + memaddr); + break; + } + + /* Restart read operation if the device is busy */ + if (retry && (eeseq.cmd & EEPROM_NAERR)) { + dev_dbg(dev, "EEPROM busy, retry reading after %d ms", + EEPROM_TOUT); + msleep(EEPROM_TOUT); + continue; + } + + /* Check whether IDT successfully read data from EEPROM */ + if (eeseq.cmd & (EEPROM_NAERR | EEPROM_LAERR | EEPROM_MSS)) { + dev_err(dev, + "Communication with eeprom failed, cmd 0x%hhx", + eeseq.cmd); + ret = -EREMOTEIO; + break; + } + + /* Save retrieved data and exit the loop */ + *data = eeseq.data; + break; + } while (retry--); + + /* Return the status of operation */ + return ret; +} + +/* + * idt_eeprom_write() - EEPROM write operation + * @pdev: Pointer to the driver data + * 
@memaddr: Start EEPROM memory address + * @len: Length of data to be written + * @data: Data to be written to EEPROM + */ +static int idt_eeprom_write(struct idt_89hpesx_dev *pdev, u16 memaddr, u16 len, + const u8 *data) +{ + struct device *dev = &pdev->client->dev; + struct idt_eeprom_seq eeseq; + struct idt_smb_seq smbseq; + int ret; + u16 idx; + + /* Initialize SMBus sequence fields */ + smbseq.ccode = pdev->iniccode | CCODE_EEPROM; + smbseq.data = (u8 *)&eeseq; + + /* Send data byte-by-byte, checking if it is successfully written */ + for (idx = 0; idx < len; idx++, memaddr++) { + /* Lock IDT SMBus device */ + mutex_lock(&pdev->smb_mtx); + + /* Perform write operation */ + smbseq.bytecnt = EEPROM_WR_CNT; + eeseq.cmd = pdev->inieecmd | EEPROM_OP_WRITE; + eeseq.eeaddr = pdev->eeaddr; + eeseq.memaddr = cpu_to_le16(memaddr); + eeseq.data = data[idx]; + ret = pdev->smb_write(pdev, &smbseq); + if (ret != 0) { + dev_err(dev, + "Failed to write 0x%04hx:0x%02hhx to eeprom", + memaddr, data[idx]); + goto err_mutex_unlock; + } + + /* + * Check whether the data is successfully written by reading + * from the same EEPROM memory address. + */ + eeseq.data = ~data[idx]; + ret = idt_eeprom_read_byte(pdev, memaddr, &eeseq.data); + if (ret != 0) + goto err_mutex_unlock; + + /* Check whether the read byte is the same as written one */ + if (eeseq.data != data[idx]) { + dev_err(dev, "Values don't match 0x%02hhx != 0x%02hhx", + eeseq.data, data[idx]); + ret = -EREMOTEIO; + goto err_mutex_unlock; + } + + /* Unlock IDT SMBus device */ +err_mutex_unlock: + mutex_unlock(&pdev->smb_mtx); + if (ret != 0) + return ret; + } + + return 0; +} + +/* + * idt_eeprom_read() - EEPROM read operation + * @pdev: Pointer to the driver data + * @memaddr: Start EEPROM memory address + * @len: Length of data to read + * @buf: Buffer to read data to + */ +static int idt_eeprom_read(struct idt_89hpesx_dev *pdev, u16 memaddr, u16 len, + u8 *buf) +{ + int ret; + u16 idx; + + /* Read data byte-by-byte, retrying if it wasn't successful */ + for (idx = 0; idx < len; idx++, memaddr++) { + /* Lock IDT SMBus device */ + mutex_lock(&pdev->smb_mtx); + + /* Just read the byte to the buffer */ + ret = idt_eeprom_read_byte(pdev, memaddr, &buf[idx]); + + /* Unlock IDT SMBus device */ + mutex_unlock(&pdev->smb_mtx); + + /* Return error if read operation failed */ + if (ret != 0) + return ret; + } + + return 0; +} + +/*=========================================================================== + * CSR IO-operations + *=========================================================================== + */ + +/* + * idt_csr_write() - CSR write operation + * @pdev: Pointer to the driver data + * @csraddr: CSR address (with no two LS bits) + * @data: Data to be written to CSR + */ +static int idt_csr_write(struct idt_89hpesx_dev *pdev, u16 csraddr, + const u32 data) +{ + struct device *dev = &pdev->client->dev; + struct idt_csr_seq csrseq; + struct idt_smb_seq smbseq; + int ret; + + /* Initialize SMBus sequence fields */ + smbseq.ccode = pdev->iniccode | CCODE_CSR; + smbseq.data = (u8 *)&csrseq; + + /* Lock IDT SMBus device */ + mutex_lock(&pdev->smb_mtx); + + /* Perform write operation */ + smbseq.bytecnt = CSR_WR_CNT; + csrseq.cmd = pdev->inicsrcmd | CSR_OP_WRITE; + csrseq.csraddr = cpu_to_le16(csraddr); + csrseq.data = cpu_to_le32(data); + ret = pdev->smb_write(pdev, &smbseq); + if (ret != 0) { + dev_err(dev, "Failed to write 0x%04x: 0x%04x to csr", + CSR_REAL_ADDR(csraddr), data); + goto err_mutex_unlock; + } + + /* Send CSR address to read data from 
*/ + smbseq.bytecnt = CSR_WRRD_CNT; + csrseq.cmd = pdev->inicsrcmd | CSR_OP_READ; + ret = pdev->smb_write(pdev, &smbseq); + if (ret != 0) { + dev_err(dev, "Failed to init csr address 0x%04x", + CSR_REAL_ADDR(csraddr)); + goto err_mutex_unlock; + } + + /* Perform read operation */ + smbseq.bytecnt = CSR_RD_CNT; + ret = pdev->smb_read(pdev, &smbseq); + if (ret != 0) { + dev_err(dev, "Failed to read csr 0x%04x", + CSR_REAL_ADDR(csraddr)); + goto err_mutex_unlock; + } + + /* Check whether IDT successfully retrieved CSR data */ + if (csrseq.cmd & (CSR_RERR | CSR_WERR)) { + dev_err(dev, "IDT failed to perform CSR r/w"); + ret = -EREMOTEIO; + goto err_mutex_unlock; + } + + /* Unlock IDT SMBus device */ +err_mutex_unlock: + mutex_unlock(&pdev->smb_mtx); + + return ret; +} + +/* + * idt_csr_read() - CSR read operation + * @pdev: Pointer to the driver data + * @csraddr: CSR address (with no two LS bits) + * @data: Data to be written to CSR + */ +static int idt_csr_read(struct idt_89hpesx_dev *pdev, u16 csraddr, u32 *data) +{ + struct device *dev = &pdev->client->dev; + struct idt_csr_seq csrseq; + struct idt_smb_seq smbseq; + int ret; + + /* Initialize SMBus sequence fields */ + smbseq.ccode = pdev->iniccode | CCODE_CSR; + smbseq.data = (u8 *)&csrseq; + + /* Lock IDT SMBus device */ + mutex_lock(&pdev->smb_mtx); + + /* Send CSR register address before reading it */ + smbseq.bytecnt = CSR_WRRD_CNT; + csrseq.cmd = pdev->inicsrcmd | CSR_OP_READ; + csrseq.csraddr = cpu_to_le16(csraddr); + ret = pdev->smb_write(pdev, &smbseq); + if (ret != 0) { + dev_err(dev, "Failed to init csr address 0x%04x", + CSR_REAL_ADDR(csraddr)); + goto err_mutex_unlock; + } + + /* Perform read operation */ + smbseq.bytecnt = CSR_RD_CNT; + ret = pdev->smb_read(pdev, &smbseq); + if (ret != 0) { + dev_err(dev, "Failed to read csr 0x%04hx", + CSR_REAL_ADDR(csraddr)); + goto err_mutex_unlock; + } + + /* Check whether IDT successfully retrieved CSR data */ + if (csrseq.cmd & (CSR_RERR | CSR_WERR)) { + dev_err(dev, "IDT failed to perform CSR r/w"); + ret = -EREMOTEIO; + goto err_mutex_unlock; + } + + /* Save data retrieved from IDT */ + *data = le32_to_cpu(csrseq.data); + + /* Unlock IDT SMBus device */ +err_mutex_unlock: + mutex_unlock(&pdev->smb_mtx); + + return ret; +} + +/*=========================================================================== + * Sysfs/debugfs-nodes IO-operations + *=========================================================================== + */ + +/* + * eeprom_write() - EEPROM sysfs-node write callback + * @filep: Pointer to the file system node + * @kobj: Pointer to the kernel object related to the sysfs-node + * @attr: Attributes of the file + * @buf: Buffer to write data to + * @off: Offset at which data should be written to + * @count: Number of bytes to write + */ +static ssize_t eeprom_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct idt_89hpesx_dev *pdev; + int ret; + + /* Retrieve driver data */ + pdev = dev_get_drvdata(kobj_to_dev(kobj)); + + /* Perform EEPROM write operation */ + ret = idt_eeprom_write(pdev, (u16)off, (u16)count, (u8 *)buf); + return (ret != 0 ? 
ret : count);
+}
+
+/*
+ * eeprom_read() - EEPROM sysfs-node read callback
+ * @filp:	Pointer to the file system node
+ * @kobj:	Pointer to the kernel object related to the sysfs-node
+ * @attr:	Attributes of the file
+ * @buf:	Buffer to read data to
+ * @off:	Offset at which data should be read from
+ * @count:	Number of bytes to read
+ */
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+			   struct bin_attribute *attr,
+			   char *buf, loff_t off, size_t count)
+{
+	struct idt_89hpesx_dev *pdev;
+	int ret;
+
+	/* Retrieve driver data */
+	pdev = dev_get_drvdata(kobj_to_dev(kobj));
+
+	/* Perform EEPROM read operation */
+	ret = idt_eeprom_read(pdev, (u16)off, (u16)count, (u8 *)buf);
+	return (ret != 0 ? ret : count);
+}
+
+/*
+ * idt_dbgfs_csr_write() - CSR debugfs-node write callback
+ * @filep:	Pointer to the file system file descriptor
+ * @ubuf:	Buffer to read data from
+ * @count:	Size of the buffer
+ * @offp:	Offset within the file
+ *
+ * It accepts either "0x<reg addr>:0x<value>" for saving the register address
+ * and writing the value to the specified DWORD register, or "0x<reg addr>"
+ * for just saving the register address in order to perform a subsequent read
+ * operation.
+ *
+ * WARNING: No spaces are allowed. The incoming string must be strictly
+ * formatted as "<reg addr>:<value>". The register address must be aligned
+ * to 4 bytes (one DWORD).
+ */
+static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf,
+				   size_t count, loff_t *offp)
+{
+	struct idt_89hpesx_dev *pdev = filep->private_data;
+	char *colon_ch, *csraddr_str, *csrval_str;
+	int ret, csraddr_len;
+	u32 csraddr, csrval;
+	char *buf;
+
+	if (*offp)
+		return 0;
+
+	/* Copy data from User-space */
+	buf = kmalloc(count + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, ubuf, count)) {
+		ret = -EFAULT;
+		goto free_buf;
+	}
+	buf[count] = 0;
+
+	/* Find position of colon in the buffer */
+	colon_ch = strnchr(buf, count, ':');
+
+	/*
+	 * If a colon is passed then the new CSR value should be parsed as
+	 * well, so allocate a buffer for the CSR address substring.
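+	 * (E.g. an input of "0x10014:0x1" writes 0x1 to the DWORD register
+	 * at 0x10014, while "0x10014" alone only latches the address for a
+	 * subsequent read; both values here are purely illustrative.)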
+	 * If no colon is found, then the string must contain just one number
+	 * with no new CSR value.
+	 */
+	if (colon_ch != NULL) {
+		csraddr_len = colon_ch - buf;
+		csraddr_str =
+			kmalloc(csraddr_len + 1, GFP_KERNEL);
+		if (csraddr_str == NULL) {
+			ret = -ENOMEM;
+			goto free_buf;
+		}
+		/* Copy the register address to the substring buffer */
+		strncpy(csraddr_str, buf, csraddr_len);
+		csraddr_str[csraddr_len] = '\0';
+		/* Register value must follow the colon */
+		csrval_str = colon_ch + 1;
+	} else /* if (colon_ch == NULL) */ {
+		csraddr_str = (char *)buf; /* Just to silence a warning */
+		csraddr_len = strnlen(csraddr_str, count);
+		csrval_str = NULL;
+	}
+
+	/* Convert CSR address to u32 value */
+	ret = kstrtou32(csraddr_str, 0, &csraddr);
+	if (ret != 0)
+		goto free_csraddr_str;
+
+	/* Check whether the passed register address is valid */
+	if (csraddr > CSR_MAX || !IS_ALIGNED(csraddr, SZ_4)) {
+		ret = -EINVAL;
+		goto free_csraddr_str;
+	}
+
+	/* Shift the register address right so as to have a u16 address */
+	pdev->csr = (csraddr >> 2);
+
+	/* Parse the new CSR value and send it to IDT if a colon was found */
+	if (colon_ch != NULL) {
+		ret = kstrtou32(csrval_str, 0, &csrval);
+		if (ret != 0)
+			goto free_csraddr_str;
+
+		ret = idt_csr_write(pdev, pdev->csr, csrval);
+		if (ret != 0)
+			goto free_csraddr_str;
+	}
+
+	/* Free memory only if a colon has been found */
+free_csraddr_str:
+	if (colon_ch != NULL)
+		kfree(csraddr_str);
+
+	/* Free the buffer allocated for data retrieved from User-space */
+free_buf:
+	kfree(buf);
+
+	return (ret != 0 ? ret : count);
+}
+
+/*
+ * idt_dbgfs_csr_read() - CSR debugfs-node read callback
+ * @filep:	Pointer to the file system file descriptor
+ * @ubuf:	Buffer to write data to
+ * @count:	Size of the buffer
+ * @offp:	Offset within the file
+ *
+ * It just prints the pair "0x<reg addr>:0x<value>" to the passed buffer.
+ */
+#define CSRBUF_SIZE ((size_t)32)
+static ssize_t idt_dbgfs_csr_read(struct file *filep, char __user *ubuf,
+				  size_t count, loff_t *offp)
+{
+	struct idt_89hpesx_dev *pdev = filep->private_data;
+	u32 csraddr, csrval;
+	char buf[CSRBUF_SIZE];
+	int ret, size;
+
+	/* Perform CSR read operation */
+	ret = idt_csr_read(pdev, pdev->csr, &csrval);
+	if (ret != 0)
+		return ret;
+
+	/* Shift the register address left so as to have the real address */
+	csraddr = ((u32)pdev->csr << 2);
+
+	/* Print the "0x<reg addr>:0x<value>" pair to the buffer */
+	size = snprintf(buf, CSRBUF_SIZE, "0x%05x:0x%08x\n",
+		(unsigned int)csraddr, (unsigned int)csrval);
+
+	/* Copy data to User-space */
+	return simple_read_from_buffer(ubuf, count, offp, buf, size);
+}
+
+/*
+ * eeprom_attribute - EEPROM sysfs-node attributes
+ *
+ * NOTE: The size will be changed to match the OF node. The EEPROM attribute
+ * will also be read-only if the corresponding flag is specified in the OF
+ * node.
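+ *
+ * A quick user-space sanity check of the resulting node might look like
+ * (the i2c bus number and device address below are illustrative):
+ *   $ hexdump -C /sys/bus/i2c/devices/0-0074/eeprom | head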
+ */ +static BIN_ATTR_RW(eeprom, EEPROM_DEF_SIZE); + +/* + * csr_dbgfs_ops - CSR debugfs-node read/write operations + */ +static const struct file_operations csr_dbgfs_ops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = idt_dbgfs_csr_write, + .read = idt_dbgfs_csr_read +}; + +/*=========================================================================== + * Driver init/deinit methods + *=========================================================================== + */ + +/* + * idt_set_defval() - disable EEPROM access by default + * @pdev: Pointer to the driver data + */ +static void idt_set_defval(struct idt_89hpesx_dev *pdev) +{ + /* If OF info is missing then use next values */ + pdev->eesize = 0; + pdev->eero = true; + pdev->inieecmd = 0; + pdev->eeaddr = 0; +} + +static const struct i2c_device_id ee_ids[]; + +/* + * idt_ee_match_id() - check whether the node belongs to compatible EEPROMs + */ +static const struct i2c_device_id *idt_ee_match_id(struct fwnode_handle *fwnode) +{ + const struct i2c_device_id *id = ee_ids; + const char *compatible, *p; + char devname[I2C_NAME_SIZE]; + int ret; + + ret = fwnode_property_read_string(fwnode, "compatible", &compatible); + if (ret) + return NULL; + + p = strchr(compatible, ','); + strlcpy(devname, p ? p + 1 : compatible, sizeof(devname)); + /* Search through the device name */ + while (id->name[0]) { + if (strcmp(devname, id->name) == 0) + return id; + id++; + } + return NULL; +} + +/* + * idt_get_fw_data() - get IDT i2c-device parameters from device tree + * @pdev: Pointer to the driver data + */ +static void idt_get_fw_data(struct idt_89hpesx_dev *pdev) +{ + struct device *dev = &pdev->client->dev; + struct fwnode_handle *fwnode; + const struct i2c_device_id *ee_id = NULL; + u32 eeprom_addr; + int ret; + + device_for_each_child_node(dev, fwnode) { + ee_id = idt_ee_match_id(fwnode); + if (ee_id) + break; + + dev_warn(dev, "Skip unsupported EEPROM device %pfw\n", fwnode); + } + + /* If there is no fwnode EEPROM device, then set zero size */ + if (!ee_id) { + dev_warn(dev, "No fwnode, EEPROM access disabled"); + idt_set_defval(pdev); + return; + } + + /* Retrieve EEPROM size */ + pdev->eesize = (u32)ee_id->driver_data; + + /* Get custom EEPROM address from 'reg' attribute */ + ret = fwnode_property_read_u32(fwnode, "reg", &eeprom_addr); + if (ret || (eeprom_addr == 0)) { + dev_warn(dev, "No EEPROM reg found, use default address 0x%x", + EEPROM_DEF_ADDR); + pdev->inieecmd = 0; + pdev->eeaddr = EEPROM_DEF_ADDR << 1; + } else { + pdev->inieecmd = EEPROM_USA; + pdev->eeaddr = eeprom_addr << 1; + } + + /* Check EEPROM 'read-only' flag */ + if (fwnode_property_read_bool(fwnode, "read-only")) + pdev->eero = true; + else /* if (!fwnode_property_read_bool(node, "read-only")) */ + pdev->eero = false; + + fwnode_handle_put(fwnode); + dev_info(dev, "EEPROM of %d bytes found by 0x%x", + pdev->eesize, pdev->eeaddr); +} + +/* + * idt_create_pdev() - create and init data structure of the driver + * @client: i2c client of IDT PCIe-switch device + */ +static struct idt_89hpesx_dev *idt_create_pdev(struct i2c_client *client) +{ + struct idt_89hpesx_dev *pdev; + + /* Allocate memory for driver data */ + pdev = devm_kmalloc(&client->dev, sizeof(struct idt_89hpesx_dev), + GFP_KERNEL); + if (pdev == NULL) + return ERR_PTR(-ENOMEM); + + /* Initialize basic fields of the data */ + pdev->client = client; + i2c_set_clientdata(client, pdev); + + /* Read firmware nodes information */ + idt_get_fw_data(pdev); + + /* Initialize basic CSR CMD field - use full 
DWORD-sized r/w ops */ + pdev->inicsrcmd = CSR_DWE; + pdev->csr = CSR_DEF; + + /* Enable Packet Error Checking if it's supported by adapter */ + if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC)) { + pdev->iniccode = CCODE_PEC; + client->flags |= I2C_CLIENT_PEC; + } else /* PEC is unsupported */ { + pdev->iniccode = 0; + } + + return pdev; +} + +/* + * idt_free_pdev() - free data structure of the driver + * @pdev: Pointer to the driver data + */ +static void idt_free_pdev(struct idt_89hpesx_dev *pdev) +{ + /* Clear driver data from device private field */ + i2c_set_clientdata(pdev->client, NULL); +} + +/* + * idt_set_smbus_ops() - set supported SMBus operations + * @pdev: Pointer to the driver data + * Return status of smbus check operations + */ +static int idt_set_smbus_ops(struct idt_89hpesx_dev *pdev) +{ + struct i2c_adapter *adapter = pdev->client->adapter; + struct device *dev = &pdev->client->dev; + + /* Check i2c adapter read functionality */ + if (i2c_check_functionality(adapter, + I2C_FUNC_SMBUS_READ_BLOCK_DATA)) { + pdev->smb_read = idt_smb_read_block; + dev_dbg(dev, "SMBus block-read op chosen"); + } else if (i2c_check_functionality(adapter, + I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { + pdev->smb_read = idt_smb_read_i2c_block; + dev_dbg(dev, "SMBus i2c-block-read op chosen"); + } else if (i2c_check_functionality(adapter, + I2C_FUNC_SMBUS_READ_WORD_DATA) && + i2c_check_functionality(adapter, + I2C_FUNC_SMBUS_READ_BYTE_DATA)) { + pdev->smb_read = idt_smb_read_word; + dev_warn(dev, "Use slow word/byte SMBus read ops"); + } else if (i2c_check_functionality(adapter, + I2C_FUNC_SMBUS_READ_BYTE_DATA)) { + pdev->smb_read = idt_smb_read_byte; + dev_warn(dev, "Use slow byte SMBus read op"); + } else /* no supported smbus read operations */ { + dev_err(dev, "No supported SMBus read op"); + return -EPFNOSUPPORT; + } + + /* Check i2c adapter write functionality */ + if (i2c_check_functionality(adapter, + I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)) { + pdev->smb_write = idt_smb_write_block; + dev_dbg(dev, "SMBus block-write op chosen"); + } else if (i2c_check_functionality(adapter, + I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) { + pdev->smb_write = idt_smb_write_i2c_block; + dev_dbg(dev, "SMBus i2c-block-write op chosen"); + } else if (i2c_check_functionality(adapter, + I2C_FUNC_SMBUS_WRITE_WORD_DATA) && + i2c_check_functionality(adapter, + I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) { + pdev->smb_write = idt_smb_write_word; + dev_warn(dev, "Use slow word/byte SMBus write op"); + } else if (i2c_check_functionality(adapter, + I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) { + pdev->smb_write = idt_smb_write_byte; + dev_warn(dev, "Use slow byte SMBus write op"); + } else /* no supported smbus write operations */ { + dev_err(dev, "No supported SMBus write op"); + return -EPFNOSUPPORT; + } + + /* Initialize IDT SMBus slave interface mutex */ + mutex_init(&pdev->smb_mtx); + + return 0; +} + +/* + * idt_check_dev() - check whether it's really IDT 89HPESx device + * @pdev: Pointer to the driver data + * Return status of i2c adapter check operation + */ +static int idt_check_dev(struct idt_89hpesx_dev *pdev) +{ + struct device *dev = &pdev->client->dev; + u32 viddid; + int ret; + + /* Read VID and DID directly from IDT memory space */ + ret = idt_csr_read(pdev, IDT_VIDDID_CSR, &viddid); + if (ret != 0) { + dev_err(dev, "Failed to read VID/DID"); + return ret; + } + + /* Check whether it's IDT device */ + if ((viddid & IDT_VID_MASK) != PCI_VENDOR_ID_IDT) { + dev_err(dev, "Got unsupported VID/DID: 0x%08x", viddid); + return -ENODEV; + } + 
+ dev_info(dev, "Found IDT 89HPES device VID:0x%04x, DID:0x%04x", + (viddid & IDT_VID_MASK), (viddid >> 16)); + + return 0; +} + +/* + * idt_create_sysfs_files() - create sysfs attribute files + * @pdev: Pointer to the driver data + * Return status of operation + */ +static int idt_create_sysfs_files(struct idt_89hpesx_dev *pdev) +{ + struct device *dev = &pdev->client->dev; + int ret; + + /* Don't do anything if EEPROM isn't accessible */ + if (pdev->eesize == 0) { + dev_dbg(dev, "Skip creating sysfs-files"); + return 0; + } + + /* Allocate memory for attribute file */ + pdev->ee_file = devm_kmalloc(dev, sizeof(*pdev->ee_file), GFP_KERNEL); + if (!pdev->ee_file) + return -ENOMEM; + + /* Copy the declared EEPROM attr structure to change some of fields */ + memcpy(pdev->ee_file, &bin_attr_eeprom, sizeof(*pdev->ee_file)); + + /* In case of read-only EEPROM get rid of write ability */ + if (pdev->eero) { + pdev->ee_file->attr.mode &= ~0200; + pdev->ee_file->write = NULL; + } + /* Create EEPROM sysfs file */ + pdev->ee_file->size = pdev->eesize; + ret = sysfs_create_bin_file(&dev->kobj, pdev->ee_file); + if (ret != 0) { + dev_err(dev, "Failed to create EEPROM sysfs-node"); + return ret; + } + + return 0; +} + +/* + * idt_remove_sysfs_files() - remove sysfs attribute files + * @pdev: Pointer to the driver data + */ +static void idt_remove_sysfs_files(struct idt_89hpesx_dev *pdev) +{ + struct device *dev = &pdev->client->dev; + + /* Don't do anything if EEPROM wasn't accessible */ + if (pdev->eesize == 0) + return; + + /* Remove EEPROM sysfs file */ + sysfs_remove_bin_file(&dev->kobj, pdev->ee_file); +} + +/* + * idt_create_dbgfs_files() - create debugfs files + * @pdev: Pointer to the driver data + */ +#define CSRNAME_LEN ((size_t)32) +static void idt_create_dbgfs_files(struct idt_89hpesx_dev *pdev) +{ + struct i2c_client *cli = pdev->client; + char fname[CSRNAME_LEN]; + + /* Create Debugfs directory for CSR file */ + snprintf(fname, CSRNAME_LEN, "%d-%04hx", cli->adapter->nr, cli->addr); + pdev->csr_dir = debugfs_create_dir(fname, csr_dbgdir); + + /* Create Debugfs file for CSR read/write operations */ + debugfs_create_file(cli->name, 0600, pdev->csr_dir, pdev, + &csr_dbgfs_ops); +} + +/* + * idt_remove_dbgfs_files() - remove debugfs files + * @pdev: Pointer to the driver data + */ +static void idt_remove_dbgfs_files(struct idt_89hpesx_dev *pdev) +{ + /* Remove CSR directory and it sysfs-node */ + debugfs_remove_recursive(pdev->csr_dir); +} + +/* + * idt_probe() - IDT 89HPESx driver probe() callback method + */ +static int idt_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct idt_89hpesx_dev *pdev; + int ret; + + /* Create driver data */ + pdev = idt_create_pdev(client); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + /* Set SMBus operations */ + ret = idt_set_smbus_ops(pdev); + if (ret != 0) + goto err_free_pdev; + + /* Check whether it is truly IDT 89HPESx device */ + ret = idt_check_dev(pdev); + if (ret != 0) + goto err_free_pdev; + + /* Create sysfs files */ + ret = idt_create_sysfs_files(pdev); + if (ret != 0) + goto err_free_pdev; + + /* Create debugfs files */ + idt_create_dbgfs_files(pdev); + + return 0; + +err_free_pdev: + idt_free_pdev(pdev); + + return ret; +} + +/* + * idt_remove() - IDT 89HPESx driver remove() callback method + */ +static int idt_remove(struct i2c_client *client) +{ + struct idt_89hpesx_dev *pdev = i2c_get_clientdata(client); + + /* Remove debugfs files first */ + idt_remove_dbgfs_files(pdev); + + /* Remove sysfs files */ + 
idt_remove_sysfs_files(pdev); + + /* Discard driver data structure */ + idt_free_pdev(pdev); + + return 0; +} + +/* + * ee_ids - array of supported EEPROMs + */ +static const struct i2c_device_id ee_ids[] = { + { "24c32", 4096}, + { "24c64", 8192}, + { "24c128", 16384}, + { "24c256", 32768}, + { "24c512", 65536}, + {} +}; +MODULE_DEVICE_TABLE(i2c, ee_ids); + +/* + * idt_ids - supported IDT 89HPESx devices + */ +static const struct i2c_device_id idt_ids[] = { + { "89hpes8nt2", 0 }, + { "89hpes12nt3", 0 }, + + { "89hpes24nt6ag2", 0 }, + { "89hpes32nt8ag2", 0 }, + { "89hpes32nt8bg2", 0 }, + { "89hpes12nt12g2", 0 }, + { "89hpes16nt16g2", 0 }, + { "89hpes24nt24g2", 0 }, + { "89hpes32nt24ag2", 0 }, + { "89hpes32nt24bg2", 0 }, + + { "89hpes12n3", 0 }, + { "89hpes12n3a", 0 }, + { "89hpes24n3", 0 }, + { "89hpes24n3a", 0 }, + + { "89hpes32h8", 0 }, + { "89hpes32h8g2", 0 }, + { "89hpes48h12", 0 }, + { "89hpes48h12g2", 0 }, + { "89hpes48h12ag2", 0 }, + { "89hpes16h16", 0 }, + { "89hpes22h16", 0 }, + { "89hpes22h16g2", 0 }, + { "89hpes34h16", 0 }, + { "89hpes34h16g2", 0 }, + { "89hpes64h16", 0 }, + { "89hpes64h16g2", 0 }, + { "89hpes64h16ag2", 0 }, + + /* { "89hpes3t3", 0 }, // No SMBus-slave iface */ + { "89hpes12t3g2", 0 }, + { "89hpes24t3g2", 0 }, + /* { "89hpes4t4", 0 }, // No SMBus-slave iface */ + { "89hpes16t4", 0 }, + { "89hpes4t4g2", 0 }, + { "89hpes10t4g2", 0 }, + { "89hpes16t4g2", 0 }, + { "89hpes16t4ag2", 0 }, + { "89hpes5t5", 0 }, + { "89hpes6t5", 0 }, + { "89hpes8t5", 0 }, + { "89hpes8t5a", 0 }, + { "89hpes24t6", 0 }, + { "89hpes6t6g2", 0 }, + { "89hpes24t6g2", 0 }, + { "89hpes16t7", 0 }, + { "89hpes32t8", 0 }, + { "89hpes32t8g2", 0 }, + { "89hpes48t12", 0 }, + { "89hpes48t12g2", 0 }, + { /* END OF LIST */ } +}; +MODULE_DEVICE_TABLE(i2c, idt_ids); + +static const struct of_device_id idt_of_match[] = { + { .compatible = "idt,89hpes8nt2", }, + { .compatible = "idt,89hpes12nt3", }, + + { .compatible = "idt,89hpes24nt6ag2", }, + { .compatible = "idt,89hpes32nt8ag2", }, + { .compatible = "idt,89hpes32nt8bg2", }, + { .compatible = "idt,89hpes12nt12g2", }, + { .compatible = "idt,89hpes16nt16g2", }, + { .compatible = "idt,89hpes24nt24g2", }, + { .compatible = "idt,89hpes32nt24ag2", }, + { .compatible = "idt,89hpes32nt24bg2", }, + + { .compatible = "idt,89hpes12n3", }, + { .compatible = "idt,89hpes12n3a", }, + { .compatible = "idt,89hpes24n3", }, + { .compatible = "idt,89hpes24n3a", }, + + { .compatible = "idt,89hpes32h8", }, + { .compatible = "idt,89hpes32h8g2", }, + { .compatible = "idt,89hpes48h12", }, + { .compatible = "idt,89hpes48h12g2", }, + { .compatible = "idt,89hpes48h12ag2", }, + { .compatible = "idt,89hpes16h16", }, + { .compatible = "idt,89hpes22h16", }, + { .compatible = "idt,89hpes22h16g2", }, + { .compatible = "idt,89hpes34h16", }, + { .compatible = "idt,89hpes34h16g2", }, + { .compatible = "idt,89hpes64h16", }, + { .compatible = "idt,89hpes64h16g2", }, + { .compatible = "idt,89hpes64h16ag2", }, + + { .compatible = "idt,89hpes12t3g2", }, + { .compatible = "idt,89hpes24t3g2", }, + + { .compatible = "idt,89hpes16t4", }, + { .compatible = "idt,89hpes4t4g2", }, + { .compatible = "idt,89hpes10t4g2", }, + { .compatible = "idt,89hpes16t4g2", }, + { .compatible = "idt,89hpes16t4ag2", }, + { .compatible = "idt,89hpes5t5", }, + { .compatible = "idt,89hpes6t5", }, + { .compatible = "idt,89hpes8t5", }, + { .compatible = "idt,89hpes8t5a", }, + { .compatible = "idt,89hpes24t6", }, + { .compatible = "idt,89hpes6t6g2", }, + { .compatible = "idt,89hpes24t6g2", }, + { .compatible = "idt,89hpes16t7", 
}, + { .compatible = "idt,89hpes32t8", }, + { .compatible = "idt,89hpes32t8g2", }, + { .compatible = "idt,89hpes48t12", }, + { .compatible = "idt,89hpes48t12g2", }, + { }, +}; +MODULE_DEVICE_TABLE(of, idt_of_match); + +/* + * idt_driver - IDT 89HPESx driver structure + */ +static struct i2c_driver idt_driver = { + .driver = { + .name = IDT_NAME, + .of_match_table = idt_of_match, + }, + .probe = idt_probe, + .remove = idt_remove, + .id_table = idt_ids, +}; + +/* + * idt_init() - IDT 89HPESx driver init() callback method + */ +static int __init idt_init(void) +{ + /* Create Debugfs directory first */ + if (debugfs_initialized()) + csr_dbgdir = debugfs_create_dir("idt_csr", NULL); + + /* Add new i2c-device driver */ + return i2c_add_driver(&idt_driver); +} +module_init(idt_init); + +/* + * idt_exit() - IDT 89HPESx driver exit() callback method + */ +static void __exit idt_exit(void) +{ + /* Discard debugfs directory and all files if any */ + debugfs_remove_recursive(csr_dbgdir); + + /* Unregister i2c-device driver */ + i2c_del_driver(&idt_driver); +} +module_exit(idt_exit); diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c new file mode 100644 index 000000000..9da81f6d4 --- /dev/null +++ b/drivers/misc/eeprom/max6875.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * max6875.c - driver for MAX6874/MAX6875 + * + * Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com> + * + * Based on eeprom.c + * + * The MAX6875 has a bank of registers and two banks of EEPROM. + * Address ranges are defined as follows: + * * 0x0000 - 0x0046 = configuration registers + * * 0x8000 - 0x8046 = configuration EEPROM + * * 0x8100 - 0x82FF = user EEPROM + * + * This driver makes the user EEPROM available for read. + * + * The registers & config EEPROM should be accessed via i2c-dev. + * + * The MAX6875 ignores the lowest address bit, so each chip responds to + * two addresses - 0x50/0x51 and 0x52/0x53. + * + * Note that the MAX6875 uses i2c_smbus_write_byte_data() to set the read + * address, so this driver is destructive if loaded for the wrong EEPROM chip. 
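+ *
+ * Once bound, the user EEPROM can be dumped through the read-only sysfs
+ * node this driver registers, e.g. (the bus number and chip address below
+ * are illustrative):
+ *   $ hexdump -C /sys/bus/i2c/devices/0-0050/eeprom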
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/mutex.h> + +/* The MAX6875 can only read/write 16 bytes at a time */ +#define SLICE_SIZE 16 +#define SLICE_BITS 4 + +/* USER EEPROM is at addresses 0x8100 - 0x82FF */ +#define USER_EEPROM_BASE 0x8100 +#define USER_EEPROM_SIZE 0x0200 +#define USER_EEPROM_SLICES 32 + +/* MAX6875 commands */ +#define MAX6875_CMD_BLK_READ 0x84 + +/* Each client has this additional data */ +struct max6875_data { + struct i2c_client *fake_client; + struct mutex update_lock; + + u32 valid; + u8 data[USER_EEPROM_SIZE]; + unsigned long last_updated[USER_EEPROM_SLICES]; +}; + +static void max6875_update_slice(struct i2c_client *client, int slice) +{ + struct max6875_data *data = i2c_get_clientdata(client); + int i, j, addr; + u8 *buf; + + if (slice >= USER_EEPROM_SLICES) + return; + + mutex_lock(&data->update_lock); + + buf = &data->data[slice << SLICE_BITS]; + + if (!(data->valid & (1 << slice)) || + time_after(jiffies, data->last_updated[slice])) { + + dev_dbg(&client->dev, "Starting update of slice %u\n", slice); + + data->valid &= ~(1 << slice); + + addr = USER_EEPROM_BASE + (slice << SLICE_BITS); + + /* select the eeprom address */ + if (i2c_smbus_write_byte_data(client, addr >> 8, addr & 0xFF)) { + dev_err(&client->dev, "address set failed\n"); + goto exit_up; + } + + if (i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { + if (i2c_smbus_read_i2c_block_data(client, + MAX6875_CMD_BLK_READ, + SLICE_SIZE, + buf) != SLICE_SIZE) { + goto exit_up; + } + } else { + for (i = 0; i < SLICE_SIZE; i++) { + j = i2c_smbus_read_byte(client); + if (j < 0) { + goto exit_up; + } + buf[i] = j; + } + } + data->last_updated[slice] = jiffies; + data->valid |= (1 << slice); + } +exit_up: + mutex_unlock(&data->update_lock); +} + +static ssize_t max6875_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct i2c_client *client = kobj_to_i2c_client(kobj); + struct max6875_data *data = i2c_get_clientdata(client); + int slice, max_slice; + + /* refresh slices which contain requested bytes */ + max_slice = (off + count - 1) >> SLICE_BITS; + for (slice = (off >> SLICE_BITS); slice <= max_slice; slice++) + max6875_update_slice(client, slice); + + memcpy(buf, &data->data[off], count); + + return count; +} + +static const struct bin_attribute user_eeprom_attr = { + .attr = { + .name = "eeprom", + .mode = S_IRUGO, + }, + .size = USER_EEPROM_SIZE, + .read = max6875_read, +}; + +static int max6875_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = client->adapter; + struct max6875_data *data; + int err; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA + | I2C_FUNC_SMBUS_READ_BYTE)) + return -ENODEV; + + /* Only bind to even addresses */ + if (client->addr & 1) + return -ENODEV; + + data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* A fake client is created on the odd address */ + data->fake_client = i2c_new_dummy_device(client->adapter, client->addr + 1); + if (IS_ERR(data->fake_client)) { + err = PTR_ERR(data->fake_client); + goto exit_kfree; + } + + /* Init real i2c_client */ + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + + err = sysfs_create_bin_file(&client->dev.kobj, &user_eeprom_attr); + if (err) + goto exit_remove_fake; + + return 0; + +exit_remove_fake: + 
i2c_unregister_device(data->fake_client); +exit_kfree: + kfree(data); + return err; +} + +static int max6875_remove(struct i2c_client *client) +{ + struct max6875_data *data = i2c_get_clientdata(client); + + i2c_unregister_device(data->fake_client); + + sysfs_remove_bin_file(&client->dev.kobj, &user_eeprom_attr); + kfree(data); + + return 0; +} + +static const struct i2c_device_id max6875_id[] = { + { "max6875", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, max6875_id); + +static struct i2c_driver max6875_driver = { + .driver = { + .name = "max6875", + }, + .probe = max6875_probe, + .remove = max6875_remove, + .id_table = max6875_id, +}; + +module_i2c_driver(max6875_driver); + +MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>"); +MODULE_DESCRIPTION("MAX6875 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c new file mode 100644 index 000000000..f950d0155 --- /dev/null +++ b/drivers/misc/enclosure.c @@ -0,0 +1,684 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Enclosure Services + * + * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com> + * +**----------------------------------------------------------------------------- +** +** +**----------------------------------------------------------------------------- +*/ +#include <linux/device.h> +#include <linux/enclosure.h> +#include <linux/err.h> +#include <linux/list.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> + +static LIST_HEAD(container_list); +static DEFINE_MUTEX(container_list_lock); +static struct class enclosure_class; + +/** + * enclosure_find - find an enclosure given a parent device + * @dev: the parent to match against + * @start: Optional enclosure device to start from (NULL if none) + * + * Looks through the list of registered enclosures to find all those + * with @dev as a parent. Returns NULL if no enclosure is + * found. @start can be used as a starting point to obtain multiple + * enclosures per parent (should begin with NULL and then be set to + * each returned enclosure device). Obtains a reference to the + * enclosure class device which must be released with device_put(). + * If @start is not NULL, a reference must be taken on it which is + * released before returning (this allows a loop through all + * enclosures to exit with only the reference on the enclosure of + * interest held). Note that the @dev may correspond to the actual + * device housing the enclosure, in which case no iteration via @start + * is required. + */ +struct enclosure_device *enclosure_find(struct device *dev, + struct enclosure_device *start) +{ + struct enclosure_device *edev; + + mutex_lock(&container_list_lock); + edev = list_prepare_entry(start, &container_list, node); + if (start) + put_device(&start->edev); + + list_for_each_entry_continue(edev, &container_list, node) { + struct device *parent = edev->edev.parent; + /* parent might not be immediate, so iterate up to + * the root of the tree if necessary */ + while (parent) { + if (parent == dev) { + get_device(&edev->edev); + mutex_unlock(&container_list_lock); + return edev; + } + parent = parent->parent; + } + } + mutex_unlock(&container_list_lock); + + return NULL; +} +EXPORT_SYMBOL_GPL(enclosure_find); + +/** + * enclosure_for_each_device - calls a function for each enclosure + * @fn: the function to call + * @data: the data to pass to each call + * + * Loops over all the enclosures calling the function. 
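+ *
+ * A minimal callback, shown purely for illustration, that counts the
+ * registered enclosures could be:
+ *
+ *   static int count_enclosures(struct enclosure_device *edev, void *data)
+ *   {
+ *           (*(int *)data)++;
+ *           return 0;
+ *   }
+ *
+ * invoked as enclosure_for_each_device(count_enclosures, &count).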
+ * + * Note, this function uses a mutex which will be held across calls to + * @fn, so it must have non atomic context, and @fn may (although it + * should not) sleep or otherwise cause the mutex to be held for + * indefinite periods + */ +int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *), + void *data) +{ + int error = 0; + struct enclosure_device *edev; + + mutex_lock(&container_list_lock); + list_for_each_entry(edev, &container_list, node) { + error = fn(edev, data); + if (error) + break; + } + mutex_unlock(&container_list_lock); + + return error; +} +EXPORT_SYMBOL_GPL(enclosure_for_each_device); + +/** + * enclosure_register - register device as an enclosure + * + * @dev: device containing the enclosure + * @name: chosen device name + * @components: number of components in the enclosure + * @cb: platform call-backs + * + * This sets up the device for being an enclosure. Note that @dev does + * not have to be a dedicated enclosure device. It may be some other type + * of device that additionally responds to enclosure services + */ +struct enclosure_device * +enclosure_register(struct device *dev, const char *name, int components, + struct enclosure_component_callbacks *cb) +{ + struct enclosure_device *edev = + kzalloc(struct_size(edev, component, components), GFP_KERNEL); + int err, i; + + BUG_ON(!cb); + + if (!edev) + return ERR_PTR(-ENOMEM); + + edev->components = components; + + edev->edev.class = &enclosure_class; + edev->edev.parent = get_device(dev); + edev->cb = cb; + dev_set_name(&edev->edev, "%s", name); + err = device_register(&edev->edev); + if (err) + goto err; + + for (i = 0; i < components; i++) { + edev->component[i].number = -1; + edev->component[i].slot = -1; + edev->component[i].power_status = -1; + } + + mutex_lock(&container_list_lock); + list_add_tail(&edev->node, &container_list); + mutex_unlock(&container_list_lock); + + return edev; + + err: + put_device(edev->edev.parent); + kfree(edev); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(enclosure_register); + +static struct enclosure_component_callbacks enclosure_null_callbacks; + +/** + * enclosure_unregister - remove an enclosure + * + * @edev: the registered enclosure to remove; + */ +void enclosure_unregister(struct enclosure_device *edev) +{ + int i; + + mutex_lock(&container_list_lock); + list_del(&edev->node); + mutex_unlock(&container_list_lock); + + for (i = 0; i < edev->components; i++) + if (edev->component[i].number != -1) + device_unregister(&edev->component[i].cdev); + + /* prevent any callbacks into service user */ + edev->cb = &enclosure_null_callbacks; + device_unregister(&edev->edev); +} +EXPORT_SYMBOL_GPL(enclosure_unregister); + +#define ENCLOSURE_NAME_SIZE 64 +#define COMPONENT_NAME_SIZE 64 + +static void enclosure_link_name(struct enclosure_component *cdev, char *name) +{ + strcpy(name, "enclosure_device:"); + strcat(name, dev_name(&cdev->cdev)); +} + +static void enclosure_remove_links(struct enclosure_component *cdev) +{ + char name[ENCLOSURE_NAME_SIZE]; + + enclosure_link_name(cdev, name); + + /* + * In odd circumstances, like multipath devices, something else may + * already have removed the links, so check for this condition first. 
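+	 * A NULL kobj.sd is how sysfs indicates that the directory entry
+	 * has already been removed, hence the checks below.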
+ */ + if (cdev->dev->kobj.sd) + sysfs_remove_link(&cdev->dev->kobj, name); + + if (cdev->cdev.kobj.sd) + sysfs_remove_link(&cdev->cdev.kobj, "device"); +} + +static int enclosure_add_links(struct enclosure_component *cdev) +{ + int error; + char name[ENCLOSURE_NAME_SIZE]; + + error = sysfs_create_link(&cdev->cdev.kobj, &cdev->dev->kobj, "device"); + if (error) + return error; + + enclosure_link_name(cdev, name); + error = sysfs_create_link(&cdev->dev->kobj, &cdev->cdev.kobj, name); + if (error) + sysfs_remove_link(&cdev->cdev.kobj, "device"); + + return error; +} + +static void enclosure_release(struct device *cdev) +{ + struct enclosure_device *edev = to_enclosure_device(cdev); + + put_device(cdev->parent); + kfree(edev); +} + +static void enclosure_component_release(struct device *dev) +{ + struct enclosure_component *cdev = to_enclosure_component(dev); + + if (cdev->dev) { + enclosure_remove_links(cdev); + put_device(cdev->dev); + } + put_device(dev->parent); +} + +static struct enclosure_component * +enclosure_component_find_by_name(struct enclosure_device *edev, + const char *name) +{ + int i; + const char *cname; + struct enclosure_component *ecomp; + + if (!edev || !name || !name[0]) + return NULL; + + for (i = 0; i < edev->components; i++) { + ecomp = &edev->component[i]; + cname = dev_name(&ecomp->cdev); + if (ecomp->number != -1 && + cname && cname[0] && + !strcmp(cname, name)) + return ecomp; + } + + return NULL; +} + +static const struct attribute_group *enclosure_component_groups[]; + +/** + * enclosure_component_alloc - prepare a new enclosure component + * @edev: the enclosure to add the component + * @number: the device number + * @type: the type of component being added + * @name: an optional name to appear in sysfs (leave NULL if none) + * + * The name is optional for enclosures that give their components a unique + * name. If not, leave the field NULL and a name will be assigned. + * + * Returns a pointer to the enclosure component or an error. + */ +struct enclosure_component * +enclosure_component_alloc(struct enclosure_device *edev, + unsigned int number, + enum enclosure_component_type type, + const char *name) +{ + struct enclosure_component *ecomp; + struct device *cdev; + int i; + char newname[COMPONENT_NAME_SIZE]; + + if (number >= edev->components) + return ERR_PTR(-EINVAL); + + ecomp = &edev->component[number]; + + if (ecomp->number != -1) + return ERR_PTR(-EINVAL); + + ecomp->type = type; + ecomp->number = number; + cdev = &ecomp->cdev; + cdev->parent = get_device(&edev->edev); + + if (name && name[0]) { + /* Some hardware (e.g. enclosure in RX300 S6) has components + * with non unique names. Registering duplicates in sysfs + * will lead to warnings during bootup. So make the names + * unique by appending consecutive numbers -1, -2, ... 
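+		 * (e.g. a duplicate "SLOT 1" would be registered as
+		 * "SLOT 1-1", then "SLOT 1-2"; the names here are purely
+		 * illustrative).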
*/ + i = 1; + snprintf(newname, COMPONENT_NAME_SIZE, + "%s", name); + while (enclosure_component_find_by_name(edev, newname)) + snprintf(newname, COMPONENT_NAME_SIZE, + "%s-%i", name, i++); + dev_set_name(cdev, "%s", newname); + } else + dev_set_name(cdev, "%u", number); + + cdev->release = enclosure_component_release; + cdev->groups = enclosure_component_groups; + + return ecomp; +} +EXPORT_SYMBOL_GPL(enclosure_component_alloc); + +/** + * enclosure_component_register - publishes an initialized enclosure component + * @ecomp: component to add + * + * Returns 0 on successful registration, releases the component otherwise + */ +int enclosure_component_register(struct enclosure_component *ecomp) +{ + struct device *cdev; + int err; + + cdev = &ecomp->cdev; + err = device_register(cdev); + if (err) { + ecomp->number = -1; + put_device(cdev); + return err; + } + + return 0; +} +EXPORT_SYMBOL_GPL(enclosure_component_register); + +/** + * enclosure_add_device - add a device as being part of an enclosure + * @edev: the enclosure device being added to. + * @component: the number of the component + * @dev: the device being added + * + * Declares a real device to reside in slot (or identifier) @num of an + * enclosure. This will cause the relevant sysfs links to appear. + * This function may also be used to change a device associated with + * an enclosure without having to call enclosure_remove_device() in + * between. + * + * Returns zero on success or an error. + */ +int enclosure_add_device(struct enclosure_device *edev, int component, + struct device *dev) +{ + struct enclosure_component *cdev; + int err; + + if (!edev || component >= edev->components) + return -EINVAL; + + cdev = &edev->component[component]; + + if (cdev->dev == dev) + return -EEXIST; + + if (cdev->dev) { + enclosure_remove_links(cdev); + put_device(cdev->dev); + } + cdev->dev = get_device(dev); + err = enclosure_add_links(cdev); + if (err) { + put_device(cdev->dev); + cdev->dev = NULL; + } + return err; +} +EXPORT_SYMBOL_GPL(enclosure_add_device); + +/** + * enclosure_remove_device - remove a device from an enclosure + * @edev: the enclosure device + * @dev: device to remove/put + * + * Returns zero on success or an error. 
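+ * The component's device pointer is cleared, so a subsequent
+ * enclosure_add_device() call may repopulate the slot.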
+ * + */ +int enclosure_remove_device(struct enclosure_device *edev, struct device *dev) +{ + struct enclosure_component *cdev; + int i; + + if (!edev || !dev) + return -EINVAL; + + for (i = 0; i < edev->components; i++) { + cdev = &edev->component[i]; + if (cdev->dev == dev) { + enclosure_remove_links(cdev); + put_device(dev); + cdev->dev = NULL; + return 0; + } + } + return -ENODEV; +} +EXPORT_SYMBOL_GPL(enclosure_remove_device); + +/* + * sysfs pieces below + */ + +static ssize_t components_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct enclosure_device *edev = to_enclosure_device(cdev); + + return snprintf(buf, 40, "%d\n", edev->components); +} +static DEVICE_ATTR_RO(components); + +static ssize_t id_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + struct enclosure_device *edev = to_enclosure_device(cdev); + + if (edev->cb->show_id) + return edev->cb->show_id(edev, buf); + return -EINVAL; +} +static DEVICE_ATTR_RO(id); + +static struct attribute *enclosure_class_attrs[] = { + &dev_attr_components.attr, + &dev_attr_id.attr, + NULL, +}; +ATTRIBUTE_GROUPS(enclosure_class); + +static struct class enclosure_class = { + .name = "enclosure", + .owner = THIS_MODULE, + .dev_release = enclosure_release, + .dev_groups = enclosure_class_groups, +}; + +static const char *const enclosure_status[] = { + [ENCLOSURE_STATUS_UNSUPPORTED] = "unsupported", + [ENCLOSURE_STATUS_OK] = "OK", + [ENCLOSURE_STATUS_CRITICAL] = "critical", + [ENCLOSURE_STATUS_NON_CRITICAL] = "non-critical", + [ENCLOSURE_STATUS_UNRECOVERABLE] = "unrecoverable", + [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed", + [ENCLOSURE_STATUS_UNKNOWN] = "unknown", + [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable", + [ENCLOSURE_STATUS_MAX] = NULL, +}; + +static const char *const enclosure_type[] = { + [ENCLOSURE_COMPONENT_DEVICE] = "device", + [ENCLOSURE_COMPONENT_ARRAY_DEVICE] = "array device", +}; + +static ssize_t get_component_fault(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + + if (edev->cb->get_fault) + edev->cb->get_fault(edev, ecomp); + return snprintf(buf, 40, "%d\n", ecomp->fault); +} + +static ssize_t set_component_fault(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + int val = simple_strtoul(buf, NULL, 0); + + if (edev->cb->set_fault) + edev->cb->set_fault(edev, ecomp, val); + return count; +} + +static ssize_t get_component_status(struct device *cdev, + struct device_attribute *attr,char *buf) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + + if (edev->cb->get_status) + edev->cb->get_status(edev, ecomp); + return snprintf(buf, 40, "%s\n", enclosure_status[ecomp->status]); +} + +static ssize_t set_component_status(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + int i; + + for (i = 0; enclosure_status[i]; i++) { + if (strncmp(buf, enclosure_status[i], + strlen(enclosure_status[i])) == 0 && + (buf[strlen(enclosure_status[i])] == '\n' || + 
buf[strlen(enclosure_status[i])] == '\0')) + break; + } + + if (enclosure_status[i] && edev->cb->set_status) { + edev->cb->set_status(edev, ecomp, i); + return count; + } else + return -EINVAL; +} + +static ssize_t get_component_active(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + + if (edev->cb->get_active) + edev->cb->get_active(edev, ecomp); + return snprintf(buf, 40, "%d\n", ecomp->active); +} + +static ssize_t set_component_active(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + int val = simple_strtoul(buf, NULL, 0); + + if (edev->cb->set_active) + edev->cb->set_active(edev, ecomp, val); + return count; +} + +static ssize_t get_component_locate(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + + if (edev->cb->get_locate) + edev->cb->get_locate(edev, ecomp); + return snprintf(buf, 40, "%d\n", ecomp->locate); +} + +static ssize_t set_component_locate(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + int val = simple_strtoul(buf, NULL, 0); + + if (edev->cb->set_locate) + edev->cb->set_locate(edev, ecomp, val); + return count; +} + +static ssize_t get_component_power_status(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + + if (edev->cb->get_power_status) + edev->cb->get_power_status(edev, ecomp); + + /* If still uninitialized, the callback failed or does not exist. */ + if (ecomp->power_status == -1) + return (edev->cb->get_power_status) ? -EIO : -ENOTTY; + + return snprintf(buf, 40, "%s\n", ecomp->power_status ? 
"on" : "off"); +} + +static ssize_t set_component_power_status(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct enclosure_device *edev = to_enclosure_device(cdev->parent); + struct enclosure_component *ecomp = to_enclosure_component(cdev); + int val; + + if (strncmp(buf, "on", 2) == 0 && + (buf[2] == '\n' || buf[2] == '\0')) + val = 1; + else if (strncmp(buf, "off", 3) == 0 && + (buf[3] == '\n' || buf[3] == '\0')) + val = 0; + else + return -EINVAL; + + if (edev->cb->set_power_status) + edev->cb->set_power_status(edev, ecomp, val); + return count; +} + +static ssize_t get_component_type(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct enclosure_component *ecomp = to_enclosure_component(cdev); + + return snprintf(buf, 40, "%s\n", enclosure_type[ecomp->type]); +} + +static ssize_t get_component_slot(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct enclosure_component *ecomp = to_enclosure_component(cdev); + int slot; + + /* if the enclosure does not override then use 'number' as a stand-in */ + if (ecomp->slot >= 0) + slot = ecomp->slot; + else + slot = ecomp->number; + + return snprintf(buf, 40, "%d\n", slot); +} + +static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault, + set_component_fault); +static DEVICE_ATTR(status, S_IRUGO | S_IWUSR, get_component_status, + set_component_status); +static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, get_component_active, + set_component_active); +static DEVICE_ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate, + set_component_locate); +static DEVICE_ATTR(power_status, S_IRUGO | S_IWUSR, get_component_power_status, + set_component_power_status); +static DEVICE_ATTR(type, S_IRUGO, get_component_type, NULL); +static DEVICE_ATTR(slot, S_IRUGO, get_component_slot, NULL); + +static struct attribute *enclosure_component_attrs[] = { + &dev_attr_fault.attr, + &dev_attr_status.attr, + &dev_attr_active.attr, + &dev_attr_locate.attr, + &dev_attr_power_status.attr, + &dev_attr_type.attr, + &dev_attr_slot.attr, + NULL +}; +ATTRIBUTE_GROUPS(enclosure_component); + +static int __init enclosure_init(void) +{ + return class_register(&enclosure_class); +} + +static void __exit enclosure_exit(void) +{ + class_unregister(&enclosure_class); +} + +module_init(enclosure_init); +module_exit(enclosure_exit); + +MODULE_AUTHOR("James Bottomley"); +MODULE_DESCRIPTION("Enclosure Services"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c new file mode 100644 index 000000000..9822efdc6 --- /dev/null +++ b/drivers/misc/fastrpc.c @@ -0,0 +1,1784 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. 
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/sort.h>
+#include <linux/of_platform.h>
+#include <linux/rpmsg.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <uapi/misc/fastrpc.h>
+
+#define ADSP_DOMAIN_ID (0)
+#define MDSP_DOMAIN_ID (1)
+#define SDSP_DOMAIN_ID (2)
+#define CDSP_DOMAIN_ID (3)
+#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, sdsp, cdsp */
+#define FASTRPC_MAX_SESSIONS 9 /* 8 compute, 1 cpz */
+#define FASTRPC_ALIGN 128
+#define FASTRPC_MAX_FDLIST 16
+#define FASTRPC_MAX_CRCLIST 64
+#define FASTRPC_PHYS(p) ((p) & 0xffffffff)
+#define FASTRPC_CTX_MAX (256)
+#define FASTRPC_INIT_HANDLE 1
+#define FASTRPC_CTXID_MASK (0xFF0)
+#define INIT_FILELEN_MAX (2 * 1024 * 1024)
+#define FASTRPC_DEVICE_NAME "fastrpc"
+#define ADSP_MMAP_ADD_PAGES 0x1000
+
+/* Retrieves number of input buffers from the scalars parameter */
+#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
+
+/* Retrieves number of output buffers from the scalars parameter */
+#define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff)
+
+/* Retrieves number of input handles from the scalars parameter */
+#define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f)
+
+/* Retrieves number of output handles from the scalars parameter */
+#define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f)
+
+#define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) + \
+				   REMOTE_SCALARS_OUTBUFS(sc) + \
+				   REMOTE_SCALARS_INHANDLES(sc) + \
+				   REMOTE_SCALARS_OUTHANDLES(sc))
+#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
+				(((attr & 0x07) << 29) | \
+				((method & 0x1f) << 24) | \
+				((in & 0xff) << 16) | \
+				((out & 0xff) << 8) | \
+				((oin & 0x0f) << 4) | \
+				(oout & 0x0f))
+
+#define FASTRPC_SCALARS(method, in, out) \
+		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
+
+#define FASTRPC_CREATE_PROCESS_NARGS 6
+/* Remote Method id table */
+#define FASTRPC_RMID_INIT_ATTACH 0
+#define FASTRPC_RMID_INIT_RELEASE 1
+#define FASTRPC_RMID_INIT_MMAP 4
+#define FASTRPC_RMID_INIT_MUNMAP 5
+#define FASTRPC_RMID_INIT_CREATE 6
+#define FASTRPC_RMID_INIT_CREATE_ATTR 7
+#define FASTRPC_RMID_INIT_CREATE_STATIC 8
+
+/* Protection Domain (PD) ids */
+#define AUDIO_PD (0) /* also GUEST_OS PD?
*/ +#define USER_PD (1) +#define SENSORS_PD (2) + +#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev) + +static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp", + "sdsp", "cdsp"}; +struct fastrpc_phy_page { + u64 addr; /* physical address */ + u64 size; /* size of contiguous region */ +}; + +struct fastrpc_invoke_buf { + u32 num; /* number of contiguous regions */ + u32 pgidx; /* index to start of contiguous region */ +}; + +struct fastrpc_remote_arg { + u64 pv; + u64 len; +}; + +struct fastrpc_mmap_rsp_msg { + u64 vaddr; +}; + +struct fastrpc_mmap_req_msg { + s32 pgid; + u32 flags; + u64 vaddr; + s32 num; +}; + +struct fastrpc_munmap_req_msg { + s32 pgid; + u64 vaddr; + u64 size; +}; + +struct fastrpc_msg { + int pid; /* process group id */ + int tid; /* thread id */ + u64 ctx; /* invoke caller context */ + u32 handle; /* handle to invoke */ + u32 sc; /* scalars structure describing the data */ + u64 addr; /* physical address */ + u64 size; /* size of contiguous region */ +}; + +struct fastrpc_invoke_rsp { + u64 ctx; /* invoke caller context */ + int retval; /* invoke return value */ +}; + +struct fastrpc_buf_overlap { + u64 start; + u64 end; + int raix; + u64 mstart; + u64 mend; + u64 offset; +}; + +struct fastrpc_buf { + struct fastrpc_user *fl; + struct dma_buf *dmabuf; + struct device *dev; + void *virt; + u64 phys; + u64 size; + /* Lock for dma buf attachments */ + struct mutex lock; + struct list_head attachments; + /* mmap support */ + struct list_head node; /* list of user requested mmaps */ + uintptr_t raddr; +}; + +struct fastrpc_dma_buf_attachment { + struct device *dev; + struct sg_table sgt; + struct list_head node; +}; + +struct fastrpc_map { + struct list_head node; + struct fastrpc_user *fl; + int fd; + struct dma_buf *buf; + struct sg_table *table; + struct dma_buf_attachment *attach; + u64 phys; + u64 size; + void *va; + u64 len; + struct kref refcount; +}; + +struct fastrpc_invoke_ctx { + int nscalars; + int nbufs; + int retval; + int pid; + int tgid; + u32 sc; + u32 *crc; + u64 ctxid; + u64 msg_sz; + struct kref refcount; + struct list_head node; /* list of ctxs */ + struct completion work; + struct work_struct put_work; + struct fastrpc_msg msg; + struct fastrpc_user *fl; + struct fastrpc_remote_arg *rpra; + struct fastrpc_map **maps; + struct fastrpc_buf *buf; + struct fastrpc_invoke_args *args; + struct fastrpc_buf_overlap *olaps; + struct fastrpc_channel_ctx *cctx; +}; + +struct fastrpc_session_ctx { + struct device *dev; + int sid; + bool used; + bool valid; +}; + +struct fastrpc_channel_ctx { + int domain_id; + int sesscount; + struct rpmsg_device *rpdev; + struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS]; + spinlock_t lock; + struct idr ctx_idr; + struct list_head users; + struct miscdevice miscdev; + struct kref refcount; +}; + +struct fastrpc_user { + struct list_head user; + struct list_head maps; + struct list_head pending; + struct list_head mmaps; + + struct fastrpc_channel_ctx *cctx; + struct fastrpc_session_ctx *sctx; + struct fastrpc_buf *init_mem; + + int tgid; + int pd; + /* Lock for lists */ + spinlock_t lock; + /* lock for allocations */ + struct mutex mutex; +}; + +static void fastrpc_free_map(struct kref *ref) +{ + struct fastrpc_map *map; + + map = container_of(ref, struct fastrpc_map, refcount); + + if (map->table) { + dma_buf_unmap_attachment(map->attach, map->table, + DMA_BIDIRECTIONAL); + dma_buf_detach(map->buf, map->attach); + dma_buf_put(map->buf); + } + + if (map->fl) { + 
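+		/*
+		 * Unlink from the owning file's map list under its lock
+		 * before the final kfree() below.
+		 */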
spin_lock(&map->fl->lock); + list_del(&map->node); + spin_unlock(&map->fl->lock); + map->fl = NULL; + } + + kfree(map); +} + +static void fastrpc_map_put(struct fastrpc_map *map) +{ + if (map) + kref_put(&map->refcount, fastrpc_free_map); +} + +static int fastrpc_map_get(struct fastrpc_map *map) +{ + if (!map) + return -ENOENT; + + return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT; +} + +static int fastrpc_map_find(struct fastrpc_user *fl, int fd, + struct fastrpc_map **ppmap) +{ + struct fastrpc_map *map = NULL; + + mutex_lock(&fl->mutex); + list_for_each_entry(map, &fl->maps, node) { + if (map->fd == fd) { + fastrpc_map_get(map); + *ppmap = map; + mutex_unlock(&fl->mutex); + return 0; + } + } + mutex_unlock(&fl->mutex); + + return -ENOENT; +} + +static void fastrpc_buf_free(struct fastrpc_buf *buf) +{ + dma_free_coherent(buf->dev, buf->size, buf->virt, + FASTRPC_PHYS(buf->phys)); + kfree(buf); +} + +static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev, + u64 size, struct fastrpc_buf **obuf) +{ + struct fastrpc_buf *buf; + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + INIT_LIST_HEAD(&buf->attachments); + INIT_LIST_HEAD(&buf->node); + mutex_init(&buf->lock); + + buf->fl = fl; + buf->virt = NULL; + buf->phys = 0; + buf->size = size; + buf->dev = dev; + buf->raddr = 0; + + buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys, + GFP_KERNEL); + if (!buf->virt) { + mutex_destroy(&buf->lock); + kfree(buf); + return -ENOMEM; + } + + if (fl->sctx && fl->sctx->sid) + buf->phys += ((u64)fl->sctx->sid << 32); + + *obuf = buf; + + return 0; +} + +static void fastrpc_channel_ctx_free(struct kref *ref) +{ + struct fastrpc_channel_ctx *cctx; + + cctx = container_of(ref, struct fastrpc_channel_ctx, refcount); + + kfree(cctx); +} + +static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx) +{ + kref_get(&cctx->refcount); +} + +static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx) +{ + kref_put(&cctx->refcount, fastrpc_channel_ctx_free); +} + +static void fastrpc_context_free(struct kref *ref) +{ + struct fastrpc_invoke_ctx *ctx; + struct fastrpc_channel_ctx *cctx; + unsigned long flags; + int i; + + ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount); + cctx = ctx->cctx; + + for (i = 0; i < ctx->nscalars; i++) + fastrpc_map_put(ctx->maps[i]); + + if (ctx->buf) + fastrpc_buf_free(ctx->buf); + + spin_lock_irqsave(&cctx->lock, flags); + idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4); + spin_unlock_irqrestore(&cctx->lock, flags); + + kfree(ctx->maps); + kfree(ctx->olaps); + kfree(ctx); + + fastrpc_channel_ctx_put(cctx); +} + +static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx) +{ + kref_get(&ctx->refcount); +} + +static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx) +{ + kref_put(&ctx->refcount, fastrpc_context_free); +} + +static void fastrpc_context_put_wq(struct work_struct *work) +{ + struct fastrpc_invoke_ctx *ctx = + container_of(work, struct fastrpc_invoke_ctx, put_work); + + fastrpc_context_put(ctx); +} + +#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1) +static int olaps_cmp(const void *a, const void *b) +{ + struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a; + struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b; + /* sort with lowest starting buffer first */ + int st = CMP(pa->start, pb->start); + /* sort with highest ending buffer first */ + int ed = CMP(pb->end, pa->end); + + return st == 0 ? 
ed : st; +} + +static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx) +{ + u64 max_end = 0; + int i; + + for (i = 0; i < ctx->nbufs; ++i) { + ctx->olaps[i].start = ctx->args[i].ptr; + ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length; + ctx->olaps[i].raix = i; + } + + sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL); + + for (i = 0; i < ctx->nbufs; ++i) { + /* Falling inside previous range */ + if (ctx->olaps[i].start < max_end) { + ctx->olaps[i].mstart = max_end; + ctx->olaps[i].mend = ctx->olaps[i].end; + ctx->olaps[i].offset = max_end - ctx->olaps[i].start; + + if (ctx->olaps[i].end > max_end) { + max_end = ctx->olaps[i].end; + } else { + ctx->olaps[i].mend = 0; + ctx->olaps[i].mstart = 0; + } + + } else { + ctx->olaps[i].mend = ctx->olaps[i].end; + ctx->olaps[i].mstart = ctx->olaps[i].start; + ctx->olaps[i].offset = 0; + max_end = ctx->olaps[i].end; + } + } +} + +static struct fastrpc_invoke_ctx *fastrpc_context_alloc( + struct fastrpc_user *user, u32 kernel, u32 sc, + struct fastrpc_invoke_args *args) +{ + struct fastrpc_channel_ctx *cctx = user->cctx; + struct fastrpc_invoke_ctx *ctx = NULL; + unsigned long flags; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&ctx->node); + ctx->fl = user; + ctx->nscalars = REMOTE_SCALARS_LENGTH(sc); + ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) + + REMOTE_SCALARS_OUTBUFS(sc); + + if (ctx->nscalars) { + ctx->maps = kcalloc(ctx->nscalars, + sizeof(*ctx->maps), GFP_KERNEL); + if (!ctx->maps) { + kfree(ctx); + return ERR_PTR(-ENOMEM); + } + ctx->olaps = kcalloc(ctx->nscalars, + sizeof(*ctx->olaps), GFP_KERNEL); + if (!ctx->olaps) { + kfree(ctx->maps); + kfree(ctx); + return ERR_PTR(-ENOMEM); + } + ctx->args = args; + fastrpc_get_buff_overlaps(ctx); + } + + /* Released in fastrpc_context_put() */ + fastrpc_channel_ctx_get(cctx); + + ctx->sc = sc; + ctx->retval = -1; + ctx->pid = current->pid; + ctx->tgid = user->tgid; + ctx->cctx = cctx; + init_completion(&ctx->work); + INIT_WORK(&ctx->put_work, fastrpc_context_put_wq); + + spin_lock(&user->lock); + list_add_tail(&ctx->node, &user->pending); + spin_unlock(&user->lock); + + spin_lock_irqsave(&cctx->lock, flags); + ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1, + FASTRPC_CTX_MAX, GFP_ATOMIC); + if (ret < 0) { + spin_unlock_irqrestore(&cctx->lock, flags); + goto err_idr; + } + ctx->ctxid = ret << 4; + spin_unlock_irqrestore(&cctx->lock, flags); + + kref_init(&ctx->refcount); + + return ctx; +err_idr: + spin_lock(&user->lock); + list_del(&ctx->node); + spin_unlock(&user->lock); + fastrpc_channel_ctx_put(cctx); + kfree(ctx->maps); + kfree(ctx->olaps); + kfree(ctx); + + return ERR_PTR(ret); +} + +static struct sg_table * +fastrpc_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction dir) +{ + struct fastrpc_dma_buf_attachment *a = attachment->priv; + struct sg_table *table; + int ret; + + table = &a->sgt; + + ret = dma_map_sgtable(attachment->dev, table, dir, 0); + if (ret) + table = ERR_PTR(ret); + return table; +} + +static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *table, + enum dma_data_direction dir) +{ + dma_unmap_sgtable(attach->dev, table, dir, 0); +} + +static void fastrpc_release(struct dma_buf *dmabuf) +{ + struct fastrpc_buf *buffer = dmabuf->priv; + + fastrpc_buf_free(buffer); +} + +static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct fastrpc_dma_buf_attachment *a; + struct 
fastrpc_buf *buffer = dmabuf->priv;
+	int ret;
+
+	a = kzalloc(sizeof(*a), GFP_KERNEL);
+	if (!a)
+		return -ENOMEM;
+
+	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
+			      FASTRPC_PHYS(buffer->phys), buffer->size);
+	if (ret < 0) {
+		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
+		kfree(a);
+		return -EINVAL;
+	}
+
+	a->dev = attachment->dev;
+	INIT_LIST_HEAD(&a->node);
+	attachment->priv = a;
+
+	mutex_lock(&buffer->lock);
+	list_add(&a->node, &buffer->attachments);
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static void fastrpc_dma_buf_detach(struct dma_buf *dmabuf,
+				   struct dma_buf_attachment *attachment)
+{
+	struct fastrpc_dma_buf_attachment *a = attachment->priv;
+	struct fastrpc_buf *buffer = dmabuf->priv;
+
+	mutex_lock(&buffer->lock);
+	list_del(&a->node);
+	mutex_unlock(&buffer->lock);
+	sg_free_table(&a->sgt);
+	kfree(a);
+}
+
+static void *fastrpc_vmap(struct dma_buf *dmabuf)
+{
+	struct fastrpc_buf *buf = dmabuf->priv;
+
+	return buf->virt;
+}
+
+static int fastrpc_mmap(struct dma_buf *dmabuf,
+			struct vm_area_struct *vma)
+{
+	struct fastrpc_buf *buf = dmabuf->priv;
+	size_t size = vma->vm_end - vma->vm_start;
+
+	return dma_mmap_coherent(buf->dev, vma, buf->virt,
+				 FASTRPC_PHYS(buf->phys), size);
+}
+
+static const struct dma_buf_ops fastrpc_dma_buf_ops = {
+	.attach = fastrpc_dma_buf_attach,
+	.detach = fastrpc_dma_buf_detach,
+	.map_dma_buf = fastrpc_map_dma_buf,
+	.unmap_dma_buf = fastrpc_unmap_dma_buf,
+	.mmap = fastrpc_mmap,
+	.vmap = fastrpc_vmap,
+	.release = fastrpc_release,
+};
+
+static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+			      u64 len, struct fastrpc_map **ppmap)
+{
+	struct fastrpc_session_ctx *sess = fl->sctx;
+	struct fastrpc_map *map = NULL;
+	int err = 0;
+
+	if (!fastrpc_map_find(fl, fd, ppmap))
+		return 0;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (!map)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&map->node);
+	map->fl = fl;
+	map->fd = fd;
+	map->buf = dma_buf_get(fd);
+	if (IS_ERR(map->buf)) {
+		err = PTR_ERR(map->buf);
+		goto get_err;
+	}
+
+	map->attach = dma_buf_attach(map->buf, sess->dev);
+	if (IS_ERR(map->attach)) {
+		dev_err(sess->dev, "Failed to attach dmabuf\n");
+		err = PTR_ERR(map->attach);
+		goto attach_err;
+	}
+
+	map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(map->table)) {
+		err = PTR_ERR(map->table);
+		goto map_err;
+	}
+
+	map->phys = sg_dma_address(map->table->sgl);
+	map->phys += ((u64)fl->sctx->sid << 32);
+	map->size = len;
+	map->va = sg_virt(map->table->sgl);
+	map->len = len;
+	kref_init(&map->refcount);
+
+	spin_lock(&fl->lock);
+	list_add_tail(&map->node, &fl->maps);
+	spin_unlock(&fl->lock);
+	*ppmap = map;
+
+	return 0;
+
+map_err:
+	dma_buf_detach(map->buf, map->attach);
+attach_err:
+	dma_buf_put(map->buf);
+get_err:
+	kfree(map);
+
+	return err;
+}
+
+/*
+ * Fastrpc payload buffer with metadata looks like:
+ *
+ * >>>>>> START of METADATA <<<<<<<<<
+ * +---------------------------------+
+ * |           Arguments             |
+ * | type:(struct fastrpc_remote_arg)|
+ * |             (0 - N)             |
+ * +---------------------------------+
+ * |        Invoke Buffer list       |
+ * | type:(struct fastrpc_invoke_buf)|
+ * |             (0 - N)             |
+ * +---------------------------------+
+ * |         Page info list          |
+ * | type:(struct fastrpc_phy_page)  |
+ * |             (0 - N)             |
+ * +---------------------------------+
+ * |          Optional info          |
+ * |(can be specific to SoC/Firmware)|
+ * +---------------------------------+
+ * >>>>>>>>  END of METADATA <<<<<<<<
+ * +---------------------------------+
+ * |           Inline ARGS           |
+ *
| (0-N) | + * +---------------------------------+ + */ + +static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx) +{ + int size = 0; + + size = (sizeof(struct fastrpc_remote_arg) + + sizeof(struct fastrpc_invoke_buf) + + sizeof(struct fastrpc_phy_page)) * ctx->nscalars + + sizeof(u64) * FASTRPC_MAX_FDLIST + + sizeof(u32) * FASTRPC_MAX_CRCLIST; + + return size; +} + +static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen) +{ + u64 size = 0; + int oix; + + size = ALIGN(metalen, FASTRPC_ALIGN); + for (oix = 0; oix < ctx->nbufs; oix++) { + int i = ctx->olaps[oix].raix; + + if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) { + + if (ctx->olaps[oix].offset == 0) + size = ALIGN(size, FASTRPC_ALIGN); + + size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart); + } + } + + return size; +} + +static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx) +{ + struct device *dev = ctx->fl->sctx->dev; + int i, err; + + for (i = 0; i < ctx->nscalars; ++i) { + /* Make sure reserved field is set to 0 */ + if (ctx->args[i].reserved) + return -EINVAL; + + if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 || + ctx->args[i].length == 0) + continue; + + err = fastrpc_map_create(ctx->fl, ctx->args[i].fd, + ctx->args[i].length, &ctx->maps[i]); + if (err) { + dev_err(dev, "Error Creating map %d\n", err); + return -EINVAL; + } + + } + return 0; +} + +static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx) +{ + struct device *dev = ctx->fl->sctx->dev; + struct fastrpc_remote_arg *rpra; + struct fastrpc_invoke_buf *list; + struct fastrpc_phy_page *pages; + int inbufs, i, oix, err = 0; + u64 len, rlen, pkt_size; + u64 pg_start, pg_end; + uintptr_t args; + int metalen; + + inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); + metalen = fastrpc_get_meta_size(ctx); + pkt_size = fastrpc_get_payload_size(ctx, metalen); + + err = fastrpc_create_maps(ctx); + if (err) + return err; + + ctx->msg_sz = pkt_size; + + err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf); + if (err) + return err; + + rpra = ctx->buf->virt; + list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra); + pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) + + sizeof(*rpra)); + args = (uintptr_t)ctx->buf->virt + metalen; + rlen = pkt_size - metalen; + ctx->rpra = rpra; + + for (oix = 0; oix < ctx->nbufs; ++oix) { + int mlen; + + i = ctx->olaps[oix].raix; + len = ctx->args[i].length; + + rpra[i].pv = 0; + rpra[i].len = len; + list[i].num = len ? 
1 : 0; + list[i].pgidx = i; + + if (!len) + continue; + + if (ctx->maps[i]) { + struct vm_area_struct *vma = NULL; + + rpra[i].pv = (u64) ctx->args[i].ptr; + pages[i].addr = ctx->maps[i]->phys; + + mmap_read_lock(current->mm); + vma = find_vma(current->mm, ctx->args[i].ptr); + if (vma) + pages[i].addr += ctx->args[i].ptr - + vma->vm_start; + mmap_read_unlock(current->mm); + + pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT; + pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >> + PAGE_SHIFT; + pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE; + + } else { + + if (ctx->olaps[oix].offset == 0) { + rlen -= ALIGN(args, FASTRPC_ALIGN) - args; + args = ALIGN(args, FASTRPC_ALIGN); + } + + mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart; + + if (rlen < mlen) + goto bail; + + rpra[i].pv = args - ctx->olaps[oix].offset; + pages[i].addr = ctx->buf->phys - + ctx->olaps[oix].offset + + (pkt_size - rlen); + pages[i].addr = pages[i].addr & PAGE_MASK; + + pg_start = (args & PAGE_MASK) >> PAGE_SHIFT; + pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT; + pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE; + args = args + mlen; + rlen -= mlen; + } + + if (i < inbufs && !ctx->maps[i]) { + void *dst = (void *)(uintptr_t)rpra[i].pv; + void *src = (void *)(uintptr_t)ctx->args[i].ptr; + + if (!kernel) { + if (copy_from_user(dst, (void __user *)src, + len)) { + err = -EFAULT; + goto bail; + } + } else { + memcpy(dst, src, len); + } + } + } + + for (i = ctx->nbufs; i < ctx->nscalars; ++i) { + rpra[i].pv = (u64) ctx->args[i].ptr; + rpra[i].len = ctx->args[i].length; + list[i].num = ctx->args[i].length ? 1 : 0; + list[i].pgidx = i; + pages[i].addr = ctx->maps[i]->phys; + pages[i].size = ctx->maps[i]->size; + } + +bail: + if (err) + dev_err(dev, "Error: get invoke args failed:%d\n", err); + + return err; +} + +static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx, + u32 kernel) +{ + struct fastrpc_remote_arg *rpra = ctx->rpra; + int i, inbufs; + + inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); + + for (i = inbufs; i < ctx->nbufs; ++i) { + void *src = (void *)(uintptr_t)rpra[i].pv; + void *dst = (void *)(uintptr_t)ctx->args[i].ptr; + u64 len = rpra[i].len; + + if (!kernel) { + if (copy_to_user((void __user *)dst, src, len)) + return -EFAULT; + } else { + memcpy(dst, src, len); + } + } + + return 0; +} + +static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx, + struct fastrpc_invoke_ctx *ctx, + u32 kernel, uint32_t handle) +{ + struct fastrpc_channel_ctx *cctx; + struct fastrpc_user *fl = ctx->fl; + struct fastrpc_msg *msg = &ctx->msg; + int ret; + + cctx = fl->cctx; + msg->pid = fl->tgid; + msg->tid = current->pid; + + if (kernel) + msg->pid = 0; + + msg->ctx = ctx->ctxid | fl->pd; + msg->handle = handle; + msg->sc = ctx->sc; + msg->addr = ctx->buf ? 
ctx->buf->phys : 0; + msg->size = roundup(ctx->msg_sz, PAGE_SIZE); + fastrpc_context_get(ctx); + + ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg)); + + if (ret) + fastrpc_context_put(ctx); + + return ret; + +} + +static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, + u32 handle, u32 sc, + struct fastrpc_invoke_args *args) +{ + struct fastrpc_invoke_ctx *ctx = NULL; + int err = 0; + + if (!fl->sctx) + return -EINVAL; + + if (!fl->cctx->rpdev) + return -EPIPE; + + if (handle == FASTRPC_INIT_HANDLE && !kernel) { + dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle); + return -EPERM; + } + + ctx = fastrpc_context_alloc(fl, kernel, sc, args); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + if (ctx->nscalars) { + err = fastrpc_get_args(kernel, ctx); + if (err) + goto bail; + } + + /* make sure that all CPU memory writes are seen by DSP */ + dma_wmb(); + /* Send invoke buffer to remote dsp */ + err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle); + if (err) + goto bail; + + if (kernel) { + if (!wait_for_completion_timeout(&ctx->work, 10 * HZ)) + err = -ETIMEDOUT; + } else { + err = wait_for_completion_interruptible(&ctx->work); + } + + if (err) + goto bail; + + if (ctx->nscalars) { + /* make sure that all memory writes by DSP are seen by CPU */ + dma_rmb(); + /* populate all the output buffers with results */ + err = fastrpc_put_args(ctx, kernel); + if (err) + goto bail; + } + + /* Check the response from remote dsp */ + err = ctx->retval; + if (err) + goto bail; + +bail: + if (err != -ERESTARTSYS && err != -ETIMEDOUT) { + /* We are done with this compute context */ + spin_lock(&fl->lock); + list_del(&ctx->node); + spin_unlock(&fl->lock); + fastrpc_context_put(ctx); + } + if (err) + dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err); + + return err; +} + +static int fastrpc_init_create_process(struct fastrpc_user *fl, + char __user *argp) +{ + struct fastrpc_init_create init; + struct fastrpc_invoke_args *args; + struct fastrpc_phy_page pages[1]; + struct fastrpc_map *map = NULL; + struct fastrpc_buf *imem = NULL; + int memlen; + int err; + struct { + int pgid; + u32 namelen; + u32 filelen; + u32 pageslen; + u32 attrs; + u32 siglen; + } inbuf; + u32 sc; + + args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL); + if (!args) + return -ENOMEM; + + if (copy_from_user(&init, argp, sizeof(init))) { + err = -EFAULT; + goto err; + } + + if (init.filelen > INIT_FILELEN_MAX) { + err = -EINVAL; + goto err; + } + + inbuf.pgid = fl->tgid; + inbuf.namelen = strlen(current->comm) + 1; + inbuf.filelen = init.filelen; + inbuf.pageslen = 1; + inbuf.attrs = init.attrs; + inbuf.siglen = init.siglen; + fl->pd = USER_PD; + + if (init.filelen && init.filefd) { + err = fastrpc_map_create(fl, init.filefd, init.filelen, &map); + if (err) + goto err; + } + + memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4), + 1024 * 1024); + err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen, + &imem); + if (err) + goto err_alloc; + + fl->init_mem = imem; + args[0].ptr = (u64)(uintptr_t)&inbuf; + args[0].length = sizeof(inbuf); + args[0].fd = -1; + + args[1].ptr = (u64)(uintptr_t)current->comm; + args[1].length = inbuf.namelen; + args[1].fd = -1; + + args[2].ptr = (u64) init.file; + args[2].length = inbuf.filelen; + args[2].fd = init.filefd; + + pages[0].addr = imem->phys; + pages[0].size = imem->size; + + args[3].ptr = (u64)(uintptr_t) pages; + args[3].length = 1 * sizeof(*pages); + args[3].fd = -1; + + args[4].ptr = 
(u64)(uintptr_t)&inbuf.attrs; + args[4].length = sizeof(inbuf.attrs); + args[4].fd = -1; + + args[5].ptr = (u64)(uintptr_t) &inbuf.siglen; + args[5].length = sizeof(inbuf.siglen); + args[5].fd = -1; + + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0); + if (init.attrs) + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0); + + err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, + sc, args); + if (err) + goto err_invoke; + + kfree(args); + + return 0; + +err_invoke: + fl->init_mem = NULL; + fastrpc_buf_free(imem); +err_alloc: + fastrpc_map_put(map); +err: + kfree(args); + + return err; +} + +static struct fastrpc_session_ctx *fastrpc_session_alloc( + struct fastrpc_channel_ctx *cctx) +{ + struct fastrpc_session_ctx *session = NULL; + unsigned long flags; + int i; + + spin_lock_irqsave(&cctx->lock, flags); + for (i = 0; i < cctx->sesscount; i++) { + if (!cctx->session[i].used && cctx->session[i].valid) { + cctx->session[i].used = true; + session = &cctx->session[i]; + break; + } + } + spin_unlock_irqrestore(&cctx->lock, flags); + + return session; +} + +static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx, + struct fastrpc_session_ctx *session) +{ + unsigned long flags; + + spin_lock_irqsave(&cctx->lock, flags); + session->used = false; + spin_unlock_irqrestore(&cctx->lock, flags); +} + +static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl) +{ + struct fastrpc_invoke_args args[1]; + int tgid = 0; + u32 sc; + + tgid = fl->tgid; + args[0].ptr = (u64)(uintptr_t) &tgid; + args[0].length = sizeof(tgid); + args[0].fd = -1; + args[0].reserved = 0; + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0); + + return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, + sc, &args[0]); +} + +static int fastrpc_device_release(struct inode *inode, struct file *file) +{ + struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; + struct fastrpc_channel_ctx *cctx = fl->cctx; + struct fastrpc_invoke_ctx *ctx, *n; + struct fastrpc_map *map, *m; + struct fastrpc_buf *buf, *b; + unsigned long flags; + + fastrpc_release_current_dsp_process(fl); + + spin_lock_irqsave(&cctx->lock, flags); + list_del(&fl->user); + spin_unlock_irqrestore(&cctx->lock, flags); + + if (fl->init_mem) + fastrpc_buf_free(fl->init_mem); + + list_for_each_entry_safe(ctx, n, &fl->pending, node) { + list_del(&ctx->node); + fastrpc_context_put(ctx); + } + + list_for_each_entry_safe(map, m, &fl->maps, node) + fastrpc_map_put(map); + + list_for_each_entry_safe(buf, b, &fl->mmaps, node) { + list_del(&buf->node); + fastrpc_buf_free(buf); + } + + fastrpc_session_free(cctx, fl->sctx); + fastrpc_channel_ctx_put(cctx); + + mutex_destroy(&fl->mutex); + kfree(fl); + file->private_data = NULL; + + return 0; +} + +static int fastrpc_device_open(struct inode *inode, struct file *filp) +{ + struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data); + struct fastrpc_user *fl = NULL; + unsigned long flags; + + fl = kzalloc(sizeof(*fl), GFP_KERNEL); + if (!fl) + return -ENOMEM; + + /* Released in fastrpc_device_release() */ + fastrpc_channel_ctx_get(cctx); + + filp->private_data = fl; + spin_lock_init(&fl->lock); + mutex_init(&fl->mutex); + INIT_LIST_HEAD(&fl->pending); + INIT_LIST_HEAD(&fl->maps); + INIT_LIST_HEAD(&fl->mmaps); + INIT_LIST_HEAD(&fl->user); + fl->tgid = current->tgid; + fl->cctx = cctx; + + fl->sctx = fastrpc_session_alloc(cctx); + if (!fl->sctx) { + dev_err(&cctx->rpdev->dev, "No session available\n"); + mutex_destroy(&fl->mutex); + kfree(fl); + + return -EBUSY; + 
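+		/* All sessions on this channel are already in use or invalid. */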
} + + spin_lock_irqsave(&cctx->lock, flags); + list_add_tail(&fl->user, &cctx->users); + spin_unlock_irqrestore(&cctx->lock, flags); + + return 0; +} + +static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp) +{ + struct fastrpc_alloc_dma_buf bp; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + struct fastrpc_buf *buf = NULL; + int err; + + if (copy_from_user(&bp, argp, sizeof(bp))) + return -EFAULT; + + err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf); + if (err) + return err; + exp_info.ops = &fastrpc_dma_buf_ops; + exp_info.size = bp.size; + exp_info.flags = O_RDWR; + exp_info.priv = buf; + buf->dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(buf->dmabuf)) { + err = PTR_ERR(buf->dmabuf); + fastrpc_buf_free(buf); + return err; + } + + bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE); + if (bp.fd < 0) { + dma_buf_put(buf->dmabuf); + return -EINVAL; + } + + if (copy_to_user(argp, &bp, sizeof(bp))) { + /* + * The usercopy failed, but we can't do much about it, as + * dma_buf_fd() already called fd_install() and made the + * file descriptor accessible for the current process. It + * might already be closed and dmabuf no longer valid when + * we reach this point. Therefore "leak" the fd and rely on + * the process exit path to do any required cleanup. + */ + return -EFAULT; + } + + return 0; +} + +static int fastrpc_init_attach(struct fastrpc_user *fl, int pd) +{ + struct fastrpc_invoke_args args[1]; + int tgid = fl->tgid; + u32 sc; + + args[0].ptr = (u64)(uintptr_t) &tgid; + args[0].length = sizeof(tgid); + args[0].fd = -1; + args[0].reserved = 0; + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0); + fl->pd = pd; + + return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, + sc, &args[0]); +} + +static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp) +{ + struct fastrpc_invoke_args *args = NULL; + struct fastrpc_invoke inv; + u32 nscalars; + int err; + + if (copy_from_user(&inv, argp, sizeof(inv))) + return -EFAULT; + + /* nscalars is truncated here to max supported value */ + nscalars = REMOTE_SCALARS_LENGTH(inv.sc); + if (nscalars) { + args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL); + if (!args) + return -ENOMEM; + + if (copy_from_user(args, (void __user *)(uintptr_t)inv.args, + nscalars * sizeof(*args))) { + kfree(args); + return -EFAULT; + } + } + + err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args); + kfree(args); + + return err; +} + +static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, + struct fastrpc_req_munmap *req) +{ + struct fastrpc_invoke_args args[1] = { [0] = { 0 } }; + struct fastrpc_buf *buf = NULL, *iter, *b; + struct fastrpc_munmap_req_msg req_msg; + struct device *dev = fl->sctx->dev; + int err; + u32 sc; + + spin_lock(&fl->lock); + list_for_each_entry_safe(iter, b, &fl->mmaps, node) { + if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) { + buf = iter; + break; + } + } + spin_unlock(&fl->lock); + + if (!buf) { + dev_err(dev, "mmap not in list\n"); + return -EINVAL; + } + + req_msg.pgid = fl->tgid; + req_msg.size = buf->size; + req_msg.vaddr = buf->raddr; + + args[0].ptr = (u64) (uintptr_t) &req_msg; + args[0].length = sizeof(req_msg); + + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0); + err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, + &args[0]); + if (!err) { + dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr); + spin_lock(&fl->lock); + list_del(&buf->node); + spin_unlock(&fl->lock); + fastrpc_buf_free(buf); + } else { + dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", 
buf->raddr); + } + + return err; +} + +static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp) +{ + struct fastrpc_req_munmap req; + + if (copy_from_user(&req, argp, sizeof(req))) + return -EFAULT; + + return fastrpc_req_munmap_impl(fl, &req); +} + +static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) +{ + struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } }; + struct fastrpc_buf *buf = NULL; + struct fastrpc_mmap_req_msg req_msg; + struct fastrpc_mmap_rsp_msg rsp_msg; + struct fastrpc_req_munmap req_unmap; + struct fastrpc_phy_page pages; + struct fastrpc_req_mmap req; + struct device *dev = fl->sctx->dev; + int err; + u32 sc; + + if (copy_from_user(&req, argp, sizeof(req))) + return -EFAULT; + + if (req.flags != ADSP_MMAP_ADD_PAGES) { + dev_err(dev, "flag not supported 0x%x\n", req.flags); + return -EINVAL; + } + + if (req.vaddrin) { + dev_err(dev, "adding user allocated pages is not supported\n"); + return -EINVAL; + } + + err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf); + if (err) { + dev_err(dev, "failed to allocate buffer\n"); + return err; + } + + req_msg.pgid = fl->tgid; + req_msg.flags = req.flags; + req_msg.vaddr = req.vaddrin; + req_msg.num = sizeof(pages); + + args[0].ptr = (u64) (uintptr_t) &req_msg; + args[0].length = sizeof(req_msg); + + pages.addr = buf->phys; + pages.size = buf->size; + + args[1].ptr = (u64) (uintptr_t) &pages; + args[1].length = sizeof(pages); + + args[2].ptr = (u64) (uintptr_t) &rsp_msg; + args[2].length = sizeof(rsp_msg); + + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1); + err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, + &args[0]); + if (err) { + dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size); + goto err_invoke; + } + + /* update the buffer to be able to deallocate the memory on the DSP */ + buf->raddr = (uintptr_t) rsp_msg.vaddr; + + /* let the client know the address to use */ + req.vaddrout = rsp_msg.vaddr; + + spin_lock(&fl->lock); + list_add_tail(&buf->node, &fl->mmaps); + spin_unlock(&fl->lock); + + if (copy_to_user((void __user *)argp, &req, sizeof(req))) { + /* unmap the memory and release the buffer */ + req_unmap.vaddrout = buf->raddr; + req_unmap.size = buf->size; + fastrpc_req_munmap_impl(fl, &req_unmap); + return -EFAULT; + } + + dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n", + buf->raddr, buf->size); + + return 0; + +err_invoke: + fastrpc_buf_free(buf); + + return err; +} + +static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; + char __user *argp = (char __user *)arg; + int err; + + switch (cmd) { + case FASTRPC_IOCTL_INVOKE: + err = fastrpc_invoke(fl, argp); + break; + case FASTRPC_IOCTL_INIT_ATTACH: + err = fastrpc_init_attach(fl, AUDIO_PD); + break; + case FASTRPC_IOCTL_INIT_ATTACH_SNS: + err = fastrpc_init_attach(fl, SENSORS_PD); + break; + case FASTRPC_IOCTL_INIT_CREATE: + err = fastrpc_init_create_process(fl, argp); + break; + case FASTRPC_IOCTL_ALLOC_DMA_BUFF: + err = fastrpc_dmabuf_alloc(fl, argp); + break; + case FASTRPC_IOCTL_MMAP: + err = fastrpc_req_mmap(fl, argp); + break; + case FASTRPC_IOCTL_MUNMAP: + err = fastrpc_req_munmap(fl, argp); + break; + default: + err = -ENOTTY; + break; + } + + return err; +} + +static const struct file_operations fastrpc_fops = { + .open = fastrpc_device_open, + .release = fastrpc_device_release, + .unlocked_ioctl = fastrpc_device_ioctl, + .compat_ioctl = fastrpc_device_ioctl, +}; + +static 
int fastrpc_cb_probe(struct platform_device *pdev) +{ + struct fastrpc_channel_ctx *cctx; + struct fastrpc_session_ctx *sess; + struct device *dev = &pdev->dev; + int i, sessions = 0; + unsigned long flags; + int rc; + + cctx = dev_get_drvdata(dev->parent); + if (!cctx) + return -EINVAL; + + of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions); + + spin_lock_irqsave(&cctx->lock, flags); + if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) { + dev_err(&pdev->dev, "too many sessions\n"); + spin_unlock_irqrestore(&cctx->lock, flags); + return -ENOSPC; + } + sess = &cctx->session[cctx->sesscount++]; + sess->used = false; + sess->valid = true; + sess->dev = dev; + dev_set_drvdata(dev, sess); + + if (of_property_read_u32(dev->of_node, "reg", &sess->sid)) + dev_info(dev, "FastRPC Session ID not specified in DT\n"); + + if (sessions > 0) { + struct fastrpc_session_ctx *dup_sess; + + for (i = 1; i < sessions; i++) { + if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) + break; + dup_sess = &cctx->session[cctx->sesscount++]; + memcpy(dup_sess, sess, sizeof(*dup_sess)); + } + } + spin_unlock_irqrestore(&cctx->lock, flags); + rc = dma_set_mask(dev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(dev, "32-bit DMA enable failed\n"); + return rc; + } + + return 0; +} + +static int fastrpc_cb_remove(struct platform_device *pdev) +{ + struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent); + struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev); + unsigned long flags; + int i; + + spin_lock_irqsave(&cctx->lock, flags); + for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) { + if (cctx->session[i].sid == sess->sid) { + cctx->session[i].valid = false; + cctx->sesscount--; + } + } + spin_unlock_irqrestore(&cctx->lock, flags); + + return 0; +} + +static const struct of_device_id fastrpc_match_table[] = { + { .compatible = "qcom,fastrpc-compute-cb", }, + {} +}; + +static struct platform_driver fastrpc_cb_driver = { + .probe = fastrpc_cb_probe, + .remove = fastrpc_cb_remove, + .driver = { + .name = "qcom,fastrpc-cb", + .of_match_table = fastrpc_match_table, + .suppress_bind_attrs = true, + }, +}; + +static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) +{ + struct device *rdev = &rpdev->dev; + struct fastrpc_channel_ctx *data; + int i, err, domain_id = -1; + const char *domain; + + err = of_property_read_string(rdev->of_node, "label", &domain); + if (err) { + dev_info(rdev, "FastRPC Domain not specified in DT\n"); + return err; + } + + for (i = 0; i <= CDSP_DOMAIN_ID; i++) { + if (!strcmp(domains[i], domain)) { + domain_id = i; + break; + } + } + + if (domain_id < 0) { + dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id); + return -EINVAL; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->miscdev.minor = MISC_DYNAMIC_MINOR; + data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s", + domains[domain_id]); + data->miscdev.fops = &fastrpc_fops; + err = misc_register(&data->miscdev); + if (err) { + kfree(data); + return err; + } + + kref_init(&data->refcount); + + dev_set_drvdata(&rpdev->dev, data); + dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32)); + INIT_LIST_HEAD(&data->users); + spin_lock_init(&data->lock); + idr_init(&data->ctx_idr); + data->domain_id = domain_id; + data->rpdev = rpdev; + + return of_platform_populate(rdev->of_node, NULL, NULL, rdev); +} + +static void fastrpc_notify_users(struct fastrpc_user *user) +{ + struct fastrpc_invoke_ctx *ctx; + + spin_lock(&user->lock); + list_for_each_entry(ctx, &user->pending, node) { + 
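+		/*
+		 * The rpmsg channel is going away: fail each pending
+		 * invocation with -EPIPE and wake up its waiter.
+		 */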
ctx->retval = -EPIPE; + complete(&ctx->work); + } + spin_unlock(&user->lock); +} + +static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev) +{ + struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev); + struct fastrpc_user *user; + unsigned long flags; + + /* No invocations past this point */ + spin_lock_irqsave(&cctx->lock, flags); + cctx->rpdev = NULL; + list_for_each_entry(user, &cctx->users, user) + fastrpc_notify_users(user); + spin_unlock_irqrestore(&cctx->lock, flags); + + misc_deregister(&cctx->miscdev); + of_platform_depopulate(&rpdev->dev); + + fastrpc_channel_ctx_put(cctx); +} + +static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data, + int len, void *priv, u32 addr) +{ + struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev); + struct fastrpc_invoke_rsp *rsp = data; + struct fastrpc_invoke_ctx *ctx; + unsigned long flags; + unsigned long ctxid; + + if (len < sizeof(*rsp)) + return -EINVAL; + + ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4); + + spin_lock_irqsave(&cctx->lock, flags); + ctx = idr_find(&cctx->ctx_idr, ctxid); + spin_unlock_irqrestore(&cctx->lock, flags); + + if (!ctx) { + dev_err(&rpdev->dev, "No context ID matches response\n"); + return -ENOENT; + } + + ctx->retval = rsp->retval; + complete(&ctx->work); + + /* + * The DMA buffer associated with the context cannot be freed in + * interrupt context so schedule it through a worker thread to + * avoid a kernel BUG. + */ + schedule_work(&ctx->put_work); + + return 0; +} + +static const struct of_device_id fastrpc_rpmsg_of_match[] = { + { .compatible = "qcom,fastrpc" }, + { }, +}; +MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match); + +static struct rpmsg_driver fastrpc_driver = { + .probe = fastrpc_rpmsg_probe, + .remove = fastrpc_rpmsg_remove, + .callback = fastrpc_rpmsg_callback, + .drv = { + .name = "qcom,fastrpc", + .of_match_table = fastrpc_rpmsg_of_match, + }, +}; + +static int fastrpc_init(void) +{ + int ret; + + ret = platform_driver_register(&fastrpc_cb_driver); + if (ret < 0) { + pr_err("fastrpc: failed to register cb driver\n"); + return ret; + } + + ret = register_rpmsg_driver(&fastrpc_driver); + if (ret < 0) { + pr_err("fastrpc: failed to register rpmsg driver\n"); + platform_driver_unregister(&fastrpc_cb_driver); + return ret; + } + + return 0; +} +module_init(fastrpc_init); + +static void fastrpc_exit(void) +{ + platform_driver_unregister(&fastrpc_cb_driver); + unregister_rpmsg_driver(&fastrpc_driver); +} +module_exit(fastrpc_exit); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/genwqe/Kconfig b/drivers/misc/genwqe/Kconfig new file mode 100644 index 000000000..97f64bcf9 --- /dev/null +++ b/drivers/misc/genwqe/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# IBM Accelerator Family 'GenWQE' +# + +menuconfig GENWQE + tristate "GenWQE PCIe Accelerator" + depends on PCI && 64BIT + select CRC_ITU_T + help + Enables PCIe card driver for IBM GenWQE accelerators. + The user-space interface is described in + include/linux/genwqe/genwqe_card.h. 
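+
+# An illustrative .config fragment (not part of the upstream file):
+#   CONFIG_GENWQE=m
+#   CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=1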
+ +config GENWQE_PLATFORM_ERROR_RECOVERY + int "Use platform recovery procedures (0=off, 1=on)" + depends on GENWQE + default 1 if PPC64 + default 0 diff --git a/drivers/misc/genwqe/Makefile b/drivers/misc/genwqe/Makefile new file mode 100644 index 000000000..d9811ecbe --- /dev/null +++ b/drivers/misc/genwqe/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for GenWQE driver +# + +obj-$(CONFIG_GENWQE) := genwqe_card.o +genwqe_card-objs := card_base.o card_dev.o card_ddcb.o card_sysfs.o \ + card_debugfs.o card_utils.o diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c new file mode 100644 index 000000000..c9b886618 --- /dev/null +++ b/drivers/misc/genwqe/card_base.c @@ -0,0 +1,1416 @@ +// SPDX-License-Identifier: GPL-2.0-only +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@gmx.net> + * Author: Michael Ruettger <michael@ibmra.de> + */ + +/* + * Module initialization and PCIe setup. Card health monitoring and + * recovery functionality. Character device creation and deletion are + * controlled from here. + */ + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/err.h> +#include <linux/aer.h> +#include <linux/string.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/device.h> +#include <linux/log2.h> + +#include "card_base.h" +#include "card_ddcb.h" + +MODULE_AUTHOR("Frank Haverkamp <haver@linux.vnet.ibm.com>"); +MODULE_AUTHOR("Michael Ruettger <michael@ibmra.de>"); +MODULE_AUTHOR("Joerg-Stephan Vogt <jsvogt@de.ibm.com>"); +MODULE_AUTHOR("Michael Jung <mijung@gmx.net>"); + +MODULE_DESCRIPTION("GenWQE Card"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); + +static char genwqe_driver_name[] = GENWQE_DEVNAME; +static struct class *class_genwqe; +static struct dentry *debugfs_genwqe; +static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX]; + +/* PCI structure for identifying device by PCI vendor and device ID */ +static const struct pci_device_id genwqe_device_table[] = { + { .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_GENWQE, + .subvendor = PCI_SUBVENDOR_ID_IBM, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5, + .class = (PCI_CLASSCODE_GENWQE5 << 8), + .class_mask = ~0, + .driver_data = 0 }, + + /* Initial SR-IOV bring-up image */ + { .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_GENWQE, + .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV, + .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), + .class_mask = ~0, + .driver_data = 0 }, + + { .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */ + .device = 0x0000, /* VF Device ID */ + .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV, + .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), + .class_mask = ~0, + .driver_data = 0 }, + + /* Fixed up image */ + { .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_GENWQE, + .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5, + .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), + .class_mask = ~0, + .driver_data = 0 }, + + { .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */ + .device = 0x0000, /* VF Device ID */ + .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5, + .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 
8),
+ .class_mask = ~0,
+ .driver_data = 0 },
+
+ /* Even one more ... */
+ { .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_GENWQE,
+ .subvendor = PCI_SUBVENDOR_ID_IBM,
+ .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_NEW,
+ .class = (PCI_CLASSCODE_GENWQE5 << 8),
+ .class_mask = ~0,
+ .driver_data = 0 },
+
+ { 0, } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, genwqe_device_table);
+
+/**
+ * genwqe_dev_alloc() - Create and prepare a new card descriptor
+ *
+ * Return: Pointer to card descriptor, or ERR_PTR(err) on error
+ */
+static struct genwqe_dev *genwqe_dev_alloc(void)
+{
+ unsigned int i = 0, j;
+ struct genwqe_dev *cd;
+
+ for (i = 0; i < GENWQE_CARD_NO_MAX; i++) {
+ if (genwqe_devices[i] == NULL)
+ break;
+ }
+ if (i >= GENWQE_CARD_NO_MAX)
+ return ERR_PTR(-ENODEV);
+
+ cd = kzalloc(sizeof(struct genwqe_dev), GFP_KERNEL);
+ if (!cd)
+ return ERR_PTR(-ENOMEM);
+
+ cd->card_idx = i;
+ cd->class_genwqe = class_genwqe;
+ cd->debugfs_genwqe = debugfs_genwqe;
+
+ /*
+ * This comes from the kernel config option and can be overwritten via
+ * debugfs.
+ */
+ cd->use_platform_recovery = CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY;
+
+ init_waitqueue_head(&cd->queue_waitq);
+
+ spin_lock_init(&cd->file_lock);
+ INIT_LIST_HEAD(&cd->file_list);
+
+ cd->card_state = GENWQE_CARD_UNUSED;
+ spin_lock_init(&cd->print_lock);
+
+ cd->ddcb_software_timeout = GENWQE_DDCB_SOFTWARE_TIMEOUT;
+ cd->kill_timeout = GENWQE_KILL_TIMEOUT;
+
+ for (j = 0; j < GENWQE_MAX_VFS; j++)
+ cd->vf_jobtimeout_msec[j] = GENWQE_VF_JOBTIMEOUT_MSEC;
+
+ genwqe_devices[i] = cd;
+ return cd;
+}
+
+static void genwqe_dev_free(struct genwqe_dev *cd)
+{
+ if (!cd)
+ return;
+
+ genwqe_devices[cd->card_idx] = NULL;
+ kfree(cd);
+}
+
+/**
+ * genwqe_bus_reset() - Card recovery
+ * @cd: GenWQE device information
+ *
+ * pci_reset_function() will recover the device and ensure that the
+ * registers are accessible again when it completes with success. If
+ * not, the card will stay dead and its registers will remain
+ * inaccessible.
+ */
+static int genwqe_bus_reset(struct genwqe_dev *cd)
+{
+ int rc = 0;
+ struct pci_dev *pci_dev = cd->pci_dev;
+ void __iomem *mmio;
+
+ if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE)
+ return -EIO;
+
+ mmio = cd->mmio;
+ cd->mmio = NULL;
+ pci_iounmap(pci_dev, mmio);
+
+ pci_release_mem_regions(pci_dev);
+
+ /*
+ * Firmware/BIOS might change the memory mapping during bus reset.
+ * Settings like bus-mastering, ... are backed up and
+ * restored by pci_reset_function().
+ */
+ dev_dbg(&pci_dev->dev, "[%s] pci_reset function ...\n", __func__);
+ rc = pci_reset_function(pci_dev);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: failed reset func (rc %d)\n", __func__, rc);
+ return rc;
+ }
+ dev_dbg(&pci_dev->dev, "[%s] done with rc=%d\n", __func__, rc);
+
+ /*
+ * Here is the right spot to clear the register read
+ * failure. pci_bus_reset() does this job in real systems.
+ */
+ cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
+ GENWQE_INJECT_GFIR_FATAL |
+ GENWQE_INJECT_GFIR_INFO);
+
+ rc = pci_request_mem_regions(pci_dev, genwqe_driver_name);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: request bars failed (%d)\n", __func__, rc);
+ return -EIO;
+ }
+
+ cd->mmio = pci_iomap(pci_dev, 0, 0);
+ if (cd->mmio == NULL) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: mapping BAR0 failed\n", __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/*
+ * Hardware circumvention section. Certain bitstreams in our test-lab
+ * had different kinds of problems.
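+ * (The version checks in this section compare the bitstream version
+ * encoded in SLU_UNITCFG; the 0xFFFF0ull mask selects the version
+ * bits before comparing them against known-bad ranges.)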
+ * Here is where we adjust those
+ * bitstreams to function well with this version of our device driver.
+ *
+ * These circumventions are applied to the physical function only.
+ * The magic numbers below identify development/manufacturing
+ * versions of the bitstream used on the card.
+ *
+ * Turn off error reporting for old/manufacturing images.
+ */
+
+bool genwqe_need_err_masking(struct genwqe_dev *cd)
+{
+ return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
+}
+
+static void genwqe_tweak_hardware(struct genwqe_dev *cd)
+{
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ /* Mask FIRs for development images */
+ if (((cd->slu_unitcfg & 0xFFFF0ull) >= 0x32000ull) &&
+ ((cd->slu_unitcfg & 0xFFFF0ull) <= 0x33250ull)) {
+ dev_warn(&pci_dev->dev,
+ "FIRs masked due to bitstream %016llx.%016llx\n",
+ cd->slu_unitcfg, cd->app_unitcfg);
+
+ __genwqe_writeq(cd, IO_APP_SEC_LEM_DEBUG_OVR,
+ 0xFFFFFFFFFFFFFFFFull);
+
+ __genwqe_writeq(cd, IO_APP_ERR_ACT_MASK,
+ 0x0000000000000000ull);
+ }
+}
+
+/**
+ * genwqe_recovery_on_fatal_gfir_required() - Version-dependent actions
+ * @cd: GenWQE device information
+ *
+ * Bitstreams older than 2013-02-17 have a bug where fatal GFIRs must
+ * be ignored. This is e.g. true for the bitstream we gave to the card
+ * manufacturer, but also for some old bitstreams we released to our
+ * test-lab.
+ */
+int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd)
+{
+ return (cd->slu_unitcfg & 0xFFFF0ull) >= 0x32170ull;
+}
+
+int genwqe_flash_readback_fails(struct genwqe_dev *cd)
+{
+ return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
+}
+
+/**
+ * genwqe_T_psec() - Calculate PF/VF timeout register content
+ * @cd: GenWQE device information
+ *
+ * Note: From a design perspective it turned out to be a bad idea to
+ * use codes here to specify the frequency/speed values. An old
+ * driver cannot understand new codes and is therefore always a
+ * problem. Better is to measure out the value or put the
+ * speed/frequency directly into a register which is always a valid
+ * value for old as well as for new software.
+ */
+/* T = 1/f */
+static int genwqe_T_psec(struct genwqe_dev *cd)
+{
+ u16 speed; /* 1/f -> 250, 200, 166, 175 */
+ static const int T[] = { 4000, 5000, 6000, 5714 };
+
+ speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
+ if (speed >= ARRAY_SIZE(T))
+ return -1; /* illegal value */
+
+ return T[speed];
+}
+
+/**
+ * genwqe_setup_pf_jtimer() - Setup PF hardware timeouts for DDCB execution
+ * @cd: GenWQE device information
+ *
+ * Do this _after_ card_reset() is called. Otherwise the values will
+ * vanish. The settings need to be done when the queues are inactive.
+ *
+ * The max. timeout value is 2^(10+x) * T (6ns for 166MHz) * 15/16.
+ * The min. timeout value is 2^(10+x) * T (6ns for 166MHz) * 14/16.
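+ *
+ * Worked example (editor's sketch, assuming T in picoseconds as
+ * returned by genwqe_T_psec()): with T = 6000 ps (166 MHz) and
+ * GENWQE_PF_JOBTIMEOUT_MSEC = 8000, the code below computes
+ *   x = ilog2(8000 * 16000000000 / (6000 * 15)) - 10
+ *     = ilog2(1422222222) - 10 = 30 - 10 = 20,
+ * i.e. roughly 2^(10+20) * 6 ns * 15/16 ~= 6.0 s. ilog2() rounds
+ * down, so the programmed timeout never exceeds the requested one.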
+ */ +static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd) +{ + u32 T = genwqe_T_psec(cd); + u64 x; + + if (GENWQE_PF_JOBTIMEOUT_MSEC == 0) + return false; + + /* PF: large value needed, flash update 2sec per block */ + x = ilog2(GENWQE_PF_JOBTIMEOUT_MSEC * + 16000000000uL/(T * 15)) - 10; + + genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, + 0xff00 | (x & 0xff), 0); + return true; +} + +/** + * genwqe_setup_vf_jtimer() - Setup VF hardware timeouts for DDCB execution + * @cd: GenWQE device information + */ +static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd) +{ + struct pci_dev *pci_dev = cd->pci_dev; + unsigned int vf; + u32 T = genwqe_T_psec(cd); + u64 x; + int totalvfs; + + totalvfs = pci_sriov_get_totalvfs(pci_dev); + if (totalvfs <= 0) + return false; + + for (vf = 0; vf < totalvfs; vf++) { + + if (cd->vf_jobtimeout_msec[vf] == 0) + continue; + + x = ilog2(cd->vf_jobtimeout_msec[vf] * + 16000000000uL/(T * 15)) - 10; + + genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, + 0xff00 | (x & 0xff), vf + 1); + } + return true; +} + +static int genwqe_ffdc_buffs_alloc(struct genwqe_dev *cd) +{ + unsigned int type, e = 0; + + for (type = 0; type < GENWQE_DBG_UNITS; type++) { + switch (type) { + case GENWQE_DBG_UNIT0: + e = genwqe_ffdc_buff_size(cd, 0); + break; + case GENWQE_DBG_UNIT1: + e = genwqe_ffdc_buff_size(cd, 1); + break; + case GENWQE_DBG_UNIT2: + e = genwqe_ffdc_buff_size(cd, 2); + break; + case GENWQE_DBG_REGS: + e = GENWQE_FFDC_REGS; + break; + } + + /* currently support only the debug units mentioned here */ + cd->ffdc[type].entries = e; + cd->ffdc[type].regs = + kmalloc_array(e, sizeof(struct genwqe_reg), + GFP_KERNEL); + /* + * regs == NULL is ok, the using code treats this as no regs, + * Printing warning is ok in this case. + */ + } + return 0; +} + +static void genwqe_ffdc_buffs_free(struct genwqe_dev *cd) +{ + unsigned int type; + + for (type = 0; type < GENWQE_DBG_UNITS; type++) { + kfree(cd->ffdc[type].regs); + cd->ffdc[type].regs = NULL; + } +} + +static int genwqe_read_ids(struct genwqe_dev *cd) +{ + int err = 0; + int slu_id; + struct pci_dev *pci_dev = cd->pci_dev; + + cd->slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG); + if (cd->slu_unitcfg == IO_ILLEGAL_VALUE) { + dev_err(&pci_dev->dev, + "err: SLUID=%016llx\n", cd->slu_unitcfg); + err = -EIO; + goto out_err; + } + + slu_id = genwqe_get_slu_id(cd); + if (slu_id < GENWQE_SLU_ARCH_REQ || slu_id == 0xff) { + dev_err(&pci_dev->dev, + "err: incompatible SLU Architecture %u\n", slu_id); + err = -ENOENT; + goto out_err; + } + + cd->app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG); + if (cd->app_unitcfg == IO_ILLEGAL_VALUE) { + dev_err(&pci_dev->dev, + "err: APPID=%016llx\n", cd->app_unitcfg); + err = -EIO; + goto out_err; + } + genwqe_read_app_id(cd, cd->app_name, sizeof(cd->app_name)); + + /* + * Is access to all registers possible? If we are a VF the + * answer is obvious. If we run fully virtualized, we need to + * check if we can access all registers. If we do not have + * full access we will cause an UR and some informational FIRs + * in the PF, but that should not harm. + */ + if (pci_dev->is_virtfn) + cd->is_privileged = 0; + else + cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM) + != IO_ILLEGAL_VALUE); + + out_err: + return err; +} + +static int genwqe_start(struct genwqe_dev *cd) +{ + int err; + struct pci_dev *pci_dev = cd->pci_dev; + + err = genwqe_read_ids(cd); + if (err) + return err; + + if (genwqe_is_privileged(cd)) { + /* do this after the tweaks. 
alloc fail is acceptable */ + genwqe_ffdc_buffs_alloc(cd); + genwqe_stop_traps(cd); + + /* Collect registers e.g. FIRs, UNITIDs, traces ... */ + genwqe_read_ffdc_regs(cd, cd->ffdc[GENWQE_DBG_REGS].regs, + cd->ffdc[GENWQE_DBG_REGS].entries, 0); + + genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT0, + cd->ffdc[GENWQE_DBG_UNIT0].regs, + cd->ffdc[GENWQE_DBG_UNIT0].entries); + + genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT1, + cd->ffdc[GENWQE_DBG_UNIT1].regs, + cd->ffdc[GENWQE_DBG_UNIT1].entries); + + genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT2, + cd->ffdc[GENWQE_DBG_UNIT2].regs, + cd->ffdc[GENWQE_DBG_UNIT2].entries); + + genwqe_start_traps(cd); + + if (cd->card_state == GENWQE_CARD_FATAL_ERROR) { + dev_warn(&pci_dev->dev, + "[%s] chip reload/recovery!\n", __func__); + + /* + * Stealth Mode: Reload chip on either hot + * reset or PERST. + */ + cd->softreset = 0x7Cull; + __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, + cd->softreset); + + err = genwqe_bus_reset(cd); + if (err != 0) { + dev_err(&pci_dev->dev, + "[%s] err: bus reset failed!\n", + __func__); + goto out; + } + + /* + * Re-read the IDs because + * it could happen that the bitstream load + * failed! + */ + err = genwqe_read_ids(cd); + if (err) + goto out; + } + } + + err = genwqe_setup_service_layer(cd); /* does a reset to the card */ + if (err != 0) { + dev_err(&pci_dev->dev, + "[%s] err: could not setup servicelayer!\n", __func__); + err = -ENODEV; + goto out; + } + + if (genwqe_is_privileged(cd)) { /* code is running _after_ reset */ + genwqe_tweak_hardware(cd); + + genwqe_setup_pf_jtimer(cd); + genwqe_setup_vf_jtimer(cd); + } + + err = genwqe_device_create(cd); + if (err < 0) { + dev_err(&pci_dev->dev, + "err: chdev init failed! (err=%d)\n", err); + goto out_release_service_layer; + } + return 0; + + out_release_service_layer: + genwqe_release_service_layer(cd); + out: + if (genwqe_is_privileged(cd)) + genwqe_ffdc_buffs_free(cd); + return -EIO; +} + +/** + * genwqe_stop() - Stop card operation + * @cd: GenWQE device information + * + * Recovery notes: + * As long as genwqe_thread runs we might access registers during + * error data capture. Same is with the genwqe_health_thread. + * When genwqe_bus_reset() fails this function might called two times: + * first by the genwqe_health_thread() and later by genwqe_remove() to + * unbind the device. We must be able to survive that. + * + * This function must be robust enough to be called twice. + */ +static int genwqe_stop(struct genwqe_dev *cd) +{ + genwqe_finish_queue(cd); /* no register access */ + genwqe_device_remove(cd); /* device removed, procs killed */ + genwqe_release_service_layer(cd); /* here genwqe_thread is stopped */ + + if (genwqe_is_privileged(cd)) { + pci_disable_sriov(cd->pci_dev); /* access pci config space */ + genwqe_ffdc_buffs_free(cd); + } + + return 0; +} + +/** + * genwqe_recover_card() - Try to recover the card if it is possible + * @cd: GenWQE device information + * @fatal_err: Indicate whether to attempt soft reset + * + * If fatal_err is set no register access is possible anymore. It is + * likely that genwqe_start fails in that situation. Proper error + * handling is required in this case. + * + * genwqe_bus_reset() will cause the pci code to call genwqe_remove() + * and later genwqe_probe() for all virtual functions. + */ +static int genwqe_recover_card(struct genwqe_dev *cd, int fatal_err) +{ + int rc; + struct pci_dev *pci_dev = cd->pci_dev; + + genwqe_stop(cd); + + /* + * Make sure chip is not reloaded to maintain FFDC. 
Write SLU
+ * Reset Register, CPLDReset field to 0.
+ */
+ if (!fatal_err) {
+ cd->softreset = 0x70ull;
+ __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset);
+ }
+
+ rc = genwqe_bus_reset(cd);
+ if (rc != 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: card recovery impossible!\n", __func__);
+ return rc;
+ }
+
+ rc = genwqe_start(cd);
+ if (rc < 0) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: failed to launch device!\n", __func__);
+ return rc;
+ }
+ return 0;
+}
+
+static int genwqe_health_check_cond(struct genwqe_dev *cd, u64 *gfir)
+{
+ *gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ return (*gfir & GFIR_ERR_TRIGGER) &&
+ genwqe_recovery_on_fatal_gfir_required(cd);
+}
+
+/**
+ * genwqe_fir_checking() - Check the fault isolation registers of the card
+ * @cd: GenWQE device information
+ *
+ * If this code works ok, it can be tried out with help of the genwqe_poke tool:
+ * sudo ./tools/genwqe_poke 0x8 0xfefefefefef
+ *
+ * Now the relevant FIRs/sFIRs should be printed out and the driver should
+ * invoke recovery (devices are removed and re-added).
+ */
+static u64 genwqe_fir_checking(struct genwqe_dev *cd)
+{
+ int j, iterations = 0;
+ u64 mask, fir, fec, uid, gfir, gfir_masked, sfir, sfec;
+ u32 fir_addr, fir_clr_addr, fec_addr, sfir_addr, sfec_addr;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ healthMonitor:
+ iterations++;
+ if (iterations > 16) {
+ dev_err(&pci_dev->dev, "* exit looping after %d times\n",
+ iterations);
+ goto fatal_error;
+ }
+
+ gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ if (gfir != 0x0)
+ dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n",
+ IO_SLC_CFGREG_GFIR, gfir);
+ if (gfir == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ /*
+ * Bail out early when no GFIR bit is on; this prevents a
+ * continuous printout, e.g. for the following bug:
+ * FIR set without a 2ndary FIR/FIR cannot be cleared.
+ * Comment out the following 'if' to get the prints:
+ */
+ if (gfir == 0)
+ return 0;
+
+ gfir_masked = gfir & GFIR_ERR_TRIGGER; /* fatal errors */
+
+ for (uid = 0; uid < GENWQE_MAX_UNITS; uid++) { /* 0..2 in zEDC */
+
+ /* read the primary FIR (pfir) */
+ fir_addr = (uid << 24) + 0x08;
+ fir = __genwqe_readq(cd, fir_addr);
+ if (fir == 0x0)
+ continue; /* no error in this unit */
+
+ dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fir_addr, fir);
+ if (fir == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ /* read primary FEC */
+ fec_addr = (uid << 24) + 0x18;
+ fec = __genwqe_readq(cd, fec_addr);
+
+ dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fec_addr, fec);
+ if (fec == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ for (j = 0, mask = 1ULL; j < 64; j++, mask <<= 1) {
+
+ /* secondary fir empty, skip it */
+ if ((fir & mask) == 0x0)
+ continue;
+
+ sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
+ sfir = __genwqe_readq(cd, sfir_addr);
+
+ if (sfir == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+ dev_err(&pci_dev->dev,
+ "* 0x%08x 0x%016llx\n", sfir_addr, sfir);
+
+ sfec_addr = (uid << 24) + 0x300 + 0x08 * j;
+ sfec = __genwqe_readq(cd, sfec_addr);
+
+ if (sfec == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+ dev_err(&pci_dev->dev,
+ "* 0x%08x 0x%016llx\n", sfec_addr, sfec);
+
+ gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+ if (gfir == IO_ILLEGAL_VALUE)
+ goto fatal_error;
+
+ /* gfir turned on during routine! get out and
+ start over.
*/ + if ((gfir_masked == 0x0) && + (gfir & GFIR_ERR_TRIGGER)) { + goto healthMonitor; + } + + /* do not clear if we entered with a fatal gfir */ + if (gfir_masked == 0x0) { + + /* NEW clear by mask the logged bits */ + sfir_addr = (uid << 24) + 0x100 + 0x08 * j; + __genwqe_writeq(cd, sfir_addr, sfir); + + dev_dbg(&pci_dev->dev, + "[HM] Clearing 2ndary FIR 0x%08x with 0x%016llx\n", + sfir_addr, sfir); + + /* + * note, these cannot be error-Firs + * since gfir_masked is 0 after sfir + * was read. Also, it is safe to do + * this write if sfir=0. Still need to + * clear the primary. This just means + * there is no secondary FIR. + */ + + /* clear by mask the logged bit. */ + fir_clr_addr = (uid << 24) + 0x10; + __genwqe_writeq(cd, fir_clr_addr, mask); + + dev_dbg(&pci_dev->dev, + "[HM] Clearing primary FIR 0x%08x with 0x%016llx\n", + fir_clr_addr, mask); + } + } + } + gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); + if (gfir == IO_ILLEGAL_VALUE) + goto fatal_error; + + if ((gfir_masked == 0x0) && (gfir & GFIR_ERR_TRIGGER)) { + /* + * Check once more that it didn't go on after all the + * FIRS were cleared. + */ + dev_dbg(&pci_dev->dev, "ACK! Another FIR! Recursing %d!\n", + iterations); + goto healthMonitor; + } + return gfir_masked; + + fatal_error: + return IO_ILLEGAL_VALUE; +} + +/** + * genwqe_pci_fundamental_reset() - trigger a PCIe fundamental reset on the slot + * @pci_dev: PCI device information struct + * + * Note: pci_set_pcie_reset_state() is not implemented on all archs, so this + * reset method will not work in all cases. + * + * Return: 0 on success or error code from pci_set_pcie_reset_state() + */ +static int genwqe_pci_fundamental_reset(struct pci_dev *pci_dev) +{ + int rc; + + /* + * lock pci config space access from userspace, + * save state and issue PCIe fundamental reset + */ + pci_cfg_access_lock(pci_dev); + pci_save_state(pci_dev); + rc = pci_set_pcie_reset_state(pci_dev, pcie_warm_reset); + if (!rc) { + /* keep PCIe reset asserted for 250ms */ + msleep(250); + pci_set_pcie_reset_state(pci_dev, pcie_deassert_reset); + /* Wait for 2s to reload flash and train the link */ + msleep(2000); + } + pci_restore_state(pci_dev); + pci_cfg_access_unlock(pci_dev); + return rc; +} + + +static int genwqe_platform_recovery(struct genwqe_dev *cd) +{ + struct pci_dev *pci_dev = cd->pci_dev; + int rc; + + dev_info(&pci_dev->dev, + "[%s] resetting card for error recovery\n", __func__); + + /* Clear out error injection flags */ + cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE | + GENWQE_INJECT_GFIR_FATAL | + GENWQE_INJECT_GFIR_INFO); + + genwqe_stop(cd); + + /* Try recoverying the card with fundamental reset */ + rc = genwqe_pci_fundamental_reset(pci_dev); + if (!rc) { + rc = genwqe_start(cd); + if (!rc) + dev_info(&pci_dev->dev, + "[%s] card recovered\n", __func__); + else + dev_err(&pci_dev->dev, + "[%s] err: cannot start card services! (err=%d)\n", + __func__, rc); + } else { + dev_err(&pci_dev->dev, + "[%s] card reset failed\n", __func__); + } + + return rc; +} + +/** + * genwqe_reload_bistream() - reload card bitstream + * @cd: GenWQE device information + * + * Set the appropriate register and call fundamental reset to reaload the card + * bitstream. 
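+ * (Editor's summary of the code below:) the sequence is to stop the
+ * card services, program IO_SLC_CFGREG_SOFTRESET so the CPLD
+ * reprograms from the 'next_bitstream' partition on the following
+ * PCIe hot/fundamental reset, trigger that reset, and then restart
+ * the card services.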
+ * + * Return: 0 on success, error code otherwise + */ +static int genwqe_reload_bistream(struct genwqe_dev *cd) +{ + struct pci_dev *pci_dev = cd->pci_dev; + int rc; + + dev_info(&pci_dev->dev, + "[%s] resetting card for bitstream reload\n", + __func__); + + genwqe_stop(cd); + + /* + * Cause a CPLD reprogram with the 'next_bitstream' + * partition on PCIe hot or fundamental reset + */ + __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, + (cd->softreset & 0xcull) | 0x70ull); + + rc = genwqe_pci_fundamental_reset(pci_dev); + if (rc) { + /* + * A fundamental reset failure can be caused + * by lack of support on the arch, so we just + * log the error and try to start the card + * again. + */ + dev_err(&pci_dev->dev, + "[%s] err: failed to reset card for bitstream reload\n", + __func__); + } + + rc = genwqe_start(cd); + if (rc) { + dev_err(&pci_dev->dev, + "[%s] err: cannot start card services! (err=%d)\n", + __func__, rc); + return rc; + } + dev_info(&pci_dev->dev, + "[%s] card reloaded\n", __func__); + return 0; +} + + +/** + * genwqe_health_thread() - Health checking thread + * @data: GenWQE device information + * + * This thread is only started for the PF of the card. + * + * This thread monitors the health of the card. A critical situation + * is when we read registers which contain -1 (IO_ILLEGAL_VALUE). In + * this case we need to be recovered from outside. Writing to + * registers will very likely not work either. + * + * This thread must only exit if kthread_should_stop() becomes true. + * + * Condition for the health-thread to trigger: + * a) when a kthread_stop() request comes in or + * b) a critical GFIR occured + * + * Informational GFIRs are checked and potentially printed in + * GENWQE_HEALTH_CHECK_INTERVAL seconds. + */ +static int genwqe_health_thread(void *data) +{ + int rc, should_stop = 0; + struct genwqe_dev *cd = data; + struct pci_dev *pci_dev = cd->pci_dev; + u64 gfir, gfir_masked, slu_unitcfg, app_unitcfg; + + health_thread_begin: + while (!kthread_should_stop()) { + rc = wait_event_interruptible_timeout(cd->health_waitq, + (genwqe_health_check_cond(cd, &gfir) || + (should_stop = kthread_should_stop())), + GENWQE_HEALTH_CHECK_INTERVAL * HZ); + + if (should_stop) + break; + + if (gfir == IO_ILLEGAL_VALUE) { + dev_err(&pci_dev->dev, + "[%s] GFIR=%016llx\n", __func__, gfir); + goto fatal_error; + } + + slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG); + if (slu_unitcfg == IO_ILLEGAL_VALUE) { + dev_err(&pci_dev->dev, + "[%s] SLU_UNITCFG=%016llx\n", + __func__, slu_unitcfg); + goto fatal_error; + } + + app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG); + if (app_unitcfg == IO_ILLEGAL_VALUE) { + dev_err(&pci_dev->dev, + "[%s] APP_UNITCFG=%016llx\n", + __func__, app_unitcfg); + goto fatal_error; + } + + gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); + if (gfir == IO_ILLEGAL_VALUE) { + dev_err(&pci_dev->dev, + "[%s] %s: GFIR=%016llx\n", __func__, + (gfir & GFIR_ERR_TRIGGER) ? "err" : "info", + gfir); + goto fatal_error; + } + + gfir_masked = genwqe_fir_checking(cd); + if (gfir_masked == IO_ILLEGAL_VALUE) + goto fatal_error; + + /* + * GFIR ErrorTrigger bits set => reset the card! + * Never do this for old/manufacturing images! + */ + if ((gfir_masked) && !cd->skip_recovery && + genwqe_recovery_on_fatal_gfir_required(cd)) { + + cd->card_state = GENWQE_CARD_FATAL_ERROR; + + rc = genwqe_recover_card(cd, 0); + if (rc < 0) { + /* FIXME Card is unusable and needs unbind! 
*/ + goto fatal_error; + } + } + + if (cd->card_state == GENWQE_CARD_RELOAD_BITSTREAM) { + /* Userspace requested card bitstream reload */ + rc = genwqe_reload_bistream(cd); + if (rc) + goto fatal_error; + } + + cd->last_gfir = gfir; + cond_resched(); + } + + return 0; + + fatal_error: + if (cd->use_platform_recovery) { + /* + * Since we use raw accessors, EEH errors won't be detected + * by the platform until we do a non-raw MMIO or config space + * read + */ + readq(cd->mmio + IO_SLC_CFGREG_GFIR); + + /* We do nothing if the card is going over PCI recovery */ + if (pci_channel_offline(pci_dev)) + return -EIO; + + /* + * If it's supported by the platform, we try a fundamental reset + * to recover from a fatal error. Otherwise, we continue to wait + * for an external recovery procedure to take care of it. + */ + rc = genwqe_platform_recovery(cd); + if (!rc) + goto health_thread_begin; + } + + dev_err(&pci_dev->dev, + "[%s] card unusable. Please trigger unbind!\n", __func__); + + /* Bring down logical devices to inform user space via udev remove. */ + cd->card_state = GENWQE_CARD_FATAL_ERROR; + genwqe_stop(cd); + + /* genwqe_bus_reset failed(). Now wait for genwqe_remove(). */ + while (!kthread_should_stop()) + cond_resched(); + + return -EIO; +} + +static int genwqe_health_check_start(struct genwqe_dev *cd) +{ + int rc; + + if (GENWQE_HEALTH_CHECK_INTERVAL <= 0) + return 0; /* valid for disabling the service */ + + /* moved before request_irq() */ + /* init_waitqueue_head(&cd->health_waitq); */ + + cd->health_thread = kthread_run(genwqe_health_thread, cd, + GENWQE_DEVNAME "%d_health", + cd->card_idx); + if (IS_ERR(cd->health_thread)) { + rc = PTR_ERR(cd->health_thread); + cd->health_thread = NULL; + return rc; + } + return 0; +} + +static int genwqe_health_thread_running(struct genwqe_dev *cd) +{ + return cd->health_thread != NULL; +} + +static int genwqe_health_check_stop(struct genwqe_dev *cd) +{ + if (!genwqe_health_thread_running(cd)) + return -EIO; + + kthread_stop(cd->health_thread); + cd->health_thread = NULL; + return 0; +} + +/** + * genwqe_pci_setup() - Allocate PCIe related resources for our card + * @cd: GenWQE device information + */ +static int genwqe_pci_setup(struct genwqe_dev *cd) +{ + int err; + struct pci_dev *pci_dev = cd->pci_dev; + + err = pci_enable_device_mem(pci_dev); + if (err) { + dev_err(&pci_dev->dev, + "err: failed to enable pci memory (err=%d)\n", err); + goto err_out; + } + + /* Reserve PCI I/O and memory resources */ + err = pci_request_mem_regions(pci_dev, genwqe_driver_name); + if (err) { + dev_err(&pci_dev->dev, + "[%s] err: request bars failed (%d)\n", __func__, err); + err = -EIO; + goto err_disable_device; + } + + /* check for 64-bit DMA address supported (DAC) */ + if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) { + err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pci_dev->dev, + "err: DMA64 consistent mask error\n"); + err = -EIO; + goto out_release_resources; + } + /* check for 32-bit DMA address supported (SAC) */ + } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { + err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pci_dev->dev, + "err: DMA32 consistent mask error\n"); + err = -EIO; + goto out_release_resources; + } + } else { + dev_err(&pci_dev->dev, + "err: neither DMA32 nor DMA64 supported\n"); + err = -EIO; + goto out_release_resources; + } + + pci_set_master(pci_dev); + pci_enable_pcie_error_reporting(pci_dev); + + /* EEH recovery requires PCIe fundamental 
reset */ + pci_dev->needs_freset = 1; + + /* request complete BAR-0 space (length = 0) */ + cd->mmio_len = pci_resource_len(pci_dev, 0); + cd->mmio = pci_iomap(pci_dev, 0, 0); + if (cd->mmio == NULL) { + dev_err(&pci_dev->dev, + "[%s] err: mapping BAR0 failed\n", __func__); + err = -ENOMEM; + goto out_release_resources; + } + + cd->num_vfs = pci_sriov_get_totalvfs(pci_dev); + if (cd->num_vfs < 0) + cd->num_vfs = 0; + + err = genwqe_read_ids(cd); + if (err) + goto out_iounmap; + + return 0; + + out_iounmap: + pci_iounmap(pci_dev, cd->mmio); + out_release_resources: + pci_release_mem_regions(pci_dev); + err_disable_device: + pci_disable_device(pci_dev); + err_out: + return err; +} + +/** + * genwqe_pci_remove() - Free PCIe related resources for our card + * @cd: GenWQE device information + */ +static void genwqe_pci_remove(struct genwqe_dev *cd) +{ + struct pci_dev *pci_dev = cd->pci_dev; + + if (cd->mmio) + pci_iounmap(pci_dev, cd->mmio); + + pci_release_mem_regions(pci_dev); + pci_disable_device(pci_dev); +} + +/** + * genwqe_probe() - Device initialization + * @pci_dev: PCI device information struct + * @id: PCI device ID + * + * Callable for multiple cards. This function is called on bind. + * + * Return: 0 if succeeded, < 0 when failed + */ +static int genwqe_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + int err; + struct genwqe_dev *cd; + + genwqe_init_crc32(); + + cd = genwqe_dev_alloc(); + if (IS_ERR(cd)) { + dev_err(&pci_dev->dev, "err: could not alloc mem (err=%d)!\n", + (int)PTR_ERR(cd)); + return PTR_ERR(cd); + } + + dev_set_drvdata(&pci_dev->dev, cd); + cd->pci_dev = pci_dev; + + err = genwqe_pci_setup(cd); + if (err < 0) { + dev_err(&pci_dev->dev, + "err: problems with PCI setup (err=%d)\n", err); + goto out_free_dev; + } + + err = genwqe_start(cd); + if (err < 0) { + dev_err(&pci_dev->dev, + "err: cannot start card services! (err=%d)\n", err); + goto out_pci_remove; + } + + if (genwqe_is_privileged(cd)) { + err = genwqe_health_check_start(cd); + if (err < 0) { + dev_err(&pci_dev->dev, + "err: cannot start health checking! (err=%d)\n", + err); + goto out_stop_services; + } + } + return 0; + + out_stop_services: + genwqe_stop(cd); + out_pci_remove: + genwqe_pci_remove(cd); + out_free_dev: + genwqe_dev_free(cd); + return err; +} + +/** + * genwqe_remove() - Called when device is removed (hot-plugable) + * @pci_dev: PCI device information struct + * + * Or when driver is unloaded respecitively when unbind is done. + */ +static void genwqe_remove(struct pci_dev *pci_dev) +{ + struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev); + + genwqe_health_check_stop(cd); + + /* + * genwqe_stop() must survive if it is called twice + * sequentially. This happens when the health thread calls it + * and fails on genwqe_bus_reset(). + */ + genwqe_stop(cd); + genwqe_pci_remove(cd); + genwqe_dev_free(cd); +} + +/** + * genwqe_err_error_detected() - Error detection callback + * @pci_dev: PCI device information struct + * @state: PCI channel state + * + * This callback is called by the PCI subsystem whenever a PCI bus + * error is detected. 
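+ * (Editor's summary of the handling below:) the health thread and
+ * card services are stopped first; on pci_channel_io_perm_failure
+ * PCI_ERS_RESULT_DISCONNECT is returned and the PCI core will call
+ * remove, otherwise the PCI resources are released and a slot reset
+ * is requested via PCI_ERS_RESULT_NEED_RESET.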
+ */ +static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev, + pci_channel_state_t state) +{ + struct genwqe_dev *cd; + + dev_err(&pci_dev->dev, "[%s] state=%d\n", __func__, state); + + cd = dev_get_drvdata(&pci_dev->dev); + if (cd == NULL) + return PCI_ERS_RESULT_DISCONNECT; + + /* Stop the card */ + genwqe_health_check_stop(cd); + genwqe_stop(cd); + + /* + * On permanent failure, the PCI code will call device remove + * after the return of this function. + * genwqe_stop() can be called twice. + */ + if (state == pci_channel_io_perm_failure) { + return PCI_ERS_RESULT_DISCONNECT; + } else { + genwqe_pci_remove(cd); + return PCI_ERS_RESULT_NEED_RESET; + } +} + +static pci_ers_result_t genwqe_err_slot_reset(struct pci_dev *pci_dev) +{ + int rc; + struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev); + + rc = genwqe_pci_setup(cd); + if (!rc) { + return PCI_ERS_RESULT_RECOVERED; + } else { + dev_err(&pci_dev->dev, + "err: problems with PCI setup (err=%d)\n", rc); + return PCI_ERS_RESULT_DISCONNECT; + } +} + +static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev) +{ + return PCI_ERS_RESULT_NONE; +} + +static void genwqe_err_resume(struct pci_dev *pci_dev) +{ + int rc; + struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev); + + rc = genwqe_start(cd); + if (!rc) { + rc = genwqe_health_check_start(cd); + if (rc) + dev_err(&pci_dev->dev, + "err: cannot start health checking! (err=%d)\n", + rc); + } else { + dev_err(&pci_dev->dev, + "err: cannot start card services! (err=%d)\n", rc); + } +} + +static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs) +{ + int rc; + struct genwqe_dev *cd = dev_get_drvdata(&dev->dev); + + if (numvfs > 0) { + genwqe_setup_vf_jtimer(cd); + rc = pci_enable_sriov(dev, numvfs); + if (rc < 0) + return rc; + return numvfs; + } + if (numvfs == 0) { + pci_disable_sriov(dev); + return 0; + } + return 0; +} + +static const struct pci_error_handlers genwqe_err_handler = { + .error_detected = genwqe_err_error_detected, + .mmio_enabled = genwqe_err_result_none, + .slot_reset = genwqe_err_slot_reset, + .resume = genwqe_err_resume, +}; + +static struct pci_driver genwqe_driver = { + .name = genwqe_driver_name, + .id_table = genwqe_device_table, + .probe = genwqe_probe, + .remove = genwqe_remove, + .sriov_configure = genwqe_sriov_configure, + .err_handler = &genwqe_err_handler, +}; + +/** + * genwqe_devnode() - Set default access mode for genwqe devices. + * @dev: Pointer to device (unused) + * @mode: Carrier to pass-back given mode (permissions) + * + * Default mode should be rw for everybody. Do not change default + * device name. 
+ */ +static char *genwqe_devnode(struct device *dev, umode_t *mode) +{ + if (mode) + *mode = 0666; + return NULL; +} + +/** + * genwqe_init_module() - Driver registration and initialization + */ +static int __init genwqe_init_module(void) +{ + int rc; + + class_genwqe = class_create(THIS_MODULE, GENWQE_DEVNAME); + if (IS_ERR(class_genwqe)) { + pr_err("[%s] create class failed\n", __func__); + return -ENOMEM; + } + + class_genwqe->devnode = genwqe_devnode; + + debugfs_genwqe = debugfs_create_dir(GENWQE_DEVNAME, NULL); + + rc = pci_register_driver(&genwqe_driver); + if (rc != 0) { + pr_err("[%s] pci_reg_driver (rc=%d)\n", __func__, rc); + goto err_out0; + } + + return rc; + + err_out0: + debugfs_remove(debugfs_genwqe); + class_destroy(class_genwqe); + return rc; +} + +/** + * genwqe_exit_module() - Driver exit + */ +static void __exit genwqe_exit_module(void) +{ + pci_unregister_driver(&genwqe_driver); + debugfs_remove(debugfs_genwqe); + class_destroy(class_genwqe); +} + +module_init(genwqe_init_module); +module_exit(genwqe_exit_module); diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h new file mode 100644 index 000000000..0e902977d --- /dev/null +++ b/drivers/misc/genwqe/card_base.h @@ -0,0 +1,577 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __CARD_BASE_H__ +#define __CARD_BASE_H__ + +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@gmx.net> + * Author: Michael Ruettger <michael@ibmra.de> + */ + +/* + * Interfaces within the GenWQE module. Defines genwqe_card and + * ddcb_queue as well as ddcb_requ. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/cdev.h> +#include <linux/stringify.h> +#include <linux/pci.h> +#include <linux/semaphore.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/debugfs.h> +#include <linux/slab.h> + +#include <linux/genwqe/genwqe_card.h> +#include "genwqe_driver.h" + +#define GENWQE_MSI_IRQS 4 /* Just one supported, no MSIx */ + +#define GENWQE_MAX_VFS 15 /* maximum 15 VFs are possible */ +#define GENWQE_MAX_FUNCS 16 /* 1 PF and 15 VFs */ +#define GENWQE_CARD_NO_MAX (16 * GENWQE_MAX_FUNCS) + +/* Compile parameters, some of them appear in debugfs for later adjustment */ +#define GENWQE_DDCB_MAX 32 /* DDCBs on the work-queue */ +#define GENWQE_POLLING_ENABLED 0 /* in case of irqs not working */ +#define GENWQE_DDCB_SOFTWARE_TIMEOUT 10 /* timeout per DDCB in seconds */ +#define GENWQE_KILL_TIMEOUT 8 /* time until process gets killed */ +#define GENWQE_VF_JOBTIMEOUT_MSEC 250 /* 250 msec */ +#define GENWQE_PF_JOBTIMEOUT_MSEC 8000 /* 8 sec should be ok */ +#define GENWQE_HEALTH_CHECK_INTERVAL 4 /* <= 0: disabled */ + +/* Sysfs attribute groups used when we create the genwqe device */ +extern const struct attribute_group *genwqe_attribute_groups[]; + +/* + * Config space for Genwqe5 A7: + * 00:[14 10 4b 04]40 00 10 00[00 00 00 12]00 00 00 00 + * 10: 0c 00 00 f0 07 3c 00 00 00 00 00 00 00 00 00 00 + * 20: 00 00 00 00 00 00 00 00 00 00 00 00[14 10 4b 04] + * 30: 00 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00 + */ +#define PCI_DEVICE_GENWQE 0x044b /* Genwqe DeviceID */ + +#define PCI_SUBSYSTEM_ID_GENWQE5 0x035f /* Genwqe A5 Subsystem-ID */ +#define PCI_SUBSYSTEM_ID_GENWQE5_NEW 0x044b /* Genwqe A5 Subsystem-ID */ +#define PCI_CLASSCODE_GENWQE5 0x1200 /* UNKNOWN */ + +#define PCI_SUBVENDOR_ID_IBM_SRIOV 0x0000 +#define 
PCI_SUBSYSTEM_ID_GENWQE5_SRIOV 0x0000 /* Genwqe A5 Subsystem-ID */
+#define PCI_CLASSCODE_GENWQE5_SRIOV 0x1200 /* UNKNOWN */
+
+#define GENWQE_SLU_ARCH_REQ 2 /* Required SLU architecture level */
+
+/**
+ * struct genwqe_reg - Genwqe data dump functionality
+ */
+struct genwqe_reg {
+ u32 addr;
+ u32 idx;
+ u64 val;
+};
+
+/*
+ * enum genwqe_dbg_type - Specify chip unit to dump/debug
+ */
+enum genwqe_dbg_type {
+ GENWQE_DBG_UNIT0 = 0, /* captured before prev errs cleared */
+ GENWQE_DBG_UNIT1 = 1,
+ GENWQE_DBG_UNIT2 = 2,
+ GENWQE_DBG_UNIT3 = 3,
+ GENWQE_DBG_UNIT4 = 4,
+ GENWQE_DBG_UNIT5 = 5,
+ GENWQE_DBG_UNIT6 = 6,
+ GENWQE_DBG_UNIT7 = 7,
+ GENWQE_DBG_REGS = 8,
+ GENWQE_DBG_DMA = 9,
+ GENWQE_DBG_UNITS = 10, /* max number of possible debug units */
+};
+
+/* Software error injection to simulate card failures */
+#define GENWQE_INJECT_HARDWARE_FAILURE 0x00000001 /* injects -1 reg reads */
+#define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002 /* pci_bus_reset fail */
+#define GENWQE_INJECT_GFIR_FATAL 0x00000004 /* GFIR = 0x0000ffff */
+#define GENWQE_INJECT_GFIR_INFO 0x00000008 /* GFIR = 0xffff0000 */
+
+/*
+ * Genwqe card description and management data.
+ *
+ * Error-handling in case of card malfunction
+ * ------------------------------------------
+ *
+ * If the card is detected to be defective the outside environment
+ * will cause the PCI layer to call deinit (the cleanup function for
+ * probe). This has the same effect as an unbind/bind operation
+ * on the card.
+ *
+ * The genwqe card driver implements a health checking thread which
+ * verifies the card function. If a problem is detected, the card's
+ * device is shut down and restarted, along with a reset of
+ * the card and queue.
+ *
+ * All functions accessing the card device return either -EIO or -ENODEV
+ * code to indicate the malfunction to the user. The user has to close
+ * the file descriptor and open a new one, once the card becomes
+ * available again.
+ *
+ * If the open file descriptor is setup to receive SIGIO, the signal is
+ * generated for the application which has to provide a handler to
+ * react on it. If the application does not close the open
+ * file descriptor a SIGKILL is sent to enforce freeing the card's
+ * resources.
+ *
+ * I did not find a different way to prevent kernel problems due to
+ * reference counters for the card's character devices getting out of
+ * sync. The character device deallocation does not block, even if
+ * there is still an open file descriptor pending. If this pending
+ * descriptor is closed, the data structures used by the character
+ * device are reinstantiated, which will lead to the reference counter
+ * dropping below the allowed values.
+ *
+ * Card recovery
+ * -------------
+ *
+ * To test the internal driver recovery the following command can be used:
+ * sudo sh -c 'echo 0xfffff > /sys/class/genwqe/genwqe0_card/err_inject'
+ */
+
+
+/**
+ * enum dma_mapping_type - Mapping type definition
+ *
+ * To avoid memcpying data around we use user memory directly. To do
+ * this we need to pin/swap-in the memory and request a DMA address
+ * for it.
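+ *
+ * A rough sketch of the usual flow (editor's summary based on the
+ * declarations in this header): genwqe_user_vmap() pins the user
+ * pages and fills the per-page dma_list, genwqe_setup_sgl() builds
+ * the hardware scatter-gather list from those addresses, and
+ * genwqe_user_vunmap() releases everything once the request retires.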
+ */ +enum dma_mapping_type { + GENWQE_MAPPING_RAW = 0, /* contignous memory buffer */ + GENWQE_MAPPING_SGL_TEMP, /* sglist dynamically used */ + GENWQE_MAPPING_SGL_PINNED, /* sglist used with pinning */ +}; + +/** + * struct dma_mapping - Information about memory mappings done by the driver + */ +struct dma_mapping { + enum dma_mapping_type type; + + void *u_vaddr; /* user-space vaddr/non-aligned */ + void *k_vaddr; /* kernel-space vaddr/non-aligned */ + dma_addr_t dma_addr; /* physical DMA address */ + + struct page **page_list; /* list of pages used by user buff */ + dma_addr_t *dma_list; /* list of dma addresses per page */ + unsigned int nr_pages; /* number of pages */ + unsigned int size; /* size in bytes */ + + struct list_head card_list; /* list of usr_maps for card */ + struct list_head pin_list; /* list of pinned memory for dev */ + int write; /* writable map? useful in unmapping */ +}; + +static inline void genwqe_mapping_init(struct dma_mapping *m, + enum dma_mapping_type type) +{ + memset(m, 0, sizeof(*m)); + m->type = type; + m->write = 1; /* Assume the maps we create are R/W */ +} + +/** + * struct ddcb_queue - DDCB queue data + * @ddcb_max: Number of DDCBs on the queue + * @ddcb_next: Next free DDCB + * @ddcb_act: Next DDCB supposed to finish + * @ddcb_seq: Sequence number of last DDCB + * @ddcbs_in_flight: Currently enqueued DDCBs + * @ddcbs_completed: Number of already completed DDCBs + * @return_on_busy: Number of -EBUSY returns on full queue + * @wait_on_busy: Number of waits on full queue + * @ddcb_daddr: DMA address of first DDCB in the queue + * @ddcb_vaddr: Kernel virtual address of first DDCB in the queue + * @ddcb_req: Associated requests (one per DDCB) + * @ddcb_waitqs: Associated wait queues (one per DDCB) + * @ddcb_lock: Lock to protect queuing operations + * @ddcb_waitq: Wait on next DDCB finishing + */ + +struct ddcb_queue { + int ddcb_max; /* amount of DDCBs */ + int ddcb_next; /* next available DDCB num */ + int ddcb_act; /* DDCB to be processed */ + u16 ddcb_seq; /* slc seq num */ + unsigned int ddcbs_in_flight; /* number of ddcbs in processing */ + unsigned int ddcbs_completed; + unsigned int ddcbs_max_in_flight; + unsigned int return_on_busy; /* how many times -EBUSY? */ + unsigned int wait_on_busy; + + dma_addr_t ddcb_daddr; /* DMA address */ + struct ddcb *ddcb_vaddr; /* kernel virtual addr for DDCBs */ + struct ddcb_requ **ddcb_req; /* ddcb processing parameter */ + wait_queue_head_t *ddcb_waitqs; /* waitqueue per ddcb */ + + spinlock_t ddcb_lock; /* exclusive access to queue */ + wait_queue_head_t busy_waitq; /* wait for ddcb processing */ + + /* registers or the respective queue to be used */ + u32 IO_QUEUE_CONFIG; + u32 IO_QUEUE_STATUS; + u32 IO_QUEUE_SEGMENT; + u32 IO_QUEUE_INITSQN; + u32 IO_QUEUE_WRAP; + u32 IO_QUEUE_OFFSET; + u32 IO_QUEUE_WTIME; + u32 IO_QUEUE_ERRCNTS; + u32 IO_QUEUE_LRW; +}; + +/* + * GFIR, SLU_UNITCFG, APP_UNITCFG + * 8 Units with FIR/FEC + 64 * 2ndary FIRS/FEC. 
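+ * That is 3 global registers plus, per unit, one FIR/FEC pair
+ * (2 regs) and 64 secondary FIR/FEC pairs (128 regs):
+ * 3 + 8 * (2 + 2 * 64) = 3 + 8 * 130 = 1043 entries.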
+ */ +#define GENWQE_FFDC_REGS (3 + (8 * (2 + 2 * 64))) + +struct genwqe_ffdc { + unsigned int entries; + struct genwqe_reg *regs; +}; + +/** + * struct genwqe_dev - GenWQE device information + * @card_state: Card operation state, see above + * @ffdc: First Failure Data Capture buffers for each unit + * @card_thread: Working thread to operate the DDCB queue + * @card_waitq: Wait queue used in card_thread + * @queue: DDCB queue + * @health_thread: Card monitoring thread (only for PFs) + * @health_waitq: Wait queue used in health_thread + * @pci_dev: Associated PCI device (function) + * @mmio: Base address of 64-bit register space + * @mmio_len: Length of register area + * @file_lock: Lock to protect access to file_list + * @file_list: List of all processes with open GenWQE file descriptors + * + * This struct contains all information needed to communicate with a + * GenWQE card. It is initialized when a GenWQE device is found and + * destroyed when it goes away. It holds data to maintain the queue as + * well as data needed to feed the user interfaces. + */ +struct genwqe_dev { + enum genwqe_card_state card_state; + spinlock_t print_lock; + + int card_idx; /* card index 0..CARD_NO_MAX-1 */ + u64 flags; /* general flags */ + + /* FFDC data gathering */ + struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS]; + + /* DDCB workqueue */ + struct task_struct *card_thread; + wait_queue_head_t queue_waitq; + struct ddcb_queue queue; /* genwqe DDCB queue */ + unsigned int irqs_processed; + + /* Card health checking thread */ + struct task_struct *health_thread; + wait_queue_head_t health_waitq; + + int use_platform_recovery; /* use platform recovery mechanisms */ + + /* char device */ + dev_t devnum_genwqe; /* major/minor num card */ + struct class *class_genwqe; /* reference to class object */ + struct device *dev; /* for device creation */ + struct cdev cdev_genwqe; /* char device for card */ + + struct dentry *debugfs_root; /* debugfs card root directory */ + struct dentry *debugfs_genwqe; /* debugfs driver root directory */ + + /* pci resources */ + struct pci_dev *pci_dev; /* PCI device */ + void __iomem *mmio; /* BAR-0 MMIO start */ + unsigned long mmio_len; + int num_vfs; + u32 vf_jobtimeout_msec[GENWQE_MAX_VFS]; + int is_privileged; /* access to all regs possible */ + + /* config regs which we need often */ + u64 slu_unitcfg; + u64 app_unitcfg; + u64 softreset; + u64 err_inject; + u64 last_gfir; + char app_name[5]; + + spinlock_t file_lock; /* lock for open files */ + struct list_head file_list; /* list of open files */ + + /* debugfs parameters */ + int ddcb_software_timeout; /* wait until DDCB times out */ + int skip_recovery; /* circumvention if recovery fails */ + int kill_timeout; /* wait after sending SIGKILL */ +}; + +/** + * enum genwqe_requ_state - State of a DDCB execution request + */ +enum genwqe_requ_state { + GENWQE_REQU_NEW = 0, + GENWQE_REQU_ENQUEUED = 1, + GENWQE_REQU_TAPPED = 2, + GENWQE_REQU_FINISHED = 3, + GENWQE_REQU_STATE_MAX, +}; + +/** + * struct genwqe_sgl - Scatter gather list describing user-space memory + * @sgl: scatter gather list needs to be 128 byte aligned + * @sgl_dma_addr: dma address of sgl + * @sgl_size: size of area used for sgl + * @user_addr: user-space address of memory area + * @user_size: size of user-space memory area + * @page: buffer for partial pages if needed + * @page_dma_addr: dma address partial pages + * @write: should we write it back to userspace? 
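+ * @fpage/@lpage: buffers backing a partially used first/last page
+ * (editor's reading of the fields below; the @page/@page_dma_addr
+ * entries above appear to refer to these fpage/lpage pairs)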
+ */ +struct genwqe_sgl { + dma_addr_t sgl_dma_addr; + struct sg_entry *sgl; + size_t sgl_size; /* size of sgl */ + + void __user *user_addr; /* user-space base-address */ + size_t user_size; /* size of memory area */ + + int write; + + unsigned long nr_pages; + unsigned long fpage_offs; + size_t fpage_size; + size_t lpage_size; + + void *fpage; + dma_addr_t fpage_dma_addr; + + void *lpage; + dma_addr_t lpage_dma_addr; +}; + +int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, + void __user *user_addr, size_t user_size, int write); + +int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, + dma_addr_t *dma_list); + +int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl); + +/** + * struct ddcb_requ - Kernel internal representation of the DDCB request + * @cmd: User space representation of the DDCB execution request + */ +struct ddcb_requ { + /* kernel specific content */ + enum genwqe_requ_state req_state; /* request status */ + int num; /* ddcb_no for this request */ + struct ddcb_queue *queue; /* associated queue */ + + struct dma_mapping dma_mappings[DDCB_FIXUPS]; + struct genwqe_sgl sgls[DDCB_FIXUPS]; + + /* kernel/user shared content */ + struct genwqe_ddcb_cmd cmd; /* ddcb_no for this request */ + struct genwqe_debug_data debug_data; +}; + +/** + * struct genwqe_file - Information for open GenWQE devices + */ +struct genwqe_file { + struct genwqe_dev *cd; + struct genwqe_driver *client; + struct file *filp; + + struct fasync_struct *async_queue; + struct pid *opener; + struct list_head list; /* entry in list of open files */ + + spinlock_t map_lock; /* lock for dma_mappings */ + struct list_head map_list; /* list of dma_mappings */ + + spinlock_t pin_lock; /* lock for pinned memory */ + struct list_head pin_list; /* list of pinned memory */ +}; + +int genwqe_setup_service_layer(struct genwqe_dev *cd); /* for PF only */ +int genwqe_finish_queue(struct genwqe_dev *cd); +int genwqe_release_service_layer(struct genwqe_dev *cd); + +/** + * genwqe_get_slu_id() - Read Service Layer Unit Id + * Return: 0x00: Development code + * 0x01: SLC1 (old) + * 0x02: SLC2 (sept2012) + * 0x03: SLC2 (feb2013, generic driver) + */ +static inline int genwqe_get_slu_id(struct genwqe_dev *cd) +{ + return (int)((cd->slu_unitcfg >> 32) & 0xff); +} + +int genwqe_ddcbs_in_flight(struct genwqe_dev *cd); + +u8 genwqe_card_type(struct genwqe_dev *cd); +int genwqe_card_reset(struct genwqe_dev *cd); +int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count); +void genwqe_reset_interrupt_capability(struct genwqe_dev *cd); + +int genwqe_device_create(struct genwqe_dev *cd); +int genwqe_device_remove(struct genwqe_dev *cd); + +/* debugfs */ +void genwqe_init_debugfs(struct genwqe_dev *cd); +void genqwe_exit_debugfs(struct genwqe_dev *cd); + +int genwqe_read_softreset(struct genwqe_dev *cd); + +/* Hardware Circumventions */ +int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd); +int genwqe_flash_readback_fails(struct genwqe_dev *cd); + +/** + * genwqe_write_vreg() - Write register in VF window + * @cd: genwqe device + * @reg: register address + * @val: value to write + * @func: 0: PF, 1: VF0, ..., 15: VF14 + */ +int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func); + +/** + * genwqe_read_vreg() - Read register in VF window + * @cd: genwqe device + * @reg: register address + * @func: 0: PF, 1: VF0, ..., 15: VF14 + * + * Return: content of the register + */ +u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func); + +/* 
FFDC Buffer Management */ +int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id); +int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id, + struct genwqe_reg *regs, unsigned int max_regs); +int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs, + unsigned int max_regs, int all); +int genwqe_ffdc_dump_dma(struct genwqe_dev *cd, + struct genwqe_reg *regs, unsigned int max_regs); + +int genwqe_init_debug_data(struct genwqe_dev *cd, + struct genwqe_debug_data *d); + +void genwqe_init_crc32(void); +int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len); + +/* Memory allocation/deallocation; dma address handling */ +int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, + void *uaddr, unsigned long size); + +int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m); + +static inline bool dma_mapping_used(struct dma_mapping *m) +{ + if (!m) + return false; + return m->size != 0; +} + +/** + * __genwqe_execute_ddcb() - Execute DDCB request with addr translation + * + * This function will do the address translation changes to the DDCBs + * according to the definitions required by the ATS field. It looks up + * the memory allocation buffer or does vmap/vunmap for the respective + * user-space buffers, inclusive page pinning and scatter gather list + * buildup and teardown. + */ +int __genwqe_execute_ddcb(struct genwqe_dev *cd, + struct genwqe_ddcb_cmd *cmd, unsigned int f_flags); + +/** + * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation + * + * This version will not do address translation or any modification of + * the DDCB data. It is used e.g. for the MoveFlash DDCB which is + * entirely prepared by the driver itself. That means the appropriate + * DMA addresses are already in the DDCB and do not need any + * modification. + */ +int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, + struct genwqe_ddcb_cmd *cmd, + unsigned int f_flags); +int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, + struct ddcb_requ *req, + unsigned int f_flags); + +int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); +int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); + +/* register access */ +int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val); +u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs); +int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val); +u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs); + +void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, + dma_addr_t *dma_handle); +void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, + void *vaddr, dma_addr_t dma_handle); + +/* Base clock frequency in MHz */ +int genwqe_base_clock_frequency(struct genwqe_dev *cd); + +/* Before FFDC is captured the traps should be stopped. */ +void genwqe_stop_traps(struct genwqe_dev *cd); +void genwqe_start_traps(struct genwqe_dev *cd); + +/* Hardware circumvention */ +bool genwqe_need_err_masking(struct genwqe_dev *cd); + +/** + * genwqe_is_privileged() - Determine operation mode for PCI function + * + * On Intel with SRIOV support we see: + * PF: is_physfn = 1 is_virtfn = 0 + * VF: is_physfn = 0 is_virtfn = 1 + * + * On Systems with no SRIOV support _and_ virtualized systems we get: + * is_physfn = 0 is_virtfn = 0 + * + * Other vendors have individual pci device ids to distinguish between + * virtual function drivers and physical function drivers. GenWQE + * unfortunately has just on pci device id for both, VFs and PF. 
+ * + * The following code is used to distinguish if the card is running in + * privileged mode, either as true PF or in a virtualized system with + * full register access e.g. currently on PowerPC. + * + * if (pci_dev->is_virtfn) + * cd->is_privileged = 0; + * else + * cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM) + * != IO_ILLEGAL_VALUE); + */ +static inline int genwqe_is_privileged(struct genwqe_dev *cd) +{ + return cd->is_privileged; +} + +#endif /* __CARD_BASE_H__ */ diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c new file mode 100644 index 000000000..0db4000de --- /dev/null +++ b/drivers/misc/genwqe/card_ddcb.c @@ -0,0 +1,1411 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@gmx.net> + * Author: Michael Ruettger <michael@ibmra.de> + */ + +/* + * Device Driver Control Block (DDCB) queue support. Definition of + * interrupt handlers for queue support as well as triggering the + * health monitor code in case of problems. The current hardware uses + * an MSI interrupt which is shared between error handling and + * functional code. + */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/pci.h> +#include <linux/string.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/crc-itu-t.h> + +#include "card_base.h" +#include "card_ddcb.h" + +/* + * N: next DDCB, this is where the next DDCB will be put. + * A: active DDCB, this is where the code will look for the next completion. + * x: DDCB is enqueued, we are waiting for its completion. 
+ + * Situation (1): Empty queue + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | | | | | | | | | + * +---+---+---+---+---+---+---+---+ + * A/N + * enqueued_ddcbs = A - N = 2 - 2 = 0 + * + * Situation (2): Wrapped, N > A + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | | | x | x | | | | | + * +---+---+---+---+---+---+---+---+ + * A N + * enqueued_ddcbs = N - A = 4 - 2 = 2 + * + * Situation (3): Queue wrapped, A > N + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | x | x | | | x | x | x | x | + * +---+---+---+---+---+---+---+---+ + * N A + * enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 2) = 6 + * + * Situation (4a): Queue full N > A + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | x | x | x | x | x | x | x | | + * +---+---+---+---+---+---+---+---+ + * A N + * + * enqueued_ddcbs = N - A = 7 - 0 = 7 + * + * Situation (4a): Queue full A > N + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | x | x | x | | x | x | x | x | + * +---+---+---+---+---+---+---+---+ + * N A + * enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 3) = 7 + */ + +static int queue_empty(struct ddcb_queue *queue) +{ + return queue->ddcb_next == queue->ddcb_act; +} + +static int queue_enqueued_ddcbs(struct ddcb_queue *queue) +{ + if (queue->ddcb_next >= queue->ddcb_act) + return queue->ddcb_next - queue->ddcb_act; + + return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next); +} + +static int queue_free_ddcbs(struct ddcb_queue *queue) +{ + int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1; + + if (WARN_ON_ONCE(free_ddcbs < 0)) { /* must never ever happen! */ + return 0; + } + return free_ddcbs; +} + +/* + * Use of the PRIV field in the DDCB for queue debugging: + * + * (1) Trying to get rid of a DDCB which saw a timeout: + * pddcb->priv[6] = 0xcc; # cleared + * + * (2) Append a DDCB via NEXT bit: + * pddcb->priv[7] = 0xaa; # appended + * + * (3) DDCB needed tapping: + * pddcb->priv[7] = 0xbb; # tapped + * + * (4) DDCB marked as correctly finished: + * pddcb->priv[6] = 0xff; # finished + */ + +static inline void ddcb_mark_tapped(struct ddcb *pddcb) +{ + pddcb->priv[7] = 0xbb; /* tapped */ +} + +static inline void ddcb_mark_appended(struct ddcb *pddcb) +{ + pddcb->priv[7] = 0xaa; /* appended */ +} + +static inline void ddcb_mark_cleared(struct ddcb *pddcb) +{ + pddcb->priv[6] = 0xcc; /* cleared */ +} + +static inline void ddcb_mark_finished(struct ddcb *pddcb) +{ + pddcb->priv[6] = 0xff; /* finished */ +} + +static inline void ddcb_mark_unused(struct ddcb *pddcb) +{ + pddcb->priv_64 = cpu_to_be64(0); /* not tapped */ +} + +/** + * genwqe_crc16() - Generate 16-bit crc as required for DDCBs + * @buff: pointer to data buffer + * @len: length of data for calculation + * @init: initial crc (0xffff at start) + * + * Polynomial = x^16 + x^12 + x^5 + 1 (0x1021) + * Example: 4 bytes 0x01 0x02 0x03 0x04 with init = 0xffff + * should result in a crc16 of 0x89c3 + * + * Return: crc16 checksum in big endian format ! 
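+ *
+ * A minimal self-check of the example above, using the kernel's
+ * crc_itu_t() which this helper wraps:
+ *
+ *   static const u8 buf[] = { 0x01, 0x02, 0x03, 0x04 };
+ *   u16 crc = crc_itu_t(0xffff, buf, sizeof(buf));
+ *   WARN_ON(crc != 0x89c3);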
+ */ +static inline u16 genwqe_crc16(const u8 *buff, size_t len, u16 init) +{ + return crc_itu_t(init, buff, len); +} + +static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue) +{ + int i; + struct ddcb *pddcb; + unsigned long flags; + struct pci_dev *pci_dev = cd->pci_dev; + + spin_lock_irqsave(&cd->print_lock, flags); + + dev_info(&pci_dev->dev, + "DDCB list for card #%d (ddcb_act=%d / ddcb_next=%d):\n", + cd->card_idx, queue->ddcb_act, queue->ddcb_next); + + pddcb = queue->ddcb_vaddr; + for (i = 0; i < queue->ddcb_max; i++) { + dev_err(&pci_dev->dev, + " %c %-3d: RETC=%03x SEQ=%04x HSI=%02X SHI=%02x PRIV=%06llx CMD=%03x\n", + i == queue->ddcb_act ? '>' : ' ', + i, + be16_to_cpu(pddcb->retc_16), + be16_to_cpu(pddcb->seqnum_16), + pddcb->hsi, + pddcb->shi, + be64_to_cpu(pddcb->priv_64), + pddcb->cmd); + pddcb++; + } + spin_unlock_irqrestore(&cd->print_lock, flags); +} + +struct genwqe_ddcb_cmd *ddcb_requ_alloc(void) +{ + struct ddcb_requ *req; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return NULL; + + return &req->cmd; +} + +void ddcb_requ_free(struct genwqe_ddcb_cmd *cmd) +{ + struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); + + kfree(req); +} + +static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req) +{ + return req->req_state; +} + +static inline void ddcb_requ_set_state(struct ddcb_requ *req, + enum genwqe_requ_state new_state) +{ + req->req_state = new_state; +} + +static inline int ddcb_requ_collect_debug_data(struct ddcb_requ *req) +{ + return req->cmd.ddata_addr != 0x0; +} + +/** + * ddcb_requ_finished() - Returns the hardware state of the associated DDCB + * @cd: pointer to genwqe device descriptor + * @req: DDCB work request + * + * Status of ddcb_requ mirrors this hardware state, but is copied in + * the ddcb_requ on interrupt/polling function. The lowlevel code + * should check the hardware state directly, the higher level code + * should check the copy. + * + * This function will also return true if the state of the queue is + * not GENWQE_CARD_USED. This enables us to purge all DDCBs in the + * shutdown case. + */ +static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req) +{ + return (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) || + (cd->card_state != GENWQE_CARD_USED); +} + +#define RET_DDCB_APPENDED 1 +#define RET_DDCB_TAPPED 2 +/** + * enqueue_ddcb() - Enqueue a DDCB + * @cd: pointer to genwqe device descriptor + * @queue: queue this operation should be done on + * @pddcb: pointer to ddcb structure + * @ddcb_no: pointer to ddcb number being tapped + * + * Start execution of DDCB by tapping or append to queue via NEXT + * bit. This is done by an atomic 'compare and swap' instruction and + * checking SHI and HSI of the previous DDCB. + * + * This function must only be called with ddcb_lock held. + * + * Return: 1 if new DDCB is appended to previous + * 2 if DDCB queue is tapped via register/simulation + */ +static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue, + struct ddcb *pddcb, int ddcb_no) +{ + unsigned int try; + int prev_no; + struct ddcb *prev_ddcb; + __be32 old, new, icrc_hsi_shi; + u64 num; + + /* + * For performance checks a Dispatch Timestamp can be put into + * DDCB It is supposed to use the SLU's free running counter, + * but this requires PCIe cycles. + */ + ddcb_mark_unused(pddcb); + + /* check previous DDCB if already fetched */ + prev_no = (ddcb_no == 0) ? 
queue->ddcb_max - 1 : ddcb_no - 1; + prev_ddcb = &queue->ddcb_vaddr[prev_no]; + + /* + * It might have happened that the HSI.FETCHED bit is + * set. Retry in this case. Therefore I expect maximum 2 times + * trying. + */ + ddcb_mark_appended(pddcb); + for (try = 0; try < 2; try++) { + old = prev_ddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */ + + /* try to append via NEXT bit if prev DDCB is not completed */ + if ((old & DDCB_COMPLETED_BE32) != 0x00000000) + break; + + new = (old | DDCB_NEXT_BE32); + + wmb(); /* need to ensure write ordering */ + icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new); + + if (icrc_hsi_shi == old) + return RET_DDCB_APPENDED; /* appended to queue */ + } + + /* Queue must be re-started by updating QUEUE_OFFSET */ + ddcb_mark_tapped(pddcb); + num = (u64)ddcb_no << 8; + + wmb(); /* need to ensure write ordering */ + __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */ + + return RET_DDCB_TAPPED; +} + +/** + * copy_ddcb_results() - Copy output state from real DDCB to request + * @req: pointer to requsted DDCB parameters + * @ddcb_no: pointer to ddcb number being tapped + * + * Copy DDCB ASV to request struct. There is no endian + * conversion made, since data structure in ASV is still + * unknown here. + * + * This is needed by: + * - genwqe_purge_ddcb() + * - genwqe_check_ddcb_queue() + */ +static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no) +{ + struct ddcb_queue *queue = req->queue; + struct ddcb *pddcb = &queue->ddcb_vaddr[req->num]; + + memcpy(&req->cmd.asv[0], &pddcb->asv[0], DDCB_ASV_LENGTH); + + /* copy status flags of the variant part */ + req->cmd.vcrc = be16_to_cpu(pddcb->vcrc_16); + req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64); + req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64); + + req->cmd.attn = be16_to_cpu(pddcb->attn_16); + req->cmd.progress = be32_to_cpu(pddcb->progress_32); + req->cmd.retc = be16_to_cpu(pddcb->retc_16); + + if (ddcb_requ_collect_debug_data(req)) { + int prev_no = (ddcb_no == 0) ? + queue->ddcb_max - 1 : ddcb_no - 1; + struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no]; + + memcpy(&req->debug_data.ddcb_finished, pddcb, + sizeof(req->debug_data.ddcb_finished)); + memcpy(&req->debug_data.ddcb_prev, prev_pddcb, + sizeof(req->debug_data.ddcb_prev)); + } +} + +/** + * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work equests. + * @cd: pointer to genwqe device descriptor + * @queue: queue to be checked + * + * Return: Number of DDCBs which were finished + */ +static int genwqe_check_ddcb_queue(struct genwqe_dev *cd, + struct ddcb_queue *queue) +{ + unsigned long flags; + int ddcbs_finished = 0; + struct pci_dev *pci_dev = cd->pci_dev; + + spin_lock_irqsave(&queue->ddcb_lock, flags); + + /* FIXME avoid soft locking CPU */ + while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) { + + struct ddcb *pddcb; + struct ddcb_requ *req; + u16 vcrc, vcrc_16, retc_16; + + pddcb = &queue->ddcb_vaddr[queue->ddcb_act]; + + if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == + 0x00000000) + goto go_home; /* not completed, continue waiting */ + + wmb(); /* Add sync to decouple prev. read operations */ + + /* Note: DDCB could be purged */ + req = queue->ddcb_req[queue->ddcb_act]; + if (req == NULL) { + /* this occurs if DDCB is purged, not an error */ + /* Move active DDCB further; Nothing to do anymore. */ + goto pick_next_one; + } + + /* + * HSI=0x44 (fetched and completed), but RETC is + * 0x101, or even worse 0x000. 
+		 *
+		 * If we see the queue in an inconsistent state, we read
+		 * the errcnts and the queue status to provide a trigger
+		 * for our PCIe analyzer to stop capturing.
+		 */
+		retc_16 = be16_to_cpu(pddcb->retc_16);
+		if ((pddcb->hsi == 0x44) && (retc_16 <= 0x101)) {
+			u64 errcnts, status;
+			u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr;
+
+			errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS);
+			status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
+
+			dev_err(&pci_dev->dev,
+				"[%s] SEQN=%04x HSI=%02x RETC=%03x Q_ERRCNTS=%016llx Q_STATUS=%016llx DDCB_DMA_ADDR=%016llx\n",
+				__func__, be16_to_cpu(pddcb->seqnum_16),
+				pddcb->hsi, retc_16, errcnts, status,
+				queue->ddcb_daddr + ddcb_offs);
+		}
+
+		copy_ddcb_results(req, queue->ddcb_act);
+		queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */
+
+		dev_dbg(&pci_dev->dev, "FINISHED DDCB#%d\n", req->num);
+		genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
+
+		ddcb_mark_finished(pddcb);
+
+		/* calculate CRC_16 to see if VCRC is correct */
+		vcrc = genwqe_crc16(pddcb->asv,
+				    VCRC_LENGTH(req->cmd.asv_length),
+				    0xffff);
+		vcrc_16 = be16_to_cpu(pddcb->vcrc_16);
+		if (vcrc != vcrc_16) {
+			printk_ratelimited(KERN_ERR
+				"%s %s: err: wrong VCRC pre=%02x vcrc_len=%d bytes vcrc_data=%04x is not vcrc_card=%04x\n",
+				GENWQE_DEVNAME, dev_name(&pci_dev->dev),
+				pddcb->pre, VCRC_LENGTH(req->cmd.asv_length),
+				vcrc, vcrc_16);
+		}
+
+		ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
+		queue->ddcbs_completed++;
+		queue->ddcbs_in_flight--;
+
+		/* wake up process waiting for this DDCB, and
+		   processes on the busy queue */
+		wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
+		wake_up_interruptible(&queue->busy_waitq);
+
+pick_next_one:
+		queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max;
+		ddcbs_finished++;
+	}
+
+ go_home:
+	spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+	return ddcbs_finished;
+}
+
+/**
+ * __genwqe_wait_ddcb(): Waits until DDCB is completed
+ * @cd:         pointer to genwqe device descriptor
+ * @req:        pointer to requested DDCB parameters
+ *
+ * The Service Layer will update the RETC in DDCB when processing is
+ * pending or done.
+ *
+ * Return: > 0          remaining jiffies, DDCB completed
+ *         -ETIMEDOUT   when timeout
+ *         -ERESTARTSYS when ^C
+ *         -EINVAL      when unknown error condition
+ *
+ * When an error is returned the caller needs to ensure that
+ * purge_ddcb() is being called to get the &req removed from the
+ * queue.
+ */
+int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
+{
+	int rc;
+	unsigned int ddcb_no;
+	struct ddcb_queue *queue;
+	struct pci_dev *pci_dev = cd->pci_dev;
+
+	if (req == NULL)
+		return -EINVAL;
+
+	queue = req->queue;
+	if (queue == NULL)
+		return -EINVAL;
+
+	ddcb_no = req->num;
+	if (ddcb_no >= queue->ddcb_max)
+		return -EINVAL;
+
+	rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no],
+				ddcb_requ_finished(cd, req),
+				GENWQE_DDCB_SOFTWARE_TIMEOUT * HZ);
+
+	/*
+	 * We need to distinguish 3 cases here:
+	 *   1. rc == 0              timeout occurred
+	 *   2. rc == -ERESTARTSYS   signal received
+	 *   3. rc > 0               remaining jiffies condition is true
+	 */
+	if (rc == 0) {
+		struct ddcb_queue *queue = req->queue;
+		struct ddcb *pddcb;
+
+		/*
+		 * The timeout may be caused by a long task switching
+		 * time. When the timeout happens, check if the request
+		 * has meanwhile completed.
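+		 * The extra genwqe_check_ddcb_queue() pass below picks
+		 * up such a late completion before -ETIMEDOUT is
+		 * reported.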
+		 */
+		genwqe_check_ddcb_queue(cd, req->queue);
+		if (ddcb_requ_finished(cd, req))
+			return rc;
+
+		dev_err(&pci_dev->dev,
+			"[%s] err: DDCB#%d timeout rc=%d state=%d req @ %p\n",
+			__func__, req->num, rc, ddcb_requ_get_state(req),
+			req);
+		dev_err(&pci_dev->dev,
+			"[%s] IO_QUEUE_STATUS=0x%016llx\n", __func__,
+			__genwqe_readq(cd, queue->IO_QUEUE_STATUS));
+
+		pddcb = &queue->ddcb_vaddr[req->num];
+		genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
+
+		print_ddcb_info(cd, req->queue);
+		return -ETIMEDOUT;
+
+	} else if (rc == -ERESTARTSYS) {
+		return rc;
+		/*
+		 * EINTR:       Stops the application
+		 * ERESTARTSYS: Restartable system call; called again
+		 */
+
+	} else if (rc < 0) {
+		dev_err(&pci_dev->dev,
+			"[%s] err: DDCB#%d unknown result (rc=%d) %d!\n",
+			__func__, req->num, rc, ddcb_requ_get_state(req));
+		return -EINVAL;
+	}
+
+	/* Severe error occurred. Driver is forced to stop operation */
+	if (cd->card_state != GENWQE_CARD_USED) {
+		dev_err(&pci_dev->dev,
+			"[%s] err: DDCB#%d forced to stop (rc=%d)\n",
+			__func__, req->num, rc);
+		return -EIO;
+	}
+	return rc;
+}
+
+/**
+ * get_next_ddcb() - Get next available DDCB
+ * @cd:         pointer to genwqe device descriptor
+ * @queue:      DDCB queue
+ * @num:        internal DDCB number
+ *
+ * The DDCB's content is completely cleared except for the presets of
+ * PRE and SEQNUM. This function must only be called when ddcb_lock
+ * is held.
+ *
+ * Return: NULL if no empty DDCB available otherwise ptr to next DDCB.
+ */
+static struct ddcb *get_next_ddcb(struct genwqe_dev *cd,
+				  struct ddcb_queue *queue,
+				  int *num)
+{
+	u64 *pu64;
+	struct ddcb *pddcb;
+
+	if (queue_free_ddcbs(queue) == 0) /* queue is full */
+		return NULL;
+
+	/* find new ddcb */
+	pddcb = &queue->ddcb_vaddr[queue->ddcb_next];
+
+	/* if it is not completed, we are not allowed to use it */
+	/* barrier(); */
+	if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == 0x00000000)
+		return NULL;
+
+	*num = queue->ddcb_next;	/* internal DDCB number */
+	queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max;
+
+	/* clear important DDCB fields */
+	pu64 = (u64 *)pddcb;
+	pu64[0] = 0ULL;		/* offs 0x00 (ICRC,HSI,SHI,...) */
+	pu64[1] = 0ULL;		/* offs 0x01 (ACFUNC,CMD...) */
+
+	/* destroy previous results in ASV */
+	pu64[0x80/8] = 0ULL;	/* offs 0x80 (ASV + 0) */
+	pu64[0x88/8] = 0ULL;	/* offs 0x88 (ASV + 0x08) */
+	pu64[0x90/8] = 0ULL;	/* offs 0x90 (ASV + 0x10) */
+	pu64[0x98/8] = 0ULL;	/* offs 0x98 (ASV + 0x18) */
+	pu64[0xd0/8] = 0ULL;	/* offs 0xd0 (RETC,ATTN...) */
+
+	pddcb->pre = DDCB_PRESET_PRE;	/* 128 */
+	pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++);
+	return pddcb;
+}
+
+/**
+ * __genwqe_purge_ddcb() - Remove a DDCB from the workqueue
+ * @cd:         genwqe device descriptor
+ * @req:        DDCB request
+ *
+ * This will fail when the request was already FETCHED. In this case
+ * we need to wait until it is finished. Else the DDCB can be
+ * reused. This function also ensures that the request data structure
+ * is removed from ddcb_req[].
+ *
+ * Do not forget to call this function when genwqe_wait_ddcb() fails,
+ * such that the request gets really removed from ddcb_req[].
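+ *
+ * A typical caller pattern looks like this (sketch, mirroring
+ * __genwqe_execute_raw_ddcb() further down in this file):
+ *
+ *   rc = __genwqe_enqueue_ddcb(cd, req, f_flags);
+ *   if (rc == 0) {
+ *           rc = __genwqe_wait_ddcb(cd, req);
+ *           if (rc < 0)
+ *                   __genwqe_purge_ddcb(cd, req);
+ *   }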
+ * + * Return: 0 success + */ +int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) +{ + struct ddcb *pddcb = NULL; + unsigned int t; + unsigned long flags; + struct ddcb_queue *queue = req->queue; + struct pci_dev *pci_dev = cd->pci_dev; + u64 queue_status; + __be32 icrc_hsi_shi = 0x0000; + __be32 old, new; + + /* unsigned long flags; */ + if (GENWQE_DDCB_SOFTWARE_TIMEOUT <= 0) { + dev_err(&pci_dev->dev, + "[%s] err: software timeout is not set!\n", __func__); + return -EFAULT; + } + + pddcb = &queue->ddcb_vaddr[req->num]; + + for (t = 0; t < GENWQE_DDCB_SOFTWARE_TIMEOUT * 10; t++) { + + spin_lock_irqsave(&queue->ddcb_lock, flags); + + /* Check if req was meanwhile finished */ + if (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) + goto go_home; + + /* try to set PURGE bit if FETCHED/COMPLETED are not set */ + old = pddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */ + if ((old & DDCB_FETCHED_BE32) == 0x00000000) { + + new = (old | DDCB_PURGE_BE32); + icrc_hsi_shi = cmpxchg(&pddcb->icrc_hsi_shi_32, + old, new); + if (icrc_hsi_shi == old) + goto finish_ddcb; + } + + /* normal finish with HSI bit */ + barrier(); + icrc_hsi_shi = pddcb->icrc_hsi_shi_32; + if (icrc_hsi_shi & DDCB_COMPLETED_BE32) + goto finish_ddcb; + + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + + /* + * Here the check_ddcb() function will most likely + * discover this DDCB to be finished some point in + * time. It will mark the req finished and free it up + * in the list. + */ + + copy_ddcb_results(req, req->num); /* for the failing case */ + msleep(100); /* sleep for 1/10 second and try again */ + continue; + +finish_ddcb: + copy_ddcb_results(req, req->num); + ddcb_requ_set_state(req, GENWQE_REQU_FINISHED); + queue->ddcbs_in_flight--; + queue->ddcb_req[req->num] = NULL; /* delete from array */ + ddcb_mark_cleared(pddcb); + + /* Move active DDCB further; Nothing to do here anymore. */ + + /* + * We need to ensure that there is at least one free + * DDCB in the queue. To do that, we must update + * ddcb_act only if the COMPLETED bit is set for the + * DDCB we are working on else we treat that DDCB even + * if we PURGED it as occupied (hardware is supposed + * to set the COMPLETED bit yet!). + */ + icrc_hsi_shi = pddcb->icrc_hsi_shi_32; + if ((icrc_hsi_shi & DDCB_COMPLETED_BE32) && + (queue->ddcb_act == req->num)) { + queue->ddcb_act = ((queue->ddcb_act + 1) % + queue->ddcb_max); + } +go_home: + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + return 0; + } + + /* + * If the card is dead and the queue is forced to stop, we + * might see this in the queue status register. 
+ */ + queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS); + + dev_dbg(&pci_dev->dev, "UN/FINISHED DDCB#%d\n", req->num); + genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); + + dev_err(&pci_dev->dev, + "[%s] err: DDCB#%d not purged and not completed after %d seconds QSTAT=%016llx!!\n", + __func__, req->num, GENWQE_DDCB_SOFTWARE_TIMEOUT, + queue_status); + + print_ddcb_info(cd, req->queue); + + return -EFAULT; +} + +int genwqe_init_debug_data(struct genwqe_dev *cd, struct genwqe_debug_data *d) +{ + int len; + struct pci_dev *pci_dev = cd->pci_dev; + + if (d == NULL) { + dev_err(&pci_dev->dev, + "[%s] err: invalid memory for debug data!\n", + __func__); + return -EFAULT; + } + + len = sizeof(d->driver_version); + snprintf(d->driver_version, len, "%s", DRV_VERSION); + d->slu_unitcfg = cd->slu_unitcfg; + d->app_unitcfg = cd->app_unitcfg; + return 0; +} + +/** + * __genwqe_enqueue_ddcb() - Enqueue a DDCB + * @cd: pointer to genwqe device descriptor + * @req: pointer to DDCB execution request + * @f_flags: file mode: blocking, non-blocking + * + * Return: 0 if enqueuing succeeded + * -EIO if card is unusable/PCIe problems + * -EBUSY if enqueuing failed + */ +int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req, + unsigned int f_flags) +{ + struct ddcb *pddcb; + unsigned long flags; + struct ddcb_queue *queue; + struct pci_dev *pci_dev = cd->pci_dev; + u16 icrc; + + retry: + if (cd->card_state != GENWQE_CARD_USED) { + printk_ratelimited(KERN_ERR + "%s %s: [%s] Card is unusable/PCIe problem Req#%d\n", + GENWQE_DEVNAME, dev_name(&pci_dev->dev), + __func__, req->num); + return -EIO; + } + + queue = req->queue = &cd->queue; + + /* FIXME circumvention to improve performance when no irq is + * there. + */ + if (GENWQE_POLLING_ENABLED) + genwqe_check_ddcb_queue(cd, queue); + + /* + * It must be ensured to process all DDCBs in successive + * order. Use a lock here in order to prevent nested DDCB + * enqueuing. + */ + spin_lock_irqsave(&queue->ddcb_lock, flags); + + pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */ + if (pddcb == NULL) { + int rc; + + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + + if (f_flags & O_NONBLOCK) { + queue->return_on_busy++; + return -EBUSY; + } + + queue->wait_on_busy++; + rc = wait_event_interruptible(queue->busy_waitq, + queue_free_ddcbs(queue) != 0); + dev_dbg(&pci_dev->dev, "[%s] waiting for free DDCB: rc=%d\n", + __func__, rc); + if (rc == -ERESTARTSYS) + return rc; /* interrupted by a signal */ + + goto retry; + } + + if (queue->ddcb_req[req->num] != NULL) { + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + + dev_err(&pci_dev->dev, + "[%s] picked DDCB %d with req=%p still in use!!\n", + __func__, req->num, req); + return -EFAULT; + } + ddcb_requ_set_state(req, GENWQE_REQU_ENQUEUED); + queue->ddcb_req[req->num] = req; + + pddcb->cmdopts_16 = cpu_to_be16(req->cmd.cmdopts); + pddcb->cmd = req->cmd.cmd; + pddcb->acfunc = req->cmd.acfunc; /* functional unit */ + + /* + * We know that we can get retc 0x104 with CRC error, do not + * stop the queue in those cases for this command. XDIR = 1 + * does not work for old SLU versions. + * + * Last bitstream with the old XDIR behavior had SLU_ID + * 0x34199. 
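+	 *
+	 * The check below masks the SLU_ID out of cd->slu_unitcfg
+	 * with 0xFFFF0 before comparing: e.g. a card with SLU_ID
+	 * 0x34199 yields (0x34199 & 0xFFFF0) = 0x34190, which is not
+	 * greater than 0x34199, so it keeps the old XDIR = 0
+	 * behavior.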
+ */ + if ((cd->slu_unitcfg & 0xFFFF0ull) > 0x34199ull) + pddcb->xdir = 0x1; + else + pddcb->xdir = 0x0; + + + pddcb->psp = (((req->cmd.asiv_length / 8) << 4) | + ((req->cmd.asv_length / 8))); + pddcb->disp_ts_64 = cpu_to_be64(req->cmd.disp_ts); + + /* + * If copying the whole DDCB_ASIV_LENGTH is impacting + * performance we need to change it to + * req->cmd.asiv_length. But simulation benefits from some + * non-architectured bits behind the architectured content. + * + * How much data is copied depends on the availability of the + * ATS field, which was introduced late. If the ATS field is + * supported ASIV is 8 bytes shorter than it used to be. Since + * the ATS field is copied too, the code should do exactly + * what it did before, but I wanted to make copying of the ATS + * field very explicit. + */ + if (genwqe_get_slu_id(cd) <= 0x2) { + memcpy(&pddcb->__asiv[0], /* destination */ + &req->cmd.__asiv[0], /* source */ + DDCB_ASIV_LENGTH); /* req->cmd.asiv_length */ + } else { + pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats); + memcpy(&pddcb->n.asiv[0], /* destination */ + &req->cmd.asiv[0], /* source */ + DDCB_ASIV_LENGTH_ATS); /* req->cmd.asiv_length */ + } + + pddcb->icrc_hsi_shi_32 = cpu_to_be32(0x00000000); /* for crc */ + + /* + * Calculate CRC_16 for corresponding range PSP(7:4). Include + * empty 4 bytes prior to the data. + */ + icrc = genwqe_crc16((const u8 *)pddcb, + ICRC_LENGTH(req->cmd.asiv_length), 0xffff); + pddcb->icrc_hsi_shi_32 = cpu_to_be32((u32)icrc << 16); + + /* enable DDCB completion irq */ + if (!GENWQE_POLLING_ENABLED) + pddcb->icrc_hsi_shi_32 |= DDCB_INTR_BE32; + + dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num); + genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); + + if (ddcb_requ_collect_debug_data(req)) { + /* use the kernel copy of debug data. 
		   copying back to user buffer happens later */
+
+		genwqe_init_debug_data(cd, &req->debug_data);
+		memcpy(&req->debug_data.ddcb_before, pddcb,
+		       sizeof(req->debug_data.ddcb_before));
+	}
+
+	enqueue_ddcb(cd, queue, pddcb, req->num);
+	queue->ddcbs_in_flight++;
+
+	if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight)
+		queue->ddcbs_max_in_flight = queue->ddcbs_in_flight;
+
+	ddcb_requ_set_state(req, GENWQE_REQU_TAPPED);
+	spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+	wake_up_interruptible(&cd->queue_waitq);
+
+	return 0;
+}
+
+/**
+ * __genwqe_execute_raw_ddcb() - Setup and execute DDCB
+ * @cd:        pointer to genwqe device descriptor
+ * @cmd:       user provided DDCB command
+ * @f_flags:   file mode: blocking, non-blocking
+ */
+int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
+			      struct genwqe_ddcb_cmd *cmd,
+			      unsigned int f_flags)
+{
+	int rc = 0;
+	struct pci_dev *pci_dev = cd->pci_dev;
+	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
+
+	if (cmd->asiv_length > DDCB_ASIV_LENGTH) {
+		dev_err(&pci_dev->dev, "[%s] err: wrong asiv_length of %d\n",
+			__func__, cmd->asiv_length);
+		return -EINVAL;
+	}
+	if (cmd->asv_length > DDCB_ASV_LENGTH) {
+		dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n",
+			__func__, cmd->asv_length);
+		return -EINVAL;
+	}
+	rc = __genwqe_enqueue_ddcb(cd, req, f_flags);
+	if (rc != 0)
+		return rc;
+
+	rc = __genwqe_wait_ddcb(cd, req);
+	if (rc < 0)		/* error or signal interrupt */
+		goto err_exit;
+
+	if (ddcb_requ_collect_debug_data(req)) {
+		if (copy_to_user((struct genwqe_debug_data __user *)
+				 (unsigned long)cmd->ddata_addr,
+				 &req->debug_data,
+				 sizeof(struct genwqe_debug_data)))
+			return -EFAULT;
+	}
+
+	/*
+	 * Higher values than 0x102 indicate completion with faults,
+	 * lower values than 0x102 indicate processing faults. Note
+	 * that DDCB might have been purged. E.g. Ctrl+C.
+	 */
+	if (cmd->retc != DDCB_RETC_COMPLETE) {
+		/* This might happen e.g. flash read, and needs to be
+		   handled by the upper layer code. */
+		rc = -EBADMSG;	/* not processed/error retc */
+	}
+
+	return rc;
+
+ err_exit:
+	__genwqe_purge_ddcb(cd, req);
+
+	if (ddcb_requ_collect_debug_data(req)) {
+		if (copy_to_user((struct genwqe_debug_data __user *)
+				 (unsigned long)cmd->ddata_addr,
+				 &req->debug_data,
+				 sizeof(struct genwqe_debug_data)))
+			return -EFAULT;
+	}
+	return rc;
+}
+
+/**
+ * genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished
+ * @cd:         pointer to genwqe device descriptor
+ *
+ * We use this as condition for our wait-queue code.
+ */
+static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
+{
+	unsigned long flags;
+	struct ddcb *pddcb;
+	struct ddcb_queue *queue = &cd->queue;
+
+	spin_lock_irqsave(&queue->ddcb_lock, flags);
+
+	if (queue_empty(queue)) { /* empty queue */
+		spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+		return 0;
+	}
+
+	pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
+	if (pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) { /* ddcb ready */
+		spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+		return 1;
+	}
+
+	spin_unlock_irqrestore(&queue->ddcb_lock, flags);
+	return 0;
+}
+
+/**
+ * genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight
+ * @cd:         pointer to genwqe device descriptor
+ *
+ * Keep track of the number of DDCBs which are currently in the
+ * queue. This is needed for statistics as well as a condition to
+ * decide whether to wait or better do polling in case no interrupts
+ * are available.
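+ *
+ * Note that the lock below is only taken to produce a consistent
+ * snapshot; the returned value can already be stale by the time the
+ * lock is dropped again.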
+ */ +int genwqe_ddcbs_in_flight(struct genwqe_dev *cd) +{ + unsigned long flags; + int ddcbs_in_flight = 0; + struct ddcb_queue *queue = &cd->queue; + + spin_lock_irqsave(&queue->ddcb_lock, flags); + ddcbs_in_flight += queue->ddcbs_in_flight; + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + + return ddcbs_in_flight; +} + +static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) +{ + int rc, i; + struct ddcb *pddcb; + u64 val64; + unsigned int queue_size; + struct pci_dev *pci_dev = cd->pci_dev; + + if (GENWQE_DDCB_MAX < 2) + return -EINVAL; + + queue_size = roundup(GENWQE_DDCB_MAX * sizeof(struct ddcb), PAGE_SIZE); + + queue->ddcbs_in_flight = 0; /* statistics */ + queue->ddcbs_max_in_flight = 0; + queue->ddcbs_completed = 0; + queue->return_on_busy = 0; + queue->wait_on_busy = 0; + + queue->ddcb_seq = 0x100; /* start sequence number */ + queue->ddcb_max = GENWQE_DDCB_MAX; + queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size, + &queue->ddcb_daddr); + if (queue->ddcb_vaddr == NULL) { + dev_err(&pci_dev->dev, + "[%s] **err: could not allocate DDCB **\n", __func__); + return -ENOMEM; + } + queue->ddcb_req = kcalloc(queue->ddcb_max, sizeof(struct ddcb_requ *), + GFP_KERNEL); + if (!queue->ddcb_req) { + rc = -ENOMEM; + goto free_ddcbs; + } + + queue->ddcb_waitqs = kcalloc(queue->ddcb_max, + sizeof(wait_queue_head_t), + GFP_KERNEL); + if (!queue->ddcb_waitqs) { + rc = -ENOMEM; + goto free_requs; + } + + for (i = 0; i < queue->ddcb_max; i++) { + pddcb = &queue->ddcb_vaddr[i]; /* DDCBs */ + pddcb->icrc_hsi_shi_32 = DDCB_COMPLETED_BE32; + pddcb->retc_16 = cpu_to_be16(0xfff); + + queue->ddcb_req[i] = NULL; /* requests */ + init_waitqueue_head(&queue->ddcb_waitqs[i]); /* waitqueues */ + } + + queue->ddcb_act = 0; + queue->ddcb_next = 0; /* queue is empty */ + + spin_lock_init(&queue->ddcb_lock); + init_waitqueue_head(&queue->busy_waitq); + + val64 = ((u64)(queue->ddcb_max - 1) << 8); /* lastptr */ + __genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07); /* iCRC/vCRC */ + __genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr); + __genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq); + __genwqe_writeq(cd, queue->IO_QUEUE_WRAP, val64); + return 0; + + free_requs: + kfree(queue->ddcb_req); + queue->ddcb_req = NULL; + free_ddcbs: + __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr, + queue->ddcb_daddr); + queue->ddcb_vaddr = NULL; + queue->ddcb_daddr = 0ull; + return rc; + +} + +static int ddcb_queue_initialized(struct ddcb_queue *queue) +{ + return queue->ddcb_vaddr != NULL; +} + +static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) +{ + unsigned int queue_size; + + queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE); + + kfree(queue->ddcb_req); + queue->ddcb_req = NULL; + + if (queue->ddcb_vaddr) { + __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr, + queue->ddcb_daddr); + queue->ddcb_vaddr = NULL; + queue->ddcb_daddr = 0ull; + } +} + +static irqreturn_t genwqe_pf_isr(int irq, void *dev_id) +{ + u64 gfir; + struct genwqe_dev *cd = (struct genwqe_dev *)dev_id; + struct pci_dev *pci_dev = cd->pci_dev; + + /* + * In case of fatal FIR error the queue is stopped, such that + * we can safely check it without risking anything. + */ + cd->irqs_processed++; + wake_up_interruptible(&cd->queue_waitq); + + /* + * Checking for errors before kicking the queue might be + * safer, but slower for the good-case ... See above. 
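+	 *
+	 * Note that the error path below only logs the GFIR value and
+	 * wakes up the health thread; any actual recovery action
+	 * happens outside of interrupt context.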
+ */ + gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); + if (((gfir & GFIR_ERR_TRIGGER) != 0x0) && + !pci_channel_offline(pci_dev)) { + + if (cd->use_platform_recovery) { + /* + * Since we use raw accessors, EEH errors won't be + * detected by the platform until we do a non-raw + * MMIO or config space read + */ + readq(cd->mmio + IO_SLC_CFGREG_GFIR); + + /* Don't do anything if the PCI channel is frozen */ + if (pci_channel_offline(pci_dev)) + goto exit; + } + + wake_up_interruptible(&cd->health_waitq); + + /* + * By default GFIRs causes recovery actions. This + * count is just for debug when recovery is masked. + */ + dev_err_ratelimited(&pci_dev->dev, + "[%s] GFIR=%016llx\n", + __func__, gfir); + } + + exit: + return IRQ_HANDLED; +} + +static irqreturn_t genwqe_vf_isr(int irq, void *dev_id) +{ + struct genwqe_dev *cd = (struct genwqe_dev *)dev_id; + + cd->irqs_processed++; + wake_up_interruptible(&cd->queue_waitq); + + return IRQ_HANDLED; +} + +/** + * genwqe_card_thread() - Work thread for the DDCB queue + * @data: pointer to genwqe device descriptor + * + * The idea is to check if there are DDCBs in processing. If there are + * some finished DDCBs, we process them and wakeup the + * requestors. Otherwise we give other processes time using + * cond_resched(). + */ +static int genwqe_card_thread(void *data) +{ + int should_stop = 0; + struct genwqe_dev *cd = (struct genwqe_dev *)data; + + while (!kthread_should_stop()) { + + genwqe_check_ddcb_queue(cd, &cd->queue); + + if (GENWQE_POLLING_ENABLED) { + wait_event_interruptible_timeout( + cd->queue_waitq, + genwqe_ddcbs_in_flight(cd) || + (should_stop = kthread_should_stop()), 1); + } else { + wait_event_interruptible_timeout( + cd->queue_waitq, + genwqe_next_ddcb_ready(cd) || + (should_stop = kthread_should_stop()), HZ); + } + if (should_stop) + break; + + /* + * Avoid soft lockups on heavy loads; we do not want + * to disable our interrupts. + */ + cond_resched(); + } + return 0; +} + +/** + * genwqe_setup_service_layer() - Setup DDCB queue + * @cd: pointer to genwqe device descriptor + * + * Allocate DDCBs. Configure Service Layer Controller (SLC). + * + * Return: 0 success + */ +int genwqe_setup_service_layer(struct genwqe_dev *cd) +{ + int rc; + struct ddcb_queue *queue; + struct pci_dev *pci_dev = cd->pci_dev; + + if (genwqe_is_privileged(cd)) { + rc = genwqe_card_reset(cd); + if (rc < 0) { + dev_err(&pci_dev->dev, + "[%s] err: reset failed.\n", __func__); + return rc; + } + genwqe_read_softreset(cd); + } + + queue = &cd->queue; + queue->IO_QUEUE_CONFIG = IO_SLC_QUEUE_CONFIG; + queue->IO_QUEUE_STATUS = IO_SLC_QUEUE_STATUS; + queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT; + queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN; + queue->IO_QUEUE_OFFSET = IO_SLC_QUEUE_OFFSET; + queue->IO_QUEUE_WRAP = IO_SLC_QUEUE_WRAP; + queue->IO_QUEUE_WTIME = IO_SLC_QUEUE_WTIME; + queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS; + queue->IO_QUEUE_LRW = IO_SLC_QUEUE_LRW; + + rc = setup_ddcb_queue(cd, queue); + if (rc != 0) { + rc = -ENODEV; + goto err_out; + } + + init_waitqueue_head(&cd->queue_waitq); + cd->card_thread = kthread_run(genwqe_card_thread, cd, + GENWQE_DEVNAME "%d_thread", + cd->card_idx); + if (IS_ERR(cd->card_thread)) { + rc = PTR_ERR(cd->card_thread); + cd->card_thread = NULL; + goto stop_free_queue; + } + + rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS); + if (rc) + goto stop_kthread; + + /* + * We must have all wait-queues initialized when we enable the + * interrupts. Otherwise we might crash if we get an early + * irq. 
+ */ + init_waitqueue_head(&cd->health_waitq); + + if (genwqe_is_privileged(cd)) { + rc = request_irq(pci_dev->irq, genwqe_pf_isr, IRQF_SHARED, + GENWQE_DEVNAME, cd); + } else { + rc = request_irq(pci_dev->irq, genwqe_vf_isr, IRQF_SHARED, + GENWQE_DEVNAME, cd); + } + if (rc < 0) { + dev_err(&pci_dev->dev, "irq %d not free.\n", pci_dev->irq); + goto stop_irq_cap; + } + + cd->card_state = GENWQE_CARD_USED; + return 0; + + stop_irq_cap: + genwqe_reset_interrupt_capability(cd); + stop_kthread: + kthread_stop(cd->card_thread); + cd->card_thread = NULL; + stop_free_queue: + free_ddcb_queue(cd, queue); + err_out: + return rc; +} + +/** + * queue_wake_up_all() - Handles fatal error case + * @cd: pointer to genwqe device descriptor + * + * The PCI device got unusable and we have to stop all pending + * requests as fast as we can. The code after this must purge the + * DDCBs in question and ensure that all mappings are freed. + */ +static int queue_wake_up_all(struct genwqe_dev *cd) +{ + unsigned int i; + unsigned long flags; + struct ddcb_queue *queue = &cd->queue; + + spin_lock_irqsave(&queue->ddcb_lock, flags); + + for (i = 0; i < queue->ddcb_max; i++) + wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]); + + wake_up_interruptible(&queue->busy_waitq); + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + + return 0; +} + +/** + * genwqe_finish_queue() - Remove any genwqe devices and user-interfaces + * @cd: pointer to genwqe device descriptor + * + * Relies on the pre-condition that there are no users of the card + * device anymore e.g. with open file-descriptors. + * + * This function must be robust enough to be called twice. + */ +int genwqe_finish_queue(struct genwqe_dev *cd) +{ + int i, rc = 0, in_flight; + int waitmax = GENWQE_DDCB_SOFTWARE_TIMEOUT; + struct pci_dev *pci_dev = cd->pci_dev; + struct ddcb_queue *queue = &cd->queue; + + if (!ddcb_queue_initialized(queue)) + return 0; + + /* Do not wipe out the error state. */ + if (cd->card_state == GENWQE_CARD_USED) + cd->card_state = GENWQE_CARD_UNUSED; + + /* Wake up all requests in the DDCB queue such that they + should be removed nicely. */ + queue_wake_up_all(cd); + + /* We must wait to get rid of the DDCBs in flight */ + for (i = 0; i < waitmax; i++) { + in_flight = genwqe_ddcbs_in_flight(cd); + + if (in_flight == 0) + break; + + dev_dbg(&pci_dev->dev, + " DEBUG [%d/%d] waiting for queue to get empty: %d requests!\n", + i, waitmax, in_flight); + + /* + * Severe severe error situation: The card itself has + * 16 DDCB queues, each queue has e.g. 32 entries, + * each DDBC has a hardware timeout of currently 250 + * msec but the PFs have a hardware timeout of 8 sec + * ... so I take something large. + */ + msleep(1000); + } + if (i == waitmax) { + dev_err(&pci_dev->dev, " [%s] err: queue is not empty!!\n", + __func__); + rc = -EIO; + } + return rc; +} + +/** + * genwqe_release_service_layer() - Shutdown DDCB queue + * @cd: genwqe device descriptor + * + * This function must be robust enough to be called twice. 
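+ *
+ * Calling it twice is safe because ddcb_queue_initialized() is
+ * checked below and both card_thread and the queue pointers are
+ * reset to NULL after teardown.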
+ */ +int genwqe_release_service_layer(struct genwqe_dev *cd) +{ + struct pci_dev *pci_dev = cd->pci_dev; + + if (!ddcb_queue_initialized(&cd->queue)) + return 1; + + free_irq(pci_dev->irq, cd); + genwqe_reset_interrupt_capability(cd); + + if (cd->card_thread != NULL) { + kthread_stop(cd->card_thread); + cd->card_thread = NULL; + } + + free_ddcb_queue(cd, &cd->queue); + return 0; +} diff --git a/drivers/misc/genwqe/card_ddcb.h b/drivers/misc/genwqe/card_ddcb.h new file mode 100644 index 000000000..a47ff49ae --- /dev/null +++ b/drivers/misc/genwqe/card_ddcb.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef __CARD_DDCB_H__ +#define __CARD_DDCB_H__ + +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@gmx.net> + * Author: Michael Ruettger <michael@ibmra.de> + */ + +#include <linux/types.h> +#include <asm/byteorder.h> + +#include "genwqe_driver.h" +#include "card_base.h" + +/** + * struct ddcb - Device Driver Control Block DDCB + * @hsi: Hardware software interlock + * @shi: Software hardware interlock. Hsi and shi are used to interlock + * software and hardware activities. We are using a compare and + * swap operation to ensure that there are no races when + * activating new DDCBs on the queue, or when we need to + * purge a DDCB from a running queue. + * @acfunc: Accelerator function addresses a unit within the chip + * @cmd: Command to work on + * @cmdopts_16: Options for the command + * @asiv: Input data + * @asv: Output data + * + * The DDCB data format is big endian. Multiple consequtive DDBCs form + * a DDCB queue. + */ +#define ASIV_LENGTH 104 /* Old specification without ATS field */ +#define ASIV_LENGTH_ATS 96 /* New specification with ATS field */ +#define ASV_LENGTH 64 + +struct ddcb { + union { + __be32 icrc_hsi_shi_32; /* iCRC, Hardware/SW interlock */ + struct { + __be16 icrc_16; + u8 hsi; + u8 shi; + }; + }; + u8 pre; /* Preamble */ + u8 xdir; /* Execution Directives */ + __be16 seqnum_16; /* Sequence Number */ + + u8 acfunc; /* Accelerator Function.. */ + u8 cmd; /* Command. */ + __be16 cmdopts_16; /* Command Options */ + u8 sur; /* Status Update Rate */ + u8 psp; /* Protection Section Pointer */ + __be16 rsvd_0e_16; /* Reserved invariant */ + + __be64 fwiv_64; /* Firmware Invariant. */ + + union { + struct { + __be64 ats_64; /* Address Translation Spec */ + u8 asiv[ASIV_LENGTH_ATS]; /* New ASIV */ + } n; + u8 __asiv[ASIV_LENGTH]; /* obsolete */ + }; + u8 asv[ASV_LENGTH]; /* Appl Spec Variant */ + + __be16 rsvd_c0_16; /* Reserved Variant */ + __be16 vcrc_16; /* Variant CRC */ + __be32 rsvd_32; /* Reserved unprotected */ + + __be64 deque_ts_64; /* Deque Time Stamp. */ + + __be16 retc_16; /* Return Code */ + __be16 attn_16; /* Attention/Extended Error Codes */ + __be32 progress_32; /* Progress indicator. */ + + __be64 cmplt_ts_64; /* Completion Time Stamp. 
					 */
+
+	/* The following layout matches the new service layer format */
+	__be32 ibdc_32;			/* Inbound Data Count  (* 256) */
+	__be32 obdc_32;			/* Outbound Data Count (* 256) */
+
+	__be64 rsvd_SLH_64;		/* Reserved for hardware */
+	union {				/* private data for driver */
+		u8	priv[8];
+		__be64	priv_64;
+	};
+	__be64 disp_ts_64;		/* Dispatch TimeStamp */
+} __attribute__((__packed__));
+
+/* CRC polynomials for DDCB */
+#define CRC16_POLYNOMIAL	0x1021
+
+/*
+ * SHI: Software to Hardware Interlock
+ *   This 1 byte field is written by software to interlock the
+ *   movement of one queue entry to another with the hardware in the
+ *   chip.
+ */
+#define DDCB_SHI_INTR		0x04 /* Bit 2 */
+#define DDCB_SHI_PURGE		0x02 /* Bit 1 */
+#define DDCB_SHI_NEXT		0x01 /* Bit 0 */
+
+/*
+ * HSI: Hardware to Software interlock
+ *   This 1 byte field is written by hardware to interlock the movement
+ *   of one queue entry to another with the software in the chip.
+ */
+#define DDCB_HSI_COMPLETED	0x40 /* Bit 6 */
+#define DDCB_HSI_FETCHED	0x04 /* Bit 2 */
+
+/*
+ * Accessing HSI/SHI is done 32-bit wide
+ *   Normally 16-bit access would work too, but on some platforms the
+ *   16-bit compare and swap operation is not supported. Therefore
+ *   switching to 32-bit such that those platforms will work too.
+ *
+ *                                         iCRC HSI/SHI
+ */
+#define DDCB_INTR_BE32		cpu_to_be32(0x00000004)
+#define DDCB_PURGE_BE32		cpu_to_be32(0x00000002)
+#define DDCB_NEXT_BE32		cpu_to_be32(0x00000001)
+#define DDCB_COMPLETED_BE32	cpu_to_be32(0x00004000)
+#define DDCB_FETCHED_BE32	cpu_to_be32(0x00000400)
+
+/* Definitions of DDCB presets */
+#define DDCB_PRESET_PRE		0x80
+#define ICRC_LENGTH(n)		((n) + 8 + 8 + 8) /* used ASIV + hdr fields */
+#define VCRC_LENGTH(n)		((n))		  /* used ASV */
+
+/*
+ * Genwqe Scatter Gather list
+ *   Each element has up to 8 entries.
+ *   The chaining element is element 0 because of prefetching needs.
+ */
+
+/*
+ * 0b0110 Chained descriptor. The descriptor is describing the next
+ * descriptor list.
+ */
+#define SG_CHAINED		(0x6)
+
+/*
+ * 0b0010 First entry of a descriptor list. Start from a Buffer-Empty
+ * condition.
+ */
+#define SG_DATA			(0x2)
+
+/*
+ * 0b0000 Early terminator. This is the last entry on the list
+ * regardless of the length indicated.
+ */
+#define SG_END_LIST		(0x0)
+
+/**
+ * struct sg_entry - Scatter gather list entry
+ * @target_addr: Either a dma addr of memory to work on or a
+ *               dma addr of a subsequent sglist block.
+ * @len:         Length of the data block.
+ * @flags:       See above.
+ *
+ * Depending on the command the GenWQE card can use a scatter gather
+ * list to describe the memory it works on. Always 8 sg_entry structs
+ * form a block.
+ */
+struct sg_entry {
+	__be64 target_addr;
+	__be32 len;
+	__be32 flags;
+};
+
+#endif /* __CARD_DDCB_H__ */
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
new file mode 100644
index 000000000..491fb4482
--- /dev/null
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@gmx.net>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ */
+
+/*
+ * Debugfs interfaces for the GenWQE card. These help to debug
+ * potential problems and dump internal chip state for debugging and
+ * failure determination.
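+ *
+ * Example usage from a shell, assuming the default debugfs mount
+ * point and card 0 (a sketch; the exact paths depend on the system):
+ *
+ *   cat /sys/kernel/debug/genwqe/genwqe0_card/info
+ *   cat /sys/kernel/debug/genwqe/genwqe0_card/ddcb_info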
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> + +#include "card_base.h" +#include "card_ddcb.h" + +static void dbg_uidn_show(struct seq_file *s, struct genwqe_reg *regs, + int entries) +{ + unsigned int i; + u32 v_hi, v_lo; + + for (i = 0; i < entries; i++) { + v_hi = (regs[i].val >> 32) & 0xffffffff; + v_lo = (regs[i].val) & 0xffffffff; + + seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x EXT_ERR_REC\n", + regs[i].addr, regs[i].idx, v_hi, v_lo); + } +} + +static int curr_dbg_uidn_show(struct seq_file *s, void *unused, int uid) +{ + struct genwqe_dev *cd = s->private; + int entries; + struct genwqe_reg *regs; + + entries = genwqe_ffdc_buff_size(cd, uid); + if (entries < 0) + return -EINVAL; + + if (entries == 0) + return 0; + + regs = kcalloc(entries, sizeof(*regs), GFP_KERNEL); + if (regs == NULL) + return -ENOMEM; + + genwqe_stop_traps(cd); /* halt the traps while dumping data */ + genwqe_ffdc_buff_read(cd, uid, regs, entries); + genwqe_start_traps(cd); + + dbg_uidn_show(s, regs, entries); + kfree(regs); + return 0; +} + +static int curr_dbg_uid0_show(struct seq_file *s, void *unused) +{ + return curr_dbg_uidn_show(s, unused, 0); +} + +DEFINE_SHOW_ATTRIBUTE(curr_dbg_uid0); + +static int curr_dbg_uid1_show(struct seq_file *s, void *unused) +{ + return curr_dbg_uidn_show(s, unused, 1); +} + +DEFINE_SHOW_ATTRIBUTE(curr_dbg_uid1); + +static int curr_dbg_uid2_show(struct seq_file *s, void *unused) +{ + return curr_dbg_uidn_show(s, unused, 2); +} + +DEFINE_SHOW_ATTRIBUTE(curr_dbg_uid2); + +static int prev_dbg_uidn_show(struct seq_file *s, void *unused, int uid) +{ + struct genwqe_dev *cd = s->private; + + dbg_uidn_show(s, cd->ffdc[uid].regs, cd->ffdc[uid].entries); + return 0; +} + +static int prev_dbg_uid0_show(struct seq_file *s, void *unused) +{ + return prev_dbg_uidn_show(s, unused, 0); +} + +DEFINE_SHOW_ATTRIBUTE(prev_dbg_uid0); + +static int prev_dbg_uid1_show(struct seq_file *s, void *unused) +{ + return prev_dbg_uidn_show(s, unused, 1); +} + +DEFINE_SHOW_ATTRIBUTE(prev_dbg_uid1); + +static int prev_dbg_uid2_show(struct seq_file *s, void *unused) +{ + return prev_dbg_uidn_show(s, unused, 2); +} + +DEFINE_SHOW_ATTRIBUTE(prev_dbg_uid2); + +static int curr_regs_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + unsigned int i; + struct genwqe_reg *regs; + + regs = kcalloc(GENWQE_FFDC_REGS, sizeof(*regs), GFP_KERNEL); + if (regs == NULL) + return -ENOMEM; + + genwqe_stop_traps(cd); + genwqe_read_ffdc_regs(cd, regs, GENWQE_FFDC_REGS, 1); + genwqe_start_traps(cd); + + for (i = 0; i < GENWQE_FFDC_REGS; i++) { + if (regs[i].addr == 0xffffffff) + break; /* invalid entries */ + + if (regs[i].val == 0x0ull) + continue; /* do not print 0x0 FIRs */ + + seq_printf(s, " 0x%08x 0x%016llx\n", + regs[i].addr, regs[i].val); + } + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(curr_regs); + +static int prev_regs_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + unsigned int i; + struct genwqe_reg *regs = cd->ffdc[GENWQE_DBG_REGS].regs; + + if (regs == NULL) + return -EINVAL; + + for (i = 0; i < GENWQE_FFDC_REGS; i++) { + if (regs[i].addr == 0xffffffff) + break; /* invalid entries */ + + if (regs[i].val == 0x0ull) + continue; /* do not print 0x0 FIRs */ + + seq_printf(s, " 0x%08x 0x%016llx\n", + regs[i].addr, regs[i].val); + } + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(prev_regs); + +static int jtimer_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev 
*cd = s->private; + unsigned int vf_num; + u64 jtimer; + + jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, 0); + seq_printf(s, " PF 0x%016llx %d msec\n", jtimer, + GENWQE_PF_JOBTIMEOUT_MSEC); + + for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) { + jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, + vf_num + 1); + seq_printf(s, " VF%-2d 0x%016llx %d msec\n", vf_num, jtimer, + cd->vf_jobtimeout_msec[vf_num]); + } + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(jtimer); + +static int queue_working_time_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + unsigned int vf_num; + u64 t; + + t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, 0); + seq_printf(s, " PF 0x%016llx\n", t); + + for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) { + t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, vf_num + 1); + seq_printf(s, " VF%-2d 0x%016llx\n", vf_num, t); + } + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(queue_working_time); + +static int ddcb_info_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + unsigned int i; + struct ddcb_queue *queue; + struct ddcb *pddcb; + + queue = &cd->queue; + seq_puts(s, "DDCB QUEUE:\n"); + seq_printf(s, " ddcb_max: %d\n" + " ddcb_daddr: %016llx - %016llx\n" + " ddcb_vaddr: %p\n" + " ddcbs_in_flight: %u\n" + " ddcbs_max_in_flight: %u\n" + " ddcbs_completed: %u\n" + " return_on_busy: %u\n" + " wait_on_busy: %u\n" + " irqs_processed: %u\n", + queue->ddcb_max, (long long)queue->ddcb_daddr, + (long long)queue->ddcb_daddr + + (queue->ddcb_max * DDCB_LENGTH), + queue->ddcb_vaddr, queue->ddcbs_in_flight, + queue->ddcbs_max_in_flight, queue->ddcbs_completed, + queue->return_on_busy, queue->wait_on_busy, + cd->irqs_processed); + + /* Hardware State */ + seq_printf(s, " 0x%08x 0x%016llx IO_QUEUE_CONFIG\n" + " 0x%08x 0x%016llx IO_QUEUE_STATUS\n" + " 0x%08x 0x%016llx IO_QUEUE_SEGMENT\n" + " 0x%08x 0x%016llx IO_QUEUE_INITSQN\n" + " 0x%08x 0x%016llx IO_QUEUE_WRAP\n" + " 0x%08x 0x%016llx IO_QUEUE_OFFSET\n" + " 0x%08x 0x%016llx IO_QUEUE_WTIME\n" + " 0x%08x 0x%016llx IO_QUEUE_ERRCNTS\n" + " 0x%08x 0x%016llx IO_QUEUE_LRW\n", + queue->IO_QUEUE_CONFIG, + __genwqe_readq(cd, queue->IO_QUEUE_CONFIG), + queue->IO_QUEUE_STATUS, + __genwqe_readq(cd, queue->IO_QUEUE_STATUS), + queue->IO_QUEUE_SEGMENT, + __genwqe_readq(cd, queue->IO_QUEUE_SEGMENT), + queue->IO_QUEUE_INITSQN, + __genwqe_readq(cd, queue->IO_QUEUE_INITSQN), + queue->IO_QUEUE_WRAP, + __genwqe_readq(cd, queue->IO_QUEUE_WRAP), + queue->IO_QUEUE_OFFSET, + __genwqe_readq(cd, queue->IO_QUEUE_OFFSET), + queue->IO_QUEUE_WTIME, + __genwqe_readq(cd, queue->IO_QUEUE_WTIME), + queue->IO_QUEUE_ERRCNTS, + __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS), + queue->IO_QUEUE_LRW, + __genwqe_readq(cd, queue->IO_QUEUE_LRW)); + + seq_printf(s, "DDCB list (ddcb_act=%d/ddcb_next=%d):\n", + queue->ddcb_act, queue->ddcb_next); + + pddcb = queue->ddcb_vaddr; + for (i = 0; i < queue->ddcb_max; i++) { + seq_printf(s, " %-3d: RETC=%03x SEQ=%04x HSI/SHI=%02x/%02x ", + i, be16_to_cpu(pddcb->retc_16), + be16_to_cpu(pddcb->seqnum_16), + pddcb->hsi, pddcb->shi); + seq_printf(s, "PRIV=%06llx CMD=%02x\n", + be64_to_cpu(pddcb->priv_64), pddcb->cmd); + pddcb++; + } + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(ddcb_info); + +static int info_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + u64 app_id, slu_id, bitstream = -1; + struct pci_dev *pci_dev = cd->pci_dev; + + slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG); + app_id = __genwqe_readq(cd, IO_APP_UNITCFG); + + if (genwqe_is_privileged(cd)) + 
bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM); + + seq_printf(s, "%s driver version: %s\n" + " Device Name/Type: %s %s CardIdx: %d\n" + " SLU/APP Config : 0x%016llx/0x%016llx\n" + " Build Date : %u/%x/%u\n" + " Base Clock : %u MHz\n" + " Arch/SVN Release: %u/%llx\n" + " Bitstream : %llx\n", + GENWQE_DEVNAME, DRV_VERSION, dev_name(&pci_dev->dev), + genwqe_is_privileged(cd) ? + "Physical" : "Virtual or no SR-IOV", + cd->card_idx, slu_id, app_id, + (u16)((slu_id >> 12) & 0x0fLLU), /* month */ + (u16)((slu_id >> 4) & 0xffLLU), /* day */ + (u16)((slu_id >> 16) & 0x0fLLU) + 2010, /* year */ + genwqe_base_clock_frequency(cd), + (u16)((slu_id >> 32) & 0xffLLU), slu_id >> 40, + bitstream); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(info); + +void genwqe_init_debugfs(struct genwqe_dev *cd) +{ + struct dentry *root; + char card_name[64]; + char name[64]; + unsigned int i; + + sprintf(card_name, "%s%d_card", GENWQE_DEVNAME, cd->card_idx); + + root = debugfs_create_dir(card_name, cd->debugfs_genwqe); + + /* non privileged interfaces are done here */ + debugfs_create_file("ddcb_info", S_IRUGO, root, cd, &ddcb_info_fops); + debugfs_create_file("info", S_IRUGO, root, cd, &info_fops); + debugfs_create_x64("err_inject", 0666, root, &cd->err_inject); + debugfs_create_u32("ddcb_software_timeout", 0666, root, + &cd->ddcb_software_timeout); + debugfs_create_u32("kill_timeout", 0666, root, &cd->kill_timeout); + + /* privileged interfaces follow here */ + if (!genwqe_is_privileged(cd)) { + cd->debugfs_root = root; + return; + } + + debugfs_create_file("curr_regs", S_IRUGO, root, cd, &curr_regs_fops); + debugfs_create_file("curr_dbg_uid0", S_IRUGO, root, cd, + &curr_dbg_uid0_fops); + debugfs_create_file("curr_dbg_uid1", S_IRUGO, root, cd, + &curr_dbg_uid1_fops); + debugfs_create_file("curr_dbg_uid2", S_IRUGO, root, cd, + &curr_dbg_uid2_fops); + debugfs_create_file("prev_regs", S_IRUGO, root, cd, &prev_regs_fops); + debugfs_create_file("prev_dbg_uid0", S_IRUGO, root, cd, + &prev_dbg_uid0_fops); + debugfs_create_file("prev_dbg_uid1", S_IRUGO, root, cd, + &prev_dbg_uid1_fops); + debugfs_create_file("prev_dbg_uid2", S_IRUGO, root, cd, + &prev_dbg_uid2_fops); + + for (i = 0; i < GENWQE_MAX_VFS; i++) { + sprintf(name, "vf%u_jobtimeout_msec", i); + debugfs_create_u32(name, 0666, root, + &cd->vf_jobtimeout_msec[i]); + } + + debugfs_create_file("jobtimer", S_IRUGO, root, cd, &jtimer_fops); + debugfs_create_file("queue_working_time", S_IRUGO, root, cd, + &queue_working_time_fops); + debugfs_create_u32("skip_recovery", 0666, root, &cd->skip_recovery); + debugfs_create_u32("use_platform_recovery", 0666, root, + &cd->use_platform_recovery); + + cd->debugfs_root = root; +} + +void genqwe_exit_debugfs(struct genwqe_dev *cd) +{ + debugfs_remove_recursive(cd->debugfs_root); +} diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c new file mode 100644 index 000000000..55fc5b80e --- /dev/null +++ b/drivers/misc/genwqe/card_dev.c @@ -0,0 +1,1391 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@gmx.net> + * Author: Michael Ruettger <michael@ibmra.de> + */ + +/* + * Character device representation of the GenWQE device. This allows + * user-space applications to communicate with the card. 
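+ *
+ * A minimal user-space sequence might look like the following
+ * (a sketch; the device node name depends on the card index and the
+ * ioctl interface comes from the uapi header
+ * <linux/genwqe/genwqe_card.h>):
+ *
+ *   int fd = open("/dev/genwqe0_card", O_RDWR);
+ *   struct genwqe_ddcb_cmd cmd = { 0 };
+ *
+ *   cmd.cmd = ...;   (also fill in acfunc, asiv and the lengths)
+ *   rc = ioctl(fd, GENWQE_EXECUTE_DDCB, &cmd);
+ *   close(fd);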
+ */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/string.h> +#include <linux/fs.h> +#include <linux/sched/signal.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/atomic.h> + +#include "card_base.h" +#include "card_ddcb.h" + +static int genwqe_open_files(struct genwqe_dev *cd) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&cd->file_lock, flags); + rc = list_empty(&cd->file_list); + spin_unlock_irqrestore(&cd->file_lock, flags); + return !rc; +} + +static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile) +{ + unsigned long flags; + + cfile->opener = get_pid(task_tgid(current)); + spin_lock_irqsave(&cd->file_lock, flags); + list_add(&cfile->list, &cd->file_list); + spin_unlock_irqrestore(&cd->file_lock, flags); +} + +static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile) +{ + unsigned long flags; + + spin_lock_irqsave(&cd->file_lock, flags); + list_del(&cfile->list); + spin_unlock_irqrestore(&cd->file_lock, flags); + put_pid(cfile->opener); + + return 0; +} + +static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m) +{ + unsigned long flags; + + spin_lock_irqsave(&cfile->pin_lock, flags); + list_add(&m->pin_list, &cfile->pin_list); + spin_unlock_irqrestore(&cfile->pin_lock, flags); +} + +static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m) +{ + unsigned long flags; + + spin_lock_irqsave(&cfile->pin_lock, flags); + list_del(&m->pin_list); + spin_unlock_irqrestore(&cfile->pin_lock, flags); + + return 0; +} + +/** + * genwqe_search_pin() - Search for the mapping for a userspace address + * @cfile: Descriptor of opened file + * @u_addr: User virtual address + * @size: Size of buffer + * @virt_addr: Virtual address to be updated + * + * Return: Pointer to the corresponding mapping NULL if not found + */ +static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile, + unsigned long u_addr, + unsigned int size, + void **virt_addr) +{ + unsigned long flags; + struct dma_mapping *m; + + spin_lock_irqsave(&cfile->pin_lock, flags); + + list_for_each_entry(m, &cfile->pin_list, pin_list) { + if ((((u64)m->u_vaddr) <= (u_addr)) && + (((u64)m->u_vaddr + m->size) >= (u_addr + size))) { + + if (virt_addr) + *virt_addr = m->k_vaddr + + (u_addr - (u64)m->u_vaddr); + + spin_unlock_irqrestore(&cfile->pin_lock, flags); + return m; + } + } + spin_unlock_irqrestore(&cfile->pin_lock, flags); + return NULL; +} + +static void __genwqe_add_mapping(struct genwqe_file *cfile, + struct dma_mapping *dma_map) +{ + unsigned long flags; + + spin_lock_irqsave(&cfile->map_lock, flags); + list_add(&dma_map->card_list, &cfile->map_list); + spin_unlock_irqrestore(&cfile->map_lock, flags); +} + +static void __genwqe_del_mapping(struct genwqe_file *cfile, + struct dma_mapping *dma_map) +{ + unsigned long flags; + + spin_lock_irqsave(&cfile->map_lock, flags); + list_del(&dma_map->card_list); + spin_unlock_irqrestore(&cfile->map_lock, flags); +} + + +/** + * __genwqe_search_mapping() - Search for the mapping for a userspace address + * @cfile: descriptor of opened file + * @u_addr: user virtual address + * @size: size of buffer + * @dma_addr: DMA address to be updated + * @virt_addr: Virtual address to be updated + * Return: Pointer to the corresponding mapping NULL if not found + */ +static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile, + unsigned long u_addr, + unsigned int size, + dma_addr_t *dma_addr, + 
void **virt_addr)
+{
+	unsigned long flags;
+	struct dma_mapping *m;
+	struct pci_dev *pci_dev = cfile->cd->pci_dev;
+
+	spin_lock_irqsave(&cfile->map_lock, flags);
+	list_for_each_entry(m, &cfile->map_list, card_list) {
+
+		if ((((u64)m->u_vaddr) <= (u_addr)) &&
+		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {
+
+			/* match found: current is as expected and
+			   addr is in range */
+			if (dma_addr)
+				*dma_addr = m->dma_addr +
+					(u_addr - (u64)m->u_vaddr);
+
+			if (virt_addr)
+				*virt_addr = m->k_vaddr +
+					(u_addr - (u64)m->u_vaddr);
+
+			spin_unlock_irqrestore(&cfile->map_lock, flags);
+			return m;
+		}
+	}
+	spin_unlock_irqrestore(&cfile->map_lock, flags);
+
+	dev_err(&pci_dev->dev,
+		"[%s] Entry not found: u_addr=%lx, size=%x\n",
+		__func__, u_addr, size);
+
+	return NULL;
+}
+
+static void genwqe_remove_mappings(struct genwqe_file *cfile)
+{
+	int i = 0;
+	struct list_head *node, *next;
+	struct dma_mapping *dma_map;
+	struct genwqe_dev *cd = cfile->cd;
+	struct pci_dev *pci_dev = cfile->cd->pci_dev;
+
+	list_for_each_safe(node, next, &cfile->map_list) {
+		dma_map = list_entry(node, struct dma_mapping, card_list);
+
+		list_del_init(&dma_map->card_list);
+
+		/*
+		 * This is really a bug, because those things should
+		 * have been already tidied up.
+		 *
+		 * GENWQE_MAPPING_RAW should have been removed via munmap().
+		 * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code.
+		 */
+		dev_err(&pci_dev->dev,
+			"[%s] %d. cleanup mapping: u_vaddr=%p u_kaddr=%016lx dma_addr=%lx\n",
+			__func__, i++, dma_map->u_vaddr,
+			(unsigned long)dma_map->k_vaddr,
+			(unsigned long)dma_map->dma_addr);
+
+		if (dma_map->type == GENWQE_MAPPING_RAW) {
+			/* we allocated this dynamically */
+			__genwqe_free_consistent(cd, dma_map->size,
+						 dma_map->k_vaddr,
+						 dma_map->dma_addr);
+			kfree(dma_map);
+		} else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
+			/* we use dma_map statically from the request */
+			genwqe_user_vunmap(cd, dma_map);
+		}
+	}
+}
+
+static void genwqe_remove_pinnings(struct genwqe_file *cfile)
+{
+	struct list_head *node, *next;
+	struct dma_mapping *dma_map;
+	struct genwqe_dev *cd = cfile->cd;
+
+	list_for_each_safe(node, next, &cfile->pin_list) {
+		dma_map = list_entry(node, struct dma_mapping, pin_list);
+
+		/*
+		 * This is not a bug, because a killed process might
+		 * not call the unpin ioctl, which is supposed to free
+		 * the resources.
+		 *
+		 * Pinnings are dynamically allocated and need to be
+		 * deleted.
+		 */
+		list_del_init(&dma_map->pin_list);
+		genwqe_user_vunmap(cd, dma_map);
+		kfree(dma_map);
+	}
+}
+
+/**
+ * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
+ * @cd: GenWQE device information
+ * @sig: Signal to send out
+ *
+ * E.g.
genwqe_send_signal(cd, SIGIO); + */ +static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig) +{ + unsigned int files = 0; + unsigned long flags; + struct genwqe_file *cfile; + + spin_lock_irqsave(&cd->file_lock, flags); + list_for_each_entry(cfile, &cd->file_list, list) { + if (cfile->async_queue) + kill_fasync(&cfile->async_queue, sig, POLL_HUP); + files++; + } + spin_unlock_irqrestore(&cd->file_lock, flags); + return files; +} + +static int genwqe_terminate(struct genwqe_dev *cd) +{ + unsigned int files = 0; + unsigned long flags; + struct genwqe_file *cfile; + + spin_lock_irqsave(&cd->file_lock, flags); + list_for_each_entry(cfile, &cd->file_list, list) { + kill_pid(cfile->opener, SIGKILL, 1); + files++; + } + spin_unlock_irqrestore(&cd->file_lock, flags); + return files; +} + +/** + * genwqe_open() - file open + * @inode: file system information + * @filp: file handle + * + * This function is executed whenever an application calls + * open("/dev/genwqe",..). + * + * Return: 0 if successful or <0 if errors + */ +static int genwqe_open(struct inode *inode, struct file *filp) +{ + struct genwqe_dev *cd; + struct genwqe_file *cfile; + + cfile = kzalloc(sizeof(*cfile), GFP_KERNEL); + if (cfile == NULL) + return -ENOMEM; + + cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe); + cfile->cd = cd; + cfile->filp = filp; + cfile->client = NULL; + + spin_lock_init(&cfile->map_lock); /* list of raw memory allocations */ + INIT_LIST_HEAD(&cfile->map_list); + + spin_lock_init(&cfile->pin_lock); /* list of user pinned memory */ + INIT_LIST_HEAD(&cfile->pin_list); + + filp->private_data = cfile; + + genwqe_add_file(cd, cfile); + return 0; +} + +/** + * genwqe_fasync() - Setup process to receive SIGIO. + * @fd: file descriptor + * @filp: file handle + * @mode: file mode + * + * Sending a signal is working as following: + * + * if (cdev->async_queue) + * kill_fasync(&cdev->async_queue, SIGIO, POLL_IN); + * + * Some devices also implement asynchronous notification to indicate + * when the device can be written; in this case, of course, + * kill_fasync must be called with a mode of POLL_OUT. + */ +static int genwqe_fasync(int fd, struct file *filp, int mode) +{ + struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data; + + return fasync_helper(fd, filp, mode, &cdev->async_queue); +} + + +/** + * genwqe_release() - file close + * @inode: file system information + * @filp: file handle + * + * This function is executed whenever an application calls 'close(fd_genwqe)' + * + * Return: always 0 + */ +static int genwqe_release(struct inode *inode, struct file *filp) +{ + struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data; + struct genwqe_dev *cd = cfile->cd; + + /* there must be no entries in these lists! */ + genwqe_remove_mappings(cfile); + genwqe_remove_pinnings(cfile); + + /* remove this filp from the asynchronously notified filp's */ + genwqe_fasync(-1, filp, 0); + + /* + * For this to work we must not release cd when this cfile is + * not yet released, otherwise the list entry is invalid, + * because the list itself gets reinstantiated! + */ + genwqe_del_file(cd, cfile); + kfree(cfile); + return 0; +} + +static void genwqe_vma_open(struct vm_area_struct *vma) +{ + /* nothing ... */ +} + +/** + * genwqe_vma_close() - Called each time when vma is unmapped + * @vma: VMA area to close + * + * Free memory which got allocated by GenWQE mmap(). 
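+ *
+ * A sketch of the intended lifecycle from user space (hypothetical
+ * snippet; the device node name follows the GENWQE_DEVNAME "%u_card"
+ * pattern used by genwqe_device_create() below):
+ *
+ *   fd  = open("/dev/genwqe0_card", O_RDWR);
+ *   buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *   ...
+ *   munmap(buf, size);    -> ends up in genwqe_vma_close()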
+ */
+static void genwqe_vma_close(struct vm_area_struct *vma)
+{
+	unsigned long vsize = vma->vm_end - vma->vm_start;
+	struct inode *inode = file_inode(vma->vm_file);
+	struct dma_mapping *dma_map;
+	struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
+					    cdev_genwqe);
+	struct pci_dev *pci_dev = cd->pci_dev;
+	dma_addr_t d_addr = 0;
+	struct genwqe_file *cfile = vma->vm_private_data;
+
+	dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
+					  &d_addr, NULL);
+	if (dma_map == NULL) {
+		dev_err(&pci_dev->dev,
+			"  [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
+			__func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
+			vsize);
+		return;
+	}
+	__genwqe_del_mapping(cfile, dma_map);
+	__genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
+				 dma_map->dma_addr);
+	kfree(dma_map);
+}
+
+static const struct vm_operations_struct genwqe_vma_ops = {
+	.open	= genwqe_vma_open,
+	.close	= genwqe_vma_close,
+};
+
+/**
+ * genwqe_mmap() - Provide contiguous buffers to userspace
+ * @filp:	File pointer (unused)
+ * @vma:	VMA area to map
+ *
+ * We use mmap() to allocate contiguous buffers used for DMA
+ * transfers. After the buffer is allocated we remap it to user-space
+ * and remember a reference to our dma_mapping data structure, where
+ * we store the associated DMA address and allocated size.
+ *
+ * When we receive a DDCB execution request with the ATS bits set to
+ * plain buffer, we look up our dma_mapping list to find the
+ * corresponding DMA address for the associated user-space address.
+ */
+static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int rc;
+	unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
+	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
+	struct genwqe_dev *cd = cfile->cd;
+	struct dma_mapping *dma_map;
+
+	if (vsize == 0)
+		return -EINVAL;
+
+	if (get_order(vsize) > MAX_ORDER)
+		return -ENOMEM;
+
+	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
+	if (dma_map == NULL)
+		return -ENOMEM;
+
+	genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
+	dma_map->u_vaddr = (void *)vma->vm_start;
+	dma_map->size = vsize;
+	dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
+	dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
+						     &dma_map->dma_addr);
+	if (dma_map->k_vaddr == NULL) {
+		rc = -ENOMEM;
+		goto free_dma_map;
+	}
+
+	if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
+		*(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;
+
+	pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
+	rc = remap_pfn_range(vma,
+			     vma->vm_start,
+			     pfn,
+			     vsize,
+			     vma->vm_page_prot);
+	if (rc != 0) {
+		rc = -EFAULT;
+		goto free_dma_mem;
+	}
+
+	vma->vm_private_data = cfile;
+	vma->vm_ops = &genwqe_vma_ops;
+	__genwqe_add_mapping(cfile, dma_map);
+
+	return 0;
+
+ free_dma_mem:
+	__genwqe_free_consistent(cd, dma_map->size,
+				 dma_map->k_vaddr,
+				 dma_map->dma_addr);
+ free_dma_map:
+	kfree(dma_map);
+	return rc;
+}
+
+#define	FLASH_BLOCK	0x40000	/* we use 256k blocks */
+
+/**
+ * do_flash_update() - Execute flash update (write image or CVPD)
+ * @cfile:	Descriptor of opened file
+ * @load:	details about image load
+ *
+ * Return: 0 if successful
+ */
+static int do_flash_update(struct genwqe_file *cfile,
+			   struct genwqe_bitstream *load)
+{
+	int rc = 0;
+	int blocks_to_flash;
+	dma_addr_t dma_addr;
+	u64 flash = 0;
+	size_t tocopy = 0;
+	u8 __user *buf;
+	u8 *xbuf;
+	u32 crc;
+	u8 cmdopts;
+	struct genwqe_dev *cd = cfile->cd;
+	struct file *filp = cfile->filp;
+	struct pci_dev *pci_dev = cd->pci_dev;
+
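+	/*
+	 * Overall flow: validate size/address alignment, derive the
+	 * SLU command options from the requested partition, then move
+	 * the image in FLASH_BLOCK sized chunks, issuing one
+	 * SLCMD_MOVE_FLASH DDCB per chunk until everything is written.
+	 */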
+ if ((load->size & 0x3) != 0) + return -EINVAL; + + if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0) + return -EINVAL; + + /* FIXME Bits have changed for new service layer! */ + switch ((char)load->partition) { + case '0': + cmdopts = 0x14; + break; /* download/erase_first/part_0 */ + case '1': + cmdopts = 0x1C; + break; /* download/erase_first/part_1 */ + case 'v': + cmdopts = 0x0C; + break; /* download/erase_first/vpd */ + default: + return -EINVAL; + } + + buf = (u8 __user *)load->data_addr; + xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr); + if (xbuf == NULL) + return -ENOMEM; + + blocks_to_flash = load->size / FLASH_BLOCK; + while (load->size) { + struct genwqe_ddcb_cmd *req; + + /* + * We must be 4 byte aligned. Buffer must be 0 appened + * to have defined values when calculating CRC. + */ + tocopy = min_t(size_t, load->size, FLASH_BLOCK); + + rc = copy_from_user(xbuf, buf, tocopy); + if (rc) { + rc = -EFAULT; + goto free_buffer; + } + crc = genwqe_crc32(xbuf, tocopy, 0xffffffff); + + dev_dbg(&pci_dev->dev, + "[%s] DMA: %lx CRC: %08x SZ: %ld %d\n", + __func__, (unsigned long)dma_addr, crc, tocopy, + blocks_to_flash); + + /* prepare DDCB for SLU process */ + req = ddcb_requ_alloc(); + if (req == NULL) { + rc = -ENOMEM; + goto free_buffer; + } + + req->cmd = SLCMD_MOVE_FLASH; + req->cmdopts = cmdopts; + + /* prepare invariant values */ + if (genwqe_get_slu_id(cd) <= 0x2) { + *(__be64 *)&req->__asiv[0] = cpu_to_be64(dma_addr); + *(__be64 *)&req->__asiv[8] = cpu_to_be64(tocopy); + *(__be64 *)&req->__asiv[16] = cpu_to_be64(flash); + *(__be32 *)&req->__asiv[24] = cpu_to_be32(0); + req->__asiv[24] = load->uid; + *(__be32 *)&req->__asiv[28] = cpu_to_be32(crc); + + /* for simulation only */ + *(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id); + *(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id); + req->asiv_length = 32; /* bytes included in crc calc */ + } else { /* setup DDCB for ATS architecture */ + *(__be64 *)&req->asiv[0] = cpu_to_be64(dma_addr); + *(__be32 *)&req->asiv[8] = cpu_to_be32(tocopy); + *(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */ + *(__be64 *)&req->asiv[16] = cpu_to_be64(flash); + *(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24); + *(__be32 *)&req->asiv[28] = cpu_to_be32(crc); + + /* for simulation only */ + *(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id); + *(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id); + + /* Rd only */ + req->ats = 0x4ULL << 44; + req->asiv_length = 40; /* bytes included in crc calc */ + } + req->asv_length = 8; + + /* For Genwqe5 we get back the calculated CRC */ + *(u64 *)&req->asv[0] = 0ULL; /* 0x80 */ + + rc = __genwqe_execute_raw_ddcb(cd, req, filp->f_flags); + + load->retc = req->retc; + load->attn = req->attn; + load->progress = req->progress; + + if (rc < 0) { + ddcb_requ_free(req); + goto free_buffer; + } + + if (req->retc != DDCB_RETC_COMPLETE) { + rc = -EIO; + ddcb_requ_free(req); + goto free_buffer; + } + + load->size -= tocopy; + flash += tocopy; + buf += tocopy; + blocks_to_flash--; + ddcb_requ_free(req); + } + + free_buffer: + __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr); + return rc; +} + +static int do_flash_read(struct genwqe_file *cfile, + struct genwqe_bitstream *load) +{ + int rc, blocks_to_flash; + dma_addr_t dma_addr; + u64 flash = 0; + size_t tocopy = 0; + u8 __user *buf; + u8 *xbuf; + u8 cmdopts; + struct genwqe_dev *cd = cfile->cd; + struct file *filp = cfile->filp; + struct pci_dev *pci_dev = cd->pci_dev; + struct genwqe_ddcb_cmd *cmd; + + if 
((load->size & 0x3) != 0) + return -EINVAL; + + if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0) + return -EINVAL; + + /* FIXME Bits have changed for new service layer! */ + switch ((char)load->partition) { + case '0': + cmdopts = 0x12; + break; /* upload/part_0 */ + case '1': + cmdopts = 0x1A; + break; /* upload/part_1 */ + case 'v': + cmdopts = 0x0A; + break; /* upload/vpd */ + default: + return -EINVAL; + } + + buf = (u8 __user *)load->data_addr; + xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr); + if (xbuf == NULL) + return -ENOMEM; + + blocks_to_flash = load->size / FLASH_BLOCK; + while (load->size) { + /* + * We must be 4 byte aligned. Buffer must be 0 appened + * to have defined values when calculating CRC. + */ + tocopy = min_t(size_t, load->size, FLASH_BLOCK); + + dev_dbg(&pci_dev->dev, + "[%s] DMA: %lx SZ: %ld %d\n", + __func__, (unsigned long)dma_addr, tocopy, + blocks_to_flash); + + /* prepare DDCB for SLU process */ + cmd = ddcb_requ_alloc(); + if (cmd == NULL) { + rc = -ENOMEM; + goto free_buffer; + } + cmd->cmd = SLCMD_MOVE_FLASH; + cmd->cmdopts = cmdopts; + + /* prepare invariant values */ + if (genwqe_get_slu_id(cd) <= 0x2) { + *(__be64 *)&cmd->__asiv[0] = cpu_to_be64(dma_addr); + *(__be64 *)&cmd->__asiv[8] = cpu_to_be64(tocopy); + *(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash); + *(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0); + cmd->__asiv[24] = load->uid; + *(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0) /* CRC */; + cmd->asiv_length = 32; /* bytes included in crc calc */ + } else { /* setup DDCB for ATS architecture */ + *(__be64 *)&cmd->asiv[0] = cpu_to_be64(dma_addr); + *(__be32 *)&cmd->asiv[8] = cpu_to_be32(tocopy); + *(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */ + *(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash); + *(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24); + *(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */ + + /* rd/wr */ + cmd->ats = 0x5ULL << 44; + cmd->asiv_length = 40; /* bytes included in crc calc */ + } + cmd->asv_length = 8; + + /* we only get back the calculated CRC */ + *(u64 *)&cmd->asv[0] = 0ULL; /* 0x80 */ + + rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags); + + load->retc = cmd->retc; + load->attn = cmd->attn; + load->progress = cmd->progress; + + if ((rc < 0) && (rc != -EBADMSG)) { + ddcb_requ_free(cmd); + goto free_buffer; + } + + rc = copy_to_user(buf, xbuf, tocopy); + if (rc) { + rc = -EFAULT; + ddcb_requ_free(cmd); + goto free_buffer; + } + + /* We know that we can get retc 0x104 with CRC err */ + if (((cmd->retc == DDCB_RETC_FAULT) && + (cmd->attn != 0x02)) || /* Normally ignore CRC error */ + ((cmd->retc == DDCB_RETC_COMPLETE) && + (cmd->attn != 0x00))) { /* Everything was fine */ + rc = -EIO; + ddcb_requ_free(cmd); + goto free_buffer; + } + + load->size -= tocopy; + flash += tocopy; + buf += tocopy; + blocks_to_flash--; + ddcb_requ_free(cmd); + } + rc = 0; + + free_buffer: + __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr); + return rc; +} + +static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) +{ + int rc; + struct genwqe_dev *cd = cfile->cd; + struct pci_dev *pci_dev = cfile->cd->pci_dev; + struct dma_mapping *dma_map; + unsigned long map_addr; + unsigned long map_size; + + if ((m->addr == 0x0) || (m->size == 0)) + return -EINVAL; + if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK)) + return -EINVAL; + + map_addr = (m->addr & PAGE_MASK); + map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE); + + dma_map = kzalloc(sizeof(struct 
dma_mapping), GFP_KERNEL); + if (dma_map == NULL) + return -ENOMEM; + + genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED); + rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size); + if (rc != 0) { + dev_err(&pci_dev->dev, + "[%s] genwqe_user_vmap rc=%d\n", __func__, rc); + kfree(dma_map); + return rc; + } + + genwqe_add_pin(cfile, dma_map); + return 0; +} + +static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) +{ + struct genwqe_dev *cd = cfile->cd; + struct dma_mapping *dma_map; + unsigned long map_addr; + unsigned long map_size; + + if (m->addr == 0x0) + return -EINVAL; + + map_addr = (m->addr & PAGE_MASK); + map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE); + + dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL); + if (dma_map == NULL) + return -ENOENT; + + genwqe_del_pin(cfile, dma_map); + genwqe_user_vunmap(cd, dma_map); + kfree(dma_map); + return 0; +} + +/** + * ddcb_cmd_cleanup() - Remove dynamically created fixup entries + * @cfile: Descriptor of opened file + * @req: DDCB work request + * + * Only if there are any. Pinnings are not removed. + */ +static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req) +{ + unsigned int i; + struct dma_mapping *dma_map; + struct genwqe_dev *cd = cfile->cd; + + for (i = 0; i < DDCB_FIXUPS; i++) { + dma_map = &req->dma_mappings[i]; + + if (dma_mapping_used(dma_map)) { + __genwqe_del_mapping(cfile, dma_map); + genwqe_user_vunmap(cd, dma_map); + } + if (req->sgls[i].sgl != NULL) + genwqe_free_sync_sgl(cd, &req->sgls[i]); + } + return 0; +} + +/** + * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references + * @cfile: Descriptor of opened file + * @req: DDCB work request + * + * Before the DDCB gets executed we need to handle the fixups. We + * replace the user-space addresses with DMA addresses or do + * additional setup work e.g. generating a scatter-gather list which + * is used to describe the memory referred to in the fixup. + */ +static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req) +{ + int rc; + unsigned int asiv_offs, i; + struct genwqe_dev *cd = cfile->cd; + struct genwqe_ddcb_cmd *cmd = &req->cmd; + struct dma_mapping *m; + + for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58; + i++, asiv_offs += 0x08) { + + u64 u_addr; + dma_addr_t d_addr; + u32 u_size = 0; + u64 ats_flags; + + ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs); + + switch (ats_flags) { + + case ATS_TYPE_DATA: + break; /* nothing to do here */ + + case ATS_TYPE_FLAT_RDWR: + case ATS_TYPE_FLAT_RD: { + u_addr = be64_to_cpu(*((__be64 *)&cmd-> + asiv[asiv_offs])); + u_size = be32_to_cpu(*((__be32 *)&cmd-> + asiv[asiv_offs + 0x08])); + + /* + * No data available. Ignore u_addr in this + * case and set addr to 0. Hardware must not + * fetch the buffer. + */ + if (u_size == 0x0) { + *((__be64 *)&cmd->asiv[asiv_offs]) = + cpu_to_be64(0x0); + break; + } + + m = __genwqe_search_mapping(cfile, u_addr, u_size, + &d_addr, NULL); + if (m == NULL) { + rc = -EFAULT; + goto err_out; + } + + *((__be64 *)&cmd->asiv[asiv_offs]) = + cpu_to_be64(d_addr); + break; + } + + case ATS_TYPE_SGL_RDWR: + case ATS_TYPE_SGL_RD: { + int page_offs; + + u_addr = be64_to_cpu(*((__be64 *) + &cmd->asiv[asiv_offs])); + u_size = be32_to_cpu(*((__be32 *) + &cmd->asiv[asiv_offs + 0x08])); + + /* + * No data available. Ignore u_addr in this + * case and set addr to 0. Hardware must not + * fetch the empty sgl. 
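+			 * (Zeroing the descriptor mirrors what the
+			 * ATS_TYPE_FLAT case above does for empty
+			 * buffers.)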
+			 */
+			if (u_size == 0x0) {
+				*((__be64 *)&cmd->asiv[asiv_offs]) =
+					cpu_to_be64(0x0);
+				break;
+			}
+
+			m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
+			if (m != NULL) {
+				page_offs = (u_addr -
+					     (u64)m->u_vaddr)/PAGE_SIZE;
+			} else {
+				m = &req->dma_mappings[i];
+
+				genwqe_mapping_init(m,
+						    GENWQE_MAPPING_SGL_TEMP);
+
+				if (ats_flags == ATS_TYPE_SGL_RD)
+					m->write = 0;
+
+				rc = genwqe_user_vmap(cd, m, (void *)u_addr,
+						      u_size);
+				if (rc != 0)
+					goto err_out;
+
+				__genwqe_add_mapping(cfile, m);
+				page_offs = 0;
+			}
+
+			/* create genwqe style scatter gather list */
+			rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
+						   (void __user *)u_addr,
+						   u_size, m->write);
+			if (rc != 0)
+				goto err_out;
+
+			genwqe_setup_sgl(cd, &req->sgls[i],
+					 &m->dma_list[page_offs]);
+
+			*((__be64 *)&cmd->asiv[asiv_offs]) =
+				cpu_to_be64(req->sgls[i].sgl_dma_addr);
+
+			break;
+		}
+		default:
+			rc = -EINVAL;
+			goto err_out;
+		}
+	}
+	return 0;
+
+ err_out:
+	ddcb_cmd_cleanup(cfile, req);
+	return rc;
+}
+
+/**
+ * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
+ * @cfile:	Descriptor of opened file
+ * @cmd:        Command identifier (passed from user)
+ *
+ * The code will build up the translation tables or look up the
+ * contiguous memory allocation table to find the right translations
+ * and DMA addresses.
+ */
+static int genwqe_execute_ddcb(struct genwqe_file *cfile,
+			       struct genwqe_ddcb_cmd *cmd)
+{
+	int rc;
+	struct genwqe_dev *cd = cfile->cd;
+	struct file *filp = cfile->filp;
+	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
+
+	rc = ddcb_cmd_fixups(cfile, req);
+	if (rc != 0)
+		return rc;
+
+	rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
+	ddcb_cmd_cleanup(cfile, req);
+	return rc;
+}
+
+static int do_execute_ddcb(struct genwqe_file *cfile,
+			   unsigned long arg, int raw)
+{
+	int rc;
+	struct genwqe_ddcb_cmd *cmd;
+	struct genwqe_dev *cd = cfile->cd;
+	struct file *filp = cfile->filp;
+
+	cmd = ddcb_requ_alloc();
+	if (cmd == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
+		ddcb_requ_free(cmd);
+		return -EFAULT;
+	}
+
+	if (!raw)
+		rc = genwqe_execute_ddcb(cfile, cmd);
+	else
+		rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
+
+	/* Copy back only the modified fields. Do not copy ASIV
+	   back since the copy got modified by the driver.
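+	   Copying sizeof(*cmd) - DDCB_ASIV_LENGTH bytes hands
+	   retc/attn/progress and the ASV back to user space while the
+	   ASIV block is simply not copied back.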
*/ + if (copy_to_user((void __user *)arg, cmd, + sizeof(*cmd) - DDCB_ASIV_LENGTH)) { + ddcb_requ_free(cmd); + return -EFAULT; + } + + ddcb_requ_free(cmd); + return rc; +} + +/** + * genwqe_ioctl() - IO control + * @filp: file handle + * @cmd: command identifier (passed from user) + * @arg: argument (passed from user) + * + * Return: 0 success + */ +static long genwqe_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int rc = 0; + struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data; + struct genwqe_dev *cd = cfile->cd; + struct pci_dev *pci_dev = cd->pci_dev; + struct genwqe_reg_io __user *io; + u64 val; + u32 reg_offs; + + /* Return -EIO if card hit EEH */ + if (pci_channel_offline(pci_dev)) + return -EIO; + + if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE) + return -EINVAL; + + switch (cmd) { + + case GENWQE_GET_CARD_STATE: + put_user(cd->card_state, (enum genwqe_card_state __user *)arg); + return 0; + + /* Register access */ + case GENWQE_READ_REG64: { + io = (struct genwqe_reg_io __user *)arg; + + if (get_user(reg_offs, &io->num)) + return -EFAULT; + + if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7)) + return -EINVAL; + + val = __genwqe_readq(cd, reg_offs); + put_user(val, &io->val64); + return 0; + } + + case GENWQE_WRITE_REG64: { + io = (struct genwqe_reg_io __user *)arg; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if ((filp->f_flags & O_ACCMODE) == O_RDONLY) + return -EPERM; + + if (get_user(reg_offs, &io->num)) + return -EFAULT; + + if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7)) + return -EINVAL; + + if (get_user(val, &io->val64)) + return -EFAULT; + + __genwqe_writeq(cd, reg_offs, val); + return 0; + } + + case GENWQE_READ_REG32: { + io = (struct genwqe_reg_io __user *)arg; + + if (get_user(reg_offs, &io->num)) + return -EFAULT; + + if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3)) + return -EINVAL; + + val = __genwqe_readl(cd, reg_offs); + put_user(val, &io->val64); + return 0; + } + + case GENWQE_WRITE_REG32: { + io = (struct genwqe_reg_io __user *)arg; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if ((filp->f_flags & O_ACCMODE) == O_RDONLY) + return -EPERM; + + if (get_user(reg_offs, &io->num)) + return -EFAULT; + + if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3)) + return -EINVAL; + + if (get_user(val, &io->val64)) + return -EFAULT; + + __genwqe_writel(cd, reg_offs, val); + return 0; + } + + /* Flash update/reading */ + case GENWQE_SLU_UPDATE: { + struct genwqe_bitstream load; + + if (!genwqe_is_privileged(cd)) + return -EPERM; + + if ((filp->f_flags & O_ACCMODE) == O_RDONLY) + return -EPERM; + + if (copy_from_user(&load, (void __user *)arg, + sizeof(load))) + return -EFAULT; + + rc = do_flash_update(cfile, &load); + + if (copy_to_user((void __user *)arg, &load, sizeof(load))) + return -EFAULT; + + return rc; + } + + case GENWQE_SLU_READ: { + struct genwqe_bitstream load; + + if (!genwqe_is_privileged(cd)) + return -EPERM; + + if (genwqe_flash_readback_fails(cd)) + return -ENOSPC; /* known to fail for old versions */ + + if (copy_from_user(&load, (void __user *)arg, sizeof(load))) + return -EFAULT; + + rc = do_flash_read(cfile, &load); + + if (copy_to_user((void __user *)arg, &load, sizeof(load))) + return -EFAULT; + + return rc; + } + + /* memory pinning and unpinning */ + case GENWQE_PIN_MEM: { + struct genwqe_mem m; + + if (copy_from_user(&m, (void __user *)arg, sizeof(m))) + return -EFAULT; + + return genwqe_pin_mem(cfile, &m); + } + + case GENWQE_UNPIN_MEM: { + struct genwqe_mem m; + + if (copy_from_user(&m, (void 
__user *)arg, sizeof(m))) + return -EFAULT; + + return genwqe_unpin_mem(cfile, &m); + } + + /* launch an DDCB and wait for completion */ + case GENWQE_EXECUTE_DDCB: + return do_execute_ddcb(cfile, arg, 0); + + case GENWQE_EXECUTE_RAW_DDCB: { + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + return do_execute_ddcb(cfile, arg, 1); + } + + default: + return -EINVAL; + } + + return rc; +} + +static const struct file_operations genwqe_fops = { + .owner = THIS_MODULE, + .open = genwqe_open, + .fasync = genwqe_fasync, + .mmap = genwqe_mmap, + .unlocked_ioctl = genwqe_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .release = genwqe_release, +}; + +static int genwqe_device_initialized(struct genwqe_dev *cd) +{ + return cd->dev != NULL; +} + +/** + * genwqe_device_create() - Create and configure genwqe char device + * @cd: genwqe device descriptor + * + * This function must be called before we create any more genwqe + * character devices, because it is allocating the major and minor + * number which are supposed to be used by the client drivers. + */ +int genwqe_device_create(struct genwqe_dev *cd) +{ + int rc; + struct pci_dev *pci_dev = cd->pci_dev; + + /* + * Here starts the individual setup per client. It must + * initialize its own cdev data structure with its own fops. + * The appropriate devnum needs to be created. The ranges must + * not overlap. + */ + rc = alloc_chrdev_region(&cd->devnum_genwqe, 0, + GENWQE_MAX_MINOR, GENWQE_DEVNAME); + if (rc < 0) { + dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n"); + goto err_dev; + } + + cdev_init(&cd->cdev_genwqe, &genwqe_fops); + cd->cdev_genwqe.owner = THIS_MODULE; + + rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1); + if (rc < 0) { + dev_err(&pci_dev->dev, "err: cdev_add failed\n"); + goto err_add; + } + + /* + * Finally the device in /dev/... must be created. The rule is + * to use card%d_clientname for each created device. + */ + cd->dev = device_create_with_groups(cd->class_genwqe, + &cd->pci_dev->dev, + cd->devnum_genwqe, cd, + genwqe_attribute_groups, + GENWQE_DEVNAME "%u_card", + cd->card_idx); + if (IS_ERR(cd->dev)) { + rc = PTR_ERR(cd->dev); + goto err_cdev; + } + + genwqe_init_debugfs(cd); + + return 0; + + err_cdev: + cdev_del(&cd->cdev_genwqe); + err_add: + unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR); + err_dev: + cd->dev = NULL; + return rc; +} + +static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd) +{ + int rc; + unsigned int i; + struct pci_dev *pci_dev = cd->pci_dev; + + if (!genwqe_open_files(cd)) + return 0; + + dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__); + + rc = genwqe_kill_fasync(cd, SIGIO); + if (rc > 0) { + /* give kill_timeout seconds to close file descriptors ... */ + for (i = 0; (i < GENWQE_KILL_TIMEOUT) && + genwqe_open_files(cd); i++) { + dev_info(&pci_dev->dev, " %d sec ...", i); + + cond_resched(); + msleep(1000); + } + + /* if no open files we can safely continue, else ... 
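+		   escalate from SIGIO to SIGKILL below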
 */
+		if (!genwqe_open_files(cd))
+			return 0;
+
+		dev_warn(&pci_dev->dev,
+			 "[%s] send SIGKILL and wait ...\n", __func__);
+
+		rc = genwqe_terminate(cd);
+		if (rc) {
+			/* Give kill_timeout more seconds to end processes */
+			for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
+				     genwqe_open_files(cd); i++) {
+				dev_warn(&pci_dev->dev, "  %d sec ...", i);
+
+				cond_resched();
+				msleep(1000);
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ * genwqe_device_remove() - Remove genwqe's char device
+ * @cd: GenWQE device information
+ *
+ * This function must be called after the client devices are removed
+ * because it will free the major/minor number range for the genwqe
+ * drivers.
+ *
+ * This function must be robust enough to be called twice.
+ */
+int genwqe_device_remove(struct genwqe_dev *cd)
+{
+	int rc;
+	struct pci_dev *pci_dev = cd->pci_dev;
+
+	if (!genwqe_device_initialized(cd))
+		return 1;
+
+	genwqe_inform_and_stop_processes(cd);
+
+	/*
+	 * We currently do wait until all file descriptors are
+	 * closed. This leads to a problem when we abort the
+	 * application which will decrease this reference from
+	 * 1/unused to 0/illegal and not from 2/used 1/empty.
+	 */
+	rc = kref_read(&cd->cdev_genwqe.kobj.kref);
+	if (rc != 1) {
+		dev_err(&pci_dev->dev,
+			"[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
+		panic("Fatal err: cannot free resources with pending references!");
+	}
+
+	genqwe_exit_debugfs(cd);
+	device_destroy(cd->class_genwqe, cd->devnum_genwqe);
+	cdev_del(&cd->cdev_genwqe);
+	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
+	cd->dev = NULL;
+
+	return 0;
+}
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c
new file mode 100644
index 000000000..b2f115602
--- /dev/null
+++ b/drivers/misc/genwqe/card_sysfs.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@gmx.net>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ */
+
+/*
+ * Sysfs interfaces for the GenWQE card. There are attributes to query
+ * the version of the bitstream as well as some for the driver. For
+ * debugging, please also see the debugfs interfaces of this driver.
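+ *
+ * The attributes show up below the card's device directory, e.g.
+ * (illustrative path, assuming the first card and the "genwqe"
+ * class name):
+ *
+ *   cat /sys/class/genwqe/genwqe0_card/version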
+ */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/string.h> +#include <linux/fs.h> +#include <linux/sysfs.h> +#include <linux/ctype.h> +#include <linux/device.h> + +#include "card_base.h" +#include "card_ddcb.h" + +static const char * const genwqe_types[] = { + [GENWQE_TYPE_ALTERA_230] = "GenWQE4-230", + [GENWQE_TYPE_ALTERA_530] = "GenWQE4-530", + [GENWQE_TYPE_ALTERA_A4] = "GenWQE5-A4", + [GENWQE_TYPE_ALTERA_A7] = "GenWQE5-A7", +}; + +static ssize_t status_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct genwqe_dev *cd = dev_get_drvdata(dev); + const char *cs[GENWQE_CARD_STATE_MAX] = { "unused", "used", "error" }; + + return sprintf(buf, "%s\n", cs[cd->card_state]); +} +static DEVICE_ATTR_RO(status); + +static ssize_t appid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + char app_name[5]; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + genwqe_read_app_id(cd, app_name, sizeof(app_name)); + return sprintf(buf, "%s\n", app_name); +} +static DEVICE_ATTR_RO(appid); + +static ssize_t version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u64 slu_id, app_id; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG); + app_id = __genwqe_readq(cd, IO_APP_UNITCFG); + + return sprintf(buf, "%016llx.%016llx\n", slu_id, app_id); +} +static DEVICE_ATTR_RO(version); + +static ssize_t type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u8 card_type; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + card_type = genwqe_card_type(cd); + return sprintf(buf, "%s\n", (card_type >= ARRAY_SIZE(genwqe_types)) ? + "invalid" : genwqe_types[card_type]); +} +static DEVICE_ATTR_RO(type); + +static ssize_t tempsens_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u64 tempsens; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + tempsens = __genwqe_readq(cd, IO_SLU_TEMPERATURE_SENSOR); + return sprintf(buf, "%016llx\n", tempsens); +} +static DEVICE_ATTR_RO(tempsens); + +static ssize_t freerunning_timer_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 t; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + t = __genwqe_readq(cd, IO_SLC_FREE_RUNNING_TIMER); + return sprintf(buf, "%016llx\n", t); +} +static DEVICE_ATTR_RO(freerunning_timer); + +static ssize_t queue_working_time_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 t; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + t = __genwqe_readq(cd, IO_SLC_QUEUE_WTIME); + return sprintf(buf, "%016llx\n", t); +} +static DEVICE_ATTR_RO(queue_working_time); + +static ssize_t base_clock_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 base_clock; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + base_clock = genwqe_base_clock_frequency(cd); + return sprintf(buf, "%lld\n", base_clock); +} +static DEVICE_ATTR_RO(base_clock); + +/* + * curr_bitstream_show() - Show the current bitstream id + * + * There is a bug in some old versions of the CPLD which selects the + * bitstream, which causes the IO_SLU_BITSTREAM register to report + * unreliable data in very rare cases. This makes this sysfs + * unreliable up to the point were a new CPLD version is being used. 
+ * + * Unfortunately there is no automatic way yet to query the CPLD + * version, such that you need to manually ensure via programming + * tools that you have a recent version of the CPLD software. + * + * The proposed circumvention is to use a special recovery bitstream + * on the backup partition (0) to identify problems while loading the + * image. + */ +static ssize_t curr_bitstream_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int curr_bitstream; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + curr_bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1; + return sprintf(buf, "%d\n", curr_bitstream); +} +static DEVICE_ATTR_RO(curr_bitstream); + +/* + * next_bitstream_show() - Show the next activated bitstream + * + * IO_SLC_CFGREG_SOFTRESET: This register can only be accessed by the PF. + */ +static ssize_t next_bitstream_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int next_bitstream; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + switch ((cd->softreset & 0xc) >> 2) { + case 0x2: + next_bitstream = 0; + break; + case 0x3: + next_bitstream = 1; + break; + default: + next_bitstream = -1; + break; /* error */ + } + return sprintf(buf, "%d\n", next_bitstream); +} + +static ssize_t next_bitstream_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int partition; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + if (kstrtoint(buf, 0, &partition) < 0) + return -EINVAL; + + switch (partition) { + case 0x0: + cd->softreset = 0x78; + break; + case 0x1: + cd->softreset = 0x7c; + break; + default: + return -EINVAL; + } + + __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset); + return count; +} +static DEVICE_ATTR_RW(next_bitstream); + +static ssize_t reload_bitstream_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int reload; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + if (kstrtoint(buf, 0, &reload) < 0) + return -EINVAL; + + if (reload == 0x1) { + if (cd->card_state == GENWQE_CARD_UNUSED || + cd->card_state == GENWQE_CARD_USED) + cd->card_state = GENWQE_CARD_RELOAD_BITSTREAM; + else + return -EIO; + } else { + return -EINVAL; + } + + return count; +} +static DEVICE_ATTR_WO(reload_bitstream); + +/* + * Create device_attribute structures / params: name, mode, show, store + * additional flag if valid in VF + */ +static struct attribute *genwqe_attributes[] = { + &dev_attr_tempsens.attr, + &dev_attr_next_bitstream.attr, + &dev_attr_curr_bitstream.attr, + &dev_attr_base_clock.attr, + &dev_attr_type.attr, + &dev_attr_version.attr, + &dev_attr_appid.attr, + &dev_attr_status.attr, + &dev_attr_freerunning_timer.attr, + &dev_attr_queue_working_time.attr, + &dev_attr_reload_bitstream.attr, + NULL, +}; + +static struct attribute *genwqe_normal_attributes[] = { + &dev_attr_type.attr, + &dev_attr_version.attr, + &dev_attr_appid.attr, + &dev_attr_status.attr, + &dev_attr_freerunning_timer.attr, + &dev_attr_queue_working_time.attr, + NULL, +}; + +/* + * genwqe_is_visible() - Determine if sysfs attribute should be visible or not + * + * VFs have restricted mmio capabilities, so not all sysfs entries + * are allowed in VFs. 
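+ * Attributes that are not listed in genwqe_normal_attributes[]
+ * are therefore hidden (mode 0) when the driver runs on a VF.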
+ */ +static umode_t genwqe_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + unsigned int j; + struct device *dev = kobj_to_dev(kobj); + struct genwqe_dev *cd = dev_get_drvdata(dev); + umode_t mode = attr->mode; + + if (genwqe_is_privileged(cd)) + return mode; + + for (j = 0; genwqe_normal_attributes[j] != NULL; j++) + if (genwqe_normal_attributes[j] == attr) + return mode; + + return 0; +} + +static struct attribute_group genwqe_attribute_group = { + .is_visible = genwqe_is_visible, + .attrs = genwqe_attributes, +}; + +const struct attribute_group *genwqe_attribute_groups[] = { + &genwqe_attribute_group, + NULL, +}; diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c new file mode 100644 index 000000000..039b923d1 --- /dev/null +++ b/drivers/misc/genwqe/card_utils.c @@ -0,0 +1,1049 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@gmx.net> + * Author: Michael Ruettger <michael@ibmra.de> + */ + +/* + * Miscelanous functionality used in the other GenWQE driver parts. + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/vmalloc.h> +#include <linux/page-flags.h> +#include <linux/scatterlist.h> +#include <linux/hugetlb.h> +#include <linux/iommu.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/ctype.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/pgtable.h> + +#include "genwqe_driver.h" +#include "card_base.h" +#include "card_ddcb.h" + +/** + * __genwqe_writeq() - Write 64-bit register + * @cd: genwqe device descriptor + * @byte_offs: byte offset within BAR + * @val: 64-bit value + * + * Return: 0 if success; < 0 if error + */ +int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val) +{ + struct pci_dev *pci_dev = cd->pci_dev; + + if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) + return -EIO; + + if (cd->mmio == NULL) + return -EIO; + + if (pci_channel_offline(pci_dev)) + return -EIO; + + __raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs); + return 0; +} + +/** + * __genwqe_readq() - Read 64-bit register + * @cd: genwqe device descriptor + * @byte_offs: offset within BAR + * + * Return: value from register + */ +u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs) +{ + if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) + return 0xffffffffffffffffull; + + if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) && + (byte_offs == IO_SLC_CFGREG_GFIR)) + return 0x000000000000ffffull; + + if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) && + (byte_offs == IO_SLC_CFGREG_GFIR)) + return 0x00000000ffff0000ull; + + if (cd->mmio == NULL) + return 0xffffffffffffffffull; + + return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs)); +} + +/** + * __genwqe_writel() - Write 32-bit register + * @cd: genwqe device descriptor + * @byte_offs: byte offset within BAR + * @val: 32-bit value + * + * Return: 0 if success; < 0 if error + */ +int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val) +{ + struct pci_dev *pci_dev = cd->pci_dev; + + if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) + return -EIO; + + if (cd->mmio == NULL) + return -EIO; + + if (pci_channel_offline(pci_dev)) + return -EIO; + + __raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs); + return 0; +} + +/** + * 
__genwqe_readl() - Read 32-bit register
+ * @cd:         genwqe device descriptor
+ * @byte_offs:  offset within BAR
+ *
+ * Return: Value from register
+ */
+u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
+{
+	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
+		return 0xffffffff;
+
+	if (cd->mmio == NULL)
+		return 0xffffffff;
+
+	return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs));
+}
+
+/**
+ * genwqe_read_app_id() - Extract app_id
+ * @cd:         genwqe device descriptor
+ * @app_name:   carrier used to pass-back name
+ * @len:        length of data for name
+ *
+ * app_unitcfg needs to be filled with valid data first
+ */
+int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
+{
+	int i, j;
+	u32 app_id = (u32)cd->app_unitcfg;
+
+	memset(app_name, 0, len);
+	for (i = 0, j = 0; j < min(len, 4); j++) {
+		char ch = (char)((app_id >> (24 - j*8)) & 0xff);
+
+		if (ch == ' ')
+			continue;
+		app_name[i++] = isprint(ch) ? ch : 'X';
+	}
+	return i;
+}
+
+/**
+ * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations
+ *
+ * Existing kernel functions seem to use a different polynomial,
+ * therefore we could not use them here.
+ *
+ * Genwqe's Polynomial = 0x20044009
+ */
+#define CRC32_POLYNOMIAL	0x20044009
+static u32 crc32_tab[256];	/* crc32 lookup table */
+
+void genwqe_init_crc32(void)
+{
+	int i, j;
+	u32 crc;
+
+	for (i = 0;  i < 256;  i++) {
+		crc = i << 24;
+		for (j = 0;  j < 8;  j++) {
+			if (crc & 0x80000000)
+				crc = (crc << 1) ^ CRC32_POLYNOMIAL;
+			else
+				crc = (crc << 1);
+		}
+		crc32_tab[i] = crc;
+	}
+}
+
+/**
+ * genwqe_crc32() - Generate 32-bit crc as required for DDCBs
+ * @buff:       pointer to data buffer
+ * @len:        length of data for calculation
+ * @init:       initial crc (0xffffffff at start)
+ *
+ * polynomial = x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
+ *
+ * Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
+ * result in a crc32 of 0xf33cb7d3.
+ *
+ * The existing kernel crc functions did not cover this polynomial yet.
+ *
+ * Return: crc32 checksum.
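+ *
+ * Minimal usage sketch (hypothetical; genwqe_init_crc32() must have
+ * been called once beforehand to populate the lookup table):
+ *
+ *   u8 data[4] = { 0x01, 0x02, 0x03, 0x04 };
+ *   u32 crc = genwqe_crc32(data, sizeof(data), 0xffffffff);
+ *   -> crc == 0xf33cb7d3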
+ */ +u32 genwqe_crc32(u8 *buff, size_t len, u32 init) +{ + int i; + u32 crc; + + crc = init; + while (len--) { + i = ((crc >> 24) ^ *buff++) & 0xFF; + crc = (crc << 8) ^ crc32_tab[i]; + } + return crc; +} + +void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, + dma_addr_t *dma_handle) +{ + if (get_order(size) >= MAX_ORDER) + return NULL; + + return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle, + GFP_KERNEL); +} + +void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + if (vaddr == NULL) + return; + + dma_free_coherent(&cd->pci_dev->dev, size, vaddr, dma_handle); +} + +static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list, + int num_pages) +{ + int i; + struct pci_dev *pci_dev = cd->pci_dev; + + for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) { + pci_unmap_page(pci_dev, dma_list[i], + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + dma_list[i] = 0x0; + } +} + +static int genwqe_map_pages(struct genwqe_dev *cd, + struct page **page_list, int num_pages, + dma_addr_t *dma_list) +{ + int i; + struct pci_dev *pci_dev = cd->pci_dev; + + /* establish DMA mapping for requested pages */ + for (i = 0; i < num_pages; i++) { + dma_addr_t daddr; + + dma_list[i] = 0x0; + daddr = pci_map_page(pci_dev, page_list[i], + 0, /* map_offs */ + PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); /* FIXME rd/rw */ + + if (pci_dma_mapping_error(pci_dev, daddr)) { + dev_err(&pci_dev->dev, + "[%s] err: no dma addr daddr=%016llx!\n", + __func__, (long long)daddr); + goto err; + } + + dma_list[i] = daddr; + } + return 0; + + err: + genwqe_unmap_pages(cd, dma_list, num_pages); + return -EIO; +} + +static int genwqe_sgl_size(int num_pages) +{ + int len, num_tlb = num_pages / 7; + + len = sizeof(struct sg_entry) * (num_pages+num_tlb + 1); + return roundup(len, PAGE_SIZE); +} + +/* + * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages + * + * Allocates memory for sgl and overlapping pages. Pages which might + * overlap other user-space memory blocks are being cached for DMAs, + * such that we do not run into syncronization issues. Data is copied + * from user-space into the cached pages. 
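+ *
+ * Only the potentially partial first page (fpage) and partial last
+ * page (lpage) are copied into bounce buffers; the page-aligned
+ * pages in between are DMA-mapped directly without copying.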
+ */ +int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, + void __user *user_addr, size_t user_size, int write) +{ + int ret = -ENOMEM; + struct pci_dev *pci_dev = cd->pci_dev; + + sgl->fpage_offs = offset_in_page((unsigned long)user_addr); + sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size); + sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE); + sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE; + + dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n", + __func__, user_addr, user_size, sgl->nr_pages, + sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size); + + sgl->user_addr = user_addr; + sgl->user_size = user_size; + sgl->write = write; + sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages); + + if (get_order(sgl->sgl_size) > MAX_ORDER) { + dev_err(&pci_dev->dev, + "[%s] err: too much memory requested!\n", __func__); + return ret; + } + + sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size, + &sgl->sgl_dma_addr); + if (sgl->sgl == NULL) { + dev_err(&pci_dev->dev, + "[%s] err: no memory available!\n", __func__); + return ret; + } + + /* Only use buffering on incomplete pages */ + if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) { + sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE, + &sgl->fpage_dma_addr); + if (sgl->fpage == NULL) + goto err_out; + + /* Sync with user memory */ + if (copy_from_user(sgl->fpage + sgl->fpage_offs, + user_addr, sgl->fpage_size)) { + ret = -EFAULT; + goto err_out; + } + } + if (sgl->lpage_size != 0) { + sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE, + &sgl->lpage_dma_addr); + if (sgl->lpage == NULL) + goto err_out1; + + /* Sync with user memory */ + if (copy_from_user(sgl->lpage, user_addr + user_size - + sgl->lpage_size, sgl->lpage_size)) { + ret = -EFAULT; + goto err_out2; + } + } + return 0; + + err_out2: + __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage, + sgl->lpage_dma_addr); + sgl->lpage = NULL; + sgl->lpage_dma_addr = 0; + err_out1: + __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage, + sgl->fpage_dma_addr); + sgl->fpage = NULL; + sgl->fpage_dma_addr = 0; + err_out: + __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl, + sgl->sgl_dma_addr); + sgl->sgl = NULL; + sgl->sgl_dma_addr = 0; + sgl->sgl_size = 0; + + return ret; +} + +int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, + dma_addr_t *dma_list) +{ + int i = 0, j = 0, p; + unsigned long dma_offs, map_offs; + dma_addr_t prev_daddr = 0; + struct sg_entry *s, *last_s = NULL; + size_t size = sgl->user_size; + + dma_offs = 128; /* next block if needed/dma_offset */ + map_offs = sgl->fpage_offs; /* offset in first page */ + + s = &sgl->sgl[0]; /* first set of 8 entries */ + p = 0; /* page */ + while (p < sgl->nr_pages) { + dma_addr_t daddr; + unsigned int size_to_map; + + /* always write the chaining entry, cleanup is done later */ + j = 0; + s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs); + s[j].len = cpu_to_be32(128); + s[j].flags = cpu_to_be32(SG_CHAINED); + j++; + + while (j < 8) { + /* DMA mapping for requested page, offs, size */ + size_to_map = min(size, PAGE_SIZE - map_offs); + + if ((p == 0) && (sgl->fpage != NULL)) { + daddr = sgl->fpage_dma_addr + map_offs; + + } else if ((p == sgl->nr_pages - 1) && + (sgl->lpage != NULL)) { + daddr = sgl->lpage_dma_addr; + } else { + daddr = dma_list[p] + map_offs; + } + + size -= size_to_map; + map_offs = 0; + + if (prev_daddr == daddr) { + u32 prev_len = 
be32_to_cpu(last_s->len); + + /* pr_info("daddr combining: " + "%016llx/%08x -> %016llx\n", + prev_daddr, prev_len, daddr); */ + + last_s->len = cpu_to_be32(prev_len + + size_to_map); + + p++; /* process next page */ + if (p == sgl->nr_pages) + goto fixup; /* nothing to do */ + + prev_daddr = daddr + size_to_map; + continue; + } + + /* start new entry */ + s[j].target_addr = cpu_to_be64(daddr); + s[j].len = cpu_to_be32(size_to_map); + s[j].flags = cpu_to_be32(SG_DATA); + prev_daddr = daddr + size_to_map; + last_s = &s[j]; + j++; + + p++; /* process next page */ + if (p == sgl->nr_pages) + goto fixup; /* nothing to do */ + } + dma_offs += 128; + s += 8; /* continue 8 elements further */ + } + fixup: + if (j == 1) { /* combining happened on last entry! */ + s -= 8; /* full shift needed on previous sgl block */ + j = 7; /* shift all elements */ + } + + for (i = 0; i < j; i++) /* move elements 1 up */ + s[i] = s[i + 1]; + + s[i].target_addr = cpu_to_be64(0); + s[i].len = cpu_to_be32(0); + s[i].flags = cpu_to_be32(SG_END_LIST); + return 0; +} + +/** + * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages + * @cd: genwqe device descriptor + * @sgl: scatter gather list describing user-space memory + * + * After the DMA transfer has been completed we free the memory for + * the sgl and the cached pages. Data is being transferred from cached + * pages into user-space buffers. + */ +int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl) +{ + int rc = 0; + size_t offset; + unsigned long res; + struct pci_dev *pci_dev = cd->pci_dev; + + if (sgl->fpage) { + if (sgl->write) { + res = copy_to_user(sgl->user_addr, + sgl->fpage + sgl->fpage_offs, sgl->fpage_size); + if (res) { + dev_err(&pci_dev->dev, + "[%s] err: copying fpage! (res=%lu)\n", + __func__, res); + rc = -EFAULT; + } + } + __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage, + sgl->fpage_dma_addr); + sgl->fpage = NULL; + sgl->fpage_dma_addr = 0; + } + if (sgl->lpage) { + if (sgl->write) { + offset = sgl->user_size - sgl->lpage_size; + res = copy_to_user(sgl->user_addr + offset, sgl->lpage, + sgl->lpage_size); + if (res) { + dev_err(&pci_dev->dev, + "[%s] err: copying lpage! (res=%lu)\n", + __func__, res); + rc = -EFAULT; + } + } + __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage, + sgl->lpage_dma_addr); + sgl->lpage = NULL; + sgl->lpage_dma_addr = 0; + } + __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl, + sgl->sgl_dma_addr); + + sgl->sgl = NULL; + sgl->sgl_dma_addr = 0x0; + sgl->sgl_size = 0; + return rc; +} + +/** + * genwqe_user_vmap() - Map user-space memory to virtual kernel memory + * @cd: pointer to genwqe device + * @m: mapping params + * @uaddr: user virtual address + * @size: size of memory to be mapped + * + * We need to think about how we could speed this up. Of course it is + * not a good idea to do this over and over again, like we are + * currently doing it. Nevertheless, I am curious where on the path + * the performance is spend. Most probably within the memory + * allocation functions, but maybe also in the DMA mapping code. + * + * Restrictions: The maximum size of the possible mapping currently depends + * on the amount of memory we can get using kzalloc() for the + * page_list and pci_alloc_consistent for the sg_list. + * The sg_list is currently itself not scattered, which could + * be fixed with some effort. The page_list must be split into + * PAGE_SIZE chunks too. All that will make the complicated + * code more complicated. 
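+ *
+ * Typical pairing as used by ddcb_cmd_fixups() (sketch):
+ *
+ *   genwqe_mapping_init(m, GENWQE_MAPPING_SGL_TEMP);
+ *   rc = genwqe_user_vmap(cd, m, (void *)u_addr, u_size);
+ *   ... DMA via m->dma_list ...
+ *   genwqe_user_vunmap(cd, m);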
+ * + * Return: 0 if success + */ +int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr, + unsigned long size) +{ + int rc = -EINVAL; + unsigned long data, offs; + struct pci_dev *pci_dev = cd->pci_dev; + + if ((uaddr == NULL) || (size == 0)) { + m->size = 0; /* mark unused and not added */ + return -EINVAL; + } + m->u_vaddr = uaddr; + m->size = size; + + /* determine space needed for page_list. */ + data = (unsigned long)uaddr; + offs = offset_in_page(data); + if (size > ULONG_MAX - PAGE_SIZE - offs) { + m->size = 0; /* mark unused and not added */ + return -EINVAL; + } + m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE); + + m->page_list = kcalloc(m->nr_pages, + sizeof(struct page *) + sizeof(dma_addr_t), + GFP_KERNEL); + if (!m->page_list) { + dev_err(&pci_dev->dev, "err: alloc page_list failed\n"); + m->nr_pages = 0; + m->u_vaddr = NULL; + m->size = 0; /* mark unused and not added */ + return -ENOMEM; + } + m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages); + + /* pin user pages in memory */ + rc = pin_user_pages_fast(data & PAGE_MASK, /* page aligned addr */ + m->nr_pages, + m->write ? FOLL_WRITE : 0, /* readable/writable */ + m->page_list); /* ptrs to pages */ + if (rc < 0) + goto fail_pin_user_pages; + + /* assumption: pin_user_pages can be killed by signals. */ + if (rc < m->nr_pages) { + unpin_user_pages_dirty_lock(m->page_list, rc, m->write); + rc = -EFAULT; + goto fail_pin_user_pages; + } + + rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list); + if (rc != 0) + goto fail_free_user_pages; + + return 0; + + fail_free_user_pages: + unpin_user_pages_dirty_lock(m->page_list, m->nr_pages, m->write); + + fail_pin_user_pages: + kfree(m->page_list); + m->page_list = NULL; + m->dma_list = NULL; + m->nr_pages = 0; + m->u_vaddr = NULL; + m->size = 0; /* mark unused and not added */ + return rc; +} + +/** + * genwqe_user_vunmap() - Undo mapping of user-space mem to virtual kernel + * memory + * @cd: pointer to genwqe device + * @m: mapping params + */ +int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m) +{ + struct pci_dev *pci_dev = cd->pci_dev; + + if (!dma_mapping_used(m)) { + dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n", + __func__, m); + return -EINVAL; + } + + if (m->dma_list) + genwqe_unmap_pages(cd, m->dma_list, m->nr_pages); + + if (m->page_list) { + unpin_user_pages_dirty_lock(m->page_list, m->nr_pages, + m->write); + kfree(m->page_list); + m->page_list = NULL; + m->dma_list = NULL; + m->nr_pages = 0; + } + + m->u_vaddr = NULL; + m->size = 0; /* mark as unused and not added */ + return 0; +} + +/** + * genwqe_card_type() - Get chip type SLU Configuration Register + * @cd: pointer to the genwqe device descriptor + * Return: 0: Altera Stratix-IV 230 + * 1: Altera Stratix-IV 530 + * 2: Altera Stratix-V A4 + * 3: Altera Stratix-V A7 + */ +u8 genwqe_card_type(struct genwqe_dev *cd) +{ + u64 card_type = cd->slu_unitcfg; + + return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20); +} + +/** + * genwqe_card_reset() - Reset the card + * @cd: pointer to the genwqe device descriptor + */ +int genwqe_card_reset(struct genwqe_dev *cd) +{ + u64 softrst; + struct pci_dev *pci_dev = cd->pci_dev; + + if (!genwqe_is_privileged(cd)) + return -ENODEV; + + /* new SL */ + __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull); + msleep(1000); + __genwqe_readq(cd, IO_HSU_FIR_CLR); + __genwqe_readq(cd, IO_APP_FIR_CLR); + __genwqe_readq(cd, IO_SLU_FIR_CLR); + + /* + * Read-modify-write to preserve the stealth bits + * + * For SL 
>= 039, Stealth WE bit allows removing + * the read-modify-wrote. + * r-m-w may require a mask 0x3C to avoid hitting hard + * reset again for error reset (should be 0, chicken). + */ + softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull; + __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull); + + /* give ERRORRESET some time to finish */ + msleep(50); + + if (genwqe_need_err_masking(cd)) { + dev_info(&pci_dev->dev, + "[%s] masking errors for old bitstreams\n", __func__); + __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull); + } + return 0; +} + +int genwqe_read_softreset(struct genwqe_dev *cd) +{ + u64 bitstream; + + if (!genwqe_is_privileged(cd)) + return -ENODEV; + + bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1; + cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull; + return 0; +} + +/** + * genwqe_set_interrupt_capability() - Configure MSI capability structure + * @cd: pointer to the device + * @count: number of vectors to allocate + * Return: 0 if no error + */ +int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count) +{ + int rc; + + rc = pci_alloc_irq_vectors(cd->pci_dev, 1, count, PCI_IRQ_MSI); + if (rc < 0) + return rc; + return 0; +} + +/** + * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability() + * @cd: pointer to the device + */ +void genwqe_reset_interrupt_capability(struct genwqe_dev *cd) +{ + pci_free_irq_vectors(cd->pci_dev); +} + +/** + * set_reg_idx() - Fill array with data. Ignore illegal offsets. + * @cd: card device + * @r: debug register array + * @i: index to desired entry + * @m: maximum possible entries + * @addr: addr which is read + * @idx: index in debug array + * @val: read value + */ +static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r, + unsigned int *i, unsigned int m, u32 addr, u32 idx, + u64 val) +{ + if (WARN_ON_ONCE(*i >= m)) + return -EFAULT; + + r[*i].addr = addr; + r[*i].idx = idx; + r[*i].val = val; + ++*i; + return 0; +} + +static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r, + unsigned int *i, unsigned int m, u32 addr, u64 val) +{ + return set_reg_idx(cd, r, i, m, addr, 0, val); +} + +int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs, + unsigned int max_regs, int all) +{ + unsigned int i, j, idx = 0; + u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr; + u64 gfir, sluid, appid, ufir, ufec, sfir, sfec; + + /* Global FIR */ + gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); + set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir); + + /* UnitCfg for SLU */ + sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */ + set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid); + + /* UnitCfg for APP */ + appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */ + set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid); + + /* Check all chip Units */ + for (i = 0; i < GENWQE_MAX_UNITS; i++) { + + /* Unit FIR */ + ufir_addr = (i << 24) | 0x008; + ufir = __genwqe_readq(cd, ufir_addr); + set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir); + + /* Unit FEC */ + ufec_addr = (i << 24) | 0x018; + ufec = __genwqe_readq(cd, ufec_addr); + set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec); + + for (j = 0; j < 64; j++) { + /* wherever there is a primary 1, read the 2ndary */ + if (!all && (!(ufir & (1ull << j)))) + continue; + + sfir_addr = (i << 24) | (0x100 + 8 * j); + sfir = __genwqe_readq(cd, sfir_addr); + set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir); + + sfec_addr = (i << 24) | (0x300 + 8 * j); + sfec = __genwqe_readq(cd, sfec_addr); + 
+ set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
+ }
+ }
+
+ /* fill with invalid data until end */
+ for (i = idx; i < max_regs; i++) {
+ regs[i].addr = 0xffffffff;
+ regs[i].val = 0xffffffffffffffffull;
+ }
+ return idx;
+}
+
+/**
+ * genwqe_ffdc_buff_size() - Calculates the number of dump registers
+ * @cd: genwqe device descriptor
+ * @uid: unit ID
+ */
+int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
+{
+ int entries = 0, ring, traps, traces, trace_entries;
+ u32 eevptr_addr, l_addr, d_len, d_type;
+ u64 eevptr, val, addr;
+
+ eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
+ eevptr = __genwqe_readq(cd, eevptr_addr);
+
+ if ((eevptr != 0x0) && (eevptr != -1ull)) {
+ l_addr = GENWQE_UID_OFFS(uid) | eevptr;
+
+ while (1) {
+ val = __genwqe_readq(cd, l_addr);
+
+ if ((val == 0x0) || (val == -1ull))
+ break;
+
+ /* 38:24 */
+ d_len = (val & 0x0000007fff000000ull) >> 24;
+
+ /* 39 */
+ d_type = (val & 0x0000008000000000ull) >> 36;
+
+ if (d_type) { /* repeat */
+ entries += d_len;
+ } else { /* size in bytes! */
+ entries += d_len >> 3;
+ }
+
+ l_addr += 8;
+ }
+ }
+
+ for (ring = 0; ring < 8; ring++) {
+ addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
+ val = __genwqe_readq(cd, addr);
+
+ if ((val == 0x0ull) || (val == -1ull))
+ continue;
+
+ traps = (val >> 24) & 0xff;
+ traces = (val >> 16) & 0xff;
+ trace_entries = val & 0xffff;
+
+ entries += traps + (traces * trace_entries);
+ }
+ return entries;
+}
+
+/**
+ * genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
+ * @cd: genwqe device descriptor
+ * @uid: unit ID
+ * @regs: register information
+ * @max_regs: number of register entries
+ */
+int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
+ struct genwqe_reg *regs, unsigned int max_regs)
+{
+ int i, traps, traces, trace, trace_entries, trace_entry, ring;
+ unsigned int idx = 0;
+ u32 eevptr_addr, l_addr, d_addr, d_len, d_type;
+ u64 eevptr, e, val, addr;
+
+ eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
+ eevptr = __genwqe_readq(cd, eevptr_addr);
+
+ if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) {
+ l_addr = GENWQE_UID_OFFS(uid) | eevptr;
+ while (1) {
+ e = __genwqe_readq(cd, l_addr);
+ if ((e == 0x0) || (e == 0xffffffffffffffffull))
+ break;
+
+ d_addr = (e & 0x0000000000ffffffull); /* 23:0 */
+ d_len = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */
+ d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */
+ d_addr |= GENWQE_UID_OFFS(uid);
+
+ if (d_type) {
+ for (i = 0; i < (int)d_len; i++) {
+ val = __genwqe_readq(cd, d_addr);
+ set_reg_idx(cd, regs, &idx, max_regs,
+ d_addr, i, val);
+ }
+ } else {
+ d_len >>= 3; /* Size in bytes! */
+ for (i = 0; i < (int)d_len; i++, d_addr += 8) {
+ val = __genwqe_readq(cd, d_addr);
+ set_reg_idx(cd, regs, &idx, max_regs,
+ d_addr, 0, val);
+ }
+ }
+ l_addr += 8;
+ }
+ }
+
+ /*
+ * To save time, there are only 6 traces populated on Uid=2,
+ * Ring=1, each with iters=512.
+ */
+ for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds,
+ 2...7 are ASI rings */
+ addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
+ val = __genwqe_readq(cd, addr);
+
+ if ((val == 0x0ull) || (val == -1ull))
+ continue;
+
+ traps = (val >> 24) & 0xff; /* Number of Traps */
+ traces = (val >> 16) & 0xff; /* Number of Traces */
+ trace_entries = val & 0xffff; /* Entries per trace */
+
+ /* Note: This is a combined loop that dumps both the traps */
+ /* (for the trace == 0 case) as well as the traces 1 to */
+ /* 'traces'. */
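+ /* A trace is selected by writing its ring/trace id to the */
+ /* EXTENDED_DIAG_SELECTOR register; its entries are then read */
+ /* back one by one through the EXTENDED_DIAG_READ_MBX register. */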
+ for (trace = 0; trace <= traces; trace++) {
+ u32 diag_sel =
+ GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);
+
+ addr = (GENWQE_UID_OFFS(uid) |
+ IO_EXTENDED_DIAG_SELECTOR);
+ __genwqe_writeq(cd, addr, diag_sel);
+
+ for (trace_entry = 0;
+ trace_entry < (trace ? trace_entries : traps);
+ trace_entry++) {
+ addr = (GENWQE_UID_OFFS(uid) |
+ IO_EXTENDED_DIAG_READ_MBX);
+ val = __genwqe_readq(cd, addr);
+ set_reg_idx(cd, regs, &idx, max_regs, addr,
+ (diag_sel<<16) | trace_entry, val);
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * genwqe_write_vreg() - Write register in virtual window
+ * @cd: genwqe device descriptor
+ * @reg: register (byte) offset within BAR
+ * @val: value to write
+ * @func: PCI virtual function
+ *
+ * Note, these registers are only accessible to the PF through the
+ * VF-window. It is not intended for the VF to access.
+ */
+int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
+{
+ __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
+ __genwqe_writeq(cd, reg, val);
+ return 0;
+}
+
+/**
+ * genwqe_read_vreg() - Read register in virtual window
+ * @cd: genwqe device descriptor
+ * @reg: register (byte) offset within BAR
+ * @func: PCI virtual function
+ *
+ * Note, these registers are only accessible to the PF through the
+ * VF-window. It is not intended for the VF to access.
+ */
+u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
+{
+ __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
+ return __genwqe_readq(cd, reg);
+}
+
+/**
+ * genwqe_base_clock_frequency() - Determine base clock frequency of the card
+ * @cd: genwqe device descriptor
+ *
+ * Note: From a design perspective it turned out to be a bad idea to
+ * use codes here to specify the frequency/speed values. An old
+ * driver cannot understand new codes and is therefore always a
+ * problem. Better is to measure out the value or put the
+ * speed/frequency directly into a register which is always a valid
+ * value for old as well as for new software.
+ *
+ * Return: Card clock in MHz
+ */
+int genwqe_base_clock_frequency(struct genwqe_dev *cd)
+{
+ u16 speed; /* MHz MHz MHz MHz */
+ static const int speed_grade[] = { 250, 200, 166, 175 };
+
+ speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
+ if (speed >= ARRAY_SIZE(speed_grade))
+ return 0; /* illegal value */
+
+ return speed_grade[speed];
+}
+
+/**
+ * genwqe_stop_traps() - Stop traps
+ * @cd: genwqe device descriptor
+ *
+ * Before reading out the analysis data, we need to stop the traps.
+ */
+void genwqe_stop_traps(struct genwqe_dev *cd)
+{
+ __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull);
+}
+
+/**
+ * genwqe_start_traps() - Start traps
+ * @cd: genwqe device descriptor
+ *
+ * After having read the data, we can/must enable the traps again.
+ */
+void genwqe_start_traps(struct genwqe_dev *cd)
+{
+ __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull);
+
+ if (genwqe_need_err_masking(cd))
+ __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
+}
diff --git a/drivers/misc/genwqe/genwqe_driver.h b/drivers/misc/genwqe/genwqe_driver.h
new file mode 100644
index 000000000..8c333481b
--- /dev/null
+++ b/drivers/misc/genwqe/genwqe_driver.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __GENWQE_DRIVER_H__
+#define __GENWQE_DRIVER_H__
+
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@gmx.net>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ */
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/scatterlist.h>
+#include <linux/iommu.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+
+#include <asm/byteorder.h>
+#include <linux/genwqe/genwqe_card.h>
+
+#define DRV_VERSION "2.0.25"
+
+/*
+ * Static minor number assignment, until we decide/implement
+ * something dynamic.
+ */
+#define GENWQE_MAX_MINOR 128 /* up to 128 possible genwqe devices */
+
+/**
+ * ddcb_requ_alloc() - Allocate a new DDCB execution request
+ *
+ * This data structure contains the user visible fields of the DDCB
+ * to be executed.
+ *
+ * Return: ptr to genwqe_ddcb_cmd data structure
+ */
+struct genwqe_ddcb_cmd *ddcb_requ_alloc(void);
+
+/**
+ * ddcb_requ_free() - Free DDCB execution request.
+ * @req: ptr to genwqe_ddcb_cmd data structure.
+ */
+void ddcb_requ_free(struct genwqe_ddcb_cmd *req);
+
+u32 genwqe_crc32(u8 *buff, size_t len, u32 init);
+
+static inline void genwqe_hexdump(struct pci_dev *pci_dev,
+ const void *buff, unsigned int size)
+{
+ char prefix[32];
+
+ scnprintf(prefix, sizeof(prefix), "%s %s: ",
+ GENWQE_DEVNAME, pci_name(pci_dev));
+
+ print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, 16, 1, buff,
+ size, true);
+}
+
+#endif /* __GENWQE_DRIVER_H__ */
diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/misc/habanalabs/Kconfig
new file mode 100644
index 000000000..1640340d3
--- /dev/null
+++ b/drivers/misc/habanalabs/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# HabanaLabs AI accelerators driver
+#
+
+config HABANA_AI
+ tristate "HabanaAI accelerators (habanalabs)"
+ depends on PCI && HAS_IOMEM
+ select FRAME_VECTOR
+ select GENERIC_ALLOCATOR
+ select HWMON
+ help
+ Enables PCIe card driver for Habana's AI Processors (AIP) that are
+ designed to accelerate Deep Learning inference and training workloads.
+
+ The driver manages the PCIe devices and provides an IOCTL interface
+ for the user to submit workloads to the devices.
+
+ The user-space interface is described in
+ include/uapi/misc/habanalabs.h
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called habanalabs.
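
For orientation, the IOCTL interface mentioned in the help text above can be exercised from user space with only a handful of calls. The sketch below creates and then destroys one command buffer through the CB IOCTL that is handled by hl_cb_ioctl() later in this series; the device node path /dev/hl0, the 4 KiB size and the error handling are illustrative assumptions, not requirements of the driver.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h> /* installed copy of include/uapi/misc/habanalabs.h */

int main(void)
{
	union hl_cb_args args;
	uint64_t handle;
	int fd;

	fd = open("/dev/hl0", O_RDWR); /* assumed node of the first device */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* ask the driver for a 4 KiB command buffer */
	memset(&args, 0, sizeof(args));
	args.in.op = HL_CB_OP_CREATE;
	args.in.cb_size = 4096;
	if (ioctl(fd, HL_IOCTL_CB, &args)) {
		perror("HL_IOCTL_CB create");
		close(fd);
		return 1;
	}
	handle = args.out.cb_handle;
	printf("got CB handle 0x%llx\n", (unsigned long long)handle);

	/* hand the buffer back */
	memset(&args, 0, sizeof(args));
	args.in.op = HL_CB_OP_DESTROY;
	args.in.cb_handle = handle;
	if (ioctl(fd, HL_IOCTL_CB, &args))
		perror("HL_IOCTL_CB destroy");

	close(fd);
	return 0;
}

The returned handle is what a later mmap() or command submission would refer to; as hl_cb_create() below shows, requests smaller than PAGE_SIZE are rounded up, and anything up to HL_MAX_CB_SIZE is accepted.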
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile new file mode 100644 index 000000000..a786c0a7d --- /dev/null +++ b/drivers/misc/habanalabs/Makefile @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for HabanaLabs AI accelerators driver +# + +obj-$(CONFIG_HABANA_AI) := habanalabs.o + +include $(src)/common/Makefile +habanalabs-y += $(HL_COMMON_FILES) + +include $(src)/goya/Makefile +habanalabs-y += $(HL_GOYA_FILES) + +include $(src)/gaudi/Makefile +habanalabs-y += $(HL_GAUDI_FILES) + +habanalabs-$(CONFIG_DEBUG_FS) += common/debugfs.o diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile new file mode 100644 index 000000000..eccd8c7dc --- /dev/null +++ b/drivers/misc/habanalabs/common/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \ + common/asid.o common/habanalabs_ioctl.o \ + common/command_buffer.o common/hw_queue.o common/irq.o \ + common/sysfs.o common/hwmon.o common/memory.o \ + common/command_submission.o common/mmu.o common/mmu_v1.o \ + common/firmware_if.o common/pci.o diff --git a/drivers/misc/habanalabs/common/asid.c b/drivers/misc/habanalabs/common/asid.c new file mode 100644 index 000000000..a2fdf31cf --- /dev/null +++ b/drivers/misc/habanalabs/common/asid.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" + +#include <linux/slab.h> + +int hl_asid_init(struct hl_device *hdev) +{ + hdev->asid_bitmap = kcalloc(BITS_TO_LONGS(hdev->asic_prop.max_asid), + sizeof(*hdev->asid_bitmap), GFP_KERNEL); + if (!hdev->asid_bitmap) + return -ENOMEM; + + mutex_init(&hdev->asid_mutex); + + /* ASID 0 is reserved for the kernel driver and device CPU */ + set_bit(0, hdev->asid_bitmap); + + return 0; +} + +void hl_asid_fini(struct hl_device *hdev) +{ + mutex_destroy(&hdev->asid_mutex); + kfree(hdev->asid_bitmap); +} + +unsigned long hl_asid_alloc(struct hl_device *hdev) +{ + unsigned long found; + + mutex_lock(&hdev->asid_mutex); + + found = find_first_zero_bit(hdev->asid_bitmap, + hdev->asic_prop.max_asid); + if (found == hdev->asic_prop.max_asid) + found = 0; + else + set_bit(found, hdev->asid_bitmap); + + mutex_unlock(&hdev->asid_mutex); + + return found; +} + +void hl_asid_free(struct hl_device *hdev, unsigned long asid) +{ + if (WARN((asid == 0 || asid >= hdev->asic_prop.max_asid), + "Invalid ASID %lu", asid)) + return; + clear_bit(asid, hdev->asid_bitmap); +} diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c new file mode 100644 index 000000000..ada570f35 --- /dev/null +++ b/drivers/misc/habanalabs/common/command_buffer.c @@ -0,0 +1,687 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */ + +#include <uapi/misc/habanalabs.h> +#include "habanalabs.h" + +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/genalloc.h> + +static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_vm_va_block *va_block, *tmp; + dma_addr_t bus_addr; + u64 virt_addr; + u32 page_size = prop->pmmu.page_size; + s32 offset; + int rc; + + if (!hdev->supports_cb_mapping) { + dev_err_ratelimited(hdev->dev, + "Cannot map CB because no VA range is allocated for CB mapping\n"); + return -EINVAL; + } + + if (!hdev->mmu_enable) { + dev_err_ratelimited(hdev->dev, + "Cannot map CB because MMU is disabled\n"); + return -EINVAL; + } + + INIT_LIST_HEAD(&cb->va_block_list); + + for (bus_addr = cb->bus_address; + bus_addr < cb->bus_address + cb->size; + bus_addr += page_size) { + + virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size); + if (!virt_addr) { + dev_err(hdev->dev, + "Failed to allocate device virtual address for CB\n"); + rc = -ENOMEM; + goto err_va_pool_free; + } + + va_block = kzalloc(sizeof(*va_block), GFP_KERNEL); + if (!va_block) { + rc = -ENOMEM; + gen_pool_free(ctx->cb_va_pool, virt_addr, page_size); + goto err_va_pool_free; + } + + va_block->start = virt_addr; + va_block->end = virt_addr + page_size; + va_block->size = page_size; + list_add_tail(&va_block->node, &cb->va_block_list); + } + + mutex_lock(&ctx->mmu_lock); + + bus_addr = cb->bus_address; + offset = 0; + list_for_each_entry(va_block, &cb->va_block_list, node) { + rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size, + list_is_last(&va_block->node, + &cb->va_block_list)); + if (rc) { + dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", + va_block->start); + goto err_va_umap; + } + + bus_addr += va_block->size; + offset += va_block->size; + } + + hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR); + + mutex_unlock(&ctx->mmu_lock); + + cb->is_mmu_mapped = true; + + return 0; + +err_va_umap: + list_for_each_entry(va_block, &cb->va_block_list, node) { + if (offset <= 0) + break; + hl_mmu_unmap(ctx, va_block->start, va_block->size, + offset <= va_block->size); + offset -= va_block->size; + } + + hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); + + mutex_unlock(&ctx->mmu_lock); + +err_va_pool_free: + list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) { + gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size); + list_del(&va_block->node); + kfree(va_block); + } + + return rc; +} + +static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm_va_block *va_block, *tmp; + + mutex_lock(&ctx->mmu_lock); + + list_for_each_entry(va_block, &cb->va_block_list, node) + if (hl_mmu_unmap(ctx, va_block->start, va_block->size, + list_is_last(&va_block->node, + &cb->va_block_list))) + dev_warn_ratelimited(hdev->dev, + "Failed to unmap CB's va 0x%llx\n", + va_block->start); + + hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); + + mutex_unlock(&ctx->mmu_lock); + + list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) { + gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size); + list_del(&va_block->node); + kfree(va_block); + } +} + +static void cb_fini(struct hl_device *hdev, struct hl_cb *cb) +{ + if (cb->is_internal) + gen_pool_free(hdev->internal_cb_pool, + (uintptr_t)cb->kernel_address, cb->size); + else + 
hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
+ cb->kernel_address, cb->bus_address);
+
+ kfree(cb);
+}
+
+static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
+{
+ if (cb->is_pool) {
+ spin_lock(&hdev->cb_pool_lock);
+ list_add(&cb->pool_list, &hdev->cb_pool);
+ spin_unlock(&hdev->cb_pool_lock);
+ } else {
+ cb_fini(hdev, cb);
+ }
+}
+
+static void cb_release(struct kref *ref)
+{
+ struct hl_device *hdev;
+ struct hl_cb *cb;
+
+ cb = container_of(ref, struct hl_cb, refcount);
+ hdev = cb->hdev;
+
+ hl_debugfs_remove_cb(cb);
+
+ if (cb->is_mmu_mapped)
+ cb_unmap_mem(cb->ctx, cb);
+
+ hl_ctx_put(cb->ctx);
+
+ cb_do_release(hdev, cb);
+}
+
+static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
+ int ctx_id, bool internal_cb)
+{
+ struct hl_cb *cb;
+ u32 cb_offset;
+ void *p;
+
+ /*
+ * We use GFP_ATOMIC here because this function can be called from
+ * the latency-sensitive code path for command submission. Due to H/W
+ * limitations in some of the ASICs, the kernel must copy the user CB
+ * that is designated for an external queue and actually enqueue
+ * the kernel's copy. Hence, we must never sleep in this code section
+ * and must use GFP_ATOMIC for all memory allocations.
+ */
+ if (ctx_id == HL_KERNEL_ASID_ID)
+ cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
+ else
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
+
+ if (!cb)
+ return NULL;
+
+ if (internal_cb) {
+ p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
+ if (!p) {
+ kfree(cb);
+ return NULL;
+ }
+
+ cb_offset = p - hdev->internal_cb_pool_virt_addr;
+ cb->is_internal = true;
+ cb->bus_address = hdev->internal_cb_va_base + cb_offset;
+ } else if (ctx_id == HL_KERNEL_ASID_ID) {
+ p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
+ &cb->bus_address, GFP_ATOMIC);
+ } else {
+ p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
+ &cb->bus_address,
+ GFP_USER | __GFP_ZERO);
+ }
+
+ if (!p) {
+ dev_err(hdev->dev,
+ "failed to allocate %d of dma memory for CB\n",
+ cb_size);
+ kfree(cb);
+ return NULL;
+ }
+
+ cb->kernel_address = p;
+ cb->size = cb_size;
+
+ return cb;
+}
+
+int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
+ struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
+ bool map_cb, u64 *handle)
+{
+ struct hl_cb *cb;
+ bool alloc_new_cb = true;
+ int rc, ctx_id = ctx->asid;
+
+ /*
+ * Can't use generic function to check this because of special case
+ * where we create a CB as part of the reset process
+ */
+ if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
+ (ctx_id != HL_KERNEL_ASID_ID))) {
+ dev_warn_ratelimited(hdev->dev,
+ "Device is disabled or in reset. 
Can't create new CBs\n"); + rc = -EBUSY; + goto out_err; + } + + if (cb_size > SZ_2M) { + dev_err(hdev->dev, "CB size %d must be less than %d\n", + cb_size, SZ_2M); + rc = -EINVAL; + goto out_err; + } + + if (!internal_cb) { + /* Minimum allocation must be PAGE SIZE */ + if (cb_size < PAGE_SIZE) + cb_size = PAGE_SIZE; + + if (ctx_id == HL_KERNEL_ASID_ID && + cb_size <= hdev->asic_prop.cb_pool_cb_size) { + + spin_lock(&hdev->cb_pool_lock); + if (!list_empty(&hdev->cb_pool)) { + cb = list_first_entry(&hdev->cb_pool, + typeof(*cb), pool_list); + list_del(&cb->pool_list); + spin_unlock(&hdev->cb_pool_lock); + alloc_new_cb = false; + } else { + spin_unlock(&hdev->cb_pool_lock); + dev_dbg(hdev->dev, "CB pool is empty\n"); + } + } + } + + if (alloc_new_cb) { + cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb); + if (!cb) { + rc = -ENOMEM; + goto out_err; + } + } + + cb->hdev = hdev; + cb->ctx = ctx; + hl_ctx_get(hdev, cb->ctx); + + if (map_cb) { + if (ctx_id == HL_KERNEL_ASID_ID) { + dev_err(hdev->dev, + "CB mapping is not supported for kernel context\n"); + rc = -EINVAL; + goto release_cb; + } + + rc = cb_map_mem(ctx, cb); + if (rc) + goto release_cb; + } + + spin_lock(&mgr->cb_lock); + rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC); + spin_unlock(&mgr->cb_lock); + + if (rc < 0) { + dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n"); + goto unmap_mem; + } + + cb->id = (u64) rc; + + kref_init(&cb->refcount); + spin_lock_init(&cb->lock); + + /* + * idr is 32-bit so we can safely OR it with a mask that is above + * 32 bit + */ + *handle = cb->id | HL_MMAP_TYPE_CB; + *handle <<= PAGE_SHIFT; + + hl_debugfs_add_cb(cb); + + return 0; + +unmap_mem: + if (cb->is_mmu_mapped) + cb_unmap_mem(cb->ctx, cb); +release_cb: + hl_ctx_put(cb->ctx); + cb_do_release(hdev, cb); +out_err: + *handle = 0; + + return rc; +} + +int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle) +{ + struct hl_cb *cb; + u32 handle; + int rc = 0; + + /* + * handle was given to user to do mmap, I need to shift it back to + * how the idr module gave it to me + */ + cb_handle >>= PAGE_SHIFT; + handle = (u32) cb_handle; + + spin_lock(&mgr->cb_lock); + + cb = idr_find(&mgr->cb_handles, handle); + if (cb) { + idr_remove(&mgr->cb_handles, handle); + spin_unlock(&mgr->cb_lock); + kref_put(&cb->refcount, cb_release); + } else { + spin_unlock(&mgr->cb_lock); + dev_err(hdev->dev, + "CB destroy failed, no match to handle 0x%x\n", handle); + rc = -EINVAL; + } + + return rc; +} + +int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data) +{ + union hl_cb_args *args = data; + struct hl_device *hdev = hpriv->hdev; + u64 handle = 0; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_warn_ratelimited(hdev->dev, + "Device is %s. Can't execute CB IOCTL\n", + atomic_read(&hdev->in_reset) ? 
"in_reset" : "disabled"); + return -EBUSY; + } + + switch (args->in.op) { + case HL_CB_OP_CREATE: + if (args->in.cb_size > HL_MAX_CB_SIZE) { + dev_err(hdev->dev, + "User requested CB size %d must be less than %d\n", + args->in.cb_size, HL_MAX_CB_SIZE); + rc = -EINVAL; + } else { + rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx, + args->in.cb_size, false, + !!(args->in.flags & HL_CB_FLAGS_MAP), + &handle); + } + + memset(args, 0, sizeof(*args)); + args->out.cb_handle = handle; + break; + + case HL_CB_OP_DESTROY: + rc = hl_cb_destroy(hdev, &hpriv->cb_mgr, + args->in.cb_handle); + break; + + default: + rc = -ENOTTY; + break; + } + + return rc; +} + +static void cb_vm_close(struct vm_area_struct *vma) +{ + struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data; + long new_mmap_size; + + new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start); + + if (new_mmap_size > 0) { + cb->mmap_size = new_mmap_size; + return; + } + + spin_lock(&cb->lock); + cb->mmap = false; + spin_unlock(&cb->lock); + + hl_cb_put(cb); + vma->vm_private_data = NULL; +} + +static const struct vm_operations_struct cb_vm_ops = { + .close = cb_vm_close +}; + +int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_cb *cb; + u32 handle, user_cb_size; + int rc; + + /* We use the page offset to hold the idr and thus we need to clear + * it before doing the mmap itself + */ + handle = vma->vm_pgoff; + vma->vm_pgoff = 0; + + /* reference was taken here */ + cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle); + if (!cb) { + dev_err(hdev->dev, + "CB mmap failed, no match to handle 0x%x\n", handle); + return -EINVAL; + } + + /* Validation check */ + user_cb_size = vma->vm_end - vma->vm_start; + if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) { + dev_err(hdev->dev, + "CB mmap failed, mmap size 0x%lx != 0x%x cb size\n", + vma->vm_end - vma->vm_start, cb->size); + rc = -EINVAL; + goto put_cb; + } + + if (!access_ok((void __user *) (uintptr_t) vma->vm_start, + user_cb_size)) { + dev_err(hdev->dev, + "user pointer is invalid - 0x%lx\n", + vma->vm_start); + + rc = -EINVAL; + goto put_cb; + } + + spin_lock(&cb->lock); + + if (cb->mmap) { + dev_err(hdev->dev, + "CB mmap failed, CB already mmaped to user\n"); + rc = -EINVAL; + goto release_lock; + } + + cb->mmap = true; + + spin_unlock(&cb->lock); + + vma->vm_ops = &cb_vm_ops; + + /* + * Note: We're transferring the cb reference to + * vma->vm_private_data here. 
+ */ + + vma->vm_private_data = cb; + + rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address, + cb->bus_address, cb->size); + if (rc) { + spin_lock(&cb->lock); + cb->mmap = false; + goto release_lock; + } + + cb->mmap_size = cb->size; + + return 0; + +release_lock: + spin_unlock(&cb->lock); +put_cb: + hl_cb_put(cb); + return rc; +} + +struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr, + u32 handle) +{ + struct hl_cb *cb; + + spin_lock(&mgr->cb_lock); + cb = idr_find(&mgr->cb_handles, handle); + + if (!cb) { + spin_unlock(&mgr->cb_lock); + dev_warn(hdev->dev, + "CB get failed, no match to handle 0x%x\n", handle); + return NULL; + } + + kref_get(&cb->refcount); + + spin_unlock(&mgr->cb_lock); + + return cb; + +} + +void hl_cb_put(struct hl_cb *cb) +{ + kref_put(&cb->refcount, cb_release); +} + +void hl_cb_mgr_init(struct hl_cb_mgr *mgr) +{ + spin_lock_init(&mgr->cb_lock); + idr_init(&mgr->cb_handles); +} + +void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr) +{ + struct hl_cb *cb; + struct idr *idp; + u32 id; + + idp = &mgr->cb_handles; + + idr_for_each_entry(idp, cb, id) { + if (kref_put(&cb->refcount, cb_release) != 1) + dev_err(hdev->dev, + "CB %d for CTX ID %d is still alive\n", + id, cb->ctx->asid); + } + + idr_destroy(&mgr->cb_handles); +} + +struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size, + bool internal_cb) +{ + u64 cb_handle; + struct hl_cb *cb; + int rc; + + rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size, + internal_cb, false, &cb_handle); + if (rc) { + dev_err(hdev->dev, + "Failed to allocate CB for the kernel driver %d\n", rc); + return NULL; + } + + cb_handle >>= PAGE_SHIFT; + cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle); + /* hl_cb_get should never fail here so use kernel WARN */ + WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle); + if (!cb) + goto destroy_cb; + + return cb; + +destroy_cb: + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT); + + return NULL; +} + +int hl_cb_pool_init(struct hl_device *hdev) +{ + struct hl_cb *cb; + int i; + + INIT_LIST_HEAD(&hdev->cb_pool); + spin_lock_init(&hdev->cb_pool_lock); + + for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) { + cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size, + HL_KERNEL_ASID_ID, false); + if (cb) { + cb->is_pool = true; + list_add(&cb->pool_list, &hdev->cb_pool); + } else { + hl_cb_pool_fini(hdev); + return -ENOMEM; + } + } + + return 0; +} + +int hl_cb_pool_fini(struct hl_device *hdev) +{ + struct hl_cb *cb, *tmp; + + list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) { + list_del(&cb->pool_list); + cb_fini(hdev, cb); + } + + return 0; +} + +int hl_cb_va_pool_init(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + int rc; + + if (!hdev->supports_cb_mapping) + return 0; + + ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1); + if (!ctx->cb_va_pool) { + dev_err(hdev->dev, + "Failed to create VA gen pool for CB mapping\n"); + return -ENOMEM; + } + + rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr, + prop->cb_va_end_addr - prop->cb_va_start_addr, -1); + if (rc) { + dev_err(hdev->dev, + "Failed to add memory to VA gen pool for CB mapping\n"); + goto err_pool_destroy; + } + + return 0; + +err_pool_destroy: + gen_pool_destroy(ctx->cb_va_pool); + + return rc; +} + +void hl_cb_va_pool_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + + if 
(!hdev->supports_cb_mapping)
+ return;
+
+ gen_pool_destroy(ctx->cb_va_pool);
+}
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
new file mode 100644
index 000000000..b2b974ecc
--- /dev/null
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -0,0 +1,1245 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+
+#define HL_CS_FLAGS_SIG_WAIT (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT)
+
+static void job_wq_completion(struct work_struct *work);
+static long _hl_cs_wait_ioctl(struct hl_device *hdev,
+ struct hl_ctx *ctx, u64 timeout_us, u64 seq);
+static void cs_do_release(struct kref *ref);
+
+static void hl_sob_reset(struct kref *ref)
+{
+ struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
+ kref);
+ struct hl_device *hdev = hw_sob->hdev;
+
+ hdev->asic_funcs->reset_sob(hdev, hw_sob);
+}
+
+void hl_sob_reset_error(struct kref *ref)
+{
+ struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
+ kref);
+ struct hl_device *hdev = hw_sob->hdev;
+
+ dev_crit(hdev->dev,
+ "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
+ hw_sob->q_idx, hw_sob->sob_id);
+}
+
+static void hl_fence_release(struct kref *kref)
+{
+ struct hl_fence *fence =
+ container_of(kref, struct hl_fence, refcount);
+ struct hl_cs_compl *hl_cs_cmpl =
+ container_of(fence, struct hl_cs_compl, base_fence);
+ struct hl_device *hdev = hl_cs_cmpl->hdev;
+
+ /* EBUSY means the CS was never submitted and hence we don't have
+ * an attached hw_sob object that we should handle here
+ */
+ if (fence->error == -EBUSY)
+ goto free;
+
+ if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
+ (hl_cs_cmpl->type == CS_TYPE_WAIT)) {
+
+ dev_dbg(hdev->dev,
+ "CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n",
+ hl_cs_cmpl->cs_seq,
+ hl_cs_cmpl->type,
+ hl_cs_cmpl->hw_sob->sob_id,
+ hl_cs_cmpl->sob_val);
+
+ /*
+ * A signal CS can get completion while the corresponding wait
+ * for signal CS is on its way to the PQ. The wait for signal CS
+ * will get stuck if the signal CS incremented the SOB to its
+ * max value and there are no pending (submitted) waits on this
+ * SOB.
+ * We do the following to avoid this situation:
+ * 1. The wait for signal CS must get a ref for the signal CS as
+ * soon as possible in cs_ioctl_signal_wait() and put it
+ * before being submitted to the PQ but after it incremented
+ * the SOB refcnt in init_signal_wait_cs().
+ * 2. Signal/Wait for signal CS will decrement the SOB refcnt
+ * here.
+ * These two measures guarantee that the wait for signal CS will
+ * reset the SOB upon completion rather than the signal CS and
+ * hence the above scenario is avoided. 
+ */ + kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset); + } + +free: + kfree(hl_cs_cmpl); +} + +void hl_fence_put(struct hl_fence *fence) +{ + if (fence) + kref_put(&fence->refcount, hl_fence_release); +} + +void hl_fence_get(struct hl_fence *fence) +{ + if (fence) + kref_get(&fence->refcount); +} + +static void hl_fence_init(struct hl_fence *fence) +{ + kref_init(&fence->refcount); + fence->error = 0; + init_completion(&fence->completion); +} + +static void cs_get(struct hl_cs *cs) +{ + kref_get(&cs->refcount); +} + +static int cs_get_unless_zero(struct hl_cs *cs) +{ + return kref_get_unless_zero(&cs->refcount); +} + +static void cs_put(struct hl_cs *cs) +{ + kref_put(&cs->refcount, cs_do_release); +} + +static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job) +{ + /* + * Patched CB is created for external queues jobs, and for H/W queues + * jobs if the user CB was allocated by driver and MMU is disabled. + */ + return (job->queue_type == QUEUE_TYPE_EXT || + (job->queue_type == QUEUE_TYPE_HW && + job->is_kernel_allocated_cb && + !hdev->mmu_enable)); +} + +/* + * cs_parser - parse the user command submission + * + * @hpriv : pointer to the private data of the fd + * @job : pointer to the job that holds the command submission info + * + * The function parses the command submission of the user. It calls the + * ASIC specific parser, which returns a list of memory blocks to send + * to the device as different command buffers + * + */ +static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_cs_parser parser; + int rc; + + parser.ctx_id = job->cs->ctx->asid; + parser.cs_sequence = job->cs->sequence; + parser.job_id = job->id; + + parser.hw_queue_id = job->hw_queue_id; + parser.job_userptr_list = &job->userptr_list; + parser.patched_cb = NULL; + parser.user_cb = job->user_cb; + parser.user_cb_size = job->user_cb_size; + parser.queue_type = job->queue_type; + parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb; + job->patched_cb = NULL; + + rc = hdev->asic_funcs->cs_parser(hdev, &parser); + + if (is_cb_patched(hdev, job)) { + if (!rc) { + job->patched_cb = parser.patched_cb; + job->job_cb_size = parser.patched_cb_size; + job->contains_dma_pkt = parser.contains_dma_pkt; + + spin_lock(&job->patched_cb->lock); + job->patched_cb->cs_cnt++; + spin_unlock(&job->patched_cb->lock); + } + + /* + * Whether the parsing worked or not, we don't need the + * original CB anymore because it was already parsed and + * won't be accessed again for this CS + */ + spin_lock(&job->user_cb->lock); + job->user_cb->cs_cnt--; + spin_unlock(&job->user_cb->lock); + hl_cb_put(job->user_cb); + job->user_cb = NULL; + } else if (!rc) { + job->job_cb_size = job->user_cb_size; + } + + return rc; +} + +static void free_job(struct hl_device *hdev, struct hl_cs_job *job) +{ + struct hl_cs *cs = job->cs; + + if (is_cb_patched(hdev, job)) { + hl_userptr_delete_list(hdev, &job->userptr_list); + + /* + * We might arrive here from rollback and patched CB wasn't + * created, so we need to check it's not NULL + */ + if (job->patched_cb) { + spin_lock(&job->patched_cb->lock); + job->patched_cb->cs_cnt--; + spin_unlock(&job->patched_cb->lock); + + hl_cb_put(job->patched_cb); + } + } + + /* For H/W queue jobs, if a user CB was allocated by driver and MMU is + * enabled, the user CB isn't released in cs_parser() and thus should be + * released here. 
+ */ + if (job->queue_type == QUEUE_TYPE_HW && + job->is_kernel_allocated_cb && hdev->mmu_enable) { + spin_lock(&job->user_cb->lock); + job->user_cb->cs_cnt--; + spin_unlock(&job->user_cb->lock); + + hl_cb_put(job->user_cb); + } + + /* + * This is the only place where there can be multiple threads + * modifying the list at the same time + */ + spin_lock(&cs->job_lock); + list_del(&job->cs_node); + spin_unlock(&cs->job_lock); + + hl_debugfs_remove_job(hdev, job); + + if (job->queue_type == QUEUE_TYPE_EXT || + job->queue_type == QUEUE_TYPE_HW) + cs_put(cs); + + kfree(job); +} + +static void cs_counters_aggregate(struct hl_device *hdev, struct hl_ctx *ctx) +{ + hdev->aggregated_cs_counters.device_in_reset_drop_cnt += + ctx->cs_counters.device_in_reset_drop_cnt; + hdev->aggregated_cs_counters.out_of_mem_drop_cnt += + ctx->cs_counters.out_of_mem_drop_cnt; + hdev->aggregated_cs_counters.parsing_drop_cnt += + ctx->cs_counters.parsing_drop_cnt; + hdev->aggregated_cs_counters.queue_full_drop_cnt += + ctx->cs_counters.queue_full_drop_cnt; + hdev->aggregated_cs_counters.max_cs_in_flight_drop_cnt += + ctx->cs_counters.max_cs_in_flight_drop_cnt; +} + +static void cs_do_release(struct kref *ref) +{ + struct hl_cs *cs = container_of(ref, struct hl_cs, + refcount); + struct hl_device *hdev = cs->ctx->hdev; + struct hl_cs_job *job, *tmp; + + cs->completed = true; + + /* + * Although if we reached here it means that all external jobs have + * finished, because each one of them took refcnt to CS, we still + * need to go over the internal jobs and free them. Otherwise, we + * will have leaked memory and what's worse, the CS object (and + * potentially the CTX object) could be released, while the JOB + * still holds a pointer to them (but no reference). + */ + list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) + free_job(hdev, job); + + /* We also need to update CI for internal queues */ + if (cs->submitted) { + hdev->asic_funcs->hw_queues_lock(hdev); + + hdev->cs_active_cnt--; + if (!hdev->cs_active_cnt) { + struct hl_device_idle_busy_ts *ts; + + ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++]; + ts->busy_to_idle_ts = ktime_get(); + + if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE) + hdev->idle_busy_ts_idx = 0; + } else if (hdev->cs_active_cnt < 0) { + dev_crit(hdev->dev, "CS active cnt %d is negative\n", + hdev->cs_active_cnt); + } + + hdev->asic_funcs->hw_queues_unlock(hdev); + + hl_int_hw_queue_update_ci(cs); + + spin_lock(&hdev->hw_queues_mirror_lock); + /* remove CS from hw_queues mirror list */ + list_del_init(&cs->mirror_node); + spin_unlock(&hdev->hw_queues_mirror_lock); + + /* + * Don't cancel TDR in case this CS was timedout because we + * might be running from the TDR context + */ + if ((!cs->timedout) && + (hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) { + struct hl_cs *next; + + if (cs->tdr_active) + cancel_delayed_work_sync(&cs->work_tdr); + + spin_lock(&hdev->hw_queues_mirror_lock); + + /* queue TDR for next CS */ + next = list_first_entry_or_null( + &hdev->hw_queues_mirror_list, + struct hl_cs, mirror_node); + + if ((next) && (!next->tdr_active)) { + next->tdr_active = true; + schedule_delayed_work(&next->work_tdr, + hdev->timeout_jiffies); + } + + spin_unlock(&hdev->hw_queues_mirror_lock); + } + } else if (cs->type == CS_TYPE_WAIT) { + /* + * In case the wait for signal CS was submitted, the put occurs + * in init_signal_wait_cs() right before hanging on the PQ. 
+ */ + hl_fence_put(cs->signal_fence); + } + + /* + * Must be called before hl_ctx_put because inside we use ctx to get + * the device + */ + hl_debugfs_remove_cs(cs); + + hl_ctx_put(cs->ctx); + + /* We need to mark an error for not submitted because in that case + * the hl fence release flow is different. Mainly, we don't need + * to handle hw_sob for signal/wait + */ + if (cs->timedout) + cs->fence->error = -ETIMEDOUT; + else if (cs->aborted) + cs->fence->error = -EIO; + else if (!cs->submitted) + cs->fence->error = -EBUSY; + + complete_all(&cs->fence->completion); + hl_fence_put(cs->fence); + cs_counters_aggregate(hdev, cs->ctx); + + kfree(cs->jobs_in_queue_cnt); + kfree(cs); +} + +static void cs_timedout(struct work_struct *work) +{ + struct hl_device *hdev; + int rc; + struct hl_cs *cs = container_of(work, struct hl_cs, + work_tdr.work); + rc = cs_get_unless_zero(cs); + if (!rc) + return; + + if ((!cs->submitted) || (cs->completed)) { + cs_put(cs); + return; + } + + /* Mark the CS is timed out so we won't try to cancel its TDR */ + cs->timedout = true; + + hdev = cs->ctx->hdev; + + dev_err(hdev->dev, + "Command submission %llu has not finished in time!\n", + cs->sequence); + + cs_put(cs); + + if (hdev->reset_on_lockup) + hl_device_reset(hdev, false, false); +} + +static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, + enum hl_cs_type cs_type, struct hl_cs **cs_new) +{ + struct hl_cs_compl *cs_cmpl; + struct hl_fence *other = NULL; + struct hl_cs *cs; + int rc; + + cs = kzalloc(sizeof(*cs), GFP_ATOMIC); + if (!cs) + return -ENOMEM; + + cs->ctx = ctx; + cs->submitted = false; + cs->completed = false; + cs->type = cs_type; + INIT_LIST_HEAD(&cs->job_list); + INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout); + kref_init(&cs->refcount); + spin_lock_init(&cs->job_lock); + + cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC); + if (!cs_cmpl) { + rc = -ENOMEM; + goto free_cs; + } + + cs_cmpl->hdev = hdev; + cs_cmpl->type = cs->type; + spin_lock_init(&cs_cmpl->lock); + cs->fence = &cs_cmpl->base_fence; + + spin_lock(&ctx->cs_lock); + + cs_cmpl->cs_seq = ctx->cs_sequence; + other = ctx->cs_pending[cs_cmpl->cs_seq & + (hdev->asic_prop.max_pending_cs - 1)]; + + if (other && !completion_done(&other->completion)) { + dev_dbg_ratelimited(hdev->dev, + "Rejecting CS because of too many in-flights CS\n"); + ctx->cs_counters.max_cs_in_flight_drop_cnt++; + rc = -EAGAIN; + goto free_fence; + } + + cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues, + sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC); + if (!cs->jobs_in_queue_cnt) { + rc = -ENOMEM; + goto free_fence; + } + + /* init hl_fence */ + hl_fence_init(&cs_cmpl->base_fence); + + cs->sequence = cs_cmpl->cs_seq; + + ctx->cs_pending[cs_cmpl->cs_seq & + (hdev->asic_prop.max_pending_cs - 1)] = + &cs_cmpl->base_fence; + ctx->cs_sequence++; + + hl_fence_get(&cs_cmpl->base_fence); + + hl_fence_put(other); + + spin_unlock(&ctx->cs_lock); + + *cs_new = cs; + + return 0; + +free_fence: + spin_unlock(&ctx->cs_lock); + kfree(cs_cmpl); +free_cs: + kfree(cs); + return rc; +} + +static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs) +{ + struct hl_cs_job *job, *tmp; + + list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) + free_job(hdev, job); +} + +void hl_cs_rollback_all(struct hl_device *hdev) +{ + int i; + struct hl_cs *cs, *tmp; + + /* flush all completions */ + for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) + flush_workqueue(hdev->cq_wq[i]); + + /* Make sure we don't have leftovers in the H/W queues mirror list */ + 
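/* Anything on this list was submitted but never completed; mark it */
+ /* aborted so anybody still waiting on its fence gets -EIO */
+ 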
list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list, + mirror_node) { + cs_get(cs); + cs->aborted = true; + dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n", + cs->ctx->asid, cs->sequence); + cs_rollback(hdev, cs); + cs_put(cs); + } +} + +static void job_wq_completion(struct work_struct *work) +{ + struct hl_cs_job *job = container_of(work, struct hl_cs_job, + finish_work); + struct hl_cs *cs = job->cs; + struct hl_device *hdev = cs->ctx->hdev; + + /* job is no longer needed */ + free_job(hdev, job); +} + +static int validate_queue_index(struct hl_device *hdev, + struct hl_cs_chunk *chunk, + enum hl_queue_type *queue_type, + bool *is_kernel_allocated_cb) +{ + struct asic_fixed_properties *asic = &hdev->asic_prop; + struct hw_queue_properties *hw_queue_prop; + + /* This must be checked here to prevent out-of-bounds access to + * hw_queues_props array + */ + if (chunk->queue_index >= asic->max_queues) { + dev_err(hdev->dev, "Queue index %d is invalid\n", + chunk->queue_index); + return -EINVAL; + } + + hw_queue_prop = &asic->hw_queues_props[chunk->queue_index]; + + if (hw_queue_prop->type == QUEUE_TYPE_NA) { + dev_err(hdev->dev, "Queue index %d is invalid\n", + chunk->queue_index); + return -EINVAL; + } + + if (hw_queue_prop->driver_only) { + dev_err(hdev->dev, + "Queue index %d is restricted for the kernel driver\n", + chunk->queue_index); + return -EINVAL; + } + + *queue_type = hw_queue_prop->type; + *is_kernel_allocated_cb = !!hw_queue_prop->requires_kernel_cb; + + return 0; +} + +static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev, + struct hl_cb_mgr *cb_mgr, + struct hl_cs_chunk *chunk) +{ + struct hl_cb *cb; + u32 cb_handle; + + cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT); + + cb = hl_cb_get(hdev, cb_mgr, cb_handle); + if (!cb) { + dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle); + return NULL; + } + + if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) { + dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size); + goto release_cb; + } + + spin_lock(&cb->lock); + cb->cs_cnt++; + spin_unlock(&cb->lock); + + return cb; + +release_cb: + hl_cb_put(cb); + return NULL; +} + +struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, + enum hl_queue_type queue_type, bool is_kernel_allocated_cb) +{ + struct hl_cs_job *job; + + job = kzalloc(sizeof(*job), GFP_ATOMIC); + if (!job) + return NULL; + + job->queue_type = queue_type; + job->is_kernel_allocated_cb = is_kernel_allocated_cb; + + if (is_cb_patched(hdev, job)) + INIT_LIST_HEAD(&job->userptr_list); + + if (job->queue_type == QUEUE_TYPE_EXT) + INIT_WORK(&job->finish_work, job_wq_completion); + + return job; +} + +static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, + u32 num_chunks, u64 *cs_seq) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_cs_chunk *cs_chunk_array; + struct hl_cs_job *job; + struct hl_cs *cs; + struct hl_cb *cb; + bool int_queues_only = true; + u32 size_to_copy; + int rc, i; + + *cs_seq = ULLONG_MAX; + + if (num_chunks > HL_MAX_JOBS_PER_CS) { + dev_err(hdev->dev, + "Number of chunks can NOT be larger than %d\n", + HL_MAX_JOBS_PER_CS); + rc = -EINVAL; + goto out; + } + + cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array), + GFP_ATOMIC); + if (!cs_chunk_array) { + rc = -ENOMEM; + goto out; + } + + size_to_copy = num_chunks * sizeof(struct hl_cs_chunk); + if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) { + dev_err(hdev->dev, "Failed to copy cs chunk array from user\n"); + rc = -EFAULT; + goto free_cs_chunk_array; + 
} + + /* increment refcnt for context */ + hl_ctx_get(hdev, hpriv->ctx); + + rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, &cs); + if (rc) { + hl_ctx_put(hpriv->ctx); + goto free_cs_chunk_array; + } + + *cs_seq = cs->sequence; + + hl_debugfs_add_cs(cs); + + /* Validate ALL the CS chunks before submitting the CS */ + for (i = 0 ; i < num_chunks ; i++) { + struct hl_cs_chunk *chunk = &cs_chunk_array[i]; + enum hl_queue_type queue_type; + bool is_kernel_allocated_cb; + + rc = validate_queue_index(hdev, chunk, &queue_type, + &is_kernel_allocated_cb); + if (rc) { + hpriv->ctx->cs_counters.parsing_drop_cnt++; + goto free_cs_object; + } + + if (is_kernel_allocated_cb) { + cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk); + if (!cb) { + hpriv->ctx->cs_counters.parsing_drop_cnt++; + rc = -EINVAL; + goto free_cs_object; + } + } else { + cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle; + } + + if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW) + int_queues_only = false; + + job = hl_cs_allocate_job(hdev, queue_type, + is_kernel_allocated_cb); + if (!job) { + hpriv->ctx->cs_counters.out_of_mem_drop_cnt++; + dev_err(hdev->dev, "Failed to allocate a new job\n"); + rc = -ENOMEM; + if (is_kernel_allocated_cb) + goto release_cb; + + goto free_cs_object; + } + + job->id = i + 1; + job->cs = cs; + job->user_cb = cb; + job->user_cb_size = chunk->cb_size; + job->hw_queue_id = chunk->queue_index; + + cs->jobs_in_queue_cnt[job->hw_queue_id]++; + + list_add_tail(&job->cs_node, &cs->job_list); + + /* + * Increment CS reference. When CS reference is 0, CS is + * done and can be signaled to user and free all its resources + * Only increment for JOB on external or H/W queues, because + * only for those JOBs we get completion + */ + if (job->queue_type == QUEUE_TYPE_EXT || + job->queue_type == QUEUE_TYPE_HW) + cs_get(cs); + + hl_debugfs_add_job(hdev, job); + + rc = cs_parser(hpriv, job); + if (rc) { + hpriv->ctx->cs_counters.parsing_drop_cnt++; + dev_err(hdev->dev, + "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n", + cs->ctx->asid, cs->sequence, job->id, rc); + goto free_cs_object; + } + } + + if (int_queues_only) { + hpriv->ctx->cs_counters.parsing_drop_cnt++; + dev_err(hdev->dev, + "Reject CS %d.%llu because only internal queues jobs are present\n", + cs->ctx->asid, cs->sequence); + rc = -EINVAL; + goto free_cs_object; + } + + rc = hl_hw_queue_schedule_cs(cs); + if (rc) { + if (rc != -EAGAIN) + dev_err(hdev->dev, + "Failed to submit CS %d.%llu to H/W queues, error %d\n", + cs->ctx->asid, cs->sequence, rc); + goto free_cs_object; + } + + rc = HL_CS_STATUS_SUCCESS; + goto put_cs; + +release_cb: + spin_lock(&cb->lock); + cb->cs_cnt--; + spin_unlock(&cb->lock); + hl_cb_put(cb); +free_cs_object: + cs_rollback(hdev, cs); + *cs_seq = ULLONG_MAX; + /* The path below is both for good and erroneous exits */ +put_cs: + /* We finished with the CS in this function, so put the ref */ + cs_put(cs); +free_cs_chunk_array: + kfree(cs_chunk_array); +out: + return rc; +} + +static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, + void __user *chunks, u32 num_chunks, + u64 *cs_seq) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_ctx *ctx = hpriv->ctx; + struct hl_cs_chunk *cs_chunk_array, *chunk; + struct hw_queue_properties *hw_queue_prop; + struct hl_fence *sig_fence = NULL; + struct hl_cs_job *job; + struct hl_cs *cs; + struct hl_cb *cb; + enum hl_queue_type q_type; + u64 *signal_seq_arr = NULL, signal_seq; + u32 size_to_copy, q_idx, signal_seq_arr_len, 
cb_size; + int rc; + + *cs_seq = ULLONG_MAX; + + if (num_chunks > HL_MAX_JOBS_PER_CS) { + dev_err(hdev->dev, + "Number of chunks can NOT be larger than %d\n", + HL_MAX_JOBS_PER_CS); + rc = -EINVAL; + goto out; + } + + cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array), + GFP_ATOMIC); + if (!cs_chunk_array) { + rc = -ENOMEM; + goto out; + } + + size_to_copy = num_chunks * sizeof(struct hl_cs_chunk); + if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) { + dev_err(hdev->dev, "Failed to copy cs chunk array from user\n"); + rc = -EFAULT; + goto free_cs_chunk_array; + } + + /* currently it is guaranteed to have only one chunk */ + chunk = &cs_chunk_array[0]; + + if (chunk->queue_index >= hdev->asic_prop.max_queues) { + dev_err(hdev->dev, "Queue index %d is invalid\n", + chunk->queue_index); + rc = -EINVAL; + goto free_cs_chunk_array; + } + + q_idx = chunk->queue_index; + hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx]; + q_type = hw_queue_prop->type; + + if ((q_idx >= hdev->asic_prop.max_queues) || + (!hw_queue_prop->supports_sync_stream)) { + dev_err(hdev->dev, "Queue index %d is invalid\n", q_idx); + rc = -EINVAL; + goto free_cs_chunk_array; + } + + if (cs_type == CS_TYPE_WAIT) { + struct hl_cs_compl *sig_waitcs_cmpl; + + signal_seq_arr_len = chunk->num_signal_seq_arr; + + /* currently only one signal seq is supported */ + if (signal_seq_arr_len != 1) { + dev_err(hdev->dev, + "Wait for signal CS supports only one signal CS seq\n"); + rc = -EINVAL; + goto free_cs_chunk_array; + } + + signal_seq_arr = kmalloc_array(signal_seq_arr_len, + sizeof(*signal_seq_arr), + GFP_ATOMIC); + if (!signal_seq_arr) { + rc = -ENOMEM; + goto free_cs_chunk_array; + } + + size_to_copy = chunk->num_signal_seq_arr * + sizeof(*signal_seq_arr); + if (copy_from_user(signal_seq_arr, + u64_to_user_ptr(chunk->signal_seq_arr), + size_to_copy)) { + dev_err(hdev->dev, + "Failed to copy signal seq array from user\n"); + rc = -EFAULT; + goto free_signal_seq_array; + } + + /* currently it is guaranteed to have only one signal seq */ + signal_seq = signal_seq_arr[0]; + sig_fence = hl_ctx_get_fence(ctx, signal_seq); + if (IS_ERR(sig_fence)) { + dev_err(hdev->dev, + "Failed to get signal CS with seq 0x%llx\n", + signal_seq); + rc = PTR_ERR(sig_fence); + goto free_signal_seq_array; + } + + if (!sig_fence) { + /* signal CS already finished */ + rc = 0; + goto free_signal_seq_array; + } + + sig_waitcs_cmpl = + container_of(sig_fence, struct hl_cs_compl, base_fence); + + if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) { + dev_err(hdev->dev, + "CS seq 0x%llx is not of a signal CS\n", + signal_seq); + hl_fence_put(sig_fence); + rc = -EINVAL; + goto free_signal_seq_array; + } + + if (completion_done(&sig_fence->completion)) { + /* signal CS already finished */ + hl_fence_put(sig_fence); + rc = 0; + goto free_signal_seq_array; + } + } + + /* increment refcnt for context */ + hl_ctx_get(hdev, ctx); + + rc = allocate_cs(hdev, ctx, cs_type, &cs); + if (rc) { + if (cs_type == CS_TYPE_WAIT) + hl_fence_put(sig_fence); + hl_ctx_put(ctx); + goto free_signal_seq_array; + } + + /* + * Save the signal CS fence for later initialization right before + * hanging the wait CS on the queue. 
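+ * The fence reference taken via hl_ctx_get_fence() keeps the
+ * signal CS completion object alive until then.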
+ */ + if (cs->type == CS_TYPE_WAIT) + cs->signal_fence = sig_fence; + + hl_debugfs_add_cs(cs); + + *cs_seq = cs->sequence; + + job = hl_cs_allocate_job(hdev, q_type, true); + if (!job) { + ctx->cs_counters.out_of_mem_drop_cnt++; + dev_err(hdev->dev, "Failed to allocate a new job\n"); + rc = -ENOMEM; + goto put_cs; + } + + if (cs->type == CS_TYPE_WAIT) + cb_size = hdev->asic_funcs->get_wait_cb_size(hdev); + else + cb_size = hdev->asic_funcs->get_signal_cb_size(hdev); + + cb = hl_cb_kernel_create(hdev, cb_size, + q_type == QUEUE_TYPE_HW && hdev->mmu_enable); + if (!cb) { + ctx->cs_counters.out_of_mem_drop_cnt++; + kfree(job); + rc = -EFAULT; + goto put_cs; + } + + job->id = 0; + job->cs = cs; + job->user_cb = cb; + job->user_cb->cs_cnt++; + job->user_cb_size = cb_size; + job->hw_queue_id = q_idx; + + /* + * No need in parsing, user CB is the patched CB. + * We call hl_cb_destroy() out of two reasons - we don't need the CB in + * the CB idr anymore and to decrement its refcount as it was + * incremented inside hl_cb_kernel_create(). + */ + job->patched_cb = job->user_cb; + job->job_cb_size = job->user_cb_size; + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + + cs->jobs_in_queue_cnt[job->hw_queue_id]++; + + list_add_tail(&job->cs_node, &cs->job_list); + + /* increment refcount as for external queues we get completion */ + cs_get(cs); + + hl_debugfs_add_job(hdev, job); + + rc = hl_hw_queue_schedule_cs(cs); + if (rc) { + if (rc != -EAGAIN) + dev_err(hdev->dev, + "Failed to submit CS %d.%llu to H/W queues, error %d\n", + ctx->asid, cs->sequence, rc); + goto free_cs_object; + } + + rc = HL_CS_STATUS_SUCCESS; + goto put_cs; + +free_cs_object: + cs_rollback(hdev, cs); + *cs_seq = ULLONG_MAX; + /* The path below is both for good and erroneous exits */ +put_cs: + /* We finished with the CS in this function, so put the ref */ + cs_put(cs); +free_signal_seq_array: + if (cs_type == CS_TYPE_WAIT) + kfree(signal_seq_arr); +free_cs_chunk_array: + kfree(cs_chunk_array); +out: + return rc; +} + +int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) +{ + struct hl_device *hdev = hpriv->hdev; + union hl_cs_args *args = data; + struct hl_ctx *ctx = hpriv->ctx; + void __user *chunks_execute, *chunks_restore; + enum hl_cs_type cs_type; + u32 num_chunks_execute, num_chunks_restore, sig_wait_flags; + u64 cs_seq = ULONG_MAX; + int rc, do_ctx_switch; + bool need_soft_reset = false; + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_warn_ratelimited(hdev->dev, + "Device is %s. Can't submit new CS\n", + atomic_read(&hdev->in_reset) ? 
"in_reset" : "disabled"); + rc = -EBUSY; + goto out; + } + + sig_wait_flags = args->in.cs_flags & HL_CS_FLAGS_SIG_WAIT; + + if (unlikely(sig_wait_flags == HL_CS_FLAGS_SIG_WAIT)) { + dev_err(hdev->dev, + "Signal and wait CS flags are mutually exclusive, context %d\n", + ctx->asid); + rc = -EINVAL; + goto out; + } + + if (unlikely((sig_wait_flags & HL_CS_FLAGS_SIG_WAIT) && + (!hdev->supports_sync_stream))) { + dev_err(hdev->dev, "Sync stream CS is not supported\n"); + rc = -EINVAL; + goto out; + } + + if (args->in.cs_flags & HL_CS_FLAGS_SIGNAL) + cs_type = CS_TYPE_SIGNAL; + else if (args->in.cs_flags & HL_CS_FLAGS_WAIT) + cs_type = CS_TYPE_WAIT; + else + cs_type = CS_TYPE_DEFAULT; + + chunks_execute = (void __user *) (uintptr_t) args->in.chunks_execute; + num_chunks_execute = args->in.num_chunks_execute; + + if (cs_type == CS_TYPE_DEFAULT) { + if (!num_chunks_execute) { + dev_err(hdev->dev, + "Got execute CS with 0 chunks, context %d\n", + ctx->asid); + rc = -EINVAL; + goto out; + } + } else if (num_chunks_execute != 1) { + dev_err(hdev->dev, + "Sync stream CS mandates one chunk only, context %d\n", + ctx->asid); + rc = -EINVAL; + goto out; + } + + do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0); + + if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) { + long ret; + + chunks_restore = + (void __user *) (uintptr_t) args->in.chunks_restore; + num_chunks_restore = args->in.num_chunks_restore; + + mutex_lock(&hpriv->restore_phase_mutex); + + if (do_ctx_switch) { + rc = hdev->asic_funcs->context_switch(hdev, ctx->asid); + if (rc) { + dev_err_ratelimited(hdev->dev, + "Failed to switch to context %d, rejecting CS! %d\n", + ctx->asid, rc); + /* + * If we timedout, or if the device is not IDLE + * while we want to do context-switch (-EBUSY), + * we need to soft-reset because QMAN is + * probably stuck. 
However, we can't call reset
+ * here directly because of a deadlock, so we need
+ * to do it at the very end of this function
+ */
+ if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
+ need_soft_reset = true;
+ mutex_unlock(&hpriv->restore_phase_mutex);
+ goto out;
+ }
+ }
+
+ hdev->asic_funcs->restore_phase_topology(hdev);
+
+ if (!num_chunks_restore) {
+ dev_dbg(hdev->dev,
+ "Need to run restore phase but restore CS is empty\n");
+ rc = 0;
+ } else {
+ rc = cs_ioctl_default(hpriv, chunks_restore,
+ num_chunks_restore, &cs_seq);
+ }
+
+ mutex_unlock(&hpriv->restore_phase_mutex);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to submit restore CS for context %d (%d)\n",
+ ctx->asid, rc);
+ goto out;
+ }
+
+ /* Need to wait for restore completion before execution phase */
+ if (num_chunks_restore) {
+ ret = _hl_cs_wait_ioctl(hdev, ctx,
+ jiffies_to_usecs(hdev->timeout_jiffies),
+ cs_seq);
+ if (ret <= 0) {
+ dev_err(hdev->dev,
+ "Restore CS for context %d failed to complete %ld\n",
+ ctx->asid, ret);
+ rc = -ENOEXEC;
+ goto out;
+ }
+ }
+
+ ctx->thread_ctx_switch_wait_token = 1;
+ } else if (!ctx->thread_ctx_switch_wait_token) {
+ u32 tmp;
+
+ rc = hl_poll_timeout_memory(hdev,
+ &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
+ 100, jiffies_to_usecs(hdev->timeout_jiffies), false);
+
+ if (rc == -ETIMEDOUT) {
+ dev_err(hdev->dev,
+ "context switch phase timeout (%d)\n", tmp);
+ goto out;
+ }
+ }
+
+ if (cs_type == CS_TYPE_DEFAULT)
+ rc = cs_ioctl_default(hpriv, chunks_execute, num_chunks_execute,
+ &cs_seq);
+ else
+ rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks_execute,
+ num_chunks_execute, &cs_seq);
+
+out:
+ if (rc != -EAGAIN) {
+ memset(args, 0, sizeof(*args));
+ args->out.status = rc;
+ args->out.seq = cs_seq;
+ }
+
+ if (((rc == -ETIMEDOUT) || (rc == -EBUSY)) && (need_soft_reset))
+ hl_device_reset(hdev, false, false);
+
+ return rc;
+}
+
+static long _hl_cs_wait_ioctl(struct hl_device *hdev,
+ struct hl_ctx *ctx, u64 timeout_us, u64 seq)
+{
+ struct hl_fence *fence;
+ unsigned long timeout;
+ long rc;
+
+ if (timeout_us == MAX_SCHEDULE_TIMEOUT)
+ timeout = timeout_us;
+ else
+ timeout = usecs_to_jiffies(timeout_us);
+
+ hl_ctx_get(hdev, ctx);
+
+ fence = hl_ctx_get_fence(ctx, seq);
+ if (IS_ERR(fence)) {
+ rc = PTR_ERR(fence);
+ if (rc == -EINVAL)
+ dev_notice_ratelimited(hdev->dev,
+ "Can't wait on CS %llu because current CS is at seq %llu\n",
+ seq, ctx->cs_sequence);
+ } else if (fence) {
+ if (!timeout_us)
+ rc = completion_done(&fence->completion);
+ else
+ rc = wait_for_completion_interruptible_timeout(
+ &fence->completion, timeout);
+
+ if (fence->error == -ETIMEDOUT)
+ rc = -ETIMEDOUT;
+ else if (fence->error == -EIO)
+ rc = -EIO;
+
+ hl_fence_put(fence);
+ } else {
+ dev_dbg(hdev->dev,
+ "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
+ seq, ctx->cs_sequence);
+ rc = 1;
+ }
+
+ hl_ctx_put(ctx);
+
+ return rc;
+}
+
+int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ union hl_wait_cs_args *args = data;
+ u64 seq = args->in.seq;
+ long rc;
+
+ rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq);
+
+ memset(args, 0, sizeof(*args));
+
+ if (rc < 0) {
+ if (rc == -ERESTARTSYS) {
+ dev_err_ratelimited(hdev->dev,
+ "user process got signal while waiting for CS handle %llu\n",
+ seq);
+ args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
+ rc = -EINTR;
+ } else if (rc == -ETIMEDOUT) {
+ dev_err_ratelimited(hdev->dev,
+ "CS %llu has timed-out while user process is waiting for it\n",
+ seq);
+ args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT; + } else if (rc == -EIO) { + dev_err_ratelimited(hdev->dev, + "CS %llu has been aborted while user process is waiting for it\n", + seq); + args->out.status = HL_WAIT_CS_STATUS_ABORTED; + } + return rc; + } + + if (rc == 0) + args->out.status = HL_WAIT_CS_STATUS_BUSY; + else + args->out.status = HL_WAIT_CS_STATUS_COMPLETED; + + return 0; +} diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c new file mode 100644 index 000000000..7a59dd7c6 --- /dev/null +++ b/drivers/misc/habanalabs/common/context.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" + +#include <linux/slab.h> + +static void hl_ctx_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + u64 idle_mask = 0; + int i; + + /* + * If we arrived here, there are no jobs waiting for this context + * on its queues so we can safely remove it. + * This is because for each CS, we increment the ref count and for + * every CS that was finished we decrement it and we won't arrive + * to this function unless the ref count is 0 + */ + + for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++) + hl_fence_put(ctx->cs_pending[i]); + + kfree(ctx->cs_pending); + + if (ctx->asid != HL_KERNEL_ASID_ID) { + dev_dbg(hdev->dev, "closing user context %d\n", ctx->asid); + + /* The engines are stopped as there is no executing CS, but the + * Coresight might be still working by accessing addresses + * related to the stopped engines. Hence stop it explicitly. + * Stop only if this is the compute context, as there can be + * only one compute context + */ + if ((hdev->in_debug) && (hdev->compute_ctx == ctx)) + hl_device_set_debug_mode(hdev, false); + + hl_cb_va_pool_fini(ctx); + hl_vm_ctx_fini(ctx); + hl_asid_free(hdev, ctx->asid); + + if ((!hdev->pldm) && (hdev->pdev) && + (!hdev->asic_funcs->is_device_idle(hdev, + &idle_mask, NULL))) + dev_notice(hdev->dev, + "device not idle after user context is closed (0x%llx)\n", + idle_mask); + } else { + dev_dbg(hdev->dev, "closing kernel context\n"); + hl_mmu_ctx_fini(ctx); + } +} + +void hl_ctx_do_release(struct kref *ref) +{ + struct hl_ctx *ctx; + + ctx = container_of(ref, struct hl_ctx, refcount); + + hl_ctx_fini(ctx); + + if (ctx->hpriv) + hl_hpriv_put(ctx->hpriv); + + kfree(ctx); +} + +int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv) +{ + struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr; + struct hl_ctx *ctx; + int rc; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + rc = -ENOMEM; + goto out_err; + } + + mutex_lock(&mgr->ctx_lock); + rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL); + mutex_unlock(&mgr->ctx_lock); + + if (rc < 0) { + dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n"); + goto free_ctx; + } + + ctx->handle = rc; + + rc = hl_ctx_init(hdev, ctx, false); + if (rc) + goto remove_from_idr; + + hl_hpriv_get(hpriv); + ctx->hpriv = hpriv; + + /* TODO: remove for multiple contexts per process */ + hpriv->ctx = ctx; + + /* TODO: remove the following line for multiple process support */ + hdev->compute_ctx = ctx; + + return 0; + +remove_from_idr: + mutex_lock(&mgr->ctx_lock); + idr_remove(&mgr->ctx_handles, ctx->handle); + mutex_unlock(&mgr->ctx_lock); +free_ctx: + kfree(ctx); +out_err: + return rc; +} + +void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx) +{ + if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1) + return; + + dev_warn(hdev->dev, + 
"user process released device but its command submissions are still executing\n"); +} + +int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx) +{ + int rc = 0; + + ctx->hdev = hdev; + + kref_init(&ctx->refcount); + + ctx->cs_sequence = 1; + spin_lock_init(&ctx->cs_lock); + atomic_set(&ctx->thread_ctx_switch_token, 1); + ctx->thread_ctx_switch_wait_token = 0; + ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs, + sizeof(struct hl_fence *), + GFP_KERNEL); + if (!ctx->cs_pending) + return -ENOMEM; + + if (is_kernel_ctx) { + ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */ + rc = hl_mmu_ctx_init(ctx); + if (rc) { + dev_err(hdev->dev, "Failed to init mmu ctx module\n"); + goto err_free_cs_pending; + } + } else { + ctx->asid = hl_asid_alloc(hdev); + if (!ctx->asid) { + dev_err(hdev->dev, "No free ASID, failed to create context\n"); + rc = -ENOMEM; + goto err_free_cs_pending; + } + + rc = hl_vm_ctx_init(ctx); + if (rc) { + dev_err(hdev->dev, "Failed to init mem ctx module\n"); + rc = -ENOMEM; + goto err_asid_free; + } + + rc = hl_cb_va_pool_init(ctx); + if (rc) { + dev_err(hdev->dev, + "Failed to init VA pool for mapped CB\n"); + goto err_vm_ctx_fini; + } + + rc = hdev->asic_funcs->ctx_init(ctx); + if (rc) { + dev_err(hdev->dev, "ctx_init failed\n"); + goto err_cb_va_pool_fini; + } + + dev_dbg(hdev->dev, "create user context %d\n", ctx->asid); + } + + return 0; + +err_cb_va_pool_fini: + hl_cb_va_pool_fini(ctx); +err_vm_ctx_fini: + hl_vm_ctx_fini(ctx); +err_asid_free: + hl_asid_free(hdev, ctx->asid); +err_free_cs_pending: + kfree(ctx->cs_pending); + + return rc; +} + +void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx) +{ + kref_get(&ctx->refcount); +} + +int hl_ctx_put(struct hl_ctx *ctx) +{ + return kref_put(&ctx->refcount, hl_ctx_do_release); +} + +struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq) +{ + struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop; + struct hl_fence *fence; + + spin_lock(&ctx->cs_lock); + + if (seq >= ctx->cs_sequence) { + spin_unlock(&ctx->cs_lock); + return ERR_PTR(-EINVAL); + } + + if (seq + asic_prop->max_pending_cs < ctx->cs_sequence) { + spin_unlock(&ctx->cs_lock); + return NULL; + } + + fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)]; + hl_fence_get(fence); + + spin_unlock(&ctx->cs_lock); + + return fence; +} + +/* + * hl_ctx_mgr_init - initialize the context manager + * + * @mgr: pointer to context manager structure + * + * This manager is an object inside the hpriv object of the user process. + * The function is called when a user process opens the FD. + */ +void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr) +{ + mutex_init(&mgr->ctx_lock); + idr_init(&mgr->ctx_handles); +} + +/* + * hl_ctx_mgr_fini - finalize the context manager + * + * @hdev: pointer to device structure + * @mgr: pointer to context manager structure + * + * This function goes over all the contexts in the manager and frees them. + * It is called when a process closes the FD. 
+ */ +void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr) +{ + struct hl_ctx *ctx; + struct idr *idp; + u32 id; + + idp = &mgr->ctx_handles; + + idr_for_each_entry(idp, ctx, id) + hl_ctx_free(hdev, ctx); + + idr_destroy(&mgr->ctx_handles); + mutex_destroy(&mgr->ctx_lock); +} diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c new file mode 100644 index 000000000..9716b0728 --- /dev/null +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -0,0 +1,1456 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" +#include "../include/hw_ip/mmu/mmu_general.h" + +#include <linux/pci.h> +#include <linux/debugfs.h> +#include <linux/uaccess.h> + +#define MMU_ADDR_BUF_SIZE 40 +#define MMU_ASID_BUF_SIZE 10 +#define MMU_KBUF_SIZE (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE) + +static struct dentry *hl_debug_root; + +static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, + u8 i2c_reg, long *val) +{ + struct cpucp_packet pkt; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) + return -EBUSY; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.i2c_bus = i2c_bus; + pkt.i2c_addr = i2c_addr; + pkt.i2c_reg = i2c_reg; + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, val); + + if (rc) + dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc); + + return rc; +} + +static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, + u8 i2c_reg, u32 val) +{ + struct cpucp_packet pkt; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) + return -EBUSY; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.i2c_bus = i2c_bus; + pkt.i2c_addr = i2c_addr; + pkt.i2c_reg = i2c_reg; + pkt.value = cpu_to_le64(val); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, NULL); + + if (rc) + dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc); + + return rc; +} + +static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state) +{ + struct cpucp_packet pkt; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) + return; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_LED_SET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.led_index = cpu_to_le32(led); + pkt.value = cpu_to_le64(state); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, NULL); + + if (rc) + dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc); +} + +static int command_buffers_show(struct seq_file *s, void *data) +{ + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_cb *cb; + bool first = true; + + spin_lock(&dev_entry->cb_spinlock); + + list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) { + if (first) { + first = false; + seq_puts(s, "\n"); + seq_puts(s, " CB ID CTX ID CB size CB RefCnt mmap? 
CS counter\n"); + seq_puts(s, "---------------------------------------------------------------\n"); + } + seq_printf(s, + " %03llu %d 0x%08x %d %d %d\n", + cb->id, cb->ctx->asid, cb->size, + kref_read(&cb->refcount), + cb->mmap, cb->cs_cnt); + } + + spin_unlock(&dev_entry->cb_spinlock); + + if (!first) + seq_puts(s, "\n"); + + return 0; +} + +static int command_submission_show(struct seq_file *s, void *data) +{ + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_cs *cs; + bool first = true; + + spin_lock(&dev_entry->cs_spinlock); + + list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) { + if (first) { + first = false; + seq_puts(s, "\n"); + seq_puts(s, " CS ID CTX ASID CS RefCnt Submitted Completed\n"); + seq_puts(s, "------------------------------------------------------\n"); + } + seq_printf(s, + " %llu %d %d %d %d\n", + cs->sequence, cs->ctx->asid, + kref_read(&cs->refcount), + cs->submitted, cs->completed); + } + + spin_unlock(&dev_entry->cs_spinlock); + + if (!first) + seq_puts(s, "\n"); + + return 0; +} + +static int command_submission_jobs_show(struct seq_file *s, void *data) +{ + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_cs_job *job; + bool first = true; + + spin_lock(&dev_entry->cs_job_spinlock); + + list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) { + if (first) { + first = false; + seq_puts(s, "\n"); + seq_puts(s, " JOB ID CS ID CTX ASID H/W Queue\n"); + seq_puts(s, "---------------------------------------\n"); + } + if (job->cs) + seq_printf(s, + " %02d %llu %d %d\n", + job->id, job->cs->sequence, job->cs->ctx->asid, + job->hw_queue_id); + else + seq_printf(s, + " %02d 0 %d %d\n", + job->id, HL_KERNEL_ASID_ID, job->hw_queue_id); + } + + spin_unlock(&dev_entry->cs_job_spinlock); + + if (!first) + seq_puts(s, "\n"); + + return 0; +} + +static int userptr_show(struct seq_file *s, void *data) +{ + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_userptr *userptr; + char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", + "DMA_FROM_DEVICE", "DMA_NONE"}; + bool first = true; + + spin_lock(&dev_entry->userptr_spinlock); + + list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) { + if (first) { + first = false; + seq_puts(s, "\n"); + seq_puts(s, " user virtual address size dma dir\n"); + seq_puts(s, "----------------------------------------------------------\n"); + } + seq_printf(s, + " 0x%-14llx %-10u %-30s\n", + userptr->addr, userptr->size, dma_dir[userptr->dir]); + } + + spin_unlock(&dev_entry->userptr_spinlock); + + if (!first) + seq_puts(s, "\n"); + + return 0; +} + +static int vm_show(struct seq_file *s, void *data) +{ + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_ctx *ctx; + struct hl_vm *vm; + struct hl_vm_hash_node *hnode; + struct hl_userptr *userptr; + struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; + enum vm_type_t *vm_type; + bool once = true; + u64 j; + int i; + + if (!dev_entry->hdev->mmu_enable) + return 0; + + spin_lock(&dev_entry->ctx_mem_hash_spinlock); + + list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) { + once = false; + seq_puts(s, "\n\n----------------------------------------------------"); + seq_puts(s, "\n----------------------------------------------------\n\n"); + seq_printf(s, "ctx asid: %u\n", ctx->asid); + + 
seq_puts(s, "\nmappings:\n\n"); + seq_puts(s, " virtual address size handle\n"); + seq_puts(s, "----------------------------------------------------\n"); + mutex_lock(&ctx->mem_hash_lock); + hash_for_each(ctx->mem_hash, i, hnode, node) { + vm_type = hnode->ptr; + + if (*vm_type == VM_TYPE_USERPTR) { + userptr = hnode->ptr; + seq_printf(s, + " 0x%-14llx %-10u\n", + hnode->vaddr, userptr->size); + } else { + phys_pg_pack = hnode->ptr; + seq_printf(s, + " 0x%-14llx %-10llu %-4u\n", + hnode->vaddr, phys_pg_pack->total_size, + phys_pg_pack->handle); + } + } + mutex_unlock(&ctx->mem_hash_lock); + + vm = &ctx->hdev->vm; + spin_lock(&vm->idr_lock); + + if (!idr_is_empty(&vm->phys_pg_pack_handles)) + seq_puts(s, "\n\nallocations:\n"); + + idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) { + if (phys_pg_pack->asid != ctx->asid) + continue; + + seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle); + seq_printf(s, "page size: %u\n\n", + phys_pg_pack->page_size); + seq_puts(s, " physical address\n"); + seq_puts(s, "---------------------\n"); + for (j = 0 ; j < phys_pg_pack->npages ; j++) { + seq_printf(s, " 0x%-14llx\n", + phys_pg_pack->pages[j]); + } + } + spin_unlock(&vm->idr_lock); + + } + + spin_unlock(&dev_entry->ctx_mem_hash_spinlock); + + if (!once) + seq_puts(s, "\n"); + + return 0; +} + +/* these inline functions are copied from mmu.c */ +static inline u64 get_hop0_addr(struct hl_ctx *ctx) +{ + return ctx->hdev->asic_prop.mmu_pgt_addr + + (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size); +} + +static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr, + u64 virt_addr, u64 mask, u64 shift) +{ + return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * + ((virt_addr & mask) >> shift); +} + +static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_specs, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop0_mask, + mmu_specs->hop0_shift); +} + +static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_specs, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop1_mask, + mmu_specs->hop1_shift); +} + +static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_specs, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop2_mask, + mmu_specs->hop2_shift); +} + +static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_specs, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop3_mask, + mmu_specs->hop3_shift); +} + +static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_specs, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop4_mask, + mmu_specs->hop4_shift); +} + +static inline u64 get_hop5_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_specs, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop5_mask, + mmu_specs->hop5_shift); +} + +static inline u64 get_next_hop_addr(u64 curr_pte) +{ + if (curr_pte & PAGE_PRESENT_MASK) + return curr_pte & HOP_PHYS_ADDR_MASK; + else + return ULLONG_MAX; +} + +static int mmu_show(struct seq_file *s, void *data) +{ + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_device *hdev = dev_entry->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct 
hl_mmu_properties *mmu_prop; + struct hl_ctx *ctx; + bool is_dram_addr; + + u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0, + hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0, + hop2_addr = 0, hop2_pte_addr = 0, hop2_pte = 0, + hop3_addr = 0, hop3_pte_addr = 0, hop3_pte = 0, + hop4_addr = 0, hop4_pte_addr = 0, hop4_pte = 0, + hop5_addr = 0, hop5_pte_addr = 0, hop5_pte = 0, + virt_addr = dev_entry->mmu_addr; + + if (!hdev->mmu_enable) + return 0; + + if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID) + ctx = hdev->kernel_ctx; + else + ctx = hdev->compute_ctx; + + if (!ctx) { + dev_err(hdev->dev, "no ctx available\n"); + return 0; + } + + is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, + prop->dmmu.start_addr, + prop->dmmu.end_addr); + + /* shifts and masks are the same in PMMU and HPMMU, use one of them */ + mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu; + + mutex_lock(&ctx->mmu_lock); + + /* the following lookup is copied from unmap() in mmu.c */ + + hop0_addr = get_hop0_addr(ctx); + hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr); + hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr); + hop1_addr = get_next_hop_addr(hop0_pte); + + if (hop1_addr == ULLONG_MAX) + goto not_mapped; + + hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr); + hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr); + hop2_addr = get_next_hop_addr(hop1_pte); + + if (hop2_addr == ULLONG_MAX) + goto not_mapped; + + hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr); + hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr); + hop3_addr = get_next_hop_addr(hop2_pte); + + if (hop3_addr == ULLONG_MAX) + goto not_mapped; + + hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr); + hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr); + + if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) { + if (!(hop3_pte & LAST_MASK)) { + hop4_addr = get_next_hop_addr(hop3_pte); + + if (hop4_addr == ULLONG_MAX) + goto not_mapped; + + hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, + hop4_addr, virt_addr); + hop4_pte = hdev->asic_funcs->read_pte(hdev, + hop4_pte_addr); + if (!(hop4_pte & PAGE_PRESENT_MASK)) + goto not_mapped; + } else { + if (!(hop3_pte & PAGE_PRESENT_MASK)) + goto not_mapped; + } + } else { + hop4_addr = get_next_hop_addr(hop3_pte); + + if (hop4_addr == ULLONG_MAX) + goto not_mapped; + + hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, + hop4_addr, virt_addr); + hop4_pte = hdev->asic_funcs->read_pte(hdev, + hop4_pte_addr); + if (!(hop4_pte & LAST_MASK)) { + hop5_addr = get_next_hop_addr(hop4_pte); + + if (hop5_addr == ULLONG_MAX) + goto not_mapped; + + hop5_pte_addr = get_hop5_pte_addr(ctx, mmu_prop, + hop5_addr, virt_addr); + hop5_pte = hdev->asic_funcs->read_pte(hdev, + hop5_pte_addr); + if (!(hop5_pte & PAGE_PRESENT_MASK)) + goto not_mapped; + } else { + if (!(hop4_pte & PAGE_PRESENT_MASK)) + goto not_mapped; + } + } + + seq_printf(s, "asid: %u, virt_addr: 0x%llx\n", + dev_entry->mmu_asid, dev_entry->mmu_addr); + + seq_printf(s, "hop0_addr: 0x%llx\n", hop0_addr); + seq_printf(s, "hop0_pte_addr: 0x%llx\n", hop0_pte_addr); + seq_printf(s, "hop0_pte: 0x%llx\n", hop0_pte); + + seq_printf(s, "hop1_addr: 0x%llx\n", hop1_addr); + seq_printf(s, "hop1_pte_addr: 0x%llx\n", hop1_pte_addr); + seq_printf(s, "hop1_pte: 0x%llx\n", hop1_pte); + + seq_printf(s, "hop2_addr: 0x%llx\n", hop2_addr); + seq_printf(s, "hop2_pte_addr: 0x%llx\n", hop2_pte_addr); + seq_printf(s, "hop2_pte: 0x%llx\n", hop2_pte); + + 
seq_printf(s, "hop3_addr: 0x%llx\n", hop3_addr); + seq_printf(s, "hop3_pte_addr: 0x%llx\n", hop3_pte_addr); + seq_printf(s, "hop3_pte: 0x%llx\n", hop3_pte); + + if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) { + if (!(hop3_pte & LAST_MASK)) { + seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr); + seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr); + seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte); + } + } else { + seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr); + seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr); + seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte); + + if (!(hop4_pte & LAST_MASK)) { + seq_printf(s, "hop5_addr: 0x%llx\n", hop5_addr); + seq_printf(s, "hop5_pte_addr: 0x%llx\n", hop5_pte_addr); + seq_printf(s, "hop5_pte: 0x%llx\n", hop5_pte); + } + } + + goto out; + +not_mapped: + dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", + virt_addr); +out: + mutex_unlock(&ctx->mmu_lock); + + return 0; +} + +static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf, + size_t count, loff_t *f_pos) +{ + struct seq_file *s = file->private_data; + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_device *hdev = dev_entry->hdev; + char kbuf[MMU_KBUF_SIZE]; + char *c; + ssize_t rc; + + if (!hdev->mmu_enable) + return count; + + if (count > sizeof(kbuf) - 1) + goto err; + if (copy_from_user(kbuf, buf, count)) + goto err; + kbuf[count] = 0; + + c = strchr(kbuf, ' '); + if (!c) + goto err; + *c = '\0'; + + rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid); + if (rc) + goto err; + + if (strncmp(c+1, "0x", 2)) + goto err; + rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr); + if (rc) + goto err; + + return count; + +err: + dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n"); + + return -EINVAL; +} + +static int engines_show(struct seq_file *s, void *data) +{ + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_device *hdev = dev_entry->hdev; + + if (atomic_read(&hdev->in_reset)) { + dev_warn_ratelimited(hdev->dev, + "Can't check device idle during reset\n"); + return 0; + } + + hdev->asic_funcs->is_device_idle(hdev, NULL, s); + + return 0; +} + +static bool hl_is_device_va(struct hl_device *hdev, u64 addr) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + + if (!hdev->mmu_enable) + goto out; + + if (hdev->dram_supports_virtual_memory && + (addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr)) + return true; + + if (addr >= prop->pmmu.start_addr && + addr < prop->pmmu.end_addr) + return true; + + if (addr >= prop->pmmu_huge.start_addr && + addr < prop->pmmu_huge.end_addr) + return true; +out: + return false; +} + +static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, + u64 *phys_addr) +{ + struct hl_ctx *ctx = hdev->compute_ctx; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_mmu_properties *mmu_prop; + u64 hop_addr, hop_pte_addr, hop_pte; + u64 offset_mask = HOP4_MASK | FLAGS_MASK; + int rc = 0; + bool is_dram_addr; + + if (!ctx) { + dev_err(hdev->dev, "no ctx available\n"); + return -EINVAL; + } + + is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, + prop->dmmu.start_addr, + prop->dmmu.end_addr); + + /* shifts and masks are the same in PMMU and HPMMU, use one of them */ + mmu_prop = is_dram_addr ? 
&prop->dmmu : &prop->pmmu; + + mutex_lock(&ctx->mmu_lock); + + /* hop 0 */ + hop_addr = get_hop0_addr(ctx); + hop_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); + + /* hop 1 */ + hop_addr = get_next_hop_addr(hop_pte); + if (hop_addr == ULLONG_MAX) + goto not_mapped; + hop_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); + + /* hop 2 */ + hop_addr = get_next_hop_addr(hop_pte); + if (hop_addr == ULLONG_MAX) + goto not_mapped; + hop_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); + + /* hop 3 */ + hop_addr = get_next_hop_addr(hop_pte); + if (hop_addr == ULLONG_MAX) + goto not_mapped; + hop_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); + + if (!(hop_pte & LAST_MASK)) { + /* hop 4 */ + hop_addr = get_next_hop_addr(hop_pte); + if (hop_addr == ULLONG_MAX) + goto not_mapped; + hop_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop_addr, + virt_addr); + hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); + + offset_mask = FLAGS_MASK; + } + + if (!(hop_pte & PAGE_PRESENT_MASK)) + goto not_mapped; + + *phys_addr = (hop_pte & ~offset_mask) | (virt_addr & offset_mask); + + goto out; + +not_mapped: + dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", + virt_addr); + rc = -EINVAL; +out: + mutex_unlock(&ctx->mmu_lock); + return rc; +} + +static ssize_t hl_data_read32(struct file *f, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + char tmp_buf[32]; + u64 addr = entry->addr; + u32 val; + ssize_t rc; + + if (atomic_read(&hdev->in_reset)) { + dev_warn_ratelimited(hdev->dev, "Can't read during reset\n"); + return 0; + } + + if (*ppos) + return 0; + + if (hl_is_device_va(hdev, addr)) { + rc = device_va_to_pa(hdev, addr, &addr); + if (rc) + return rc; + } + + rc = hdev->asic_funcs->debugfs_read32(hdev, addr, &val); + if (rc) { + dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr); + return rc; + } + + sprintf(tmp_buf, "0x%08x\n", val); + return simple_read_from_buffer(buf, count, ppos, tmp_buf, + strlen(tmp_buf)); +} + +static ssize_t hl_data_write32(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u64 addr = entry->addr; + u32 value; + ssize_t rc; + + if (atomic_read(&hdev->in_reset)) { + dev_warn_ratelimited(hdev->dev, "Can't write during reset\n"); + return 0; + } + + rc = kstrtouint_from_user(buf, count, 16, &value); + if (rc) + return rc; + + if (hl_is_device_va(hdev, addr)) { + rc = device_va_to_pa(hdev, addr, &addr); + if (rc) + return rc; + } + + rc = hdev->asic_funcs->debugfs_write32(hdev, addr, value); + if (rc) { + dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n", + value, addr); + return rc; + } + + return count; +} + +static ssize_t hl_data_read64(struct file *f, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + char tmp_buf[32]; + u64 addr = entry->addr; + u64 val; + ssize_t rc; + + if (*ppos) + return 0; + + if (hl_is_device_va(hdev, addr)) { + rc = device_va_to_pa(hdev, addr, &addr); + if (rc) + return rc; + } + 
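+	/* At this point addr is physical (it was translated above if it was
+	 * a device virtual address), so it can be passed directly to the
+	 * ASIC's debugfs accessor.
+	 */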
+ rc = hdev->asic_funcs->debugfs_read64(hdev, addr, &val); + if (rc) { + dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr); + return rc; + } + + sprintf(tmp_buf, "0x%016llx\n", val); + return simple_read_from_buffer(buf, count, ppos, tmp_buf, + strlen(tmp_buf)); +} + +static ssize_t hl_data_write64(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u64 addr = entry->addr; + u64 value; + ssize_t rc; + + rc = kstrtoull_from_user(buf, count, 16, &value); + if (rc) + return rc; + + if (hl_is_device_va(hdev, addr)) { + rc = device_va_to_pa(hdev, addr, &addr); + if (rc) + return rc; + } + + rc = hdev->asic_funcs->debugfs_write64(hdev, addr, value); + if (rc) { + dev_err(hdev->dev, "Failed to write 0x%016llx to 0x%010llx\n", + value, addr); + return rc; + } + + return count; +} + +static ssize_t hl_get_power_state(struct file *f, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + char tmp_buf[200]; + int i; + + if (*ppos) + return 0; + + if (hdev->pdev->current_state == PCI_D0) + i = 1; + else if (hdev->pdev->current_state == PCI_D3hot) + i = 2; + else + i = 3; + + sprintf(tmp_buf, + "current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i); + return simple_read_from_buffer(buf, count, ppos, tmp_buf, + strlen(tmp_buf)); +} + +static ssize_t hl_set_power_state(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u32 value; + ssize_t rc; + + rc = kstrtouint_from_user(buf, count, 10, &value); + if (rc) + return rc; + + if (value == 1) { + pci_set_power_state(hdev->pdev, PCI_D0); + pci_restore_state(hdev->pdev); + rc = pci_enable_device(hdev->pdev); + if (rc < 0) + return rc; + } else if (value == 2) { + pci_save_state(hdev->pdev); + pci_disable_device(hdev->pdev); + pci_set_power_state(hdev->pdev, PCI_D3hot); + } else { + dev_dbg(hdev->dev, "invalid power state value %u\n", value); + return -EINVAL; + } + + return count; +} + +static ssize_t hl_i2c_data_read(struct file *f, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + char tmp_buf[32]; + long val; + ssize_t rc; + + if (*ppos) + return 0; + + rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr, + entry->i2c_reg, &val); + if (rc) { + dev_err(hdev->dev, + "Failed to read from I2C bus %d, addr %d, reg %d\n", + entry->i2c_bus, entry->i2c_addr, entry->i2c_reg); + return rc; + } + + sprintf(tmp_buf, "0x%02lx\n", val); + rc = simple_read_from_buffer(buf, count, ppos, tmp_buf, + strlen(tmp_buf)); + + return rc; +} + +static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u32 value; + ssize_t rc; + + rc = kstrtouint_from_user(buf, count, 16, &value); + if (rc) + return rc; + + rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr, + entry->i2c_reg, value); + if (rc) { + dev_err(hdev->dev, + "Failed to write 0x%02x to I2C bus %d, addr %d, reg %d\n", + value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg); + return rc; + } + + return count; +} + +static ssize_t hl_led0_write(struct file *f, const char 
__user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u32 value; + ssize_t rc; + + rc = kstrtouint_from_user(buf, count, 10, &value); + if (rc) + return rc; + + value = value ? 1 : 0; + + hl_debugfs_led_set(hdev, 0, value); + + return count; +} + +static ssize_t hl_led1_write(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u32 value; + ssize_t rc; + + rc = kstrtouint_from_user(buf, count, 10, &value); + if (rc) + return rc; + + value = value ? 1 : 0; + + hl_debugfs_led_set(hdev, 1, value); + + return count; +} + +static ssize_t hl_led2_write(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u32 value; + ssize_t rc; + + rc = kstrtouint_from_user(buf, count, 10, &value); + if (rc) + return rc; + + value = value ? 1 : 0; + + hl_debugfs_led_set(hdev, 2, value); + + return count; +} + +static ssize_t hl_device_read(struct file *f, char __user *buf, + size_t count, loff_t *ppos) +{ + static const char *help = + "Valid values: disable, enable, suspend, resume, cpu_timeout\n"; + return simple_read_from_buffer(buf, count, ppos, help, strlen(help)); +} + +static ssize_t hl_device_write(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + char data[30] = {0}; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + simple_write_to_buffer(data, 29, ppos, buf, count); + + if (strncmp("disable", data, strlen("disable")) == 0) { + hdev->disabled = true; + } else if (strncmp("enable", data, strlen("enable")) == 0) { + hdev->disabled = false; + } else if (strncmp("suspend", data, strlen("suspend")) == 0) { + hdev->asic_funcs->suspend(hdev); + } else if (strncmp("resume", data, strlen("resume")) == 0) { + hdev->asic_funcs->resume(hdev); + } else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) { + hdev->device_cpu_disabled = true; + } else { + dev_err(hdev->dev, + "Valid values: disable, enable, suspend, resume, cpu_timeout\n"); + count = -EINVAL; + } + + return count; +} + +static ssize_t hl_clk_gate_read(struct file *f, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + char tmp_buf[200]; + ssize_t rc; + + if (*ppos) + return 0; + + sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask); + rc = simple_read_from_buffer(buf, count, ppos, tmp_buf, + strlen(tmp_buf) + 1); + + return rc; +} + +static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u64 value; + ssize_t rc; + + if (atomic_read(&hdev->in_reset)) { + dev_warn_ratelimited(hdev->dev, + "Can't change clock gating during reset\n"); + return 0; + } + + rc = kstrtoull_from_user(buf, count, 16, &value); + if (rc) + return rc; + + hdev->clock_gating_mask = value; + hdev->asic_funcs->set_clock_gating(hdev); + + return count; +} + +static ssize_t hl_stop_on_err_read(struct file *f, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + 
struct hl_device *hdev = entry->hdev; + char tmp_buf[200]; + ssize_t rc; + + if (*ppos) + return 0; + + sprintf(tmp_buf, "%d\n", hdev->stop_on_err); + rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf, + strlen(tmp_buf) + 1); + + return rc; +} + +static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u32 value; + ssize_t rc; + + if (atomic_read(&hdev->in_reset)) { + dev_warn_ratelimited(hdev->dev, + "Can't change stop on error during reset\n"); + return 0; + } + + rc = kstrtouint_from_user(buf, count, 10, &value); + if (rc) + return rc; + + hdev->stop_on_err = value ? 1 : 0; + + hl_device_reset(hdev, false, false); + + return count; +} + +static const struct file_operations hl_data32b_fops = { + .owner = THIS_MODULE, + .read = hl_data_read32, + .write = hl_data_write32 +}; + +static const struct file_operations hl_data64b_fops = { + .owner = THIS_MODULE, + .read = hl_data_read64, + .write = hl_data_write64 +}; + +static const struct file_operations hl_i2c_data_fops = { + .owner = THIS_MODULE, + .read = hl_i2c_data_read, + .write = hl_i2c_data_write +}; + +static const struct file_operations hl_power_fops = { + .owner = THIS_MODULE, + .read = hl_get_power_state, + .write = hl_set_power_state +}; + +static const struct file_operations hl_led0_fops = { + .owner = THIS_MODULE, + .write = hl_led0_write +}; + +static const struct file_operations hl_led1_fops = { + .owner = THIS_MODULE, + .write = hl_led1_write +}; + +static const struct file_operations hl_led2_fops = { + .owner = THIS_MODULE, + .write = hl_led2_write +}; + +static const struct file_operations hl_device_fops = { + .owner = THIS_MODULE, + .read = hl_device_read, + .write = hl_device_write +}; + +static const struct file_operations hl_clk_gate_fops = { + .owner = THIS_MODULE, + .read = hl_clk_gate_read, + .write = hl_clk_gate_write +}; + +static const struct file_operations hl_stop_on_err_fops = { + .owner = THIS_MODULE, + .read = hl_stop_on_err_read, + .write = hl_stop_on_err_write +}; + +static const struct hl_info_list hl_debugfs_list[] = { + {"command_buffers", command_buffers_show, NULL}, + {"command_submission", command_submission_show, NULL}, + {"command_submission_jobs", command_submission_jobs_show, NULL}, + {"userptr", userptr_show, NULL}, + {"vm", vm_show, NULL}, + {"mmu", mmu_show, mmu_asid_va_write}, + {"engines", engines_show, NULL} +}; + +static int hl_debugfs_open(struct inode *inode, struct file *file) +{ + struct hl_debugfs_entry *node = inode->i_private; + + return single_open(file, node->info_ent->show, node); +} + +static ssize_t hl_debugfs_write(struct file *file, const char __user *buf, + size_t count, loff_t *f_pos) +{ + struct hl_debugfs_entry *node = file->f_inode->i_private; + + if (node->info_ent->write) + return node->info_ent->write(file, buf, count, f_pos); + else + return -EINVAL; + +} + +static const struct file_operations hl_debugfs_fops = { + .owner = THIS_MODULE, + .open = hl_debugfs_open, + .read = seq_read, + .write = hl_debugfs_write, + .llseek = seq_lseek, + .release = single_release, +}; + +void hl_debugfs_add_device(struct hl_device *hdev) +{ + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; + int count = ARRAY_SIZE(hl_debugfs_list); + struct hl_debugfs_entry *entry; + struct dentry *ent; + int i; + + dev_entry->hdev = hdev; + dev_entry->entry_arr = kmalloc_array(count, + sizeof(struct hl_debugfs_entry), 
+ GFP_KERNEL); + if (!dev_entry->entry_arr) + return; + + INIT_LIST_HEAD(&dev_entry->file_list); + INIT_LIST_HEAD(&dev_entry->cb_list); + INIT_LIST_HEAD(&dev_entry->cs_list); + INIT_LIST_HEAD(&dev_entry->cs_job_list); + INIT_LIST_HEAD(&dev_entry->userptr_list); + INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list); + mutex_init(&dev_entry->file_mutex); + spin_lock_init(&dev_entry->cb_spinlock); + spin_lock_init(&dev_entry->cs_spinlock); + spin_lock_init(&dev_entry->cs_job_spinlock); + spin_lock_init(&dev_entry->userptr_spinlock); + spin_lock_init(&dev_entry->ctx_mem_hash_spinlock); + + dev_entry->root = debugfs_create_dir(dev_name(hdev->dev), + hl_debug_root); + + debugfs_create_x64("addr", + 0644, + dev_entry->root, + &dev_entry->addr); + + debugfs_create_file("data32", + 0644, + dev_entry->root, + dev_entry, + &hl_data32b_fops); + + debugfs_create_file("data64", + 0644, + dev_entry->root, + dev_entry, + &hl_data64b_fops); + + debugfs_create_file("set_power_state", + 0200, + dev_entry->root, + dev_entry, + &hl_power_fops); + + debugfs_create_u8("i2c_bus", + 0644, + dev_entry->root, + &dev_entry->i2c_bus); + + debugfs_create_u8("i2c_addr", + 0644, + dev_entry->root, + &dev_entry->i2c_addr); + + debugfs_create_u8("i2c_reg", + 0644, + dev_entry->root, + &dev_entry->i2c_reg); + + debugfs_create_file("i2c_data", + 0644, + dev_entry->root, + dev_entry, + &hl_i2c_data_fops); + + debugfs_create_file("led0", + 0200, + dev_entry->root, + dev_entry, + &hl_led0_fops); + + debugfs_create_file("led1", + 0200, + dev_entry->root, + dev_entry, + &hl_led1_fops); + + debugfs_create_file("led2", + 0200, + dev_entry->root, + dev_entry, + &hl_led2_fops); + + debugfs_create_file("device", + 0200, + dev_entry->root, + dev_entry, + &hl_device_fops); + + debugfs_create_file("clk_gate", + 0200, + dev_entry->root, + dev_entry, + &hl_clk_gate_fops); + + debugfs_create_file("stop_on_err", + 0644, + dev_entry->root, + dev_entry, + &hl_stop_on_err_fops); + + for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) { + + ent = debugfs_create_file(hl_debugfs_list[i].name, + 0444, + dev_entry->root, + entry, + &hl_debugfs_fops); + entry->dent = ent; + entry->info_ent = &hl_debugfs_list[i]; + entry->dev_entry = dev_entry; + } +} + +void hl_debugfs_remove_device(struct hl_device *hdev) +{ + struct hl_dbg_device_entry *entry = &hdev->hl_debugfs; + + debugfs_remove_recursive(entry->root); + + mutex_destroy(&entry->file_mutex); + kfree(entry->entry_arr); +} + +void hl_debugfs_add_file(struct hl_fpriv *hpriv) +{ + struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs; + + mutex_lock(&dev_entry->file_mutex); + list_add(&hpriv->debugfs_list, &dev_entry->file_list); + mutex_unlock(&dev_entry->file_mutex); +} + +void hl_debugfs_remove_file(struct hl_fpriv *hpriv) +{ + struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs; + + mutex_lock(&dev_entry->file_mutex); + list_del(&hpriv->debugfs_list); + mutex_unlock(&dev_entry->file_mutex); +} + +void hl_debugfs_add_cb(struct hl_cb *cb) +{ + struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs; + + spin_lock(&dev_entry->cb_spinlock); + list_add(&cb->debugfs_list, &dev_entry->cb_list); + spin_unlock(&dev_entry->cb_spinlock); +} + +void hl_debugfs_remove_cb(struct hl_cb *cb) +{ + struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs; + + spin_lock(&dev_entry->cb_spinlock); + list_del(&cb->debugfs_list); + spin_unlock(&dev_entry->cb_spinlock); +} + +void hl_debugfs_add_cs(struct hl_cs *cs) +{ + struct hl_dbg_device_entry *dev_entry = 
&cs->ctx->hdev->hl_debugfs; + + spin_lock(&dev_entry->cs_spinlock); + list_add(&cs->debugfs_list, &dev_entry->cs_list); + spin_unlock(&dev_entry->cs_spinlock); +} + +void hl_debugfs_remove_cs(struct hl_cs *cs) +{ + struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs; + + spin_lock(&dev_entry->cs_spinlock); + list_del(&cs->debugfs_list); + spin_unlock(&dev_entry->cs_spinlock); +} + +void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job) +{ + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; + + spin_lock(&dev_entry->cs_job_spinlock); + list_add(&job->debugfs_list, &dev_entry->cs_job_list); + spin_unlock(&dev_entry->cs_job_spinlock); +} + +void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job) +{ + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; + + spin_lock(&dev_entry->cs_job_spinlock); + list_del(&job->debugfs_list); + spin_unlock(&dev_entry->cs_job_spinlock); +} + +void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr) +{ + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; + + spin_lock(&dev_entry->userptr_spinlock); + list_add(&userptr->debugfs_list, &dev_entry->userptr_list); + spin_unlock(&dev_entry->userptr_spinlock); +} + +void hl_debugfs_remove_userptr(struct hl_device *hdev, + struct hl_userptr *userptr) +{ + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; + + spin_lock(&dev_entry->userptr_spinlock); + list_del(&userptr->debugfs_list); + spin_unlock(&dev_entry->userptr_spinlock); +} + +void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx) +{ + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; + + spin_lock(&dev_entry->ctx_mem_hash_spinlock); + list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list); + spin_unlock(&dev_entry->ctx_mem_hash_spinlock); +} + +void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx) +{ + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; + + spin_lock(&dev_entry->ctx_mem_hash_spinlock); + list_del(&ctx->debugfs_list); + spin_unlock(&dev_entry->ctx_mem_hash_spinlock); +} + +void __init hl_debugfs_init(void) +{ + hl_debug_root = debugfs_create_dir("habanalabs", NULL); +} + +void hl_debugfs_fini(void) +{ + debugfs_remove_recursive(hl_debug_root); +} diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c new file mode 100644 index 000000000..7b0bf4707 --- /dev/null +++ b/drivers/misc/habanalabs/common/device.c @@ -0,0 +1,1541 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */
+
+#define pr_fmt(fmt)	"habanalabs: " fmt
+
+#include "habanalabs.h"
+
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/hwmon.h>
+#include <uapi/misc/habanalabs.h>
+
+#define HL_PLDM_PENDING_RESET_PER_SEC	(HL_PENDING_RESET_PER_SEC * 10)
+
+bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
+{
+	if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
+		return true;
+	else
+		return false;
+}
+
+enum hl_device_status hl_device_status(struct hl_device *hdev)
+{
+	enum hl_device_status status;
+
+	if (hdev->disabled)
+		status = HL_DEVICE_STATUS_MALFUNCTION;
+	else if (atomic_read(&hdev->in_reset))
+		status = HL_DEVICE_STATUS_IN_RESET;
+	else
+		status = HL_DEVICE_STATUS_OPERATIONAL;
+
+	return status;
+}
+
+static void hpriv_release(struct kref *ref)
+{
+	struct hl_fpriv *hpriv;
+	struct hl_device *hdev;
+
+	hpriv = container_of(ref, struct hl_fpriv, refcount);
+
+	hdev = hpriv->hdev;
+
+	put_pid(hpriv->taskpid);
+
+	hl_debugfs_remove_file(hpriv);
+
+	mutex_destroy(&hpriv->restore_phase_mutex);
+
+	mutex_lock(&hdev->fpriv_list_lock);
+	list_del(&hpriv->dev_node);
+	hdev->compute_ctx = NULL;
+	mutex_unlock(&hdev->fpriv_list_lock);
+
+	kfree(hpriv);
+}
+
+void hl_hpriv_get(struct hl_fpriv *hpriv)
+{
+	kref_get(&hpriv->refcount);
+}
+
+void hl_hpriv_put(struct hl_fpriv *hpriv)
+{
+	kref_put(&hpriv->refcount, hpriv_release);
+}
+
+/*
+ * hl_device_release - release function for habanalabs device
+ *
+ * @inode: pointer to inode structure
+ * @filp: pointer to file structure
+ *
+ * Called when a process closes a habanalabs device
+ */
+static int hl_device_release(struct inode *inode, struct file *filp)
+{
+	struct hl_fpriv *hpriv = filp->private_data;
+
+	hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
+	hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
+
+	filp->private_data = NULL;
+
+	hl_hpriv_put(hpriv);
+
+	return 0;
+}
+
+static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
+{
+	struct hl_fpriv *hpriv = filp->private_data;
+	struct hl_device *hdev;
+
+	filp->private_data = NULL;
+
+	hdev = hpriv->hdev;
+
+	mutex_lock(&hdev->fpriv_list_lock);
+	list_del(&hpriv->dev_node);
+	mutex_unlock(&hdev->fpriv_list_lock);
+
+	put_pid(hpriv->taskpid);
+
+	kfree(hpriv);
+
+	return 0;
+}
+
+/*
+ * hl_mmap - mmap function for habanalabs device
+ *
+ * @filp: pointer to file structure
+ * @vma: pointer to vm_area_struct of the process
+ *
+ * Called when a process calls mmap on a habanalabs device. Call the device's
+ * mmap function at the end of the common code.
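+ *
+ * A minimal user-space usage sketch (illustrative only; it assumes the mmap
+ * offset is the handle returned by the CB-create IOCTL, which carries the
+ * HL_MMAP_TYPE_CB bits that the switch below dispatches on):
+ *
+ *	void *p = mmap(NULL, cb_size, PROT_READ | PROT_WRITE,
+ *		       MAP_SHARED, fd, cb_handle);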
+ */ +static int hl_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct hl_fpriv *hpriv = filp->private_data; + unsigned long vm_pgoff; + + vm_pgoff = vma->vm_pgoff; + vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff); + + switch (vm_pgoff & HL_MMAP_TYPE_MASK) { + case HL_MMAP_TYPE_CB: + return hl_cb_mmap(hpriv, vma); + } + + return -EINVAL; +} + +static const struct file_operations hl_ops = { + .owner = THIS_MODULE, + .open = hl_device_open, + .release = hl_device_release, + .mmap = hl_mmap, + .unlocked_ioctl = hl_ioctl, + .compat_ioctl = hl_ioctl +}; + +static const struct file_operations hl_ctrl_ops = { + .owner = THIS_MODULE, + .open = hl_device_open_ctrl, + .release = hl_device_release_ctrl, + .unlocked_ioctl = hl_ioctl_control, + .compat_ioctl = hl_ioctl_control +}; + +static void device_release_func(struct device *dev) +{ + kfree(dev); +} + +/* + * device_init_cdev - Initialize cdev and device for habanalabs device + * + * @hdev: pointer to habanalabs device structure + * @hclass: pointer to the class object of the device + * @minor: minor number of the specific device + * @fpos: file operations to install for this device + * @name: name of the device as it will appear in the filesystem + * @cdev: pointer to the char device object that will be initialized + * @dev: pointer to the device object that will be initialized + * + * Initialize a cdev and a Linux device for habanalabs's device. + */ +static int device_init_cdev(struct hl_device *hdev, struct class *hclass, + int minor, const struct file_operations *fops, + char *name, struct cdev *cdev, + struct device **dev) +{ + cdev_init(cdev, fops); + cdev->owner = THIS_MODULE; + + *dev = kzalloc(sizeof(**dev), GFP_KERNEL); + if (!*dev) + return -ENOMEM; + + device_initialize(*dev); + (*dev)->devt = MKDEV(hdev->major, minor); + (*dev)->class = hclass; + (*dev)->release = device_release_func; + dev_set_drvdata(*dev, hdev); + dev_set_name(*dev, "%s", name); + + return 0; +} + +static int device_cdev_sysfs_add(struct hl_device *hdev) +{ + int rc; + + rc = cdev_device_add(&hdev->cdev, hdev->dev); + if (rc) { + dev_err(hdev->dev, + "failed to add a char device to the system\n"); + return rc; + } + + rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl); + if (rc) { + dev_err(hdev->dev, + "failed to add a control char device to the system\n"); + goto delete_cdev_device; + } + + /* hl_sysfs_init() must be done after adding the device to the system */ + rc = hl_sysfs_init(hdev); + if (rc) { + dev_err(hdev->dev, "failed to initialize sysfs\n"); + goto delete_ctrl_cdev_device; + } + + hdev->cdev_sysfs_created = true; + + return 0; + +delete_ctrl_cdev_device: + cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl); +delete_cdev_device: + cdev_device_del(&hdev->cdev, hdev->dev); + return rc; +} + +static void device_cdev_sysfs_del(struct hl_device *hdev) +{ + if (!hdev->cdev_sysfs_created) + goto put_devices; + + hl_sysfs_fini(hdev); + cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl); + cdev_device_del(&hdev->cdev, hdev->dev); + +put_devices: + put_device(hdev->dev); + put_device(hdev->dev_ctrl); +} + +/* + * device_early_init - do some early initialization for the habanalabs device + * + * @hdev: pointer to habanalabs device structure + * + * Install the relevant function pointers and call the early_init function, + * if such a function exists + */ +static int device_early_init(struct hl_device *hdev) +{ + int i, rc; + char workq_name[32]; + + switch (hdev->asic_type) { + case ASIC_GOYA: + goya_set_asic_funcs(hdev); + 
strlcpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name)); + break; + case ASIC_GAUDI: + gaudi_set_asic_funcs(hdev); + sprintf(hdev->asic_name, "GAUDI"); + break; + default: + dev_err(hdev->dev, "Unrecognized ASIC type %d\n", + hdev->asic_type); + return -EINVAL; + } + + rc = hdev->asic_funcs->early_init(hdev); + if (rc) + return rc; + + rc = hl_asid_init(hdev); + if (rc) + goto early_fini; + + if (hdev->asic_prop.completion_queues_count) { + hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count, + sizeof(*hdev->cq_wq), + GFP_ATOMIC); + if (!hdev->cq_wq) { + rc = -ENOMEM; + goto asid_fini; + } + } + + for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) { + snprintf(workq_name, 32, "hl-free-jobs-%u", (u32) i); + hdev->cq_wq[i] = create_singlethread_workqueue(workq_name); + if (hdev->cq_wq[i] == NULL) { + dev_err(hdev->dev, "Failed to allocate CQ workqueue\n"); + rc = -ENOMEM; + goto free_cq_wq; + } + } + + hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0); + if (hdev->eq_wq == NULL) { + dev_err(hdev->dev, "Failed to allocate EQ workqueue\n"); + rc = -ENOMEM; + goto free_cq_wq; + } + + hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info), + GFP_KERNEL); + if (!hdev->hl_chip_info) { + rc = -ENOMEM; + goto free_eq_wq; + } + + hdev->idle_busy_ts_arr = kmalloc_array(HL_IDLE_BUSY_TS_ARR_SIZE, + sizeof(struct hl_device_idle_busy_ts), + (GFP_KERNEL | __GFP_ZERO)); + if (!hdev->idle_busy_ts_arr) { + rc = -ENOMEM; + goto free_chip_info; + } + + rc = hl_mmu_if_set_funcs(hdev); + if (rc) + goto free_idle_busy_ts_arr; + + hl_cb_mgr_init(&hdev->kernel_cb_mgr); + + mutex_init(&hdev->send_cpu_message_lock); + mutex_init(&hdev->debug_lock); + mutex_init(&hdev->mmu_cache_lock); + INIT_LIST_HEAD(&hdev->hw_queues_mirror_list); + spin_lock_init(&hdev->hw_queues_mirror_lock); + INIT_LIST_HEAD(&hdev->fpriv_list); + mutex_init(&hdev->fpriv_list_lock); + atomic_set(&hdev->in_reset, 0); + + return 0; + +free_idle_busy_ts_arr: + kfree(hdev->idle_busy_ts_arr); +free_chip_info: + kfree(hdev->hl_chip_info); +free_eq_wq: + destroy_workqueue(hdev->eq_wq); +free_cq_wq: + for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) + if (hdev->cq_wq[i]) + destroy_workqueue(hdev->cq_wq[i]); + kfree(hdev->cq_wq); +asid_fini: + hl_asid_fini(hdev); +early_fini: + if (hdev->asic_funcs->early_fini) + hdev->asic_funcs->early_fini(hdev); + + return rc; +} + +/* + * device_early_fini - finalize all that was done in device_early_init + * + * @hdev: pointer to habanalabs device structure + * + */ +static void device_early_fini(struct hl_device *hdev) +{ + int i; + + mutex_destroy(&hdev->mmu_cache_lock); + mutex_destroy(&hdev->debug_lock); + mutex_destroy(&hdev->send_cpu_message_lock); + + mutex_destroy(&hdev->fpriv_list_lock); + + hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr); + + kfree(hdev->idle_busy_ts_arr); + kfree(hdev->hl_chip_info); + + destroy_workqueue(hdev->eq_wq); + + for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) + destroy_workqueue(hdev->cq_wq[i]); + kfree(hdev->cq_wq); + + hl_asid_fini(hdev); + + if (hdev->asic_funcs->early_fini) + hdev->asic_funcs->early_fini(hdev); +} + +static void set_freq_to_low_job(struct work_struct *work) +{ + struct hl_device *hdev = container_of(work, struct hl_device, + work_freq.work); + + mutex_lock(&hdev->fpriv_list_lock); + + if (!hdev->compute_ctx) + hl_device_set_frequency(hdev, PLL_LOW); + + mutex_unlock(&hdev->fpriv_list_lock); + + schedule_delayed_work(&hdev->work_freq, + usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC)); +} + +static void 
hl_device_heartbeat(struct work_struct *work)
+{
+	struct hl_device *hdev = container_of(work, struct hl_device,
+						work_heartbeat.work);
+
+	if (hl_device_disabled_or_in_reset(hdev))
+		goto reschedule;
+
+	if (!hdev->asic_funcs->send_heartbeat(hdev))
+		goto reschedule;
+
+	dev_err(hdev->dev, "Device heartbeat failed!\n");
+	hl_device_reset(hdev, true, false);
+
+	return;
+
+reschedule:
+	schedule_delayed_work(&hdev->work_heartbeat,
+			usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
+}
+
+/*
+ * device_late_init - do late initialization for the habanalabs device
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Perform initialization that either needs the device H/W queues to be
+ * active or must happen after the rest of the initialization is finished
+ */
+static int device_late_init(struct hl_device *hdev)
+{
+	int rc;
+
+	if (hdev->asic_funcs->late_init) {
+		rc = hdev->asic_funcs->late_init(hdev);
+		if (rc) {
+			dev_err(hdev->dev,
+				"failed late initialization for the H/W\n");
+			return rc;
+		}
+	}
+
+	hdev->high_pll = hdev->asic_prop.high_pll;
+
+	/* force setting to low frequency */
+	hdev->curr_pll_profile = PLL_LOW;
+
+	if (hdev->pm_mng_profile == PM_AUTO)
+		hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
+	else
+		hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST);
+
+	INIT_DELAYED_WORK(&hdev->work_freq, set_freq_to_low_job);
+	schedule_delayed_work(&hdev->work_freq,
+			usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
+
+	if (hdev->heartbeat) {
+		INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
+		schedule_delayed_work(&hdev->work_heartbeat,
+				usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
+	}
+
+	hdev->late_init_done = true;
+
+	return 0;
+}
+
+/*
+ * device_late_fini - finalize all that was done in device_late_init
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ */
+static void device_late_fini(struct hl_device *hdev)
+{
+	if (!hdev->late_init_done)
+		return;
+
+	cancel_delayed_work_sync(&hdev->work_freq);
+	if (hdev->heartbeat)
+		cancel_delayed_work_sync(&hdev->work_heartbeat);
+
+	if (hdev->asic_funcs->late_fini)
+		hdev->asic_funcs->late_fini(hdev);
+
+	hdev->late_init_done = false;
+}
+
+uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms)
+{
+	struct hl_device_idle_busy_ts *ts;
+	ktime_t zero_ktime, curr = ktime_get();
+	u32 overlap_cnt = 0, last_index = hdev->idle_busy_ts_idx;
+	s64 period_us, last_start_us, last_end_us, last_busy_time_us,
+		total_busy_time_us = 0, total_busy_time_ms;
+
+	zero_ktime = ktime_set(0, 0);
+	period_us = period_ms * USEC_PER_MSEC;
+	ts = &hdev->idle_busy_ts_arr[last_index];
+
+	/* check the case where the device is currently idle */
+	if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime) &&
+			!ktime_compare(ts->idle_to_busy_ts, zero_ktime)) {
+
+		last_index--;
+		/* Handle the case where idle_busy_ts_idx was 0 */
+		if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
+			last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;
+
+		ts = &hdev->idle_busy_ts_arr[last_index];
+	}
+
+	while (overlap_cnt < HL_IDLE_BUSY_TS_ARR_SIZE) {
+		/* Check if we are in the last-sample case, i.e. the sample
+		 * began before the sampling period. This could be a real
+		 * sample or 0, so we need to handle both cases
+		 */
+		last_start_us = ktime_to_us(
+				ktime_sub(curr, ts->idle_to_busy_ts));
+
+		if (last_start_us > period_us) {
+
+			/* First check two cases:
+			 * 1. If the device is currently busy
+			 * 2.
If the device was idle during the whole sampling + * period + */ + + if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime)) { + /* Check if the device is currently busy */ + if (ktime_compare(ts->idle_to_busy_ts, + zero_ktime)) + return 100; + + /* We either didn't have any activity or we + * reached an entry which is 0. Either way, + * exit and return what was accumulated so far + */ + break; + } + + /* If sample has finished, check it is relevant */ + last_end_us = ktime_to_us( + ktime_sub(curr, ts->busy_to_idle_ts)); + + if (last_end_us > period_us) + break; + + /* It is relevant so add it but with adjustment */ + last_busy_time_us = ktime_to_us( + ktime_sub(ts->busy_to_idle_ts, + ts->idle_to_busy_ts)); + total_busy_time_us += last_busy_time_us - + (last_start_us - period_us); + break; + } + + /* Check if the sample is finished or still open */ + if (ktime_compare(ts->busy_to_idle_ts, zero_ktime)) + last_busy_time_us = ktime_to_us( + ktime_sub(ts->busy_to_idle_ts, + ts->idle_to_busy_ts)); + else + last_busy_time_us = ktime_to_us( + ktime_sub(curr, ts->idle_to_busy_ts)); + + total_busy_time_us += last_busy_time_us; + + last_index--; + /* Handle case idle_busy_ts_idx was 0 */ + if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE) + last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1; + + ts = &hdev->idle_busy_ts_arr[last_index]; + + overlap_cnt++; + } + + total_busy_time_ms = DIV_ROUND_UP_ULL(total_busy_time_us, + USEC_PER_MSEC); + + return DIV_ROUND_UP_ULL(total_busy_time_ms * 100, period_ms); +} + +/* + * hl_device_set_frequency - set the frequency of the device + * + * @hdev: pointer to habanalabs device structure + * @freq: the new frequency value + * + * Change the frequency if needed. This function has no protection against + * concurrency, therefore it is assumed that the calling function has protected + * itself against the case of calling this function from multiple threads with + * different values + * + * Returns 0 if no change was done, otherwise returns 1 + */ +int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq) +{ + if ((hdev->pm_mng_profile == PM_MANUAL) || + (hdev->curr_pll_profile == freq)) + return 0; + + dev_dbg(hdev->dev, "Changing device frequency to %s\n", + freq == PLL_HIGH ? "high" : "low"); + + hdev->asic_funcs->set_pll_profile(hdev, freq); + + hdev->curr_pll_profile = freq; + + return 1; +} + +int hl_device_set_debug_mode(struct hl_device *hdev, bool enable) +{ + int rc = 0; + + mutex_lock(&hdev->debug_lock); + + if (!enable) { + if (!hdev->in_debug) { + dev_err(hdev->dev, + "Failed to disable debug mode because device was not in debug mode\n"); + rc = -EFAULT; + goto out; + } + + if (!hdev->hard_reset_pending) + hdev->asic_funcs->halt_coresight(hdev); + + hdev->in_debug = 0; + + if (!hdev->hard_reset_pending) + hdev->asic_funcs->set_clock_gating(hdev); + + goto out; + } + + if (hdev->in_debug) { + dev_err(hdev->dev, + "Failed to enable debug mode because device is already in debug mode\n"); + rc = -EFAULT; + goto out; + } + + hdev->asic_funcs->disable_clock_gating(hdev); + hdev->in_debug = 1; + +out: + mutex_unlock(&hdev->debug_lock); + + return rc; +} + +/* + * hl_device_suspend - initiate device suspend + * + * @hdev: pointer to habanalabs device structure + * + * Puts the hw in the suspend state (all asics). + * Returns 0 for success or an error on failure. + * Called at driver suspend. 
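The window arithmetic in hl_device_utilization() is easier to follow in isolation. Below is a minimal sketch of the same idea, walking the timestamp ring backward and clipping the oldest sample to the sampling window; plain microsecond counters and the names idle_busy_ts/utilization_pct are stand-ins for the driver's ktime_t bookkeeping, not part of the driver itself.

struct idle_busy_ts {
	unsigned long long idle_to_busy_us;	/* 0 means empty slot */
	unsigned long long busy_to_idle_us;	/* 0 means still busy */
};

static unsigned int utilization_pct(const struct idle_busy_ts *arr,
				    unsigned int arr_size, unsigned int last,
				    unsigned long long now_us,
				    unsigned long long period_us)
{
	unsigned long long win_start = now_us - period_us;
	unsigned long long busy_us = 0;
	unsigned int i;

	for (i = 0; i < arr_size; i++) {
		const struct idle_busy_ts *ts = &arr[last];
		unsigned long long start, end;

		if (!ts->idle_to_busy_us)
			break;			/* no more recorded samples */

		start = ts->idle_to_busy_us;
		end = ts->busy_to_idle_us ? ts->busy_to_idle_us : now_us;

		if (start < win_start) {
			/* oldest relevant sample - clip it to the window */
			if (end > win_start)
				busy_us += end - win_start;
			break;
		}

		busy_us += end - start;
		last = last ? last - 1 : arr_size - 1;	/* ring wraparound */
	}

	/* round up, like the driver's DIV_ROUND_UP_ULL() */
	return (unsigned int)((busy_us * 100 + period_us - 1) / period_us);
}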
+ */ +int hl_device_suspend(struct hl_device *hdev) +{ + int rc; + + pci_save_state(hdev->pdev); + + /* Block future CS/VM/JOB completion operations */ + rc = atomic_cmpxchg(&hdev->in_reset, 0, 1); + if (rc) { + dev_err(hdev->dev, "Can't suspend while in reset\n"); + return -EIO; + } + + /* This blocks all other stuff that is not blocked by in_reset */ + hdev->disabled = true; + + /* + * Flush anyone that is inside the critical section of enqueue + * jobs to the H/W + */ + hdev->asic_funcs->hw_queues_lock(hdev); + hdev->asic_funcs->hw_queues_unlock(hdev); + + /* Flush processes that are sending message to CPU */ + mutex_lock(&hdev->send_cpu_message_lock); + mutex_unlock(&hdev->send_cpu_message_lock); + + rc = hdev->asic_funcs->suspend(hdev); + if (rc) + dev_err(hdev->dev, + "Failed to disable PCI access of device CPU\n"); + + /* Shut down the device */ + pci_disable_device(hdev->pdev); + pci_set_power_state(hdev->pdev, PCI_D3hot); + + return 0; +} + +/* + * hl_device_resume - initiate device resume + * + * @hdev: pointer to habanalabs device structure + * + * Bring the hw back to operating state (all asics). + * Returns 0 for success or an error on failure. + * Called at driver resume. + */ +int hl_device_resume(struct hl_device *hdev) +{ + int rc; + + pci_set_power_state(hdev->pdev, PCI_D0); + pci_restore_state(hdev->pdev); + rc = pci_enable_device_mem(hdev->pdev); + if (rc) { + dev_err(hdev->dev, + "Failed to enable PCI device in resume\n"); + return rc; + } + + pci_set_master(hdev->pdev); + + rc = hdev->asic_funcs->resume(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to resume device after suspend\n"); + goto disable_device; + } + + + hdev->disabled = false; + atomic_set(&hdev->in_reset, 0); + + rc = hl_device_reset(hdev, true, false); + if (rc) { + dev_err(hdev->dev, "Failed to reset device during resume\n"); + goto disable_device; + } + + return 0; + +disable_device: + pci_clear_master(hdev->pdev); + pci_disable_device(hdev->pdev); + + return rc; +} + +static int device_kill_open_processes(struct hl_device *hdev) +{ + u16 pending_total, pending_cnt; + struct hl_fpriv *hpriv; + struct task_struct *task = NULL; + + if (hdev->pldm) + pending_total = HL_PLDM_PENDING_RESET_PER_SEC; + else + pending_total = HL_PENDING_RESET_PER_SEC; + + /* Giving time for user to close FD, and for processes that are inside + * hl_device_open to finish + */ + if (!list_empty(&hdev->fpriv_list)) + ssleep(1); + + mutex_lock(&hdev->fpriv_list_lock); + + /* This section must be protected because we are dereferencing + * pointers that are freed if the process exits + */ + list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) { + task = get_pid_task(hpriv->taskpid, PIDTYPE_PID); + if (task) { + dev_info(hdev->dev, "Killing user process pid=%d\n", + task_pid_nr(task)); + send_sig(SIGKILL, task, 1); + usleep_range(1000, 10000); + + put_task_struct(task); + } + } + + mutex_unlock(&hdev->fpriv_list_lock); + + /* We killed the open users, but because the driver cleans up after the + * user contexts are closed (e.g. mmu mappings), we need to wait again + * to make sure the cleaning phase is finished before continuing with + * the reset + */ + + pending_cnt = pending_total; + + while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) { + dev_info(hdev->dev, + "Waiting for all unmap operations to finish before hard reset\n"); + + pending_cnt--; + + ssleep(1); + } + + return list_empty(&hdev->fpriv_list) ? 
0 : -EBUSY; +} + +static void device_hard_reset_pending(struct work_struct *work) +{ + struct hl_device_reset_work *device_reset_work = + container_of(work, struct hl_device_reset_work, reset_work); + struct hl_device *hdev = device_reset_work->hdev; + + hl_device_reset(hdev, true, true); + + kfree(device_reset_work); +} + +/* + * hl_device_reset - reset the device + * + * @hdev: pointer to habanalabs device structure + * @hard_reset: should we do hard reset to all engines or just reset the + * compute/dma engines + * @from_hard_reset_thread: is the caller the hard-reset thread + * + * Block future CS and wait for pending CS to be enqueued + * Call ASIC H/W fini + * Flush all completions + * Re-initialize all internal data structures + * Call ASIC H/W init, late_init + * Test queues + * Enable device + * + * Returns 0 for success or an error on failure. + */ +int hl_device_reset(struct hl_device *hdev, bool hard_reset, + bool from_hard_reset_thread) +{ + int i, rc; + + if (!hdev->init_done) { + dev_err(hdev->dev, + "Can't reset before initialization is done\n"); + return 0; + } + + if ((!hard_reset) && (!hdev->supports_soft_reset)) { + dev_dbg(hdev->dev, "Doing hard-reset instead of soft-reset\n"); + hard_reset = true; + } + + /* + * Prevent concurrency in this function - only one reset should be + * done at any given time. Only need to perform this if we didn't + * get from the dedicated hard reset thread + */ + if (!from_hard_reset_thread) { + /* Block future CS/VM/JOB completion operations */ + rc = atomic_cmpxchg(&hdev->in_reset, 0, 1); + if (rc) + return 0; + + if (hard_reset) { + /* Disable PCI access from device F/W so he won't send + * us additional interrupts. We disable MSI/MSI-X at + * the halt_engines function and we can't have the F/W + * sending us interrupts after that. We need to disable + * the access here because if the device is marked + * disable, the message won't be send. 
Also, in case + * of heartbeat, the device CPU is marked as disable + * so this message won't be sent + */ + if (hl_fw_send_pci_access_msg(hdev, + CPUCP_PACKET_DISABLE_PCI_ACCESS)) + dev_warn(hdev->dev, + "Failed to disable PCI access by F/W\n"); + } + + /* This also blocks future CS/VM/JOB completion operations */ + hdev->disabled = true; + + /* Flush anyone that is inside the critical section of enqueue + * jobs to the H/W + */ + hdev->asic_funcs->hw_queues_lock(hdev); + hdev->asic_funcs->hw_queues_unlock(hdev); + + /* Flush anyone that is inside device open */ + mutex_lock(&hdev->fpriv_list_lock); + mutex_unlock(&hdev->fpriv_list_lock); + + dev_err(hdev->dev, "Going to RESET device!\n"); + } + +again: + if ((hard_reset) && (!from_hard_reset_thread)) { + struct hl_device_reset_work *device_reset_work; + + hdev->hard_reset_pending = true; + + device_reset_work = kzalloc(sizeof(*device_reset_work), + GFP_ATOMIC); + if (!device_reset_work) { + rc = -ENOMEM; + goto out_err; + } + + /* + * Because the reset function can't run from interrupt or + * from heartbeat work, we need to call the reset function + * from a dedicated work + */ + INIT_WORK(&device_reset_work->reset_work, + device_hard_reset_pending); + device_reset_work->hdev = hdev; + schedule_work(&device_reset_work->reset_work); + + return 0; + } + + if (hard_reset) { + device_late_fini(hdev); + + /* + * Now that the heartbeat thread is closed, flush processes + * which are sending messages to CPU + */ + mutex_lock(&hdev->send_cpu_message_lock); + mutex_unlock(&hdev->send_cpu_message_lock); + } + + /* + * Halt the engines and disable interrupts so we won't get any more + * completions from H/W and we won't have any accesses from the + * H/W to the host machine + */ + hdev->asic_funcs->halt_engines(hdev, hard_reset); + + /* Go over all the queues, release all CS and their jobs */ + hl_cs_rollback_all(hdev); + + if (hard_reset) { + /* Kill processes here after CS rollback. This is because the + * process can't really exit until all its CSs are done, which + * is what we do in cs rollback + */ + rc = device_kill_open_processes(hdev); + if (rc) { + dev_crit(hdev->dev, + "Failed to kill all open processes, stopping hard reset\n"); + goto out_err; + } + + /* Flush the Event queue workers to make sure no other thread is + * reading or writing to registers during the reset + */ + flush_workqueue(hdev->eq_wq); + } + + /* Reset the H/W. 
It will be in idle state after this returns */ + hdev->asic_funcs->hw_fini(hdev, hard_reset); + + if (hard_reset) { + /* Release kernel context */ + if (hl_ctx_put(hdev->kernel_ctx) == 1) + hdev->kernel_ctx = NULL; + hl_vm_fini(hdev); + hl_mmu_fini(hdev); + hl_eq_reset(hdev, &hdev->event_queue); + } + + /* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */ + hl_hw_queue_reset(hdev, hard_reset); + for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) + hl_cq_reset(hdev, &hdev->completion_queue[i]); + + hdev->idle_busy_ts_idx = 0; + hdev->idle_busy_ts_arr[0].busy_to_idle_ts = ktime_set(0, 0); + hdev->idle_busy_ts_arr[0].idle_to_busy_ts = ktime_set(0, 0); + + if (hdev->cs_active_cnt) + dev_crit(hdev->dev, "CS active cnt %d is not 0 during reset\n", + hdev->cs_active_cnt); + + mutex_lock(&hdev->fpriv_list_lock); + + /* Make sure the context switch phase will run again */ + if (hdev->compute_ctx) { + atomic_set(&hdev->compute_ctx->thread_ctx_switch_token, 1); + hdev->compute_ctx->thread_ctx_switch_wait_token = 0; + } + + mutex_unlock(&hdev->fpriv_list_lock); + + /* Finished tear-down, starting to re-initialize */ + + if (hard_reset) { + hdev->device_cpu_disabled = false; + hdev->hard_reset_pending = false; + + if (hdev->kernel_ctx) { + dev_crit(hdev->dev, + "kernel ctx was alive during hard reset, something is terribly wrong\n"); + rc = -EBUSY; + goto out_err; + } + + rc = hl_mmu_init(hdev); + if (rc) { + dev_err(hdev->dev, + "Failed to initialize MMU S/W after hard reset\n"); + goto out_err; + } + + /* Allocate the kernel context */ + hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), + GFP_KERNEL); + if (!hdev->kernel_ctx) { + rc = -ENOMEM; + hl_mmu_fini(hdev); + goto out_err; + } + + hdev->compute_ctx = NULL; + + rc = hl_ctx_init(hdev, hdev->kernel_ctx, true); + if (rc) { + dev_err(hdev->dev, + "failed to init kernel ctx in hard reset\n"); + kfree(hdev->kernel_ctx); + hdev->kernel_ctx = NULL; + hl_mmu_fini(hdev); + goto out_err; + } + } + + /* Device is now enabled as part of the initialization requires + * communication with the device firmware to get information that + * is required for the initialization itself + */ + hdev->disabled = false; + + rc = hdev->asic_funcs->hw_init(hdev); + if (rc) { + dev_err(hdev->dev, + "failed to initialize the H/W after reset\n"); + goto out_err; + } + + /* Check that the communication with the device is working */ + rc = hdev->asic_funcs->test_queues(hdev); + if (rc) { + dev_err(hdev->dev, + "Failed to detect if device is alive after reset\n"); + goto out_err; + } + + if (hard_reset) { + rc = device_late_init(hdev); + if (rc) { + dev_err(hdev->dev, + "Failed late init after hard reset\n"); + goto out_err; + } + + rc = hl_vm_init(hdev); + if (rc) { + dev_err(hdev->dev, + "Failed to init memory module after hard reset\n"); + goto out_err; + } + + hl_set_max_power(hdev); + } else { + rc = hdev->asic_funcs->soft_reset_late_init(hdev); + if (rc) { + dev_err(hdev->dev, + "Failed late init after soft reset\n"); + goto out_err; + } + } + + atomic_set(&hdev->in_reset, 0); + + if (hard_reset) + hdev->hard_reset_cnt++; + else + hdev->soft_reset_cnt++; + + dev_warn(hdev->dev, "Successfully finished resetting the device\n"); + + return 0; + +out_err: + hdev->disabled = true; + + if (hard_reset) { + dev_err(hdev->dev, + "Failed to reset! 
Device is NOT usable\n"); + hdev->hard_reset_cnt++; + } else { + dev_err(hdev->dev, + "Failed to do soft-reset, trying hard reset\n"); + hdev->soft_reset_cnt++; + hard_reset = true; + goto again; + } + + atomic_set(&hdev->in_reset, 0); + + return rc; +} + +/* + * hl_device_init - main initialization function for habanalabs device + * + * @hdev: pointer to habanalabs device structure + * + * Allocate an id for the device, do early initialization and then call the + * ASIC specific initialization functions. Finally, create the cdev and the + * Linux device to expose it to the user + */ +int hl_device_init(struct hl_device *hdev, struct class *hclass) +{ + int i, rc, cq_cnt, cq_ready_cnt; + char *name; + bool add_cdev_sysfs_on_err = false; + + name = kasprintf(GFP_KERNEL, "hl%d", hdev->id / 2); + if (!name) { + rc = -ENOMEM; + goto out_disabled; + } + + /* Initialize cdev and device structures */ + rc = device_init_cdev(hdev, hclass, hdev->id, &hl_ops, name, + &hdev->cdev, &hdev->dev); + + kfree(name); + + if (rc) + goto out_disabled; + + name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->id / 2); + if (!name) { + rc = -ENOMEM; + goto free_dev; + } + + /* Initialize cdev and device structures for control device */ + rc = device_init_cdev(hdev, hclass, hdev->id_control, &hl_ctrl_ops, + name, &hdev->cdev_ctrl, &hdev->dev_ctrl); + + kfree(name); + + if (rc) + goto free_dev; + + /* Initialize ASIC function pointers and perform early init */ + rc = device_early_init(hdev); + if (rc) + goto free_dev_ctrl; + + /* + * Start calling ASIC initialization. First S/W then H/W and finally + * late init + */ + rc = hdev->asic_funcs->sw_init(hdev); + if (rc) + goto early_fini; + + /* + * Initialize the H/W queues. Must be done before hw_init, because + * there the addresses of the kernel queue are being written to the + * registers of the device + */ + rc = hl_hw_queues_create(hdev); + if (rc) { + dev_err(hdev->dev, "failed to initialize kernel queues\n"); + goto sw_fini; + } + + cq_cnt = hdev->asic_prop.completion_queues_count; + + /* + * Initialize the completion queues. Must be done before hw_init, + * because there the addresses of the completion queues are being + * passed as arguments to request_irq + */ + if (cq_cnt) { + hdev->completion_queue = kcalloc(cq_cnt, + sizeof(*hdev->completion_queue), + GFP_KERNEL); + + if (!hdev->completion_queue) { + dev_err(hdev->dev, + "failed to allocate completion queues\n"); + rc = -ENOMEM; + goto hw_queues_destroy; + } + } + + for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) { + rc = hl_cq_init(hdev, &hdev->completion_queue[i], + hdev->asic_funcs->get_queue_id_for_cq(hdev, i)); + if (rc) { + dev_err(hdev->dev, + "failed to initialize completion queue\n"); + goto cq_fini; + } + hdev->completion_queue[i].cq_idx = i; + } + + /* + * Initialize the event queue. 
Must be done before hw_init, + * because there the address of the event queue is being + * passed as argument to request_irq + */ + rc = hl_eq_init(hdev, &hdev->event_queue); + if (rc) { + dev_err(hdev->dev, "failed to initialize event queue\n"); + goto cq_fini; + } + + /* MMU S/W must be initialized before kernel context is created */ + rc = hl_mmu_init(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n"); + goto eq_fini; + } + + /* Allocate the kernel context */ + hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL); + if (!hdev->kernel_ctx) { + rc = -ENOMEM; + goto mmu_fini; + } + + hdev->compute_ctx = NULL; + + rc = hl_ctx_init(hdev, hdev->kernel_ctx, true); + if (rc) { + dev_err(hdev->dev, "failed to initialize kernel context\n"); + kfree(hdev->kernel_ctx); + goto mmu_fini; + } + + rc = hl_cb_pool_init(hdev); + if (rc) { + dev_err(hdev->dev, "failed to initialize CB pool\n"); + goto release_ctx; + } + + hl_debugfs_add_device(hdev); + + if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) { + dev_info(hdev->dev, + "H/W state is dirty, must reset before initializing\n"); + hdev->asic_funcs->halt_engines(hdev, true); + hdev->asic_funcs->hw_fini(hdev, true); + } + + /* + * From this point, in case of an error, add char devices and create + * sysfs nodes as part of the error flow, to allow debugging. + */ + add_cdev_sysfs_on_err = true; + + /* Device is now enabled as part of the initialization requires + * communication with the device firmware to get information that + * is required for the initialization itself + */ + hdev->disabled = false; + + rc = hdev->asic_funcs->hw_init(hdev); + if (rc) { + dev_err(hdev->dev, "failed to initialize the H/W\n"); + rc = 0; + goto out_disabled; + } + + /* Check that the communication with the device is working */ + rc = hdev->asic_funcs->test_queues(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to detect if device is alive\n"); + rc = 0; + goto out_disabled; + } + + rc = device_late_init(hdev); + if (rc) { + dev_err(hdev->dev, "Failed late initialization\n"); + rc = 0; + goto out_disabled; + } + + dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n", + hdev->asic_name, + hdev->asic_prop.dram_size / 1024 / 1024 / 1024); + + rc = hl_vm_init(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to initialize memory module\n"); + rc = 0; + goto out_disabled; + } + + /* + * Expose devices and sysfs nodes to user. + * From here there is no need to add char devices and create sysfs nodes + * in case of an error. + */ + add_cdev_sysfs_on_err = false; + rc = device_cdev_sysfs_add(hdev); + if (rc) { + dev_err(hdev->dev, + "Failed to add char devices and sysfs nodes\n"); + rc = 0; + goto out_disabled; + } + + /* Need to call this again because the max power might change, + * depending on card type for certain ASICs + */ + hl_set_max_power(hdev); + + /* + * hl_hwmon_init() must be called after device_late_init(), because only + * there we get the information from the device about which + * hwmon-related sensors the device supports. + * Furthermore, it must be done after adding the device to the system. 
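hl_device_init() above leans on the standard kernel pattern of ordered goto labels: every successfully initialized resource gets an unwind label, and the labels run in exactly the reverse order of initialization, so a failure at stage N tears down stages N-1..1 and nothing else. The shape in miniature, with hypothetical foo/bar/baz stages:

static int example_init(struct hl_device *hdev)
{
	int rc;

	rc = foo_init(hdev);		/* stage 1 */
	if (rc)
		return rc;		/* nothing to unwind yet */

	rc = bar_init(hdev);		/* stage 2, depends on stage 1 */
	if (rc)
		goto undo_foo;

	rc = baz_init(hdev);		/* stage 3, depends on stage 2 */
	if (rc)
		goto undo_bar;

	return 0;

undo_bar:				/* unwind strictly in reverse */
	bar_fini(hdev);
undo_foo:
	foo_fini(hdev);
	return rc;
}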
+ */ + rc = hl_hwmon_init(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to initialize hwmon\n"); + rc = 0; + goto out_disabled; + } + + dev_notice(hdev->dev, + "Successfully added device to habanalabs driver\n"); + + hdev->init_done = true; + + return 0; + +release_ctx: + if (hl_ctx_put(hdev->kernel_ctx) != 1) + dev_err(hdev->dev, + "kernel ctx is still alive on initialization failure\n"); +mmu_fini: + hl_mmu_fini(hdev); +eq_fini: + hl_eq_fini(hdev, &hdev->event_queue); +cq_fini: + for (i = 0 ; i < cq_ready_cnt ; i++) + hl_cq_fini(hdev, &hdev->completion_queue[i]); + kfree(hdev->completion_queue); +hw_queues_destroy: + hl_hw_queues_destroy(hdev); +sw_fini: + hdev->asic_funcs->sw_fini(hdev); +early_fini: + device_early_fini(hdev); +free_dev_ctrl: + put_device(hdev->dev_ctrl); +free_dev: + put_device(hdev->dev); +out_disabled: + hdev->disabled = true; + if (add_cdev_sysfs_on_err) + device_cdev_sysfs_add(hdev); + if (hdev->pdev) + dev_err(&hdev->pdev->dev, + "Failed to initialize hl%d. Device is NOT usable !\n", + hdev->id / 2); + else + pr_err("Failed to initialize hl%d. Device is NOT usable !\n", + hdev->id / 2); + + return rc; +} + +/* + * hl_device_fini - main tear-down function for habanalabs device + * + * @hdev: pointer to habanalabs device structure + * + * Destroy the device, call ASIC fini functions and release the id + */ +void hl_device_fini(struct hl_device *hdev) +{ + int i, rc; + ktime_t timeout; + + dev_info(hdev->dev, "Removing device\n"); + + /* + * This function is competing with the reset function, so try to + * take the reset atomic and if we are already in middle of reset, + * wait until reset function is finished. Reset function is designed + * to always finish. However, in Gaudi, because of all the network + * ports, the hard reset could take between 10-30 seconds + */ + + timeout = ktime_add_us(ktime_get(), + HL_HARD_RESET_MAX_TIMEOUT * 1000 * 1000); + rc = atomic_cmpxchg(&hdev->in_reset, 0, 1); + while (rc) { + usleep_range(50, 200); + rc = atomic_cmpxchg(&hdev->in_reset, 0, 1); + if (ktime_compare(ktime_get(), timeout) > 0) { + WARN(1, "Failed to remove device because reset function did not finish\n"); + return; + } + } + + /* Disable PCI access from device F/W so it won't send us additional + * interrupts. We disable MSI/MSI-X at the halt_engines function and we + * can't have the F/W sending us interrupts after that. We need to + * disable the access here because if the device is marked disable, the + * message won't be send. Also, in case of heartbeat, the device CPU is + * marked as disable so this message won't be sent + */ + hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS); + + /* Mark device as disabled */ + hdev->disabled = true; + + /* Flush anyone that is inside the critical section of enqueue + * jobs to the H/W + */ + hdev->asic_funcs->hw_queues_lock(hdev); + hdev->asic_funcs->hw_queues_unlock(hdev); + + /* Flush anyone that is inside device open */ + mutex_lock(&hdev->fpriv_list_lock); + mutex_unlock(&hdev->fpriv_list_lock); + + hdev->hard_reset_pending = true; + + hl_hwmon_fini(hdev); + + device_late_fini(hdev); + + hl_debugfs_remove_device(hdev); + + /* + * Halt the engines and disable interrupts so we won't get any more + * completions from H/W and we won't have any accesses from the + * H/W to the host machine + */ + hdev->asic_funcs->halt_engines(hdev, true); + + /* Go over all the queues, release all CS and their jobs */ + hl_cs_rollback_all(hdev); + + /* Kill processes here after CS rollback. 
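hl_device_fini() above takes the same in_reset flag with atomic_cmpxchg() as the reset path does, but instead of bailing out when the flag is busy it retries under a deadline, since device removal must eventually proceed. The idiom in isolation, with a hypothetical helper name:

/* Spin (politely) until the flag is taken or the deadline passes */
static int example_take_exclusive(atomic_t *flag, ktime_t deadline)
{
	while (atomic_cmpxchg(flag, 0, 1)) {
		usleep_range(50, 200);
		if (ktime_compare(ktime_get(), deadline) > 0)
			return -ETIMEDOUT;
	}

	return 0;	/* we own the flag; remember to clear it */
}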
This is because the process + * can't really exit until all its CSs are done, which is what we + * do in cs rollback + */ + rc = device_kill_open_processes(hdev); + if (rc) + dev_crit(hdev->dev, "Failed to kill all open processes\n"); + + hl_cb_pool_fini(hdev); + + /* Reset the H/W. It will be in idle state after this returns */ + hdev->asic_funcs->hw_fini(hdev, true); + + /* Release kernel context */ + if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1)) + dev_err(hdev->dev, "kernel ctx is still alive\n"); + + hl_vm_fini(hdev); + + hl_mmu_fini(hdev); + + hl_eq_fini(hdev, &hdev->event_queue); + + for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) + hl_cq_fini(hdev, &hdev->completion_queue[i]); + kfree(hdev->completion_queue); + + hl_hw_queues_destroy(hdev); + + /* Call ASIC S/W finalize function */ + hdev->asic_funcs->sw_fini(hdev); + + device_early_fini(hdev); + + /* Hide devices and sysfs nodes from user */ + device_cdev_sysfs_del(hdev); + + pr_info("removed device successfully\n"); +} + +/* + * MMIO register access helper functions. + */ + +/* + * hl_rreg - Read an MMIO register + * + * @hdev: pointer to habanalabs device structure + * @reg: MMIO register offset (in bytes) + * + * Returns the value of the MMIO register we are asked to read + * + */ +inline u32 hl_rreg(struct hl_device *hdev, u32 reg) +{ + return readl(hdev->rmmio + reg); +} + +/* + * hl_wreg - Write to an MMIO register + * + * @hdev: pointer to habanalabs device structure + * @reg: MMIO register offset (in bytes) + * @val: 32-bit value + * + * Writes the 32-bit value into the MMIO register + * + */ +inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val) +{ + writel(val, hdev->rmmio + reg); +} diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c new file mode 100644 index 000000000..13c6eebd4 --- /dev/null +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -0,0 +1,716 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" +#include "../include/common/hl_boot_if.h" + +#include <linux/firmware.h> +#include <linux/genalloc.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/slab.h> + +#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */ +/** + * hl_fw_load_fw_to_device() - Load F/W code to device's memory. + * + * @hdev: pointer to hl_device structure. + * @fw_name: the firmware image name + * @dst: IO memory mapped address space to copy firmware to + * + * Copy fw code from firmware file to device memory. + * + * Return: 0 on success, non-zero for failure. 
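The hl_rreg()/hl_wreg() helpers above are thin readl()/writel() wrappers around the mapped register space, and the ASIC code layers RREG32()/WREG32() macros on top of them. A typical read-modify-write sequence built on these helpers might look like the following sketch; the register offset and field masks are hypothetical, for illustration only.

#define EXAMPLE_CTL_REG		0x1000	/* hypothetical register */
#define EXAMPLE_CTL_EN_MASK	0x1
#define EXAMPLE_CTL_MODE_MASK	0x6
#define EXAMPLE_CTL_MODE_SHIFT	1

static void example_set_mode(struct hl_device *hdev, u32 mode)
{
	u32 val = hl_rreg(hdev, EXAMPLE_CTL_REG);

	val &= ~EXAMPLE_CTL_MODE_MASK;			/* clear the field */
	val |= (mode << EXAMPLE_CTL_MODE_SHIFT) & EXAMPLE_CTL_MODE_MASK;
	val |= EXAMPLE_CTL_EN_MASK;			/* keep block enabled */

	hl_wreg(hdev, EXAMPLE_CTL_REG, val);
}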
+ */ +int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, + void __iomem *dst) +{ + const struct firmware *fw; + const u64 *fw_data; + size_t fw_size; + int rc; + + rc = request_firmware(&fw, fw_name, hdev->dev); + if (rc) { + dev_err(hdev->dev, "Firmware file %s is not found!\n", fw_name); + goto out; + } + + fw_size = fw->size; + if ((fw_size % 4) != 0) { + dev_err(hdev->dev, "Illegal %s firmware size %zu\n", + fw_name, fw_size); + rc = -EINVAL; + goto out; + } + + dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size); + + if (fw_size > FW_FILE_MAX_SIZE) { + dev_err(hdev->dev, + "FW file size %zu exceeds maximum of %u bytes\n", + fw_size, FW_FILE_MAX_SIZE); + rc = -EINVAL; + goto out; + } + + fw_data = (const u64 *) fw->data; + + memcpy_toio(dst, fw_data, fw_size); + +out: + release_firmware(fw); + return rc; +} + +int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode) +{ + struct cpucp_packet pkt = {}; + + pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT); + + return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, + sizeof(pkt), 0, NULL); +} + +int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, + u16 len, u32 timeout, long *result) +{ + struct cpucp_packet *pkt; + dma_addr_t pkt_dma_addr; + u32 tmp; + int rc = 0; + + pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len, + &pkt_dma_addr); + if (!pkt) { + dev_err(hdev->dev, + "Failed to allocate DMA memory for packet to CPU\n"); + return -ENOMEM; + } + + memcpy(pkt, msg, len); + + mutex_lock(&hdev->send_cpu_message_lock); + + if (hdev->disabled) + goto out; + + if (hdev->device_cpu_disabled) { + rc = -EIO; + goto out; + } + + rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, len, pkt_dma_addr); + if (rc) { + dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc); + goto out; + } + + rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp, + (tmp == CPUCP_PACKET_FENCE_VAL), 1000, + timeout, true); + + hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id); + + if (rc == -ETIMEDOUT) { + dev_err(hdev->dev, "Device CPU packet timeout (0x%x)\n", tmp); + hdev->device_cpu_disabled = true; + goto out; + } + + tmp = le32_to_cpu(pkt->ctl); + + rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT; + if (rc) { + dev_err(hdev->dev, "F/W ERROR %d for CPU packet %d\n", + rc, + (tmp & CPUCP_PKT_CTL_OPCODE_MASK) + >> CPUCP_PKT_CTL_OPCODE_SHIFT); + rc = -EIO; + } else if (result) { + *result = (long) le64_to_cpu(pkt->result); + } + +out: + mutex_unlock(&hdev->send_cpu_message_lock); + + hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt); + + return rc; +} + +int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type) +{ + struct cpucp_packet pkt; + long result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.value = cpu_to_le64(event_type); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, &result); + + if (rc) + dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type); + + return rc; +} + +int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr, + size_t irq_arr_size) +{ + struct cpucp_unmask_irq_arr_packet *pkt; + size_t total_pkt_size; + long result; + int rc; + + total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) + + irq_arr_size; + + /* data should be aligned to 8 bytes in order to CPU-CP to copy it */ + total_pkt_size = (total_pkt_size + 0x7) & ~0x7; + + /* total_pkt_size is casted to u16 
later on */ + if (total_pkt_size > USHRT_MAX) { + dev_err(hdev->dev, "too many elements in IRQ array\n"); + return -EINVAL; + } + + pkt = kzalloc(total_pkt_size, GFP_KERNEL); + if (!pkt) + return -ENOMEM; + + pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0])); + memcpy(&pkt->irqs, irq_arr, irq_arr_size); + + pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY << + CPUCP_PKT_CTL_OPCODE_SHIFT); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt, + total_pkt_size, 0, &result); + + if (rc) + dev_err(hdev->dev, "failed to unmask IRQ array\n"); + + kfree(pkt); + + return rc; +} + +int hl_fw_test_cpu_queue(struct hl_device *hdev) +{ + struct cpucp_packet test_pkt = {}; + long result; + int rc; + + test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST << + CPUCP_PKT_CTL_OPCODE_SHIFT); + test_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt, + sizeof(test_pkt), 0, &result); + + if (!rc) { + if (result != CPUCP_PACKET_FENCE_VAL) + dev_err(hdev->dev, + "CPU queue test failed (0x%08lX)\n", result); + } else { + dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc); + } + + return rc; +} + +void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, + dma_addr_t *dma_handle) +{ + u64 kernel_addr; + + kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size); + + *dma_handle = hdev->cpu_accessible_dma_address + + (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem); + + return (void *) (uintptr_t) kernel_addr; +} + +void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, + void *vaddr) +{ + gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr, + size); +} + +int hl_fw_send_heartbeat(struct hl_device *hdev) +{ + struct cpucp_packet hb_pkt = {}; + long result; + int rc; + + hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST << + CPUCP_PKT_CTL_OPCODE_SHIFT); + hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt, + sizeof(hb_pkt), 0, &result); + + if ((rc) || (result != CPUCP_PACKET_FENCE_VAL)) + rc = -EIO; + + return rc; +} + +int hl_fw_cpucp_info_get(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct cpucp_packet pkt = {}; + void *cpucp_info_cpu_addr; + dma_addr_t cpucp_info_dma_addr; + long result; + int rc; + + cpucp_info_cpu_addr = + hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, + sizeof(struct cpucp_info), + &cpucp_info_dma_addr); + if (!cpucp_info_cpu_addr) { + dev_err(hdev->dev, + "Failed to allocate DMA memory for CPU-CP info packet\n"); + return -ENOMEM; + } + + memset(cpucp_info_cpu_addr, 0, sizeof(struct cpucp_info)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_INFO_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.addr = cpu_to_le64(cpucp_info_dma_addr); + pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_info)); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_CPUCP_INFO_TIMEOUT_USEC, &result); + if (rc) { + dev_err(hdev->dev, + "Failed to handle CPU-CP info pkt, error %d\n", rc); + goto out; + } + + memcpy(&prop->cpucp_info, cpucp_info_cpu_addr, + sizeof(prop->cpucp_info)); + + rc = hl_build_hwmon_channel_info(hdev, prop->cpucp_info.sensors); + if (rc) { + dev_err(hdev->dev, + "Failed to build hwmon channel info, error %d\n", rc); + rc = -EFAULT; + goto out; + } + +out: + hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, + sizeof(struct cpucp_info), cpucp_info_cpu_addr); + + return rc; 
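Every CPU-CP request above follows the same recipe: zero a struct cpucp_packet, encode the opcode into the ctl word, hand the packet to the ASIC's send_cpu_message() hook, and decode the 64-bit result. A condensed sketch of that recipe; the function name is hypothetical, and it reuses the real CPUCP_PACKET_TEST opcode purely for illustration.

static int hl_fw_example_query(struct hl_device *hdev, u64 *value)
{
	struct cpucp_packet pkt = {};
	long result;
	int rc;

	/* the opcode always lives in the ctl word */
	pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST << CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
						sizeof(pkt), 0, &result);
	if (rc) {
		dev_err(hdev->dev, "example CPU-CP packet failed, %d\n", rc);
		return rc;
	}

	*value = (u64) result;

	return 0;
}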
+} + +int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size) +{ + struct cpucp_packet pkt = {}; + void *eeprom_info_cpu_addr; + dma_addr_t eeprom_info_dma_addr; + long result; + int rc; + + eeprom_info_cpu_addr = + hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, + max_size, &eeprom_info_dma_addr); + if (!eeprom_info_cpu_addr) { + dev_err(hdev->dev, + "Failed to allocate DMA memory for CPU-CP EEPROM packet\n"); + return -ENOMEM; + } + + memset(eeprom_info_cpu_addr, 0, max_size); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_EEPROM_DATA_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.addr = cpu_to_le64(eeprom_info_dma_addr); + pkt.data_max_size = cpu_to_le32(max_size); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_CPUCP_EEPROM_TIMEOUT_USEC, &result); + + if (rc) { + dev_err(hdev->dev, + "Failed to handle CPU-CP EEPROM packet, error %d\n", + rc); + goto out; + } + + /* result contains the actual size */ + memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size)); + +out: + hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size, + eeprom_info_cpu_addr); + + return rc; +} + +int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, + struct hl_info_pci_counters *counters) +{ + struct cpucp_packet pkt = {}; + long result; + int rc; + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + + /* Fetch PCI rx counter */ + pkt.index = cpu_to_le32(cpucp_pcie_throughput_rx); + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_CPUCP_INFO_TIMEOUT_USEC, &result); + if (rc) { + dev_err(hdev->dev, + "Failed to handle CPU-CP PCI info pkt, error %d\n", rc); + return rc; + } + counters->rx_throughput = result; + + memset(&pkt, 0, sizeof(pkt)); + pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + + /* Fetch PCI tx counter */ + pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx); + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_CPUCP_INFO_TIMEOUT_USEC, &result); + if (rc) { + dev_err(hdev->dev, + "Failed to handle CPU-CP PCI info pkt, error %d\n", rc); + return rc; + } + counters->tx_throughput = result; + + /* Fetch PCI replay counter */ + memset(&pkt, 0, sizeof(pkt)); + pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_CPUCP_INFO_TIMEOUT_USEC, &result); + if (rc) { + dev_err(hdev->dev, + "Failed to handle CPU-CP PCI info pkt, error %d\n", rc); + return rc; + } + counters->replay_cnt = (u32) result; + + return rc; +} + +int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy) +{ + struct cpucp_packet pkt = {}; + long result; + int rc; + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_TOTAL_ENERGY_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_CPUCP_INFO_TIMEOUT_USEC, &result); + if (rc) { + dev_err(hdev->dev, + "Failed to handle CpuCP total energy pkt, error %d\n", + rc); + return rc; + } + + *total_energy = result; + + return rc; +} + +static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg) +{ + u32 err_val; + + /* Some of the firmware status codes are deprecated in newer f/w + * versions. In those versions, the errors are reported + * in different registers. Therefore, we need to check those + * registers and print the exact errors. 
Moreover, there + * may be multiple errors, so we need to report on each error + * separately. Some of the error codes might indicate a state + * that is not an error per-se, but it is an error in production + * environment + */ + err_val = RREG32(boot_err0_reg); + if (!(err_val & CPU_BOOT_ERR0_ENABLED)) + return; + + if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL) + dev_err(hdev->dev, + "Device boot error - DRAM initialization failed\n"); + if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED) + dev_err(hdev->dev, "Device boot error - FIT image corrupted\n"); + if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL) + dev_err(hdev->dev, + "Device boot error - Thermal Sensor initialization failed\n"); + if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) + dev_warn(hdev->dev, + "Device boot warning - Skipped DRAM initialization\n"); + if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) + dev_warn(hdev->dev, + "Device boot error - Skipped waiting for BMC\n"); + if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY) + dev_err(hdev->dev, + "Device boot error - Serdes data from BMC not available\n"); + if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) + dev_err(hdev->dev, + "Device boot error - NIC F/W initialization failed\n"); +} + +static void detect_cpu_boot_status(struct hl_device *hdev, u32 status) +{ + /* Some of the status codes below are deprecated in newer f/w + * versions but we keep them here for backward compatibility + */ + switch (status) { + case CPU_BOOT_STATUS_NA: + dev_err(hdev->dev, + "Device boot error - BTL did NOT run\n"); + break; + case CPU_BOOT_STATUS_IN_WFE: + dev_err(hdev->dev, + "Device boot error - Stuck inside WFE loop\n"); + break; + case CPU_BOOT_STATUS_IN_BTL: + dev_err(hdev->dev, + "Device boot error - Stuck in BTL\n"); + break; + case CPU_BOOT_STATUS_IN_PREBOOT: + dev_err(hdev->dev, + "Device boot error - Stuck in Preboot\n"); + break; + case CPU_BOOT_STATUS_IN_SPL: + dev_err(hdev->dev, + "Device boot error - Stuck in SPL\n"); + break; + case CPU_BOOT_STATUS_IN_UBOOT: + dev_err(hdev->dev, + "Device boot error - Stuck in u-boot\n"); + break; + case CPU_BOOT_STATUS_DRAM_INIT_FAIL: + dev_err(hdev->dev, + "Device boot error - DRAM initialization failed\n"); + break; + case CPU_BOOT_STATUS_UBOOT_NOT_READY: + dev_err(hdev->dev, + "Device boot error - u-boot stopped by user\n"); + break; + case CPU_BOOT_STATUS_TS_INIT_FAIL: + dev_err(hdev->dev, + "Device boot error - Thermal Sensor initialization failed\n"); + break; + default: + dev_err(hdev->dev, + "Device boot error - Invalid status code %d\n", + status); + break; + } +} + +int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg, + u32 boot_err0_reg, u32 timeout) +{ + u32 status; + int rc; + + if (!hdev->cpu_enable) + return 0; + + /* Need to check two possible scenarios: + * + * CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT - for newer firmwares where + * the preboot is waiting for the boot fit + * + * All other status values - for older firmwares where the uboot was + * loaded from the FLASH + */ + rc = hl_poll_timeout( + hdev, + cpu_boot_status_reg, + status, + (status == CPU_BOOT_STATUS_IN_UBOOT) || + (status == CPU_BOOT_STATUS_DRAM_RDY) || + (status == CPU_BOOT_STATUS_NIC_FW_RDY) || + (status == CPU_BOOT_STATUS_READY_TO_BOOT) || + (status == CPU_BOOT_STATUS_SRAM_AVAIL) || + (status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT), + 10000, + timeout); + + if (rc) { + dev_err(hdev->dev, "Failed to read preboot version\n"); + detect_cpu_boot_status(hdev, status); + fw_read_errors(hdev, boot_err0_reg); + return -EIO; + } + + 
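fw_read_errors() above is a straight if-chain over the CPU_BOOT_ERR0_* bits; the same decoding can be written table-driven, which scales better as firmware adds error bits. A sketch of that alternative, using the same bit definitions:

static const struct {
	u32 mask;
	bool warn;	/* report as warning rather than error */
	const char *msg;
} boot_errors[] = {
	{ CPU_BOOT_ERR0_DRAM_INIT_FAIL, false, "DRAM initialization failed" },
	{ CPU_BOOT_ERR0_FIT_CORRUPTED, false, "FIT image corrupted" },
	{ CPU_BOOT_ERR0_TS_INIT_FAIL, false,
			"Thermal Sensor initialization failed" },
	{ CPU_BOOT_ERR0_DRAM_SKIPPED, true, "Skipped DRAM initialization" },
	{ CPU_BOOT_ERR0_BMC_WAIT_SKIPPED, true, "Skipped waiting for BMC" },
	{ CPU_BOOT_ERR0_NIC_DATA_NOT_RDY, false,
			"Serdes data from BMC not available" },
	{ CPU_BOOT_ERR0_NIC_FW_FAIL, false, "NIC F/W initialization failed" },
};

static void fw_read_errors_tbl(struct hl_device *hdev, u32 boot_err0_reg)
{
	u32 err_val = RREG32(boot_err0_reg);
	int i;

	if (!(err_val & CPU_BOOT_ERR0_ENABLED))
		return;

	for (i = 0; i < ARRAY_SIZE(boot_errors); i++) {
		if (!(err_val & boot_errors[i].mask))
			continue;

		if (boot_errors[i].warn)
			dev_warn(hdev->dev, "Device boot warning - %s\n",
				 boot_errors[i].msg);
		else
			dev_err(hdev->dev, "Device boot error - %s\n",
				boot_errors[i].msg);
	}
}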
hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT); + + return 0; +} + +int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, + u32 msg_to_cpu_reg, u32 cpu_msg_status_reg, + u32 boot_err0_reg, bool skip_bmc, + u32 cpu_timeout, u32 boot_fit_timeout) +{ + u32 status; + int rc; + + dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n", + cpu_timeout / USEC_PER_SEC); + + /* Wait for boot FIT request */ + rc = hl_poll_timeout( + hdev, + cpu_boot_status_reg, + status, + status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT, + 10000, + boot_fit_timeout); + + if (rc) { + dev_dbg(hdev->dev, + "No boot fit request received, resuming boot\n"); + } else { + rc = hdev->asic_funcs->load_boot_fit_to_device(hdev); + if (rc) + goto out; + + /* Clear device CPU message status */ + WREG32(cpu_msg_status_reg, CPU_MSG_CLR); + + /* Signal device CPU that boot loader is ready */ + WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY); + + /* Poll for CPU device ack */ + rc = hl_poll_timeout( + hdev, + cpu_msg_status_reg, + status, + status == CPU_MSG_OK, + 10000, + boot_fit_timeout); + + if (rc) { + dev_err(hdev->dev, + "Timeout waiting for boot fit load ack\n"); + goto out; + } + + /* Clear message */ + WREG32(msg_to_cpu_reg, KMD_MSG_NA); + } + + /* Make sure CPU boot-loader is running */ + rc = hl_poll_timeout( + hdev, + cpu_boot_status_reg, + status, + (status == CPU_BOOT_STATUS_DRAM_RDY) || + (status == CPU_BOOT_STATUS_NIC_FW_RDY) || + (status == CPU_BOOT_STATUS_READY_TO_BOOT) || + (status == CPU_BOOT_STATUS_SRAM_AVAIL), + 10000, + cpu_timeout); + + /* Read U-Boot version now in case we will later fail */ + hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT); + + if (rc) { + detect_cpu_boot_status(hdev, status); + rc = -EIO; + goto out; + } + + if (!hdev->fw_loading) { + dev_info(hdev->dev, "Skip loading FW\n"); + goto out; + } + + if (status == CPU_BOOT_STATUS_SRAM_AVAIL) + goto out; + + dev_info(hdev->dev, + "Loading firmware to device, may take some time...\n"); + + rc = hdev->asic_funcs->load_firmware_to_device(hdev); + if (rc) + goto out; + + if (skip_bmc) { + WREG32(msg_to_cpu_reg, KMD_MSG_SKIP_BMC); + + rc = hl_poll_timeout( + hdev, + cpu_boot_status_reg, + status, + (status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED), + 10000, + cpu_timeout); + + if (rc) { + dev_err(hdev->dev, + "Failed to get ACK on skipping BMC, %d\n", + status); + WREG32(msg_to_cpu_reg, KMD_MSG_NA); + rc = -EIO; + goto out; + } + } + + WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY); + + rc = hl_poll_timeout( + hdev, + cpu_boot_status_reg, + status, + (status == CPU_BOOT_STATUS_SRAM_AVAIL), + 10000, + cpu_timeout); + + /* Clear message */ + WREG32(msg_to_cpu_reg, KMD_MSG_NA); + + if (rc) { + if (status == CPU_BOOT_STATUS_FIT_CORRUPTED) + dev_err(hdev->dev, + "Device reports FIT image is corrupted\n"); + else + dev_err(hdev->dev, + "Failed to load firmware to device, %d\n", + status); + + rc = -EIO; + goto out; + } + + dev_info(hdev->dev, "Successfully loaded firmware to device\n"); + +out: + fw_read_errors(hdev, boot_err0_reg); + + return rc; +} diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h new file mode 100644 index 000000000..6ed974d2d --- /dev/null +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -0,0 +1,2078 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
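The boot-fit and firmware-load steps in hl_fw_init_cpu() above are both instances of one mailbox idiom: write a KMD_MSG_* code for the device CPU, poll the boot status register for the expected state, then clear the mailbox. A sketch of that idiom factored into a hypothetical helper:

static int fw_msg_and_wait(struct hl_device *hdev, u32 msg_to_cpu_reg,
			   u32 cpu_boot_status_reg, u32 msg,
			   u32 expected_status, u32 timeout_us)
{
	u32 status;
	int rc;

	WREG32(msg_to_cpu_reg, msg);	/* e.g. KMD_MSG_FIT_RDY */

	rc = hl_poll_timeout(hdev, cpu_boot_status_reg, status,
			     status == expected_status, 10000, timeout_us);

	WREG32(msg_to_cpu_reg, KMD_MSG_NA);	/* always clear the mailbox */

	return rc;	/* nonzero from the poll on timeout */
}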
+ * + */ + +#ifndef HABANALABSP_H_ +#define HABANALABSP_H_ + +#include "../include/common/cpucp_if.h" +#include "../include/common/qman_if.h" +#include <uapi/misc/habanalabs.h> + +#include <linux/cdev.h> +#include <linux/iopoll.h> +#include <linux/irqreturn.h> +#include <linux/dma-direction.h> +#include <linux/scatterlist.h> +#include <linux/hashtable.h> +#include <linux/bitfield.h> + +#define HL_NAME "habanalabs" + +/* Use upper bits of mmap offset to store habana driver specific information. + * bits[63:62] - Encode mmap type + * bits[45:0] - mmap offset value + * + * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these + * defines are w.r.t to PAGE_SIZE + */ +#define HL_MMAP_TYPE_SHIFT (62 - PAGE_SHIFT) +#define HL_MMAP_TYPE_MASK (0x3ull << HL_MMAP_TYPE_SHIFT) +#define HL_MMAP_TYPE_CB (0x2ull << HL_MMAP_TYPE_SHIFT) + +#define HL_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFull >> PAGE_SHIFT) +#define HL_MMAP_OFFSET_VALUE_GET(off) (off & HL_MMAP_OFFSET_VALUE_MASK) + +#define HL_PENDING_RESET_PER_SEC 30 + +#define HL_HARD_RESET_MAX_TIMEOUT 120 + +#define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */ + +#define HL_HEARTBEAT_PER_USEC 5000000 /* 5 s */ + +#define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */ + +#define HL_CPUCP_INFO_TIMEOUT_USEC 10000000 /* 10s */ +#define HL_CPUCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */ + +#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */ + +#define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */ + +#define HL_IDLE_BUSY_TS_ARR_SIZE 4096 + +/* Memory */ +#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */ + +/* MMU */ +#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */ + +/* + * HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream + * HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream + */ +#define HL_RSVD_SOBS 4 +#define HL_RSVD_MONS 2 + +#define HL_RSVD_SOBS_IN_USE 2 +#define HL_RSVD_MONS_IN_USE 1 + +#define HL_MAX_SOB_VAL (1 << 15) + +#define IS_POWER_OF_2(n) (n != 0 && ((n & (n - 1)) == 0)) +#define IS_MAX_PENDING_CS_VALID(n) (IS_POWER_OF_2(n) && (n > 1)) + +#define HL_PCI_NUM_BARS 6 + +#define HL_MAX_DCORES 4 + +/** + * struct pgt_info - MMU hop page info. + * @node: hash linked-list node for the pgts shadow hash of pgts. + * @phys_addr: physical address of the pgt. + * @shadow_addr: shadow hop in the host. + * @ctx: pointer to the owner ctx. + * @num_of_ptes: indicates how many ptes are used in the pgt. + * + * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop) + * is needed during mapping, a new page is allocated and this structure holds + * its essential information. During unmapping, if no valid PTEs remained in the + * page, it is freed with its pgt_info structure. + */ +struct pgt_info { + struct hlist_node node; + u64 phys_addr; + u64 shadow_addr; + struct hl_ctx *ctx; + int num_of_ptes; +}; + +struct hl_device; +struct hl_fpriv; + +/** + * enum hl_pci_match_mode - pci match mode per region + * @PCI_ADDRESS_MATCH_MODE: address match mode + * @PCI_BAR_MATCH_MODE: bar match mode + */ +enum hl_pci_match_mode { + PCI_ADDRESS_MATCH_MODE, + PCI_BAR_MATCH_MODE +}; + +/** + * enum hl_fw_component - F/W components to read version through registers. + * @FW_COMP_UBOOT: u-boot. + * @FW_COMP_PREBOOT: preboot. + */ +enum hl_fw_component { + FW_COMP_UBOOT, + FW_COMP_PREBOOT +}; + +/** + * enum hl_queue_type - Supported QUEUE types. + * @QUEUE_TYPE_NA: queue is not available. + * @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the + * host. 
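The mmap-offset scheme defined above packs a two-bit type tag into the top of the page offset and leaves bits [45:0] for the value. In vm_pgoff (page) units, which is what the defines are expressed in, encode and decode reduce to the following sketch; the function names are hypothetical.

/* Build the pgoff a user must pass to mmap() for a given CB id */
static u64 example_encode_cb_pgoff(u64 cb_id)
{
	return HL_MMAP_TYPE_CB | (cb_id & HL_MMAP_OFFSET_VALUE_MASK);
}

/* Inside the mmap handler: vma->vm_pgoff is already in pages */
static u64 example_decode_cb_id(unsigned long vm_pgoff)
{
	if ((vm_pgoff & HL_MMAP_TYPE_MASK) != HL_MMAP_TYPE_CB)
		return 0;	/* not a CB mapping */

	return HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
}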
+ * @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's + * memories and/or operates the compute engines. + * @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU. + * @QUEUE_TYPE_HW: queue of DMA and compute engines jobs, for which completion + * notifications are sent by H/W. + */ +enum hl_queue_type { + QUEUE_TYPE_NA, + QUEUE_TYPE_EXT, + QUEUE_TYPE_INT, + QUEUE_TYPE_CPU, + QUEUE_TYPE_HW +}; + +enum hl_cs_type { + CS_TYPE_DEFAULT, + CS_TYPE_SIGNAL, + CS_TYPE_WAIT +}; + +/* + * struct hl_inbound_pci_region - inbound region descriptor + * @mode: pci match mode for this region + * @addr: region target address + * @size: region size in bytes + * @offset_in_bar: offset within bar (address match mode) + * @bar: bar id + */ +struct hl_inbound_pci_region { + enum hl_pci_match_mode mode; + u64 addr; + u64 size; + u64 offset_in_bar; + u8 bar; +}; + +/* + * struct hl_outbound_pci_region - outbound region descriptor + * @addr: region target address + * @size: region size in bytes + */ +struct hl_outbound_pci_region { + u64 addr; + u64 size; +}; + +/* + * struct hl_hw_sob - H/W SOB info. + * @hdev: habanalabs device structure. + * @kref: refcount of this SOB. The SOB will reset once the refcount is zero. + * @sob_id: id of this SOB. + * @q_idx: the H/W queue that uses this SOB. + */ +struct hl_hw_sob { + struct hl_device *hdev; + struct kref kref; + u32 sob_id; + u32 q_idx; +}; + +/** + * struct hw_queue_properties - queue information. + * @type: queue type. + * @driver_only: true if only the driver is allowed to send a job to this queue, + * false otherwise. + * @requires_kernel_cb: true if a CB handle must be provided for jobs on this + * queue, false otherwise (a CB address must be provided). + * @supports_sync_stream: True if queue supports sync stream + */ +struct hw_queue_properties { + enum hl_queue_type type; + u8 driver_only; + u8 requires_kernel_cb; + u8 supports_sync_stream; +}; + +/** + * enum vm_type_t - virtual memory mapping request information. + * @VM_TYPE_USERPTR: mapping of user memory to device virtual address. + * @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address. + */ +enum vm_type_t { + VM_TYPE_USERPTR = 0x1, + VM_TYPE_PHYS_PACK = 0x2 +}; + +/** + * enum hl_device_hw_state - H/W device state. use this to understand whether + * to do reset before hw_init or not + * @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset + * @HL_DEVICE_HW_STATE_DIRTY: H/W state is dirty. i.e. we started to execute + * hw_init + */ +enum hl_device_hw_state { + HL_DEVICE_HW_STATE_CLEAN = 0, + HL_DEVICE_HW_STATE_DIRTY +}; + +/** + * struct hl_mmu_properties - ASIC specific MMU address translation properties. + * @start_addr: virtual start address of the memory region. + * @end_addr: virtual end address of the memory region. + * @hop0_shift: shift of hop 0 mask. + * @hop1_shift: shift of hop 1 mask. + * @hop2_shift: shift of hop 2 mask. + * @hop3_shift: shift of hop 3 mask. + * @hop4_shift: shift of hop 4 mask. + * @hop5_shift: shift of hop 5 mask. + * @hop0_mask: mask to get the PTE address in hop 0. + * @hop1_mask: mask to get the PTE address in hop 1. + * @hop2_mask: mask to get the PTE address in hop 2. + * @hop3_mask: mask to get the PTE address in hop 3. + * @hop4_mask: mask to get the PTE address in hop 4. + * @hop5_mask: mask to get the PTE address in hop 5. + * @page_size: default page size used to allocate memory. + * @num_hops: The amount of hops supported by the translation table. 
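Each hopN_shift/hopN_mask pair in the structure documented just above (and defined right after this note) extracts one translation level's index from a device virtual address; the walk then reads the PTE at hop_table + index * pte_size. One step of that walk, sketched for hop 0; the function name is hypothetical and pte_size would come from asic_fixed_properties.mmu_pte_size:

static u64 example_hop0_pte_addr(struct hl_mmu_properties *mmu_prop,
				 u64 hop0_addr, u64 virt_addr, u32 pte_size)
{
	/* this VA's index within the hop-0 table */
	u64 idx = (virt_addr & mmu_prop->hop0_mask) >> mmu_prop->hop0_shift;

	return hop0_addr + idx * pte_size;	/* address of the PTE slot */
}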
+ */ +struct hl_mmu_properties { + u64 start_addr; + u64 end_addr; + u64 hop0_shift; + u64 hop1_shift; + u64 hop2_shift; + u64 hop3_shift; + u64 hop4_shift; + u64 hop5_shift; + u64 hop0_mask; + u64 hop1_mask; + u64 hop2_mask; + u64 hop3_mask; + u64 hop4_mask; + u64 hop5_mask; + u32 page_size; + u32 num_hops; +}; + +/** + * struct asic_fixed_properties - ASIC specific immutable properties. + * @hw_queues_props: H/W queues properties. + * @cpucp_info: received various information from CPU-CP regarding the H/W, e.g. + * available sensors. + * @uboot_ver: F/W U-boot version. + * @preboot_ver: F/W Preboot version. + * @dmmu: DRAM MMU address translation properties. + * @pmmu: PCI (host) MMU address translation properties. + * @pmmu_huge: PCI (host) MMU address translation properties for memory + * allocated with huge pages. + * @sram_base_address: SRAM physical start address. + * @sram_end_address: SRAM physical end address. + * @sram_user_base_address - SRAM physical start address for user access. + * @dram_base_address: DRAM physical start address. + * @dram_end_address: DRAM physical end address. + * @dram_user_base_address: DRAM physical start address for user access. + * @dram_size: DRAM total size. + * @dram_pci_bar_size: size of PCI bar towards DRAM. + * @max_power_default: max power of the device after reset + * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page + * fault. + * @pcie_dbi_base_address: Base address of the PCIE_DBI block. + * @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register. + * @mmu_pgt_addr: base physical address in DRAM of MMU page tables. + * @mmu_dram_default_page_addr: DRAM default page physical address. + * @cb_va_start_addr: virtual start address of command buffers which are mapped + * to the device's MMU. + * @cb_va_end_addr: virtual end address of command buffers which are mapped to + * the device's MMU. + * @mmu_pgt_size: MMU page tables total size. + * @mmu_pte_size: PTE size in MMU page tables. + * @mmu_hop_table_size: MMU hop table size. + * @mmu_hop0_tables_total_size: total size of MMU hop0 tables. + * @dram_page_size: page size for MMU DRAM allocation. + * @cfg_size: configuration space size on SRAM. + * @sram_size: total size of SRAM. + * @max_asid: maximum number of open contexts (ASIDs). + * @num_of_events: number of possible internal H/W IRQs. + * @psoc_pci_pll_nr: PCI PLL NR value. + * @psoc_pci_pll_nf: PCI PLL NF value. + * @psoc_pci_pll_od: PCI PLL OD value. + * @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value. + * @psoc_timestamp_frequency: frequency of the psoc timestamp clock. + * @high_pll: high PLL frequency used by the device. + * @cb_pool_cb_cnt: number of CBs in the CB pool. + * @cb_pool_cb_size: size of each CB in the CB pool. + * @max_pending_cs: maximum of concurrent pending command submissions + * @max_queues: maximum amount of queues in the system + * @sync_stream_first_sob: first sync object available for sync stream use + * @sync_stream_first_mon: first monitor available for sync stream use + * @first_available_user_sob: first sob available for the user + * @first_available_user_mon: first monitor available for the user + * @tpc_enabled_mask: which TPCs are enabled. + * @completion_queues_count: number of completion queues. 
+ * @fw_security_disabled: true if security measures are disabled in firmware, + * false otherwise + */ +struct asic_fixed_properties { + struct hw_queue_properties *hw_queues_props; + struct cpucp_info cpucp_info; + char uboot_ver[VERSION_MAX_LEN]; + char preboot_ver[VERSION_MAX_LEN]; + struct hl_mmu_properties dmmu; + struct hl_mmu_properties pmmu; + struct hl_mmu_properties pmmu_huge; + u64 sram_base_address; + u64 sram_end_address; + u64 sram_user_base_address; + u64 dram_base_address; + u64 dram_end_address; + u64 dram_user_base_address; + u64 dram_size; + u64 dram_pci_bar_size; + u64 max_power_default; + u64 dram_size_for_default_page_mapping; + u64 pcie_dbi_base_address; + u64 pcie_aux_dbi_reg_addr; + u64 mmu_pgt_addr; + u64 mmu_dram_default_page_addr; + u64 cb_va_start_addr; + u64 cb_va_end_addr; + u32 mmu_pgt_size; + u32 mmu_pte_size; + u32 mmu_hop_table_size; + u32 mmu_hop0_tables_total_size; + u32 dram_page_size; + u32 cfg_size; + u32 sram_size; + u32 max_asid; + u32 num_of_events; + u32 psoc_pci_pll_nr; + u32 psoc_pci_pll_nf; + u32 psoc_pci_pll_od; + u32 psoc_pci_pll_div_factor; + u32 psoc_timestamp_frequency; + u32 high_pll; + u32 cb_pool_cb_cnt; + u32 cb_pool_cb_size; + u32 max_pending_cs; + u32 max_queues; + u16 sync_stream_first_sob; + u16 sync_stream_first_mon; + u16 first_available_user_sob[HL_MAX_DCORES]; + u16 first_available_user_mon[HL_MAX_DCORES]; + u8 tpc_enabled_mask; + u8 completion_queues_count; + u8 fw_security_disabled; +}; + +/** + * struct hl_fence - software synchronization primitive + * @completion: fence is implemented using completion + * @refcount: refcount for this fence + * @error: mark this fence with error + * + */ +struct hl_fence { + struct completion completion; + struct kref refcount; + int error; +}; + +/** + * struct hl_cs_compl - command submission completion object. + * @base_fence: hl fence object. + * @lock: spinlock to protect fence. + * @hdev: habanalabs device structure. + * @hw_sob: the H/W SOB used in this signal/wait CS. + * @cs_seq: command submission sequence number. + * @type: type of the CS - signal/wait. + * @sob_val: the SOB value that is used in this signal/wait CS. + */ +struct hl_cs_compl { + struct hl_fence base_fence; + spinlock_t lock; + struct hl_device *hdev; + struct hl_hw_sob *hw_sob; + u64 cs_seq; + enum hl_cs_type type; + u16 sob_val; +}; + +/* + * Command Buffers + */ + +/** + * struct hl_cb_mgr - describes a Command Buffer Manager. + * @cb_lock: protects cb_handles. + * @cb_handles: an idr to hold all command buffer handles. + */ +struct hl_cb_mgr { + spinlock_t cb_lock; + struct idr cb_handles; /* protected by cb_lock */ +}; + +/** + * struct hl_cb - describes a Command Buffer. + * @refcount: reference counter for usage of the CB. + * @hdev: pointer to device this CB belongs to. + * @ctx: pointer to the CB owner's context. + * @lock: spinlock to protect mmap/cs flows. + * @debugfs_list: node in debugfs list of command buffers. + * @pool_list: node in pool list of command buffers. + * @va_block_list: list of virtual addresses blocks of the CB if it is mapped to + * the device's MMU. + * @id: the CB's ID. + * @kernel_address: Holds the CB's kernel virtual address. + * @bus_address: Holds the CB's DMA address. + * @mmap_size: Holds the CB's size that was mmaped. + * @size: holds the CB's size. + * @cs_cnt: holds number of CS that this CB participates in. + * @mmap: true if the CB is currently mmaped to user. + * @is_pool: true if CB was acquired from the pool, false otherwise. 
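struct hl_fence above pairs a completion (for waiters) with a kref (for lifetime), plus an error field that the signaling side fills in. A typical wait path blocks on the completion, then inspects ->error, then drops its reference; a sketch, assuming the driver's hl_fence_put() kref helper:

static int example_wait_fence(struct hl_fence *fence, long timeout_jiffies)
{
	long left;
	int rc;

	left = wait_for_completion_interruptible_timeout(&fence->completion,
							 timeout_jiffies);
	if (left > 0)
		rc = fence->error;	/* completed, possibly with error */
	else if (!left)
		rc = -ETIMEDOUT;	/* timed out */
	else
		rc = -ERESTARTSYS;	/* interrupted by a signal */

	hl_fence_put(fence);		/* drop our reference */

	return rc;
}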
+ * @is_internal: true if the CB was internally allocated, false otherwise.
+ * @is_mmu_mapped: true if the CB is mapped to the device's MMU.
+ */
+struct hl_cb {
+	struct kref		refcount;
+	struct hl_device	*hdev;
+	struct hl_ctx		*ctx;
+	spinlock_t		lock;
+	struct list_head	debugfs_list;
+	struct list_head	pool_list;
+	struct list_head	va_block_list;
+	u64			id;
+	void			*kernel_address;
+	dma_addr_t		bus_address;
+	u32			mmap_size;
+	u32			size;
+	u32			cs_cnt;
+	u8			mmap;
+	u8			is_pool;
+	u8			is_internal;
+	u8			is_mmu_mapped;
+};
+
+
+/*
+ * QUEUES
+ */
+
+struct hl_cs_job;
+
+/* Queue length of external and HW queues */
+#define HL_QUEUE_LENGTH			4096
+#define HL_QUEUE_SIZE_IN_BYTES		(HL_QUEUE_LENGTH * HL_BD_SIZE)
+
+#if (HL_MAX_JOBS_PER_CS > HL_QUEUE_LENGTH)
+#error "HL_QUEUE_LENGTH must be greater than HL_MAX_JOBS_PER_CS"
+#endif
+
+/* HL_CQ_LENGTH is in units of struct hl_cq_entry */
+#define HL_CQ_LENGTH			HL_QUEUE_LENGTH
+#define HL_CQ_SIZE_IN_BYTES		(HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)
+
+/* Must be power of 2 */
+#define HL_EQ_LENGTH			64
+#define HL_EQ_SIZE_IN_BYTES		(HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
+
+/* Host <-> CPU-CP shared memory size */
+#define HL_CPU_ACCESSIBLE_MEM_SIZE	SZ_2M
+
+/**
+ * struct hl_hw_queue - describes a H/W transport queue.
+ * @hw_sob: array of the used H/W SOBs by this H/W queue.
+ * @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
+ * @queue_type: type of queue.
+ * @kernel_address: holds the queue's kernel virtual address.
+ * @bus_address: holds the queue's DMA address.
+ * @pi: holds the queue's pi value.
+ * @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
+ * @hw_queue_id: the id of the H/W queue.
+ * @cq_id: the id for the corresponding CQ for this H/W queue.
+ * @msi_vec: the IRQ number of the H/W queue.
+ * @int_queue_len: length of internal queue (number of entries).
+ * @next_sob_val: the next value to use for the currently used SOB.
+ * @base_sob_id: the base SOB id of the SOBs used by this queue.
+ * @base_mon_id: the base MON id of the MONs used by this queue.
+ * @valid: is the queue valid (we have array of 32 queues, not all of them
+ *         exist).
+ * @curr_sob_offset: the id offset to the currently used SOB from the
+ *                   HL_RSVD_SOBS that are being used by this queue.
+ * @supports_sync_stream: True if queue supports sync stream.
+ */
+struct hl_hw_queue {
+	struct hl_hw_sob	hw_sob[HL_RSVD_SOBS];
+	struct hl_cs_job	**shadow_queue;
+	enum hl_queue_type	queue_type;
+	void			*kernel_address;
+	dma_addr_t		bus_address;
+	u32			pi;
+	atomic_t		ci;
+	u32			hw_queue_id;
+	u32			cq_id;
+	u32			msi_vec;
+	u16			int_queue_len;
+	u16			next_sob_val;
+	u16			base_sob_id;
+	u16			base_mon_id;
+	u8			valid;
+	u8			curr_sob_offset;
+	u8			supports_sync_stream;
+};
+
+/**
+ * struct hl_cq - describes a completion queue.
+ * @hdev: pointer to the device structure.
+ * @kernel_address: holds the queue's kernel virtual address.
+ * @bus_address: holds the queue's DMA address.
+ * @cq_idx: completion queue index in array.
+ * @hw_queue_id: the id of the matching H/W queue.
+ * @ci: ci inside the queue.
+ * @pi: pi inside the queue.
+ * @free_slots_cnt: counter of free slots in queue.
+ */
+struct hl_cq {
+	struct hl_device	*hdev;
+	void			*kernel_address;
+	dma_addr_t		bus_address;
+	u32			cq_idx;
+	u32			hw_queue_id;
+	u32			ci;
+	u32			pi;
+	atomic_t		free_slots_cnt;
+};
+
+/**
+ * struct hl_eq - describes the event queue (single one per device).
+ * @hdev: pointer to the device structure.
+ * @kernel_address: holds the queue's kernel virtual address.
+ * @bus_address: holds the queue's DMA address.
+ * @ci: ci inside the queue.
+ */
+struct hl_eq {
+	struct hl_device	*hdev;
+	void			*kernel_address;
+	dma_addr_t		bus_address;
+	u32			ci;
+};
+
+
+/*
+ * ASICs
+ */
+
+/**
+ * enum hl_asic_type - supported ASIC types.
+ * @ASIC_INVALID: Invalid ASIC type.
+ * @ASIC_GOYA: Goya device.
+ * @ASIC_GAUDI: Gaudi device.
+ */
+enum hl_asic_type {
+	ASIC_INVALID,
+	ASIC_GOYA,
+	ASIC_GAUDI
+};
+
+struct hl_cs_parser;
+
+/**
+ * enum hl_pm_mng_profile - power management profile.
+ * @PM_AUTO: internal clock is set by the Linux driver.
+ * @PM_MANUAL: internal clock is set by the user.
+ * @PM_LAST: last power management type.
+ */
+enum hl_pm_mng_profile {
+	PM_AUTO = 1,
+	PM_MANUAL,
+	PM_LAST
+};
+
+/**
+ * enum hl_pll_frequency - PLL frequency.
+ * @PLL_HIGH: high frequency.
+ * @PLL_LOW: low frequency.
+ * @PLL_LAST: last frequency values that were configured by the user.
+ */
+enum hl_pll_frequency {
+	PLL_HIGH = 1,
+	PLL_LOW,
+	PLL_LAST
+};
+
+#define PLL_REF_CLK 50
+
+enum div_select_defs {
+	DIV_SEL_REF_CLK = 0,
+	DIV_SEL_PLL_CLK = 1,
+	DIV_SEL_DIVIDED_REF = 2,
+	DIV_SEL_DIVIDED_PLL = 3,
+};
+
+/**
+ * struct hl_asic_funcs - ASIC specific functions that can be called from
+ *                        common code.
+ * @early_init: sets up early driver state (pre sw_init), doesn't configure H/W.
+ * @early_fini: tears down what was done in early_init.
+ * @late_init: sets up late driver/hw state (post hw_init) - Optional.
+ * @late_fini: tears down what was done in late_init (pre hw_fini) - Optional.
+ * @sw_init: sets up driver state, does not configure H/W.
+ * @sw_fini: tears down driver state, does not configure H/W.
+ * @hw_init: sets up the H/W state.
+ * @hw_fini: tears down the H/W state.
+ * @halt_engines: halt engines, needed for reset sequence. This also disables
+ *                interrupts from the device. Should be called before
+ *                hw_fini and before CS rollback.
+ * @suspend: handles IP specific H/W or SW changes for suspend.
+ * @resume: handles IP specific H/W or SW changes for resume.
+ * @cb_mmap: maps a CB.
+ * @ring_doorbell: increment PI on a given QMAN.
+ * @pqe_write: Write the PQ entry to the PQ. This is an ASIC-specific
+ *             function because the PQs are located in different memory areas
+ *             per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
+ *             writing the PQE must match the destination memory area
+ *             properties.
+ * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
+ *                           dma_alloc_coherent(). This is an ASIC-specific
+ *                           function because its implementation is not trivial
+ *                           when the driver is loaded in simulation mode
+ *                           (not upstreamed).
+ * @asic_dma_free_coherent: Free coherent DMA memory by calling
+ *                          dma_free_coherent(). This is an ASIC-specific
+ *                          function because its implementation is not trivial
+ *                          when the driver is loaded in simulation mode
+ *                          (not upstreamed).
+ * @get_int_queue_base: get the internal queue base address.
+ * @test_queues: run simple test on all queues for sanity check.
+ * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
+ *                        size of allocation is HL_DMA_POOL_BLK_SIZE.
+ * @asic_dma_pool_free: free small DMA allocation from pool.
+ * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
+ * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
+ * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
+ * @cs_parser: parse Command Submission.
+ * @asic_dma_map_sg: DMA map scatter-gather list.
+ * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
+ * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
+ * @update_eq_ci: update event queue CI.
+ * @context_switch: called upon ASID context switch.
+ * @restore_phase_topology: clear all SOBs and MONs.
+ * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM.
+ * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM.
+ * @debugfs_read64: debug interface for reading u64 from DRAM/SRAM.
+ * @debugfs_write64: debug interface for writing u64 to DRAM/SRAM.
+ * @add_device_attr: add ASIC specific device attributes.
+ * @handle_eqe: handle event queue entry (IRQ) from CPU-CP.
+ * @set_pll_profile: change PLL profile (manual/automatic).
+ * @get_events_stat: retrieve event queue entries histogram.
+ * @read_pte: read MMU page table entry from DRAM.
+ * @write_pte: write MMU page table entry to DRAM.
+ * @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft
+ *                        (L1 only) or hard (L0 & L1) flush.
+ * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
+ *                              ASID-VA-size mask.
+ * @send_heartbeat: send is-alive packet to CPU-CP and verify response.
+ * @set_clock_gating: enable/disable clock gating per engine according to
+ *                    clock gating mask in hdev.
+ * @disable_clock_gating: disable clock gating completely.
+ * @debug_coresight: perform certain actions on Coresight for debugging.
+ * @is_device_idle: return true if device is idle, false otherwise.
+ * @soft_reset_late_init: perform certain actions needed after soft reset.
+ * @hw_queues_lock: acquire H/W queues lock.
+ * @hw_queues_unlock: release H/W queues lock.
+ * @get_pci_id: retrieve PCI ID.
+ * @get_eeprom_data: retrieve EEPROM data from F/W.
+ * @send_cpu_message: send message to F/W. If the message times out, the
+ *                    driver will eventually reset the device. The timeout can
+ *                    be determined by the calling function or it can be 0 and
+ *                    then the timeout is the default timeout for the specific
+ *                    ASIC.
+ * @get_hw_state: retrieve the H/W state.
+ * @pci_bars_map: Map PCI BARs.
+ * @init_iatu: Initialize the iATU unit inside the PCI controller.
+ * @rreg: Read a register. Needed for simulator support.
+ * @wreg: Write a register. Needed for simulator support.
+ * @halt_coresight: stop the ETF and ETR traces.
+ * @ctx_init: context dependent initialization. + * @get_clk_rate: Retrieve the ASIC current and maximum clock rate in MHz + * @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index. + * @read_device_fw_version: read the device's firmware versions that are + * contained in registers + * @load_firmware_to_device: load the firmware to the device's memory + * @load_boot_fit_to_device: load boot fit to device's memory + * @get_signal_cb_size: Get signal CB size. + * @get_wait_cb_size: Get wait CB size. + * @gen_signal_cb: Generate a signal CB. + * @gen_wait_cb: Generate a wait CB. + * @reset_sob: Reset a SOB. + * @set_dma_mask_from_fw: set the DMA mask in the driver according to the + * firmware configuration + * @get_device_time: Get the device time. + */ +struct hl_asic_funcs { + int (*early_init)(struct hl_device *hdev); + int (*early_fini)(struct hl_device *hdev); + int (*late_init)(struct hl_device *hdev); + void (*late_fini)(struct hl_device *hdev); + int (*sw_init)(struct hl_device *hdev); + int (*sw_fini)(struct hl_device *hdev); + int (*hw_init)(struct hl_device *hdev); + void (*hw_fini)(struct hl_device *hdev, bool hard_reset); + void (*halt_engines)(struct hl_device *hdev, bool hard_reset); + int (*suspend)(struct hl_device *hdev); + int (*resume)(struct hl_device *hdev); + int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size); + void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi); + void (*pqe_write)(struct hl_device *hdev, __le64 *pqe, + struct hl_bd *bd); + void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size, + dma_addr_t *dma_handle, gfp_t flag); + void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size, + void *cpu_addr, dma_addr_t dma_handle); + void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id, + dma_addr_t *dma_handle, u16 *queue_len); + int (*test_queues)(struct hl_device *hdev); + void* (*asic_dma_pool_zalloc)(struct hl_device *hdev, size_t size, + gfp_t mem_flags, dma_addr_t *dma_handle); + void (*asic_dma_pool_free)(struct hl_device *hdev, void *vaddr, + dma_addr_t dma_addr); + void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev, + size_t size, dma_addr_t *dma_handle); + void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev, + size_t size, void *vaddr); + void (*hl_dma_unmap_sg)(struct hl_device *hdev, + struct scatterlist *sgl, int nents, + enum dma_data_direction dir); + int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser); + int (*asic_dma_map_sg)(struct hl_device *hdev, + struct scatterlist *sgl, int nents, + enum dma_data_direction dir); + u32 (*get_dma_desc_list_size)(struct hl_device *hdev, + struct sg_table *sgt); + void (*add_end_of_cb_packets)(struct hl_device *hdev, + void *kernel_address, u32 len, + u64 cq_addr, u32 cq_val, u32 msix_num, + bool eb); + void (*update_eq_ci)(struct hl_device *hdev, u32 val); + int (*context_switch)(struct hl_device *hdev, u32 asid); + void (*restore_phase_topology)(struct hl_device *hdev); + int (*debugfs_read32)(struct hl_device *hdev, u64 addr, u32 *val); + int (*debugfs_write32)(struct hl_device *hdev, u64 addr, u32 val); + int (*debugfs_read64)(struct hl_device *hdev, u64 addr, u64 *val); + int (*debugfs_write64)(struct hl_device *hdev, u64 addr, u64 val); + void (*add_device_attr)(struct hl_device *hdev, + struct attribute_group *dev_attr_grp); + void (*handle_eqe)(struct hl_device *hdev, + struct hl_eq_entry *eq_entry); + void 
(*set_pll_profile)(struct hl_device *hdev, + enum hl_pll_frequency freq); + void* (*get_events_stat)(struct hl_device *hdev, bool aggregate, + u32 *size); + u64 (*read_pte)(struct hl_device *hdev, u64 addr); + void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val); + int (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard, + u32 flags); + int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard, + u32 asid, u64 va, u64 size); + int (*send_heartbeat)(struct hl_device *hdev); + void (*set_clock_gating)(struct hl_device *hdev); + void (*disable_clock_gating)(struct hl_device *hdev); + int (*debug_coresight)(struct hl_device *hdev, void *data); + bool (*is_device_idle)(struct hl_device *hdev, u64 *mask, + struct seq_file *s); + int (*soft_reset_late_init)(struct hl_device *hdev); + void (*hw_queues_lock)(struct hl_device *hdev); + void (*hw_queues_unlock)(struct hl_device *hdev); + u32 (*get_pci_id)(struct hl_device *hdev); + int (*get_eeprom_data)(struct hl_device *hdev, void *data, + size_t max_size); + int (*send_cpu_message)(struct hl_device *hdev, u32 *msg, + u16 len, u32 timeout, long *result); + enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev); + int (*pci_bars_map)(struct hl_device *hdev); + int (*init_iatu)(struct hl_device *hdev); + u32 (*rreg)(struct hl_device *hdev, u32 reg); + void (*wreg)(struct hl_device *hdev, u32 reg, u32 val); + void (*halt_coresight)(struct hl_device *hdev); + int (*ctx_init)(struct hl_ctx *ctx); + int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk); + u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx); + void (*read_device_fw_version)(struct hl_device *hdev, + enum hl_fw_component fwc); + int (*load_firmware_to_device)(struct hl_device *hdev); + int (*load_boot_fit_to_device)(struct hl_device *hdev); + u32 (*get_signal_cb_size)(struct hl_device *hdev); + u32 (*get_wait_cb_size)(struct hl_device *hdev); + void (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id); + void (*gen_wait_cb)(struct hl_device *hdev, void *data, u16 sob_id, + u16 sob_val, u16 mon_id, u32 q_idx); + void (*reset_sob)(struct hl_device *hdev, void *data); + void (*set_dma_mask_from_fw)(struct hl_device *hdev); + u64 (*get_device_time)(struct hl_device *hdev); +}; + + +/* + * CONTEXTS + */ + +#define HL_KERNEL_ASID_ID 0 + +/** + * struct hl_va_range - virtual addresses range. + * @lock: protects the virtual addresses list. + * @list: list of virtual addresses blocks available for mappings. + * @start_addr: range start address. + * @end_addr: range end address. + */ +struct hl_va_range { + struct mutex lock; + struct list_head list; + u64 start_addr; + u64 end_addr; +}; + +/** + * struct hl_ctx - user/kernel context. + * @mem_hash: holds mapping from virtual address to virtual memory area + * descriptor (hl_vm_phys_pg_list or hl_userptr). + * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure. + * @hpriv: pointer to the private (Kernel Driver) data of the process (fd). + * @hdev: pointer to the device structure. + * @refcount: reference counter for the context. Context is released only when + * this hits 0l. It is incremented on CS and CS_WAIT. + * @cs_pending: array of hl fence objects representing pending CS. + * @host_va_range: holds available virtual addresses for host mappings. + * @host_huge_va_range: holds available virtual addresses for host mappings + * with huge pages. + * @dram_va_range: holds available virtual addresses for DRAM mappings. 
+ * @mem_hash_lock: protects the mem_hash.
+ * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying
+ *            the MMU hash or walking the PGT requires taking this lock.
+ * @debugfs_list: node in debugfs list of contexts.
+ * @cs_counters: command submission counters of this context.
+ * @cb_va_pool: device VA pool for command buffers which are mapped to the
+ *              device's MMU.
+ * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
+ *               to the user so the user can inquire about the CS. It is used
+ *               as an index to the cs_pending array.
+ * @dram_default_hops: array that holds all hops addresses needed for default
+ *                     DRAM mapping.
+ * @cs_lock: spinlock to protect cs_sequence.
+ * @dram_phys_mem: amount of physical DRAM memory used by this context.
+ * @thread_ctx_switch_token: token to prevent multiple threads of the same
+ *                           context from running the context switch phase.
+ *                           Only a single thread should run it.
+ * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
+ *                                the context switch phase from moving to their
+ *                                execution phase before the context switch
+ *                                phase has finished.
+ * @asid: context's unique address space ID in the device's MMU.
+ * @handle: context's opaque handle for user.
+ */
+struct hl_ctx {
+	DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
+	DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
+	struct hl_fpriv		*hpriv;
+	struct hl_device	*hdev;
+	struct kref		refcount;
+	struct hl_fence		**cs_pending;
+	struct hl_va_range	*host_va_range;
+	struct hl_va_range	*host_huge_va_range;
+	struct hl_va_range	*dram_va_range;
+	struct mutex		mem_hash_lock;
+	struct mutex		mmu_lock;
+	struct list_head	debugfs_list;
+	struct hl_cs_counters	cs_counters;
+	struct gen_pool		*cb_va_pool;
+	u64			cs_sequence;
+	u64			*dram_default_hops;
+	spinlock_t		cs_lock;
+	atomic64_t		dram_phys_mem;
+	atomic_t		thread_ctx_switch_token;
+	u32			thread_ctx_switch_wait_token;
+	u32			asid;
+	u32			handle;
+};
+
+/**
+ * struct hl_ctx_mgr - for handling multiple contexts.
+ * @ctx_lock: protects ctx_handles.
+ * @ctx_handles: idr to hold all ctx handles.
+ */
+struct hl_ctx_mgr {
+	struct mutex		ctx_lock;
+	struct idr		ctx_handles;
+};
+
+
+
+/*
+ * COMMAND SUBMISSIONS
+ */
+
+/**
+ * struct hl_userptr - memory mapping chunk information.
+ * @vm_type: type of the VM.
+ * @job_node: linked-list node for hanging the object on the Job's list.
+ * @vec: pointer to the frame vector.
+ * @sgt: pointer to the scatter-gather table that holds the pages.
+ * @dir: for DMA unmapping, the direction must be supplied, so save it.
+ * @debugfs_list: node in debugfs list of command submissions.
+ * @addr: user-space virtual address of the start of the memory area.
+ * @size: size of the memory area to pin & map.
+ * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
+ */
+struct hl_userptr {
+	enum vm_type_t		vm_type; /* must be first */
+	struct list_head	job_node;
+	struct frame_vector	*vec;
+	struct sg_table		*sgt;
+	enum dma_data_direction dir;
+	struct list_head	debugfs_list;
+	u64			addr;
+	u32			size;
+	u8			dma_mapped;
+};
+
+/**
+ * struct hl_cs - command submission.
+ * @jobs_in_queue_cnt: per-queue counter of submitted jobs.
+ * @ctx: the context this CS belongs to.
+ * @job_list: list of the CS's jobs in the various queues.
+ * @job_lock: spinlock for the CS's jobs list. Needed for free_job.
+ * @refcount: reference counter for usage of the CS.
+ * @fence: pointer to the fence object of this CS.
+ * @signal_fence: pointer to the fence object of the signal CS (used by wait
+ *                CS only).
+ * @finish_work: workqueue object to run when CS is completed by H/W.
+ * @work_tdr: delayed work node for TDR.
+ * @mirror_node: node in device mirror list of command submissions.
+ * @debugfs_list: node in debugfs list of command submissions.
+ * @sequence: the sequence number of this CS.
+ * @type: CS_TYPE_*.
+ * @submitted: true if CS was submitted to H/W.
+ * @completed: true if CS was completed by device.
+ * @timedout: true if CS has timed out.
+ * @tdr_active: true if TDR was activated for this CS (to prevent
+ *              double TDR activation).
+ * @aborted: true if CS was aborted due to some device error.
+ */
+struct hl_cs {
+	u16			*jobs_in_queue_cnt;
+	struct hl_ctx		*ctx;
+	struct list_head	job_list;
+	spinlock_t		job_lock;
+	struct kref		refcount;
+	struct hl_fence		*fence;
+	struct hl_fence		*signal_fence;
+	struct work_struct	finish_work;
+	struct delayed_work	work_tdr;
+	struct list_head	mirror_node;
+	struct list_head	debugfs_list;
+	u64			sequence;
+	enum hl_cs_type		type;
+	u8			submitted;
+	u8			completed;
+	u8			timedout;
+	u8			tdr_active;
+	u8			aborted;
+};
+
+/**
+ * struct hl_cs_job - command submission job.
+ * @cs_node: the node to hang on the CS jobs list.
+ * @cs: the CS this job belongs to.
+ * @user_cb: the CB we got from the user.
+ * @patched_cb: in case of patching, this is internal CB which is submitted on
+ *              the queue instead of the CB we got from the IOCTL.
+ * @finish_work: workqueue object to run when job is completed.
+ * @userptr_list: linked-list of userptr mappings that belong to this job and
+ *                wait for completion.
+ * @debugfs_list: node in debugfs list of command submission jobs.
+ * @queue_type: the type of the H/W queue this job is submitted to.
+ * @id: the id of this job inside a CS.
+ * @hw_queue_id: the id of the H/W queue this job is submitted to.
+ * @user_cb_size: the actual size of the CB we got from the user.
+ * @job_cb_size: the actual size of the CB that we put on the queue.
+ * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
+ *                          handle to a kernel-allocated CB object, false
+ *                          otherwise (SRAM/DRAM/host address).
+ * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
+ *                    info is needed later, when adding the 2xMSG_PROT at the
+ *                    end of the JOB, to know which barriers to put in the
+ *                    MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
+ *                    have streams so the engine can't be busy with another
+ *                    stream.
+ */
+struct hl_cs_job {
+	struct list_head	cs_node;
+	struct hl_cs		*cs;
+	struct hl_cb		*user_cb;
+	struct hl_cb		*patched_cb;
+	struct work_struct	finish_work;
+	struct list_head	userptr_list;
+	struct list_head	debugfs_list;
+	enum hl_queue_type	queue_type;
+	u32			id;
+	u32			hw_queue_id;
+	u32			user_cb_size;
+	u32			job_cb_size;
+	u8			is_kernel_allocated_cb;
+	u8			contains_dma_pkt;
+};
+
+/**
+ * struct hl_cs_parser - command submission parser properties.
+ * @user_cb: the CB we got from the user.
+ * @patched_cb: in case of patching, this is internal CB which is submitted on
+ *              the queue instead of the CB we got from the IOCTL.
+ * @job_userptr_list: linked-list of userptr mappings that belong to the
+ *                    related job and wait for completion.
+ * @cs_sequence: the sequence number of the related CS.
+ * @queue_type: the type of the H/W queue this job is submitted to.
+ * @ctx_id: the ID of the context the related CS belongs to.
+ * @hw_queue_id: the id of the H/W queue this job is submitted to.
+ * @user_cb_size: the actual size of the CB we got from the user.
+ * @patched_cb_size: the size of the CB after parsing. + * @job_id: the id of the related job inside the related CS. + * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a + * handle to a kernel-allocated CB object, false + * otherwise (SRAM/DRAM/host address). + * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This + * info is needed later, when adding the 2xMSG_PROT at the + * end of the JOB, to know which barriers to put in the + * MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't + * have streams so the engine can't be busy by another + * stream. + */ +struct hl_cs_parser { + struct hl_cb *user_cb; + struct hl_cb *patched_cb; + struct list_head *job_userptr_list; + u64 cs_sequence; + enum hl_queue_type queue_type; + u32 ctx_id; + u32 hw_queue_id; + u32 user_cb_size; + u32 patched_cb_size; + u8 job_id; + u8 is_kernel_allocated_cb; + u8 contains_dma_pkt; +}; + + +/* + * MEMORY STRUCTURE + */ + +/** + * struct hl_vm_hash_node - hash element from virtual address to virtual + * memory area descriptor (hl_vm_phys_pg_list or + * hl_userptr). + * @node: node to hang on the hash table in context object. + * @vaddr: key virtual address. + * @ptr: value pointer (hl_vm_phys_pg_list or hl_userptr). + */ +struct hl_vm_hash_node { + struct hlist_node node; + u64 vaddr; + void *ptr; +}; + +/** + * struct hl_vm_phys_pg_pack - physical page pack. + * @vm_type: describes the type of the virtual area descriptor. + * @pages: the physical page array. + * @npages: num physical pages in the pack. + * @total_size: total size of all the pages in this list. + * @mapping_cnt: number of shared mappings. + * @asid: the context related to this list. + * @page_size: size of each page in the pack. + * @flags: HL_MEM_* flags related to this list. + * @handle: the provided handle related to this list. + * @offset: offset from the first page. + * @contiguous: is contiguous physical memory. + * @created_from_userptr: is product of host virtual address. + */ +struct hl_vm_phys_pg_pack { + enum vm_type_t vm_type; /* must be first */ + u64 *pages; + u64 npages; + u64 total_size; + atomic_t mapping_cnt; + u32 asid; + u32 page_size; + u32 flags; + u32 handle; + u32 offset; + u8 contiguous; + u8 created_from_userptr; +}; + +/** + * struct hl_vm_va_block - virtual range block information. + * @node: node to hang on the virtual range list in context object. + * @start: virtual range start address. + * @end: virtual range end address. + * @size: virtual range size. + */ +struct hl_vm_va_block { + struct list_head node; + u64 start; + u64 end; + u64 size; +}; + +/** + * struct hl_vm - virtual memory manager for MMU. + * @dram_pg_pool: pool for DRAM physical pages of 2MB. + * @dram_pg_pool_refcount: reference counter for the pool usage. + * @idr_lock: protects the phys_pg_list_handles. + * @phys_pg_pack_handles: idr to hold all device allocations handles. + * @init_done: whether initialization was done. We need this because VM + * initialization might be skipped during device initialization. + */ +struct hl_vm { + struct gen_pool *dram_pg_pool; + struct kref dram_pg_pool_refcount; + spinlock_t idr_lock; + struct idr phys_pg_pack_handles; + u8 init_done; +}; + + +/* + * DEBUG, PROFILING STRUCTURE + */ + +/** + * struct hl_debug_params - Coresight debug parameters. + * @input: pointer to component specific input parameters. + * @output: pointer to component specific output parameters. + * @output_size: size of output buffer. + * @reg_idx: relevant register ID. 
+ * @op: component operation to execute.
+ * @enable: true to enable component debugging, false otherwise.
+ */
+struct hl_debug_params {
+	void	*input;
+	void	*output;
+	u32	output_size;
+	u32	reg_idx;
+	u32	op;
+	bool	enable;
+};
+
+/*
+ * FILE PRIVATE STRUCTURE
+ */
+
+/**
+ * struct hl_fpriv - process information stored in FD private data.
+ * @hdev: habanalabs device structure.
+ * @filp: pointer to the given file structure.
+ * @taskpid: current process ID.
+ * @ctx: current executing context. TODO: remove for multiple ctx per process
+ * @ctx_mgr: context manager to handle multiple contexts for this FD.
+ * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
+ * @debugfs_list: list of relevant ASIC debugfs.
+ * @dev_node: node in the device list of file private data.
+ * @refcount: number of related contexts.
+ * @restore_phase_mutex: lock for context switch and restore phase.
+ * @is_control: true for control device, false otherwise.
+ */
+struct hl_fpriv {
+	struct hl_device	*hdev;
+	struct file		*filp;
+	struct pid		*taskpid;
+	struct hl_ctx		*ctx;
+	struct hl_ctx_mgr	ctx_mgr;
+	struct hl_cb_mgr	cb_mgr;
+	struct list_head	debugfs_list;
+	struct list_head	dev_node;
+	struct kref		refcount;
+	struct mutex		restore_phase_mutex;
+	u8			is_control;
+};
+
+
+/*
+ * DebugFS
+ */
+
+/**
+ * struct hl_info_list - debugfs file ops.
+ * @name: file name.
+ * @show: function to output information.
+ * @write: function to write to the file.
+ */
+struct hl_info_list {
+	const char	*name;
+	int		(*show)(struct seq_file *s, void *data);
+	ssize_t		(*write)(struct file *file, const char __user *buf,
+				size_t count, loff_t *f_pos);
+};
+
+/**
+ * struct hl_debugfs_entry - debugfs dentry wrapper.
+ * @dent: base debugfs entry structure.
+ * @info_ent: dentry related ops.
+ * @dev_entry: ASIC specific debugfs manager.
+ */
+struct hl_debugfs_entry {
+	struct dentry			*dent;
+	const struct hl_info_list	*info_ent;
+	struct hl_dbg_device_entry	*dev_entry;
+};
+
+/**
+ * struct hl_dbg_device_entry - ASIC specific debugfs manager.
+ * @root: root dentry.
+ * @hdev: habanalabs device structure.
+ * @entry_arr: array of available hl_debugfs_entry.
+ * @file_list: list of available debugfs files.
+ * @file_mutex: protects file_list.
+ * @cb_list: list of available CBs.
+ * @cb_spinlock: protects cb_list.
+ * @cs_list: list of available CSs.
+ * @cs_spinlock: protects cs_list.
+ * @cs_job_list: list of available CB jobs.
+ * @cs_job_spinlock: protects cs_job_list.
+ * @userptr_list: list of available userptrs (virtual memory chunk descriptor).
+ * @userptr_spinlock: protects userptr_list.
+ * @ctx_mem_hash_list: list of available contexts with MMU mappings.
+ * @ctx_mem_hash_spinlock: protects ctx_mem_hash_list.
+ * @addr: next address to read/write from/to in read/write32.
+ * @mmu_addr: next virtual address to translate to physical address in mmu_show.
+ * @mmu_asid: ASID to use while translating in mmu_show.
+ * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
+ * @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
+ * @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
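+ *
+ * Illustrative sketch (not part of the driver): fields like these typically
+ * back debugfs nodes under /sys/kernel/debug/habanalabs/hl<n>/. Assuming
+ * such a layout, an I2C read could be staged from userspace roughly as
+ * follows (paths and node names are assumptions for illustration only):
+ *
+ *	echo 0    > /sys/kernel/debug/habanalabs/hl0/i2c_bus
+ *	echo 0x50 > /sys/kernel/debug/habanalabs/hl0/i2c_addr
+ *	echo 0x1  > /sys/kernel/debug/habanalabs/hl0/i2c_reg
+ *	cat /sys/kernel/debug/habanalabs/hl0/i2c_data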
+ */ +struct hl_dbg_device_entry { + struct dentry *root; + struct hl_device *hdev; + struct hl_debugfs_entry *entry_arr; + struct list_head file_list; + struct mutex file_mutex; + struct list_head cb_list; + spinlock_t cb_spinlock; + struct list_head cs_list; + spinlock_t cs_spinlock; + struct list_head cs_job_list; + spinlock_t cs_job_spinlock; + struct list_head userptr_list; + spinlock_t userptr_spinlock; + struct list_head ctx_mem_hash_list; + spinlock_t ctx_mem_hash_spinlock; + u64 addr; + u64 mmu_addr; + u32 mmu_asid; + u8 i2c_bus; + u8 i2c_addr; + u8 i2c_reg; +}; + + +/* + * DEVICES + */ + +/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe + * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards. + */ +#define HL_MAX_MINORS 256 + +/* + * Registers read & write functions. + */ + +u32 hl_rreg(struct hl_device *hdev, u32 reg); +void hl_wreg(struct hl_device *hdev, u32 reg, u32 val); + +#define RREG32(reg) hdev->asic_funcs->rreg(hdev, (reg)) +#define WREG32(reg, v) hdev->asic_funcs->wreg(hdev, (reg), (v)) +#define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n", \ + hdev->asic_funcs->rreg(hdev, (reg))) + +#define WREG32_P(reg, val, mask) \ + do { \ + u32 tmp_ = RREG32(reg); \ + tmp_ &= (mask); \ + tmp_ |= ((val) & ~(mask)); \ + WREG32(reg, tmp_); \ + } while (0) +#define WREG32_AND(reg, and) WREG32_P(reg, 0, and) +#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) + +#define RMWREG32(reg, val, mask) \ + do { \ + u32 tmp_ = RREG32(reg); \ + tmp_ &= ~(mask); \ + tmp_ |= ((val) << __ffs(mask)); \ + WREG32(reg, tmp_); \ + } while (0) + +#define RREG32_MASK(reg, mask) ((RREG32(reg) & mask) >> __ffs(mask)) + +#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT +#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK +#define WREG32_FIELD(reg, offset, field, val) \ + WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & \ + ~REG_FIELD_MASK(reg, field)) | \ + (val) << REG_FIELD_SHIFT(reg, field)) + +/* Timeout should be longer when working with simulator but cap the + * increased timeout to some maximum + */ +#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \ +({ \ + ktime_t __timeout; \ + if (hdev->pdev) \ + __timeout = ktime_add_us(ktime_get(), timeout_us); \ + else \ + __timeout = ktime_add_us(ktime_get(),\ + min((u64)(timeout_us * 10), \ + (u64) HL_SIM_MAX_TIMEOUT_US)); \ + might_sleep_if(sleep_us); \ + for (;;) { \ + (val) = RREG32(addr); \ + if (cond) \ + break; \ + if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \ + (val) = RREG32(addr); \ + break; \ + } \ + if (sleep_us) \ + usleep_range((sleep_us >> 2) + 1, sleep_us); \ + } \ + (cond) ? 0 : -ETIMEDOUT; \ +}) + +/* + * address in this macro points always to a memory location in the + * host's (server's) memory. That location is updated asynchronously + * either by the direct access of the device or by another core. + * + * To work both in LE and BE architectures, we need to distinguish between the + * two states (device or another core updates the memory location). Therefore, + * if mem_written_by_device is true, the host memory being polled will be + * updated directly by the device. If false, the host memory being polled will + * be updated by host CPU. Required so host knows whether or not the memory + * might need to be byte-swapped before returning value to caller. 
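+ *
+ * A minimal usage sketch (illustrative only; "fence_va" is a hypothetical
+ * kernel virtual address, u32 *, of a host buffer that the device writes
+ * to when it is done):
+ *
+ *	u32 status;
+ *	int rc;
+ *
+ *	rc = hl_poll_timeout_memory(hdev, fence_va, status,
+ *				(status == 1), 100, 1000000, true);
+ *	if (rc == -ETIMEDOUT)
+ *		dev_err(hdev->dev, "device did not update the fence\n");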
+ */ +#define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us, \ + mem_written_by_device) \ +({ \ + ktime_t __timeout; \ + if (hdev->pdev) \ + __timeout = ktime_add_us(ktime_get(), timeout_us); \ + else \ + __timeout = ktime_add_us(ktime_get(),\ + min((u64)(timeout_us * 10), \ + (u64) HL_SIM_MAX_TIMEOUT_US)); \ + might_sleep_if(sleep_us); \ + for (;;) { \ + /* Verify we read updates done by other cores or by device */ \ + mb(); \ + (val) = *((u32 *)(addr)); \ + if (mem_written_by_device) \ + (val) = le32_to_cpu(*(__le32 *) &(val)); \ + if (cond) \ + break; \ + if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \ + (val) = *((u32 *)(addr)); \ + if (mem_written_by_device) \ + (val) = le32_to_cpu(*(__le32 *) &(val)); \ + break; \ + } \ + if (sleep_us) \ + usleep_range((sleep_us >> 2) + 1, sleep_us); \ + } \ + (cond) ? 0 : -ETIMEDOUT; \ +}) + +#define hl_poll_timeout_device_memory(hdev, addr, val, cond, sleep_us, \ + timeout_us) \ +({ \ + ktime_t __timeout; \ + if (hdev->pdev) \ + __timeout = ktime_add_us(ktime_get(), timeout_us); \ + else \ + __timeout = ktime_add_us(ktime_get(),\ + min((u64)(timeout_us * 10), \ + (u64) HL_SIM_MAX_TIMEOUT_US)); \ + might_sleep_if(sleep_us); \ + for (;;) { \ + (val) = readl(addr); \ + if (cond) \ + break; \ + if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \ + (val) = readl(addr); \ + break; \ + } \ + if (sleep_us) \ + usleep_range((sleep_us >> 2) + 1, sleep_us); \ + } \ + (cond) ? 0 : -ETIMEDOUT; \ +}) + +struct hwmon_chip_info; + +/** + * struct hl_device_reset_work - reset workqueue task wrapper. + * @reset_work: reset work to be done. + * @hdev: habanalabs device structure. + */ +struct hl_device_reset_work { + struct work_struct reset_work; + struct hl_device *hdev; +}; + +/** + * struct hl_device_idle_busy_ts - used for calculating device utilization rate. + * @idle_to_busy_ts: timestamp where device changed from idle to busy. + * @busy_to_idle_ts: timestamp where device changed from busy to idle. + */ +struct hl_device_idle_busy_ts { + ktime_t idle_to_busy_ts; + ktime_t busy_to_idle_ts; +}; + + +/** + * struct hl_mmu_priv - used for holding per-device mmu internal information. + * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops. + * @mmu_shadow_hop0: shadow array of hop0 tables. + */ +struct hl_mmu_priv { + struct gen_pool *mmu_pgt_pool; + void *mmu_shadow_hop0; +}; + +/** + * struct hl_mmu_funcs - Device related MMU functions. + * @init: initialize the MMU module. + * @fini: release the MMU module. + * @ctx_init: Initialize a context for using the MMU module. + * @ctx_fini: disable a ctx from using the mmu module. + * @map: maps a virtual address to physical address for a context. + * @unmap: unmap a virtual address of a context. + * @flush: flush all writes from all cores to reach device MMU. + * @swap_out: marks all mapping of the given context as swapped out. + * @swap_in: marks all mapping of the given context as swapped in. + */ +struct hl_mmu_funcs { + int (*init)(struct hl_device *hdev); + void (*fini)(struct hl_device *hdev); + int (*ctx_init)(struct hl_ctx *ctx); + void (*ctx_fini)(struct hl_ctx *ctx); + int (*map)(struct hl_ctx *ctx, + u64 virt_addr, u64 phys_addr, u32 page_size, + bool is_dram_addr); + int (*unmap)(struct hl_ctx *ctx, + u64 virt_addr, bool is_dram_addr); + void (*flush)(struct hl_ctx *ctx); + void (*swap_out)(struct hl_ctx *ctx); + void (*swap_in)(struct hl_ctx *ctx); +}; + +/** + * struct hl_device - habanalabs device structure. 
+ * @pdev: pointer to PCI device, can be NULL in case of simulator device. + * @pcie_bar_phys: array of available PCIe bars physical addresses. + * (required only for PCI address match mode) + * @pcie_bar: array of available PCIe bars virtual addresses. + * @rmmio: configuration area address on SRAM. + * @cdev: related char device. + * @cdev_ctrl: char device for control operations only (INFO IOCTL) + * @dev: related kernel basic device structure. + * @dev_ctrl: related kernel device structure for the control device + * @work_freq: delayed work to lower device frequency if possible. + * @work_heartbeat: delayed work for CPU-CP is-alive check. + * @asic_name: ASIC specific name. + * @asic_type: ASIC specific type. + * @completion_queue: array of hl_cq. + * @cq_wq: work queues of completion queues for executing work in process + * context. + * @eq_wq: work queue of event queue for executing work in process context. + * @kernel_ctx: Kernel driver context structure. + * @kernel_queues: array of hl_hw_queue. + * @hw_queues_mirror_list: CS mirror list for TDR. + * @hw_queues_mirror_lock: protects hw_queues_mirror_list. + * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs. + * @event_queue: event queue for IRQ from CPU-CP. + * @dma_pool: DMA pool for small allocations. + * @cpu_accessible_dma_mem: Host <-> CPU-CP shared memory CPU address. + * @cpu_accessible_dma_address: Host <-> CPU-CP shared memory DMA address. + * @cpu_accessible_dma_pool: Host <-> CPU-CP shared memory pool. + * @asid_bitmap: holds used/available ASIDs. + * @asid_mutex: protects asid_bitmap. + * @send_cpu_message_lock: enforces only one message in Host <-> CPU-CP queue. + * @debug_lock: protects critical section of setting debug mode for device + * @asic_prop: ASIC specific immutable properties. + * @asic_funcs: ASIC specific functions. + * @asic_specific: ASIC specific information to use only from ASIC files. + * @vm: virtual memory manager for MMU. + * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context. + * @hwmon_dev: H/W monitor device. + * @pm_mng_profile: current power management profile. + * @hl_chip_info: ASIC's sensors information. + * @hl_debugfs: device's debugfs manager. + * @cb_pool: list of preallocated CBs. + * @cb_pool_lock: protects the CB pool. + * @internal_cb_pool_virt_addr: internal command buffer pool virtual address. + * @internal_cb_pool_dma_addr: internal command buffer pool dma address. + * @internal_cb_pool: internal command buffer memory pool. + * @internal_cb_va_base: internal cb pool mmu virtual address base + * @fpriv_list: list of file private data structures. Each structure is created + * when a user opens the device + * @fpriv_list_lock: protects the fpriv_list + * @compute_ctx: current compute context executing. + * @idle_busy_ts_arr: array to hold time stamps of transitions from idle to busy + * and vice-versa + * @aggregated_cs_counters: aggregated cs counters among all contexts + * @mmu_priv: device-specific MMU data. + * @mmu_func: device-related MMU functions. + * @dram_used_mem: current DRAM memory consumption. + * @timeout_jiffies: device CS timeout value. + * @max_power: the max power of the device, as configured by the sysadmin. This + * value is saved so in case of hard-reset, the driver will restore + * this value and update the F/W after the re-initialization + * @clock_gating_mask: is clock gating enabled. bitmask that represents the + * different engines. See debugfs-driver-habanalabs for + * details. 
+ * @in_reset: is device in reset flow.
+ * @curr_pll_profile: current PLL profile.
+ * @card_type: Various ASICs have several card types. This indicates the card
+ *             type of the current device.
+ * @cs_active_cnt: number of active command submissions on this device (active
+ *                 means already in H/W queues).
+ * @major: habanalabs kernel driver major.
+ * @high_pll: high PLL profile frequency.
+ * @soft_reset_cnt: number of soft resets since the driver was loaded.
+ * @hard_reset_cnt: number of hard resets since the driver was loaded.
+ * @idle_busy_ts_idx: index of current entry in idle_busy_ts_arr.
+ * @clk_throttling_reason: bitmask representing the current clk throttling
+ *                         reasons.
+ * @id: device minor.
+ * @id_control: minor of the control device.
+ * @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
+ *                    addresses.
+ * @disabled: is device disabled.
+ * @late_init_done: was the late init stage done during initialization.
+ * @hwmon_initialized: were the H/W monitor sensors initialized.
+ * @hard_reset_pending: is there a hard reset work pending.
+ * @heartbeat: is heartbeat sanity check towards CPU-CP enabled.
+ * @reset_on_lockup: true if a reset should be done in case of stuck CS, false
+ *                   otherwise.
+ * @dram_supports_virtual_memory: is MMU enabled towards DRAM.
+ * @dram_default_page_mapping: is DRAM default page mapping enabled.
+ * @pmmu_huge_range: is a different virtual addresses range used for PMMU with
+ *                   huge pages.
+ * @init_done: is the initialization of the device done.
+ * @mmu_enable: is MMU enabled.
+ * @mmu_huge_page_opt: is MMU huge pages optimization enabled.
+ * @device_cpu_disabled: is the device CPU disabled (due to timeouts).
+ * @dma_mask: the dma mask that was set for this device.
+ * @in_debug: is device under debug. This, together with fpriv_list, enforces
+ *            that only a single user is configuring the debug infrastructure.
+ * @power9_64bit_dma_enable: true to enable 64-bit DMA mask support. Relevant
+ *                           only to POWER9 machines.
+ * @cdev_sysfs_created: were char devices and sysfs nodes created.
+ * @stop_on_err: true if engines should stop on error.
+ * @supports_sync_stream: is sync stream supported.
+ * @sync_stream_queue_idx: helper index for sync stream queues initialization.
+ * @supports_coresight: is CoreSight supported.
+ * @supports_soft_reset: is soft reset supported.
+ * @supports_cb_mapping: is mapping a CB to the device's MMU supported.
+ */ +struct hl_device { + struct pci_dev *pdev; + u64 pcie_bar_phys[HL_PCI_NUM_BARS]; + void __iomem *pcie_bar[HL_PCI_NUM_BARS]; + void __iomem *rmmio; + struct cdev cdev; + struct cdev cdev_ctrl; + struct device *dev; + struct device *dev_ctrl; + struct delayed_work work_freq; + struct delayed_work work_heartbeat; + char asic_name[32]; + enum hl_asic_type asic_type; + struct hl_cq *completion_queue; + struct workqueue_struct **cq_wq; + struct workqueue_struct *eq_wq; + struct hl_ctx *kernel_ctx; + struct hl_hw_queue *kernel_queues; + struct list_head hw_queues_mirror_list; + spinlock_t hw_queues_mirror_lock; + struct hl_cb_mgr kernel_cb_mgr; + struct hl_eq event_queue; + struct dma_pool *dma_pool; + void *cpu_accessible_dma_mem; + dma_addr_t cpu_accessible_dma_address; + struct gen_pool *cpu_accessible_dma_pool; + unsigned long *asid_bitmap; + struct mutex asid_mutex; + struct mutex send_cpu_message_lock; + struct mutex debug_lock; + struct asic_fixed_properties asic_prop; + const struct hl_asic_funcs *asic_funcs; + void *asic_specific; + struct hl_vm vm; + struct mutex mmu_cache_lock; + struct device *hwmon_dev; + enum hl_pm_mng_profile pm_mng_profile; + struct hwmon_chip_info *hl_chip_info; + + struct hl_dbg_device_entry hl_debugfs; + + struct list_head cb_pool; + spinlock_t cb_pool_lock; + + void *internal_cb_pool_virt_addr; + dma_addr_t internal_cb_pool_dma_addr; + struct gen_pool *internal_cb_pool; + u64 internal_cb_va_base; + + struct list_head fpriv_list; + struct mutex fpriv_list_lock; + + struct hl_ctx *compute_ctx; + + struct hl_device_idle_busy_ts *idle_busy_ts_arr; + + struct hl_cs_counters aggregated_cs_counters; + + struct hl_mmu_priv mmu_priv; + struct hl_mmu_funcs mmu_func; + + atomic64_t dram_used_mem; + u64 timeout_jiffies; + u64 max_power; + u64 clock_gating_mask; + atomic_t in_reset; + enum hl_pll_frequency curr_pll_profile; + enum cpucp_card_types card_type; + int cs_active_cnt; + u32 major; + u32 high_pll; + u32 soft_reset_cnt; + u32 hard_reset_cnt; + u32 idle_busy_ts_idx; + u32 clk_throttling_reason; + u16 id; + u16 id_control; + u16 cpu_pci_msb_addr; + u8 disabled; + u8 late_init_done; + u8 hwmon_initialized; + u8 hard_reset_pending; + u8 heartbeat; + u8 reset_on_lockup; + u8 dram_supports_virtual_memory; + u8 dram_default_page_mapping; + u8 pmmu_huge_range; + u8 init_done; + u8 device_cpu_disabled; + u8 dma_mask; + u8 in_debug; + u8 power9_64bit_dma_enable; + u8 cdev_sysfs_created; + u8 stop_on_err; + u8 supports_sync_stream; + u8 sync_stream_queue_idx; + u8 supports_coresight; + u8 supports_soft_reset; + u8 supports_cb_mapping; + + /* Parameters for bring-up */ + u8 mmu_enable; + u8 mmu_huge_page_opt; + u8 cpu_enable; + u8 reset_pcilink; + u8 cpu_queues_enable; + u8 fw_loading; + u8 pldm; + u8 axi_drain; + u8 sram_scrambler_enable; + u8 dram_scrambler_enable; + u8 hard_reset_on_fw_events; + u8 bmc_enable; + u8 rl_enable; +}; + + +/* + * IOCTLs + */ + +/** + * typedef hl_ioctl_t - typedef for ioctl function in the driver + * @hpriv: pointer to the FD's private data, which contains state of + * user process + * @data: pointer to the input/output arguments structure of the IOCTL + * + * Return: 0 for success, negative value for error + */ +typedef int hl_ioctl_t(struct hl_fpriv *hpriv, void *data); + +/** + * struct hl_ioctl_desc - describes an IOCTL entry of the driver. + * @cmd: the IOCTL code as created by the kernel macros. + * @func: pointer to the driver's function that should be called for this IOCTL. 
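+ *
+ * Illustrative sketch (not the driver's actual table): an IOCTL dispatch
+ * array could be built from these descriptors roughly as follows, where
+ * HL_IOCTL_CB and HL_IOCTL_CS are IOCTL codes from the uapi header and the
+ * handlers are the ones declared at the end of this file:
+ *
+ *	static const struct hl_ioctl_desc hl_ioctls[] = {
+ *		[_IOC_NR(HL_IOCTL_CB)] = {HL_IOCTL_CB, hl_cb_ioctl},
+ *		[_IOC_NR(HL_IOCTL_CS)] = {HL_IOCTL_CS, hl_cs_ioctl},
+ *	};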
+ */ +struct hl_ioctl_desc { + unsigned int cmd; + hl_ioctl_t *func; +}; + + +/* + * Kernel module functions that can be accessed by entire module + */ + +/** + * hl_mem_area_inside_range() - Checks whether address+size are inside a range. + * @address: The start address of the area we want to validate. + * @size: The size in bytes of the area we want to validate. + * @range_start_address: The start address of the valid range. + * @range_end_address: The end address of the valid range. + * + * Return: true if the area is inside the valid range, false otherwise. + */ +static inline bool hl_mem_area_inside_range(u64 address, u64 size, + u64 range_start_address, u64 range_end_address) +{ + u64 end_address = address + size; + + if ((address >= range_start_address) && + (end_address <= range_end_address) && + (end_address > address)) + return true; + + return false; +} + +/** + * hl_mem_area_crosses_range() - Checks whether address+size crossing a range. + * @address: The start address of the area we want to validate. + * @size: The size in bytes of the area we want to validate. + * @range_start_address: The start address of the valid range. + * @range_end_address: The end address of the valid range. + * + * Return: true if the area overlaps part or all of the valid range, + * false otherwise. + */ +static inline bool hl_mem_area_crosses_range(u64 address, u32 size, + u64 range_start_address, u64 range_end_address) +{ + u64 end_address = address + size; + + if ((address >= range_start_address) && + (address < range_end_address)) + return true; + + if ((end_address >= range_start_address) && + (end_address < range_end_address)) + return true; + + if ((address < range_start_address) && + (end_address >= range_end_address)) + return true; + + return false; +} + +int hl_device_open(struct inode *inode, struct file *filp); +int hl_device_open_ctrl(struct inode *inode, struct file *filp); +bool hl_device_disabled_or_in_reset(struct hl_device *hdev); +enum hl_device_status hl_device_status(struct hl_device *hdev); +int hl_device_set_debug_mode(struct hl_device *hdev, bool enable); +int create_hdev(struct hl_device **dev, struct pci_dev *pdev, + enum hl_asic_type asic_type, int minor); +void destroy_hdev(struct hl_device *hdev); +int hl_hw_queues_create(struct hl_device *hdev); +void hl_hw_queues_destroy(struct hl_device *hdev); +int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id, + u32 cb_size, u64 cb_ptr); +int hl_hw_queue_schedule_cs(struct hl_cs *cs); +u32 hl_hw_queue_add_ptr(u32 ptr, u16 val); +void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id); +void hl_int_hw_queue_update_ci(struct hl_cs *cs); +void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset); + +#define hl_queue_inc_ptr(p) hl_hw_queue_add_ptr(p, 1) +#define hl_pi_2_offset(pi) ((pi) & (HL_QUEUE_LENGTH - 1)) + +int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id); +void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q); +int hl_eq_init(struct hl_device *hdev, struct hl_eq *q); +void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q); +void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q); +void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q); +irqreturn_t hl_irq_handler_cq(int irq, void *arg); +irqreturn_t hl_irq_handler_eq(int irq, void *arg); +u32 hl_cq_inc_ptr(u32 ptr); + +int hl_asid_init(struct hl_device *hdev); +void hl_asid_fini(struct hl_device *hdev); +unsigned long hl_asid_alloc(struct hl_device *hdev); +void hl_asid_free(struct hl_device *hdev, 
unsigned long asid); + +int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv); +void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx); +int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx); +void hl_ctx_do_release(struct kref *ref); +void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx); +int hl_ctx_put(struct hl_ctx *ctx); +struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq); +void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr); +void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr); + +int hl_device_init(struct hl_device *hdev, struct class *hclass); +void hl_device_fini(struct hl_device *hdev); +int hl_device_suspend(struct hl_device *hdev); +int hl_device_resume(struct hl_device *hdev); +int hl_device_reset(struct hl_device *hdev, bool hard_reset, + bool from_hard_reset_thread); +void hl_hpriv_get(struct hl_fpriv *hpriv); +void hl_hpriv_put(struct hl_fpriv *hpriv); +int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq); +uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms); + +int hl_build_hwmon_channel_info(struct hl_device *hdev, + struct cpucp_sensor *sensors_arr); + +int hl_sysfs_init(struct hl_device *hdev); +void hl_sysfs_fini(struct hl_device *hdev); + +int hl_hwmon_init(struct hl_device *hdev); +void hl_hwmon_fini(struct hl_device *hdev); + +int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, + struct hl_ctx *ctx, u32 cb_size, bool internal_cb, + bool map_cb, u64 *handle); +int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle); +int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma); +struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr, + u32 handle); +void hl_cb_put(struct hl_cb *cb); +void hl_cb_mgr_init(struct hl_cb_mgr *mgr); +void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr); +struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size, + bool internal_cb); +int hl_cb_pool_init(struct hl_device *hdev); +int hl_cb_pool_fini(struct hl_device *hdev); +int hl_cb_va_pool_init(struct hl_ctx *ctx); +void hl_cb_va_pool_fini(struct hl_ctx *ctx); + +void hl_cs_rollback_all(struct hl_device *hdev); +struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, + enum hl_queue_type queue_type, bool is_kernel_allocated_cb); +void hl_sob_reset_error(struct kref *ref); +void hl_fence_put(struct hl_fence *fence); +void hl_fence_get(struct hl_fence *fence); + +void goya_set_asic_funcs(struct hl_device *hdev); +void gaudi_set_asic_funcs(struct hl_device *hdev); + +int hl_vm_ctx_init(struct hl_ctx *ctx); +void hl_vm_ctx_fini(struct hl_ctx *ctx); + +int hl_vm_init(struct hl_device *hdev); +void hl_vm_fini(struct hl_device *hdev); + +int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, + struct hl_userptr *userptr); +void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr); +void hl_userptr_delete_list(struct hl_device *hdev, + struct list_head *userptr_list); +bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size, + struct list_head *userptr_list, + struct hl_userptr **userptr); + +int hl_mmu_init(struct hl_device *hdev); +void hl_mmu_fini(struct hl_device *hdev); +int hl_mmu_ctx_init(struct hl_ctx *ctx); +void hl_mmu_ctx_fini(struct hl_ctx *ctx); +int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, + u32 page_size, bool flush_pte); +int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, + bool flush_pte); 
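+
+/*
+ * Illustrative sketch (not part of the driver): a typical map/unmap pair
+ * using the two prototypes above. "virt_addr", "phys_addr" and "page_size"
+ * are hypothetical values; per the hl_ctx documentation, callers are
+ * expected to hold ctx->mmu_lock around PGT changes:
+ *
+ *	int rc;
+ *
+ *	rc = hl_mmu_map(ctx, virt_addr, phys_addr, page_size, true);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	rc = hl_mmu_unmap(ctx, virt_addr, page_size, true);
+ */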
+void hl_mmu_swap_out(struct hl_ctx *ctx); +void hl_mmu_swap_in(struct hl_ctx *ctx); +int hl_mmu_if_set_funcs(struct hl_device *hdev); +void hl_mmu_v1_set_funcs(struct hl_device *hdev); + +int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, + void __iomem *dst); +int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode); +int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, + u16 len, u32 timeout, long *result); +int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type); +int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr, + size_t irq_arr_size); +int hl_fw_test_cpu_queue(struct hl_device *hdev); +void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, + dma_addr_t *dma_handle); +void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, + void *vaddr); +int hl_fw_send_heartbeat(struct hl_device *hdev); +int hl_fw_cpucp_info_get(struct hl_device *hdev); +int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size); +int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, + struct hl_info_pci_counters *counters); +int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, + u64 *total_energy); +int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, + u32 msg_to_cpu_reg, u32 cpu_msg_status_reg, + u32 boot_err0_reg, bool skip_bmc, + u32 cpu_timeout, u32 boot_fit_timeout); +int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg, + u32 boot_err0_reg, u32 timeout); + +int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3], + bool is_wc[3]); +int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data); +int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region, + struct hl_inbound_pci_region *pci_region); +int hl_pci_set_outbound_region(struct hl_device *hdev, + struct hl_outbound_pci_region *pci_region); +int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg, + u32 boot_err0_reg, u32 preboot_ver_timeout); +void hl_pci_fini(struct hl_device *hdev); + +long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr); +void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq); +int hl_get_temperature(struct hl_device *hdev, + int sensor_index, u32 attr, long *value); +int hl_set_temperature(struct hl_device *hdev, + int sensor_index, u32 attr, long value); +int hl_get_voltage(struct hl_device *hdev, + int sensor_index, u32 attr, long *value); +int hl_get_current(struct hl_device *hdev, + int sensor_index, u32 attr, long *value); +int hl_get_fan_speed(struct hl_device *hdev, + int sensor_index, u32 attr, long *value); +int hl_get_pwm_info(struct hl_device *hdev, + int sensor_index, u32 attr, long *value); +void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, + long value); +u64 hl_get_max_power(struct hl_device *hdev); +void hl_set_max_power(struct hl_device *hdev); +int hl_set_voltage(struct hl_device *hdev, + int sensor_index, u32 attr, long value); +int hl_set_current(struct hl_device *hdev, + int sensor_index, u32 attr, long value); + +#ifdef CONFIG_DEBUG_FS + +void hl_debugfs_init(void); +void hl_debugfs_fini(void); +void hl_debugfs_add_device(struct hl_device *hdev); +void hl_debugfs_remove_device(struct hl_device *hdev); +void hl_debugfs_add_file(struct hl_fpriv *hpriv); +void hl_debugfs_remove_file(struct hl_fpriv *hpriv); +void hl_debugfs_add_cb(struct hl_cb *cb); +void hl_debugfs_remove_cb(struct hl_cb *cb); +void hl_debugfs_add_cs(struct hl_cs *cs); 
+void hl_debugfs_remove_cs(struct hl_cs *cs); +void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job); +void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job); +void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr); +void hl_debugfs_remove_userptr(struct hl_device *hdev, + struct hl_userptr *userptr); +void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx); +void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx); + +#else + +static inline void __init hl_debugfs_init(void) +{ +} + +static inline void hl_debugfs_fini(void) +{ +} + +static inline void hl_debugfs_add_device(struct hl_device *hdev) +{ +} + +static inline void hl_debugfs_remove_device(struct hl_device *hdev) +{ +} + +static inline void hl_debugfs_add_file(struct hl_fpriv *hpriv) +{ +} + +static inline void hl_debugfs_remove_file(struct hl_fpriv *hpriv) +{ +} + +static inline void hl_debugfs_add_cb(struct hl_cb *cb) +{ +} + +static inline void hl_debugfs_remove_cb(struct hl_cb *cb) +{ +} + +static inline void hl_debugfs_add_cs(struct hl_cs *cs) +{ +} + +static inline void hl_debugfs_remove_cs(struct hl_cs *cs) +{ +} + +static inline void hl_debugfs_add_job(struct hl_device *hdev, + struct hl_cs_job *job) +{ +} + +static inline void hl_debugfs_remove_job(struct hl_device *hdev, + struct hl_cs_job *job) +{ +} + +static inline void hl_debugfs_add_userptr(struct hl_device *hdev, + struct hl_userptr *userptr) +{ +} + +static inline void hl_debugfs_remove_userptr(struct hl_device *hdev, + struct hl_userptr *userptr) +{ +} + +static inline void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, + struct hl_ctx *ctx) +{ +} + +static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, + struct hl_ctx *ctx) +{ +} + +#endif + +/* IOCTLs */ +long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); +long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg); +int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data); +int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data); +int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data); +int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data); + +#endif /* HABANALABSP_H_ */ diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c new file mode 100644 index 000000000..ded92b3cb --- /dev/null +++ b/drivers/misc/habanalabs/common/habanalabs_drv.c @@ -0,0 +1,605 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#define pr_fmt(fmt) "habanalabs: " fmt + +#include "habanalabs.h" + +#include <linux/pci.h> +#include <linux/aer.h> +#include <linux/module.h> + +#define HL_DRIVER_AUTHOR "HabanaLabs Kernel Driver Team" + +#define HL_DRIVER_DESC "Driver for HabanaLabs's AI Accelerators" + +MODULE_AUTHOR(HL_DRIVER_AUTHOR); +MODULE_DESCRIPTION(HL_DRIVER_DESC); +MODULE_LICENSE("GPL v2"); + +static int hl_major; +static struct class *hl_class; +static DEFINE_IDR(hl_devs_idr); +static DEFINE_MUTEX(hl_devs_idr_lock); + +static int timeout_locked = 5; +static int reset_on_lockup = 1; + +module_param(timeout_locked, int, 0444); +MODULE_PARM_DESC(timeout_locked, + "Device lockup timeout in seconds (0 = disabled, default 5s)"); + +module_param(reset_on_lockup, int, 0444); +MODULE_PARM_DESC(reset_on_lockup, + "Do device reset on lockup (0 = no, 1 = yes, default yes)"); + +#define PCI_VENDOR_ID_HABANALABS 0x1da3 + +#define PCI_IDS_GOYA 0x0001 +#define PCI_IDS_GAUDI 0x1000 + +static const struct pci_device_id ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GOYA), }, + { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI), }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ids); + +/* + * get_asic_type - translate device id to asic type + * + * @device: id of the PCI device + * + * Translate device id to asic type. + * In case of unidentified device, return -1 + */ +static enum hl_asic_type get_asic_type(u16 device) +{ + enum hl_asic_type asic_type; + + switch (device) { + case PCI_IDS_GOYA: + asic_type = ASIC_GOYA; + break; + case PCI_IDS_GAUDI: + asic_type = ASIC_GAUDI; + break; + default: + asic_type = ASIC_INVALID; + break; + } + + return asic_type; +} + +/* + * hl_device_open - open function for habanalabs device + * + * @inode: pointer to inode structure + * @filp: pointer to file structure + * + * Called when process opens an habanalabs device. + */ +int hl_device_open(struct inode *inode, struct file *filp) +{ + struct hl_device *hdev; + struct hl_fpriv *hpriv; + int rc; + + mutex_lock(&hl_devs_idr_lock); + hdev = idr_find(&hl_devs_idr, iminor(inode)); + mutex_unlock(&hl_devs_idr_lock); + + if (!hdev) { + pr_err("Couldn't find device %d:%d\n", + imajor(inode), iminor(inode)); + return -ENXIO; + } + + hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL); + if (!hpriv) + return -ENOMEM; + + hpriv->hdev = hdev; + filp->private_data = hpriv; + hpriv->filp = filp; + mutex_init(&hpriv->restore_phase_mutex); + kref_init(&hpriv->refcount); + nonseekable_open(inode, filp); + + hl_cb_mgr_init(&hpriv->cb_mgr); + hl_ctx_mgr_init(&hpriv->ctx_mgr); + + hpriv->taskpid = find_get_pid(current->pid); + + mutex_lock(&hdev->fpriv_list_lock); + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_err_ratelimited(hdev->dev, + "Can't open %s because it is disabled or in reset\n", + dev_name(hdev->dev)); + rc = -EPERM; + goto out_err; + } + + if (hdev->in_debug) { + dev_err_ratelimited(hdev->dev, + "Can't open %s because it is being debugged by another user\n", + dev_name(hdev->dev)); + rc = -EPERM; + goto out_err; + } + + if (hdev->compute_ctx) { + dev_dbg_ratelimited(hdev->dev, + "Can't open %s because another user is working on it\n", + dev_name(hdev->dev)); + rc = -EBUSY; + goto out_err; + } + + rc = hl_ctx_create(hdev, hpriv); + if (rc) { + dev_err(hdev->dev, "Failed to create context %d\n", rc); + goto out_err; + } + + /* Device is IDLE at this point so it is legal to change PLLs. 
+ * There is no need to check anything because if the PLL is + * already HIGH, the set function will return without doing + * anything + */ + hl_device_set_frequency(hdev, PLL_HIGH); + + list_add(&hpriv->dev_node, &hdev->fpriv_list); + mutex_unlock(&hdev->fpriv_list_lock); + + hl_debugfs_add_file(hpriv); + + return 0; + +out_err: + mutex_unlock(&hdev->fpriv_list_lock); + + hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr); + hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr); + filp->private_data = NULL; + mutex_destroy(&hpriv->restore_phase_mutex); + put_pid(hpriv->taskpid); + + kfree(hpriv); + + return rc; +} + +int hl_device_open_ctrl(struct inode *inode, struct file *filp) +{ + struct hl_device *hdev; + struct hl_fpriv *hpriv; + int rc; + + mutex_lock(&hl_devs_idr_lock); + hdev = idr_find(&hl_devs_idr, iminor(inode)); + mutex_unlock(&hl_devs_idr_lock); + + if (!hdev) { + pr_err("Couldn't find device %d:%d\n", + imajor(inode), iminor(inode)); + return -ENXIO; + } + + hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL); + if (!hpriv) + return -ENOMEM; + + mutex_lock(&hdev->fpriv_list_lock); + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_err_ratelimited(hdev->dev_ctrl, + "Can't open %s because it is disabled or in reset\n", + dev_name(hdev->dev_ctrl)); + rc = -EPERM; + goto out_err; + } + + list_add(&hpriv->dev_node, &hdev->fpriv_list); + mutex_unlock(&hdev->fpriv_list_lock); + + hpriv->hdev = hdev; + filp->private_data = hpriv; + hpriv->filp = filp; + hpriv->is_control = true; + nonseekable_open(inode, filp); + + hpriv->taskpid = find_get_pid(current->pid); + + return 0; + +out_err: + mutex_unlock(&hdev->fpriv_list_lock); + kfree(hpriv); + return rc; +} + +static void set_driver_behavior_per_device(struct hl_device *hdev) +{ + hdev->mmu_enable = 1; + hdev->cpu_enable = 1; + hdev->fw_loading = 1; + hdev->cpu_queues_enable = 1; + hdev->heartbeat = 1; + hdev->clock_gating_mask = ULONG_MAX; + + hdev->reset_pcilink = 0; + hdev->axi_drain = 0; + hdev->sram_scrambler_enable = 1; + hdev->dram_scrambler_enable = 1; + hdev->bmc_enable = 1; + hdev->hard_reset_on_fw_events = 1; +} + +/* + * create_hdev - create habanalabs device instance + * + * @dev: will hold the pointer to the new habanalabs device structure + * @pdev: pointer to the pci device + * @asic_type: in case of simulator device, which device is it + * @minor: in case of simulator device, the minor of the device + * + * Allocate memory for habanalabs device and initialize basic fields + * Identify the ASIC type + * Allocate ID (minor) for the device (only for real devices) + */ +int create_hdev(struct hl_device **dev, struct pci_dev *pdev, + enum hl_asic_type asic_type, int minor) +{ + struct hl_device *hdev; + int rc, main_id, ctrl_id = 0; + + *dev = NULL; + + hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); + if (!hdev) + return -ENOMEM; + + /* First, we must find out which ASIC are we handling. 
This is needed + * to configure the behavior of the driver (kernel parameters) + */ + if (pdev) { + hdev->asic_type = get_asic_type(pdev->device); + if (hdev->asic_type == ASIC_INVALID) { + dev_err(&pdev->dev, "Unsupported ASIC\n"); + rc = -ENODEV; + goto free_hdev; + } + } else { + hdev->asic_type = asic_type; + } + + hdev->major = hl_major; + hdev->reset_on_lockup = reset_on_lockup; + hdev->pldm = 0; + + set_driver_behavior_per_device(hdev); + + if (timeout_locked) + hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000); + else + hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT; + + hdev->disabled = true; + hdev->pdev = pdev; /* can be NULL in case of simulator device */ + + /* Set default DMA mask to 32 bits */ + hdev->dma_mask = 32; + + mutex_lock(&hl_devs_idr_lock); + + /* Always save 2 numbers, 1 for main device and 1 for control. + * They must be consecutive + */ + main_id = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS, + GFP_KERNEL); + + if (main_id >= 0) + ctrl_id = idr_alloc(&hl_devs_idr, hdev, main_id + 1, + main_id + 2, GFP_KERNEL); + + mutex_unlock(&hl_devs_idr_lock); + + if ((main_id < 0) || (ctrl_id < 0)) { + if ((main_id == -ENOSPC) || (ctrl_id == -ENOSPC)) + pr_err("too many devices in the system\n"); + + if (main_id >= 0) { + mutex_lock(&hl_devs_idr_lock); + idr_remove(&hl_devs_idr, main_id); + mutex_unlock(&hl_devs_idr_lock); + } + + rc = -EBUSY; + goto free_hdev; + } + + hdev->id = main_id; + hdev->id_control = ctrl_id; + + *dev = hdev; + + return 0; + +free_hdev: + kfree(hdev); + return rc; +} + +/* + * destroy_hdev - destroy habanalabs device instance + * + * @dev: pointer to the habanalabs device structure + * + */ +void destroy_hdev(struct hl_device *hdev) +{ + /* Remove device from the device list */ + mutex_lock(&hl_devs_idr_lock); + idr_remove(&hl_devs_idr, hdev->id); + idr_remove(&hl_devs_idr, hdev->id_control); + mutex_unlock(&hl_devs_idr_lock); + + kfree(hdev); +} + +static int hl_pmops_suspend(struct device *dev) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + pr_debug("Going to suspend PCI device\n"); + + if (!hdev) { + pr_err("device pointer is NULL in suspend\n"); + return 0; + } + + return hl_device_suspend(hdev); +} + +static int hl_pmops_resume(struct device *dev) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + pr_debug("Going to resume PCI device\n"); + + if (!hdev) { + pr_err("device pointer is NULL in resume\n"); + return 0; + } + + return hl_device_resume(hdev); +} + +/* + * hl_pci_probe - probe PCI habanalabs devices + * + * @pdev: pointer to pci device + * @id: pointer to pci device id structure + * + * Standard PCI probe function for habanalabs device. 
+ * Create a new habanalabs device and initialize it according to the + * device's type + */ +static int hl_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct hl_device *hdev; + int rc; + + dev_info(&pdev->dev, HL_NAME + " device found [%04x:%04x] (rev %x)\n", + (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); + + rc = create_hdev(&hdev, pdev, ASIC_INVALID, -1); + if (rc) + return rc; + + pci_set_drvdata(pdev, hdev); + + pci_enable_pcie_error_reporting(pdev); + + rc = hl_device_init(hdev, hl_class); + if (rc) { + dev_err(&pdev->dev, "Fatal error during habanalabs device init\n"); + rc = -ENODEV; + goto disable_device; + } + + return 0; + +disable_device: + pci_disable_pcie_error_reporting(pdev); + pci_set_drvdata(pdev, NULL); + destroy_hdev(hdev); + + return rc; +} + +/* + * hl_pci_remove - remove PCI habanalabs devices + * + * @pdev: pointer to pci device + * + * Standard PCI remove function for habanalabs device + */ +static void hl_pci_remove(struct pci_dev *pdev) +{ + struct hl_device *hdev; + + hdev = pci_get_drvdata(pdev); + if (!hdev) + return; + + hl_device_fini(hdev); + pci_disable_pcie_error_reporting(pdev); + pci_set_drvdata(pdev, NULL); + destroy_hdev(hdev); +} + +/** + * hl_pci_err_detected - a PCI bus error detected on this device + * + * @pdev: pointer to pci device + * @state: PCI error type + * + * Called by the PCI subsystem whenever a non-correctable + * PCI bus error is detected + */ +static pci_ers_result_t +hl_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct hl_device *hdev = pci_get_drvdata(pdev); + enum pci_ers_result result; + + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + + case pci_channel_io_frozen: + dev_warn(hdev->dev, "frozen state error detected\n"); + result = PCI_ERS_RESULT_NEED_RESET; + break; + + case pci_channel_io_perm_failure: + dev_warn(hdev->dev, "failure state error detected\n"); + result = PCI_ERS_RESULT_DISCONNECT; + break; + + default: + result = PCI_ERS_RESULT_NONE; + } + + hdev->asic_funcs->halt_engines(hdev, true); + + return result; +} + +/** + * hl_pci_err_resume - resume after a PCI slot reset + * + * @pdev: pointer to pci device + * + */ +static void hl_pci_err_resume(struct pci_dev *pdev) +{ + struct hl_device *hdev = pci_get_drvdata(pdev); + + dev_warn(hdev->dev, "Resuming device after PCI slot reset\n"); + hl_device_resume(hdev); +} + +/** + * hl_pci_err_slot_reset - a PCI slot reset has just happened + * + * @pdev: pointer to pci device + * + * Determine if the driver can recover from the PCI slot reset + */ +static pci_ers_result_t hl_pci_err_slot_reset(struct pci_dev *pdev) +{ + return PCI_ERS_RESULT_RECOVERED; +} + +static const struct dev_pm_ops hl_pm_ops = { + .suspend = hl_pmops_suspend, + .resume = hl_pmops_resume, +}; + +static const struct pci_error_handlers hl_pci_err_handler = { + .error_detected = hl_pci_err_detected, + .slot_reset = hl_pci_err_slot_reset, + .resume = hl_pci_err_resume, +}; + +static struct pci_driver hl_pci_driver = { + .name = HL_NAME, + .id_table = ids, + .probe = hl_pci_probe, + .remove = hl_pci_remove, + .shutdown = hl_pci_remove, + .driver.pm = &hl_pm_ops, + .err_handler = &hl_pci_err_handler, +}; + +/* + * hl_init - Initialize the habanalabs kernel driver + */ +static int __init hl_init(void) +{ + int rc; + dev_t dev; + + pr_info("loading driver\n"); + + rc = alloc_chrdev_region(&dev, 0, HL_MAX_MINORS, HL_NAME); + if (rc < 0) { + pr_err("unable to get major\n"); + return rc; + } + + hl_major = 
MAJOR(dev); + + hl_class = class_create(THIS_MODULE, HL_NAME); + if (IS_ERR(hl_class)) { + pr_err("failed to allocate class\n"); + rc = PTR_ERR(hl_class); + goto remove_major; + } + + hl_debugfs_init(); + + rc = pci_register_driver(&hl_pci_driver); + if (rc) { + pr_err("failed to register pci device\n"); + goto remove_debugfs; + } + + pr_debug("driver loaded\n"); + + return 0; + +remove_debugfs: + hl_debugfs_fini(); + class_destroy(hl_class); +remove_major: + unregister_chrdev_region(MKDEV(hl_major, 0), HL_MAX_MINORS); + return rc; +} + +/* + * hl_exit - Release all resources of the habanalabs kernel driver + */ +static void __exit hl_exit(void) +{ + pci_unregister_driver(&hl_pci_driver); + + /* + * Removing debugfs must be after all devices or simulator devices + * have been removed because otherwise we get a bug in the + * debugfs module for referencing NULL objects + */ + hl_debugfs_fini(); + + class_destroy(hl_class); + unregister_chrdev_region(MKDEV(hl_major, 0), HL_MAX_MINORS); + + idr_destroy(&hl_devs_idr); + + pr_debug("driver removed\n"); +} + +module_init(hl_init); +module_exit(hl_exit); diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c new file mode 100644 index 000000000..354011489 --- /dev/null +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -0,0 +1,641 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include <uapi/misc/habanalabs.h> +#include "habanalabs.h" + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/uaccess.h> +#include <linux/slab.h> + +static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = { + [HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr), + [HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf), + [HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm), + [HL_DEBUG_OP_FUNNEL] = 0, + [HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon), + [HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu), + [HL_DEBUG_OP_TIMESTAMP] = 0 + +}; + +static int device_status_info(struct hl_device *hdev, struct hl_info_args *args) +{ + struct hl_info_device_status dev_stat = {0}; + u32 size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!size) || (!out)) + return -EINVAL; + + dev_stat.status = hl_device_status(hdev); + + return copy_to_user(out, &dev_stat, + min((size_t)size, sizeof(dev_stat))) ? 
-EFAULT : 0; +} + +static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args) +{ + struct hl_info_hw_ip_info hw_ip = {0}; + u32 size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 sram_kmd_size, dram_kmd_size; + + if ((!size) || (!out)) + return -EINVAL; + + sram_kmd_size = (prop->sram_user_base_address - + prop->sram_base_address); + dram_kmd_size = (prop->dram_user_base_address - + prop->dram_base_address); + + hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev); + hw_ip.sram_base_address = prop->sram_user_base_address; + hw_ip.dram_base_address = prop->dram_user_base_address; + hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask; + hw_ip.sram_size = prop->sram_size - sram_kmd_size; + hw_ip.dram_size = prop->dram_size - dram_kmd_size; + if (hw_ip.dram_size > PAGE_SIZE) + hw_ip.dram_enabled = 1; + hw_ip.num_of_events = prop->num_of_events; + + memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version, + min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN)); + + memcpy(hw_ip.card_name, prop->cpucp_info.card_name, + min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN)); + + hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version); + hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location); + + hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr; + hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf; + hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od; + hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor; + + return copy_to_user(out, &hw_ip, + min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0; +} + +static int hw_events_info(struct hl_device *hdev, bool aggregate, + struct hl_info_args *args) +{ + u32 size, max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + void *arr; + + if ((!max_size) || (!out)) + return -EINVAL; + + arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size); + + return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0; +} + +static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_info_dram_usage dram_usage = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 dram_kmd_size; + + if ((!max_size) || (!out)) + return -EINVAL; + + dram_kmd_size = (prop->dram_user_base_address - + prop->dram_base_address); + dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) - + atomic64_read(&hdev->dram_used_mem); + if (hpriv->ctx) + dram_usage.ctx_dram_mem = + atomic64_read(&hpriv->ctx->dram_phys_mem); + + return copy_to_user(out, &dram_usage, + min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0; +} + +static int hw_idle(struct hl_device *hdev, struct hl_info_args *args) +{ + struct hl_info_hw_idle hw_idle = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev, + &hw_idle.busy_engines_mask_ext, NULL); + hw_idle.busy_engines_mask = + lower_32_bits(hw_idle.busy_engines_mask_ext); + + return copy_to_user(out, &hw_idle, + min((size_t) max_size, sizeof(hw_idle))) ? 
-EFAULT : 0; +} + +static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args) +{ + struct hl_debug_params *params; + void *input = NULL, *output = NULL; + int rc; + + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) + return -ENOMEM; + + params->reg_idx = args->reg_idx; + params->enable = args->enable; + params->op = args->op; + + if (args->input_ptr && args->input_size) { + input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL); + if (!input) { + rc = -ENOMEM; + goto out; + } + + if (copy_from_user(input, u64_to_user_ptr(args->input_ptr), + args->input_size)) { + rc = -EFAULT; + dev_err(hdev->dev, "failed to copy input debug data\n"); + goto out; + } + + params->input = input; + } + + if (args->output_ptr && args->output_size) { + output = kzalloc(args->output_size, GFP_KERNEL); + if (!output) { + rc = -ENOMEM; + goto out; + } + + params->output = output; + params->output_size = args->output_size; + } + + rc = hdev->asic_funcs->debug_coresight(hdev, params); + if (rc) { + dev_err(hdev->dev, + "debug coresight operation failed %d\n", rc); + goto out; + } + + if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr, + output, args->output_size)) { + dev_err(hdev->dev, "copy to user failed in debug ioctl\n"); + rc = -EFAULT; + goto out; + } + + +out: + kfree(params); + kfree(output); + kfree(input); + + return rc; +} + +static int device_utilization(struct hl_device *hdev, struct hl_info_args *args) +{ + struct hl_info_device_utilization device_util = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + if ((args->period_ms < 100) || (args->period_ms > 1000) || + (args->period_ms % 100)) { + dev_err(hdev->dev, + "period %u must be between 100 - 1000 and must be divisible by 100\n", + args->period_ms); + return -EINVAL; + } + + device_util.utilization = hl_device_utilization(hdev, args->period_ms); + + return copy_to_user(out, &device_util, + min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0; +} + +static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args) +{ + struct hl_info_clk_rate clk_rate = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + int rc; + + if ((!max_size) || (!out)) + return -EINVAL; + + rc = hdev->asic_funcs->get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz, + &clk_rate.max_clk_rate_mhz); + if (rc) + return rc; + + return copy_to_user(out, &clk_rate, + min((size_t) max_size, sizeof(clk_rate))) ? -EFAULT : 0; +} + +static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args) +{ + struct hl_info_reset_count reset_count = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + reset_count.hard_reset_cnt = hdev->hard_reset_cnt; + reset_count.soft_reset_cnt = hdev->soft_reset_cnt; + + return copy_to_user(out, &reset_count, + min((size_t) max_size, sizeof(reset_count))) ? 
-EFAULT : 0; +} + +static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args) +{ + struct hl_info_time_sync time_sync = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + time_sync.device_time = hdev->asic_funcs->get_device_time(hdev); + time_sync.host_time = ktime_get_raw_ns(); + + return copy_to_user(out, &time_sync, + min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0; +} + +static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_info_pci_counters pci_counters = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + int rc; + + if ((!max_size) || (!out)) + return -EINVAL; + + rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters); + if (rc) + return rc; + + return copy_to_user(out, &pci_counters, + min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0; +} + +static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_info_clk_throttle clk_throttle = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + clk_throttle.clk_throttling_reason = hdev->clk_throttling_reason; + + return copy_to_user(out, &clk_throttle, + min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0; +} + +static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_info_cs_counters cs_counters = { {0} }; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + memcpy(&cs_counters.cs_counters, &hdev->aggregated_cs_counters, + sizeof(struct hl_cs_counters)); + + if (hpriv->ctx) + memcpy(&cs_counters.ctx_cs_counters, &hpriv->ctx->cs_counters, + sizeof(struct hl_cs_counters)); + + return copy_to_user(out, &cs_counters, + min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0; +} + +static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + struct hl_device *hdev = hpriv->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_info_sync_manager sm_info = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + if (args->dcore_id >= HL_MAX_DCORES) + return -EINVAL; + + sm_info.first_available_sync_object = + prop->first_available_user_sob[args->dcore_id]; + sm_info.first_available_monitor = + prop->first_available_user_mon[args->dcore_id]; + + + return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size, + sizeof(sm_info))) ? -EFAULT : 0; +} + +static int total_energy_consumption_info(struct hl_fpriv *hpriv, + struct hl_info_args *args) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_info_energy total_energy = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + int rc; + + if ((!max_size) || (!out)) + return -EINVAL; + + rc = hl_fw_cpucp_total_energy_get(hdev, + &total_energy.total_energy_consumption); + if (rc) + return rc; + + return copy_to_user(out, &total_energy, + min((size_t) max_size, sizeof(total_energy))) ? 
-EFAULT : 0; +} + +static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, + struct device *dev) +{ + struct hl_info_args *args = data; + struct hl_device *hdev = hpriv->hdev; + int rc; + + /* + * Information is returned for the following opcodes even if the device + * is disabled or in reset. + */ + switch (args->op) { + case HL_INFO_HW_IP_INFO: + return hw_ip_info(hdev, args); + + case HL_INFO_DEVICE_STATUS: + return device_status_info(hdev, args); + + case HL_INFO_RESET_COUNT: + return get_reset_count(hdev, args); + + default: + break; + } + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_warn_ratelimited(dev, + "Device is %s. Can't execute INFO IOCTL\n", + atomic_read(&hdev->in_reset) ? "in_reset" : "disabled"); + return -EBUSY; + } + + switch (args->op) { + case HL_INFO_HW_EVENTS: + rc = hw_events_info(hdev, false, args); + break; + + case HL_INFO_DRAM_USAGE: + rc = dram_usage_info(hpriv, args); + break; + + case HL_INFO_HW_IDLE: + rc = hw_idle(hdev, args); + break; + + case HL_INFO_DEVICE_UTILIZATION: + rc = device_utilization(hdev, args); + break; + + case HL_INFO_HW_EVENTS_AGGREGATE: + rc = hw_events_info(hdev, true, args); + break; + + case HL_INFO_CLK_RATE: + rc = get_clk_rate(hdev, args); + break; + + case HL_INFO_TIME_SYNC: + return time_sync_info(hdev, args); + + case HL_INFO_CS_COUNTERS: + return cs_counters_info(hpriv, args); + + case HL_INFO_PCI_COUNTERS: + return pci_counters_info(hpriv, args); + + case HL_INFO_CLK_THROTTLE_REASON: + return clk_throttle_info(hpriv, args); + + case HL_INFO_SYNC_MANAGER: + return sync_manager_info(hpriv, args); + + case HL_INFO_TOTAL_ENERGY: + return total_energy_consumption_info(hpriv, args); + + default: + dev_err(dev, "Invalid request %d\n", args->op); + rc = -ENOTTY; + break; + } + + return rc; +} + +static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data) +{ + return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev); +} + +static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data) +{ + return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl); +} + +static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data) +{ + struct hl_debug_args *args = data; + struct hl_device *hdev = hpriv->hdev; + int rc = 0; + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_warn_ratelimited(hdev->dev, + "Device is %s. Can't execute DEBUG IOCTL\n", + atomic_read(&hdev->in_reset) ? 
"in_reset" : "disabled"); + return -EBUSY; + } + + switch (args->op) { + case HL_DEBUG_OP_ETR: + case HL_DEBUG_OP_ETF: + case HL_DEBUG_OP_STM: + case HL_DEBUG_OP_FUNNEL: + case HL_DEBUG_OP_BMON: + case HL_DEBUG_OP_SPMU: + case HL_DEBUG_OP_TIMESTAMP: + if (!hdev->in_debug) { + dev_err_ratelimited(hdev->dev, + "Rejecting debug configuration request because device not in debug mode\n"); + return -EFAULT; + } + args->input_size = + min(args->input_size, hl_debug_struct_size[args->op]); + rc = debug_coresight(hdev, args); + break; + case HL_DEBUG_OP_SET_MODE: + rc = hl_device_set_debug_mode(hdev, (bool) args->enable); + break; + default: + dev_err(hdev->dev, "Invalid request %d\n", args->op); + rc = -ENOTTY; + break; + } + + return rc; +} + +#define HL_IOCTL_DEF(ioctl, _func) \ + [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func} + +static const struct hl_ioctl_desc hl_ioctls[] = { + HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl), + HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl), + HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl), + HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl), + HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl), + HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl) +}; + +static const struct hl_ioctl_desc hl_ioctls_control[] = { + HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control) +}; + +static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg, + const struct hl_ioctl_desc *ioctl, struct device *dev) +{ + struct hl_fpriv *hpriv = filep->private_data; + struct hl_device *hdev = hpriv->hdev; + unsigned int nr = _IOC_NR(cmd); + char stack_kdata[128] = {0}; + char *kdata = NULL; + unsigned int usize, asize; + hl_ioctl_t *func; + u32 hl_size; + int retcode; + + if (hdev->hard_reset_pending) { + dev_crit_ratelimited(hdev->dev_ctrl, + "Device HARD reset pending! 
Please close FD\n");
+ return -ENODEV;
+ }
+
+ /* Do not trust userspace, use our own definition */
+ func = ioctl->func;
+
+ if (unlikely(!func)) {
+ dev_dbg(dev, "no function\n");
+ retcode = -ENOTTY;
+ goto out_err;
+ }
+
+ hl_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
+ if (hl_size > asize)
+ asize = hl_size;
+
+ cmd = ioctl->cmd;
+
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (asize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kzalloc(asize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto out_err;
+ }
+ }
+ }
+
+ if (cmd & IOC_IN) {
+ if (copy_from_user(kdata, (void __user *)arg, usize)) {
+ retcode = -EFAULT;
+ goto out_err;
+ }
+ } else if (cmd & IOC_OUT) {
+ memset(kdata, 0, usize);
+ }
+
+ retcode = func(hpriv, kdata);
+
+ if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
+ retcode = -EFAULT;
+
+out_err:
+ if (retcode)
+ dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
+ task_pid_nr(current), cmd, nr);
+
+ if (kdata != stack_kdata)
+ kfree(kdata);
+
+ return retcode;
+}
+
+long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct hl_fpriv *hpriv = filep->private_data;
+ struct hl_device *hdev = hpriv->hdev;
+ const struct hl_ioctl_desc *ioctl = NULL;
+ unsigned int nr = _IOC_NR(cmd);
+
+ if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
+ ioctl = &hl_ioctls[nr];
+ } else {
+ dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
+ task_pid_nr(current), nr);
+ return -ENOTTY;
+ }
+
+ return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
+}
+
+long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct hl_fpriv *hpriv = filep->private_data;
+ struct hl_device *hdev = hpriv->hdev;
+ const struct hl_ioctl_desc *ioctl = NULL;
+ unsigned int nr = _IOC_NR(cmd);
+
+ if (nr == _IOC_NR(HL_IOCTL_INFO)) {
+ ioctl = &hl_ioctls_control[nr];
+ } else {
+ dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
+ task_pid_nr(current), nr);
+ return -ENOTTY;
+ }
+
+ return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
+}
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
new file mode 100644
index 000000000..250cf9cef
--- /dev/null
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -0,0 +1,917 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/slab.h>
+
+/*
+ * hl_hw_queue_add_ptr - add to pi or ci and wrap around if needed
+ *
+ * @ptr: the current pi/ci value
+ * @val: the amount to add
+ *
+ * Add val to ptr. It can go until twice the queue length.
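+ *
+ * Letting pi/ci run up to twice the queue length, instead of masking
+ * with (HL_QUEUE_LENGTH - 1), is what allows queue_free_slots() below
+ * to tell a completely full queue apart from an empty one.
+ *
+ * Worked example (the queue length here is assumed for illustration,
+ * not taken from this driver's configuration): if HL_QUEUE_LENGTH were
+ * 256, the mask would be (256 << 1) - 1 = 511, so
+ * hl_hw_queue_add_ptr(510, 4) would return (510 + 4) & 511 = 2.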
+ */ +inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val) +{ + ptr += val; + ptr &= ((HL_QUEUE_LENGTH << 1) - 1); + return ptr; +} +static inline int queue_ci_get(atomic_t *ci, u32 queue_len) +{ + return atomic_read(ci) & ((queue_len << 1) - 1); +} + +static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len) +{ + int delta = (q->pi - queue_ci_get(&q->ci, queue_len)); + + if (delta >= 0) + return (queue_len - delta); + else + return (abs(delta) - queue_len); +} + +void hl_int_hw_queue_update_ci(struct hl_cs *cs) +{ + struct hl_device *hdev = cs->ctx->hdev; + struct hl_hw_queue *q; + int i; + + if (hdev->disabled) + return; + + q = &hdev->kernel_queues[0]; + for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) { + if (q->queue_type == QUEUE_TYPE_INT) + atomic_add(cs->jobs_in_queue_cnt[i], &q->ci); + } +} + +/* + * ext_and_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a + * H/W queue. + * @hdev: pointer to habanalabs device structure + * @q: pointer to habanalabs queue structure + * @ctl: BD's control word + * @len: BD's length + * @ptr: BD's pointer + * + * This function assumes there is enough space on the queue to submit a new + * BD to it. It initializes the next BD and calls the device specific + * function to set the pi (and doorbell) + * + * This function must be called when the scheduler mutex is taken + * + */ +static void ext_and_hw_queue_submit_bd(struct hl_device *hdev, + struct hl_hw_queue *q, u32 ctl, u32 len, u64 ptr) +{ + struct hl_bd *bd; + + bd = q->kernel_address; + bd += hl_pi_2_offset(q->pi); + bd->ctl = cpu_to_le32(ctl); + bd->len = cpu_to_le32(len); + bd->ptr = cpu_to_le64(ptr); + + q->pi = hl_queue_inc_ptr(q->pi); + hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); +} + +/* + * ext_queue_sanity_checks - perform some sanity checks on external queue + * + * @hdev : pointer to hl_device structure + * @q : pointer to hl_hw_queue structure + * @num_of_entries : how many entries to check for space + * @reserve_cq_entry : whether to reserve an entry in the cq + * + * H/W queues spinlock should be taken before calling this function + * + * Perform the following: + * - Make sure we have enough space in the h/w queue + * - Make sure we have enough space in the completion queue + * - Reserve space in the completion queue (needs to be reversed if there + * is a failure down the road before the actual submission of work). 
Only + * do this action if reserve_cq_entry is true + * + */ +static int ext_queue_sanity_checks(struct hl_device *hdev, + struct hl_hw_queue *q, int num_of_entries, + bool reserve_cq_entry) +{ + atomic_t *free_slots = + &hdev->completion_queue[q->cq_id].free_slots_cnt; + int free_slots_cnt; + + /* Check we have enough space in the queue */ + free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH); + + if (free_slots_cnt < num_of_entries) { + dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n", + q->hw_queue_id, num_of_entries); + return -EAGAIN; + } + + if (reserve_cq_entry) { + /* + * Check we have enough space in the completion queue + * Add -1 to counter (decrement) unless counter was already 0 + * In that case, CQ is full so we can't submit a new CB because + * we won't get ack on its completion + * atomic_add_unless will return 0 if counter was already 0 + */ + if (atomic_add_negative(num_of_entries * -1, free_slots)) { + dev_dbg(hdev->dev, "No space for %d on CQ %d\n", + num_of_entries, q->hw_queue_id); + atomic_add(num_of_entries, free_slots); + return -EAGAIN; + } + } + + return 0; +} + +/* + * int_queue_sanity_checks - perform some sanity checks on internal queue + * + * @hdev : pointer to hl_device structure + * @q : pointer to hl_hw_queue structure + * @num_of_entries : how many entries to check for space + * + * H/W queues spinlock should be taken before calling this function + * + * Perform the following: + * - Make sure we have enough space in the h/w queue + * + */ +static int int_queue_sanity_checks(struct hl_device *hdev, + struct hl_hw_queue *q, + int num_of_entries) +{ + int free_slots_cnt; + + if (num_of_entries > q->int_queue_len) { + dev_err(hdev->dev, + "Cannot populate queue %u with %u jobs\n", + q->hw_queue_id, num_of_entries); + return -ENOMEM; + } + + /* Check we have enough space in the queue */ + free_slots_cnt = queue_free_slots(q, q->int_queue_len); + + if (free_slots_cnt < num_of_entries) { + dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n", + q->hw_queue_id, num_of_entries); + return -EAGAIN; + } + + return 0; +} + +/* + * hw_queue_sanity_checks() - Make sure we have enough space in the h/w queue + * @hdev: Pointer to hl_device structure. + * @q: Pointer to hl_hw_queue structure. + * @num_of_entries: How many entries to check for space. + * + * Notice: We do not reserve queue entries so this function mustn't be called + * more than once per CS for the same queue + * + */ +static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q, + int num_of_entries) +{ + int free_slots_cnt; + + /* Check we have enough space in the queue */ + free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH); + + if (free_slots_cnt < num_of_entries) { + dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n", + q->hw_queue_id, num_of_entries); + return -EAGAIN; + } + + return 0; +} + +/* + * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion + * + * @hdev: pointer to hl_device structure + * @hw_queue_id: Queue's type + * @cb_size: size of CB + * @cb_ptr: pointer to CB location + * + * This function sends a single CB, that must NOT generate a completion entry + * + */ +int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id, + u32 cb_size, u64 cb_ptr) +{ + struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id]; + int rc = 0; + + /* + * The CPU queue is a synchronous queue with an effective depth of + * a single entry (although it is allocated with room for multiple + * entries). 
Therefore, there is a different lock, called + * send_cpu_message_lock, that serializes accesses to the CPU queue. + * As a result, we don't need to lock the access to the entire H/W + * queues module when submitting a JOB to the CPU queue + */ + if (q->queue_type != QUEUE_TYPE_CPU) + hdev->asic_funcs->hw_queues_lock(hdev); + + if (hdev->disabled) { + rc = -EPERM; + goto out; + } + + /* + * hl_hw_queue_send_cb_no_cmpl() is called for queues of a H/W queue + * type only on init phase, when the queues are empty and being tested, + * so there is no need for sanity checks. + */ + if (q->queue_type != QUEUE_TYPE_HW) { + rc = ext_queue_sanity_checks(hdev, q, 1, false); + if (rc) + goto out; + } + + ext_and_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr); + +out: + if (q->queue_type != QUEUE_TYPE_CPU) + hdev->asic_funcs->hw_queues_unlock(hdev); + + return rc; +} + +/* + * ext_queue_schedule_job - submit a JOB to an external queue + * + * @job: pointer to the job that needs to be submitted to the queue + * + * This function must be called when the scheduler mutex is taken + * + */ +static void ext_queue_schedule_job(struct hl_cs_job *job) +{ + struct hl_device *hdev = job->cs->ctx->hdev; + struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; + struct hl_cq_entry cq_pkt; + struct hl_cq *cq; + u64 cq_addr; + struct hl_cb *cb; + u32 ctl; + u32 len; + u64 ptr; + + /* + * Update the JOB ID inside the BD CTL so the device would know what + * to write in the completion queue + */ + ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK); + + cb = job->patched_cb; + len = job->job_cb_size; + ptr = cb->bus_address; + + cq_pkt.data = cpu_to_le32( + ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT) + & CQ_ENTRY_SHADOW_INDEX_MASK) | + FIELD_PREP(CQ_ENTRY_SHADOW_INDEX_VALID_MASK, 1) | + FIELD_PREP(CQ_ENTRY_READY_MASK, 1)); + + /* + * No need to protect pi_offset because scheduling to the + * H/W queues is done under the scheduler mutex + * + * No need to check if CQ is full because it was already + * checked in ext_queue_sanity_checks + */ + cq = &hdev->completion_queue[q->cq_id]; + cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry); + + hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len, + cq_addr, + le32_to_cpu(cq_pkt.data), + q->msi_vec, + job->contains_dma_pkt); + + q->shadow_queue[hl_pi_2_offset(q->pi)] = job; + + cq->pi = hl_cq_inc_ptr(cq->pi); + + ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr); +} + +/* + * int_queue_schedule_job - submit a JOB to an internal queue + * + * @job: pointer to the job that needs to be submitted to the queue + * + * This function must be called when the scheduler mutex is taken + * + */ +static void int_queue_schedule_job(struct hl_cs_job *job) +{ + struct hl_device *hdev = job->cs->ctx->hdev; + struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; + struct hl_bd bd; + __le64 *pi; + + bd.ctl = 0; + bd.len = cpu_to_le32(job->job_cb_size); + bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb); + + pi = q->kernel_address + (q->pi & (q->int_queue_len - 1)) * sizeof(bd); + + q->pi++; + q->pi &= ((q->int_queue_len << 1) - 1); + + hdev->asic_funcs->pqe_write(hdev, pi, &bd); + + hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); +} + +/* + * hw_queue_schedule_job - submit a JOB to a H/W queue + * + * @job: pointer to the job that needs to be submitted to the queue + * + * This function must be called when the scheduler mutex is taken + * + */ +static void hw_queue_schedule_job(struct hl_cs_job *job) +{ + 
struct hl_device *hdev = job->cs->ctx->hdev; + struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; + u64 ptr; + u32 offset, ctl, len; + + /* + * Upon PQE completion, COMP_DATA is used as the write data to the + * completion queue (QMAN HBW message), and COMP_OFFSET is used as the + * write address offset in the SM block (QMAN LBW message). + * The write address offset is calculated as "COMP_OFFSET << 2". + */ + offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1); + ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) | + ((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK); + + len = job->job_cb_size; + + /* + * A patched CB is created only if a user CB was allocated by driver and + * MMU is disabled. If MMU is enabled, the user CB should be used + * instead. If the user CB wasn't allocated by driver, assume that it + * holds an address. + */ + if (job->patched_cb) + ptr = job->patched_cb->bus_address; + else if (job->is_kernel_allocated_cb) + ptr = job->user_cb->bus_address; + else + ptr = (u64) (uintptr_t) job->user_cb; + + ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr); +} + +/* + * init_signal_wait_cs - initialize a signal/wait CS + * @cs: pointer to the signal/wait CS + * + * H/W queues spinlock should be taken before calling this function + */ +static void init_signal_wait_cs(struct hl_cs *cs) +{ + struct hl_ctx *ctx = cs->ctx; + struct hl_device *hdev = ctx->hdev; + struct hl_hw_queue *hw_queue; + struct hl_cs_compl *cs_cmpl = + container_of(cs->fence, struct hl_cs_compl, base_fence); + + struct hl_hw_sob *hw_sob; + struct hl_cs_job *job; + u32 q_idx; + + /* There is only one job in a signal/wait CS */ + job = list_first_entry(&cs->job_list, struct hl_cs_job, + cs_node); + q_idx = job->hw_queue_id; + hw_queue = &hdev->kernel_queues[q_idx]; + + if (cs->type & CS_TYPE_SIGNAL) { + hw_sob = &hw_queue->hw_sob[hw_queue->curr_sob_offset]; + + cs_cmpl->hw_sob = hw_sob; + cs_cmpl->sob_val = hw_queue->next_sob_val++; + + dev_dbg(hdev->dev, + "generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n", + cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx); + + hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb, + cs_cmpl->hw_sob->sob_id); + + kref_get(&hw_sob->kref); + + /* check for wraparound */ + if (hw_queue->next_sob_val == HL_MAX_SOB_VAL) { + /* + * Decrement as we reached the max value. + * The release function won't be called here as we've + * just incremented the refcount. 
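+ * The kref_get() above guarantees the refcount stays positive across
+ * this put, so hl_sob_reset_error() cannot be invoked here.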
+ */
+ kref_put(&hw_sob->kref, hl_sob_reset_error);
+ hw_queue->next_sob_val = 1;
+ /* only two SOBs are currently in use */
+ hw_queue->curr_sob_offset =
+ (hw_queue->curr_sob_offset + 1) %
+ HL_RSVD_SOBS_IN_USE;
+
+ dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
+ hw_queue->curr_sob_offset, q_idx);
+ }
+ } else if (cs->type & CS_TYPE_WAIT) {
+ struct hl_cs_compl *signal_cs_cmpl;
+
+ signal_cs_cmpl = container_of(cs->signal_fence,
+ struct hl_cs_compl,
+ base_fence);
+
+ /* copy the SOB id and value of the signal CS */
+ cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
+ cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+
+ dev_dbg(hdev->dev,
+ "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n",
+ cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
+ hw_queue->base_mon_id, q_idx);
+
+ hdev->asic_funcs->gen_wait_cb(hdev, job->patched_cb,
+ cs_cmpl->hw_sob->sob_id,
+ cs_cmpl->sob_val,
+ hw_queue->base_mon_id,
+ q_idx);
+
+ kref_get(&cs_cmpl->hw_sob->kref);
+ /*
+ * Must put the signal fence after the SOB refcnt increment so
+ * the SOB refcnt won't turn 0 and reset the SOB before the
+ * wait CS is submitted.
+ */
+ mb();
+ hl_fence_put(cs->signal_fence);
+ cs->signal_fence = NULL;
+ }
+}
+
+/*
+ * hl_hw_queue_schedule_cs - schedule a command submission
+ * @cs: pointer to the CS
+ */
+int hl_hw_queue_schedule_cs(struct hl_cs *cs)
+{
+ struct hl_ctx *ctx = cs->ctx;
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_cs_job *job, *tmp;
+ struct hl_hw_queue *q;
+ u32 max_queues;
+ int rc = 0, i, cq_cnt;
+
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ ctx->cs_counters.device_in_reset_drop_cnt++;
+ dev_err(hdev->dev,
+ "device is disabled or in reset, CS rejected!\n");
+ rc = -EPERM;
+ goto out;
+ }
+
+ max_queues = hdev->asic_prop.max_queues;
+
+ q = &hdev->kernel_queues[0];
+ for (i = 0, cq_cnt = 0 ; i < max_queues ; i++, q++) {
+ if (cs->jobs_in_queue_cnt[i]) {
+ switch (q->queue_type) {
+ case QUEUE_TYPE_EXT:
+ rc = ext_queue_sanity_checks(hdev, q,
+ cs->jobs_in_queue_cnt[i], true);
+ break;
+ case QUEUE_TYPE_INT:
+ rc = int_queue_sanity_checks(hdev, q,
+ cs->jobs_in_queue_cnt[i]);
+ break;
+ case QUEUE_TYPE_HW:
+ rc = hw_queue_sanity_checks(hdev, q,
+ cs->jobs_in_queue_cnt[i]);
+ break;
+ default:
+ dev_err(hdev->dev, "Queue type %d is invalid\n",
+ q->queue_type);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc) {
+ ctx->cs_counters.queue_full_drop_cnt++;
+ goto unroll_cq_resv;
+ }
+
+ if (q->queue_type == QUEUE_TYPE_EXT)
+ cq_cnt++;
+ }
+ }
+
+ if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT))
+ init_signal_wait_cs(cs);
+
+ spin_lock(&hdev->hw_queues_mirror_lock);
+ list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);
+
+ /* Queue TDR if the CS is the first entry and if timeout is wanted */
+ if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
+ (list_first_entry(&hdev->hw_queues_mirror_list,
+ struct hl_cs, mirror_node) == cs)) {
+ cs->tdr_active = true;
+ schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
+ spin_unlock(&hdev->hw_queues_mirror_lock);
+ } else {
+ spin_unlock(&hdev->hw_queues_mirror_lock);
+ }
+
+ if (!hdev->cs_active_cnt++) {
+ struct hl_device_idle_busy_ts *ts;
+
+ ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx];
+ ts->busy_to_idle_ts = ktime_set(0, 0);
+ ts->idle_to_busy_ts = ktime_get();
+ }
+
+ list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
+ switch (job->queue_type) {
+ case QUEUE_TYPE_EXT:
+ ext_queue_schedule_job(job);
+ break;
+ case QUEUE_TYPE_INT:
int_queue_schedule_job(job); + break; + case QUEUE_TYPE_HW: + hw_queue_schedule_job(job); + break; + default: + break; + } + + cs->submitted = true; + + goto out; + +unroll_cq_resv: + q = &hdev->kernel_queues[0]; + for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) { + if ((q->queue_type == QUEUE_TYPE_EXT) && + (cs->jobs_in_queue_cnt[i])) { + atomic_t *free_slots = + &hdev->completion_queue[i].free_slots_cnt; + atomic_add(cs->jobs_in_queue_cnt[i], free_slots); + cq_cnt--; + } + } + +out: + hdev->asic_funcs->hw_queues_unlock(hdev); + + return rc; +} + +/* + * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue + * + * @hdev: pointer to hl_device structure + * @hw_queue_id: which queue to increment its ci + */ +void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id) +{ + struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id]; + + atomic_inc(&q->ci); +} + +static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q, + bool is_cpu_queue) +{ + void *p; + int rc; + + if (is_cpu_queue) + p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, + HL_QUEUE_SIZE_IN_BYTES, + &q->bus_address); + else + p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, + HL_QUEUE_SIZE_IN_BYTES, + &q->bus_address, + GFP_KERNEL | __GFP_ZERO); + if (!p) + return -ENOMEM; + + q->kernel_address = p; + + q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH, + sizeof(*q->shadow_queue), + GFP_KERNEL); + if (!q->shadow_queue) { + dev_err(hdev->dev, + "Failed to allocate shadow queue for H/W queue %d\n", + q->hw_queue_id); + rc = -ENOMEM; + goto free_queue; + } + + /* Make sure read/write pointers are initialized to start of queue */ + atomic_set(&q->ci, 0); + q->pi = 0; + + return 0; + +free_queue: + if (is_cpu_queue) + hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, + HL_QUEUE_SIZE_IN_BYTES, + q->kernel_address); + else + hdev->asic_funcs->asic_dma_free_coherent(hdev, + HL_QUEUE_SIZE_IN_BYTES, + q->kernel_address, + q->bus_address); + + return rc; +} + +static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) +{ + void *p; + + p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id, + &q->bus_address, &q->int_queue_len); + if (!p) { + dev_err(hdev->dev, + "Failed to get base address for internal queue %d\n", + q->hw_queue_id); + return -EFAULT; + } + + q->kernel_address = p; + q->pi = 0; + atomic_set(&q->ci, 0); + + return 0; +} + +static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) +{ + return ext_and_cpu_queue_init(hdev, q, true); +} + +static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) +{ + return ext_and_cpu_queue_init(hdev, q, false); +} + +static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) +{ + void *p; + + p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, + HL_QUEUE_SIZE_IN_BYTES, + &q->bus_address, + GFP_KERNEL | __GFP_ZERO); + if (!p) + return -ENOMEM; + + q->kernel_address = p; + + /* Make sure read/write pointers are initialized to start of queue */ + atomic_set(&q->ci, 0); + q->pi = 0; + + return 0; +} + +static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx) +{ + struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx]; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_hw_sob *hw_sob; + int sob, queue_idx = hdev->sync_stream_queue_idx++; + + hw_queue->base_sob_id = + prop->sync_stream_first_sob + queue_idx * HL_RSVD_SOBS; + hw_queue->base_mon_id = + prop->sync_stream_first_mon + queue_idx * HL_RSVD_MONS; + hw_queue->next_sob_val = 1; + 
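+ /*
+ * Each sync-stream queue owns HL_RSVD_SOBS consecutive sync objects
+ * and HL_RSVD_MONS monitors, carved out of the ASIC's
+ * sync_stream_first_sob/mon ranges by the allocation index above.
+ * Consumption starts at the first reserved SOB; init_signal_wait_cs()
+ * rotates curr_sob_offset when a SOB's value wraps around.
+ */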
+ hw_queue->curr_sob_offset = 0;
+
+ for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
+ hw_sob = &hw_queue->hw_sob[sob];
+ hw_sob->hdev = hdev;
+ hw_sob->sob_id = hw_queue->base_sob_id + sob;
+ hw_sob->q_idx = q_idx;
+ kref_init(&hw_sob->kref);
+ }
+}
+
+static void sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx)
+{
+ struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
+
+ /*
+ * In case we got here due to a stuck CS, the refcnt might be bigger
+ * than 1 and therefore we reset it.
+ */
+ kref_init(&hw_queue->hw_sob[hw_queue->curr_sob_offset].kref);
+ hw_queue->curr_sob_offset = 0;
+ hw_queue->next_sob_val = 1;
+}
+
+/*
+ * queue_init - main initialization function for H/W queue object
+ *
+ * @hdev: pointer to hl_device device structure
+ * @q: pointer to hl_hw_queue queue structure
+ * @hw_queue_id: The id of the H/W queue
+ *
+ * Allocate dma-able memory for the queue and initialize fields
+ * Returns 0 on success
+ */
+static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
+ u32 hw_queue_id)
+{
+ int rc;
+
+ q->hw_queue_id = hw_queue_id;
+
+ switch (q->queue_type) {
+ case QUEUE_TYPE_EXT:
+ rc = ext_queue_init(hdev, q);
+ break;
+ case QUEUE_TYPE_INT:
+ rc = int_queue_init(hdev, q);
+ break;
+ case QUEUE_TYPE_CPU:
+ rc = cpu_queue_init(hdev, q);
+ break;
+ case QUEUE_TYPE_HW:
+ rc = hw_queue_init(hdev, q);
+ break;
+ case QUEUE_TYPE_NA:
+ q->valid = 0;
+ return 0;
+ default:
+ dev_crit(hdev->dev, "wrong queue type %d during init\n",
+ q->queue_type);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (q->supports_sync_stream)
+ sync_stream_queue_init(hdev, q->hw_queue_id);
+
+ if (rc)
+ return rc;
+
+ q->valid = 1;
+
+ return 0;
+}
+
+/*
+ * queue_fini - destroy queue
+ *
+ * @hdev: pointer to hl_device device structure
+ * @q: pointer to hl_hw_queue queue structure
+ *
+ * Free the queue memory
+ */
+static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
+{
+ if (!q->valid)
+ return;
+
+ /*
+ * If we arrived here, there are no jobs waiting on this queue
+ * so we can safely remove it.
+ * This is because this function can only be called when:
+ * 1. Either a context is deleted, which can only occur if all its
+ * jobs were finished
+ * 2. A context wasn't able to be created due to failure or timeout,
+ * which means there are no jobs on the queue yet
+ *
+ * The only exceptions are the queues of the kernel context, but
+ * if they are being destroyed, it means that the entire module is
+ * being removed. If the module is removed, it means there is no open
+ * user context. It also means that if a job was submitted by
+ * the kernel driver (e.g. context creation), the job itself was
+ * released by the kernel driver when a timeout occurred on its
+ * Completion. Thus, we don't need to release it again.
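+ *
+ * Note that internal queues are skipped right below: their ring
+ * memory comes from asic_funcs->get_int_queue_base() and is owned by
+ * the ASIC-specific code, so there is nothing to free here.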
+ */ + + if (q->queue_type == QUEUE_TYPE_INT) + return; + + kfree(q->shadow_queue); + + if (q->queue_type == QUEUE_TYPE_CPU) + hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, + HL_QUEUE_SIZE_IN_BYTES, + q->kernel_address); + else + hdev->asic_funcs->asic_dma_free_coherent(hdev, + HL_QUEUE_SIZE_IN_BYTES, + q->kernel_address, + q->bus_address); +} + +int hl_hw_queues_create(struct hl_device *hdev) +{ + struct asic_fixed_properties *asic = &hdev->asic_prop; + struct hl_hw_queue *q; + int i, rc, q_ready_cnt; + + hdev->kernel_queues = kcalloc(asic->max_queues, + sizeof(*hdev->kernel_queues), GFP_KERNEL); + + if (!hdev->kernel_queues) { + dev_err(hdev->dev, "Not enough memory for H/W queues\n"); + return -ENOMEM; + } + + /* Initialize the H/W queues */ + for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues; + i < asic->max_queues ; i++, q_ready_cnt++, q++) { + + q->queue_type = asic->hw_queues_props[i].type; + q->supports_sync_stream = + asic->hw_queues_props[i].supports_sync_stream; + rc = queue_init(hdev, q, i); + if (rc) { + dev_err(hdev->dev, + "failed to initialize queue %d\n", i); + goto release_queues; + } + } + + return 0; + +release_queues: + for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++) + queue_fini(hdev, q); + + kfree(hdev->kernel_queues); + + return rc; +} + +void hl_hw_queues_destroy(struct hl_device *hdev) +{ + struct hl_hw_queue *q; + u32 max_queues = hdev->asic_prop.max_queues; + int i; + + for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) + queue_fini(hdev, q); + + kfree(hdev->kernel_queues); +} + +void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset) +{ + struct hl_hw_queue *q; + u32 max_queues = hdev->asic_prop.max_queues; + int i; + + for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) { + if ((!q->valid) || + ((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU))) + continue; + q->pi = 0; + atomic_set(&q->ci, 0); + + if (q->supports_sync_stream) + sync_stream_queue_reset(hdev, q->hw_queue_id); + } +} diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c new file mode 100644 index 000000000..2ac29cb2f --- /dev/null +++ b/drivers/misc/habanalabs/common/hwmon.c @@ -0,0 +1,578 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */ + +#include "habanalabs.h" + +#include <linux/pci.h> +#include <linux/hwmon.h> + +#define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1) + +int hl_build_hwmon_channel_info(struct hl_device *hdev, + struct cpucp_sensor *sensors_arr) +{ + u32 counts[HWMON_NR_SENSOR_TYPES] = {0}; + u32 *sensors_by_type[HWMON_NR_SENSOR_TYPES] = {NULL}; + u32 sensors_by_type_next_index[HWMON_NR_SENSOR_TYPES] = {0}; + struct hwmon_channel_info **channels_info; + u32 num_sensors_for_type, num_active_sensor_types = 0, + arr_size = 0, *curr_arr; + enum hwmon_sensor_types type; + int rc, i, j; + + for (i = 0 ; i < CPUCP_MAX_SENSORS ; i++) { + type = le32_to_cpu(sensors_arr[i].type); + + if ((type == 0) && (sensors_arr[i].flags == 0)) + break; + + if (type >= HWMON_NR_SENSOR_TYPES) { + dev_err(hdev->dev, + "Got wrong sensor type %d from device\n", type); + return -EINVAL; + } + + counts[type]++; + arr_size++; + } + + for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) { + if (counts[i] == 0) + continue; + + num_sensors_for_type = counts[i] + 1; + curr_arr = kcalloc(num_sensors_for_type, sizeof(*curr_arr), + GFP_KERNEL); + if (!curr_arr) { + rc = -ENOMEM; + goto sensors_type_err; + } + + num_active_sensor_types++; + sensors_by_type[i] = curr_arr; + } + + for (i = 0 ; i < arr_size ; i++) { + type = le32_to_cpu(sensors_arr[i].type); + curr_arr = sensors_by_type[type]; + curr_arr[sensors_by_type_next_index[type]++] = + le32_to_cpu(sensors_arr[i].flags); + } + + channels_info = kcalloc(num_active_sensor_types + 1, + sizeof(*channels_info), GFP_KERNEL); + if (!channels_info) { + rc = -ENOMEM; + goto channels_info_array_err; + } + + for (i = 0 ; i < num_active_sensor_types ; i++) { + channels_info[i] = kzalloc(sizeof(*channels_info[i]), + GFP_KERNEL); + if (!channels_info[i]) { + rc = -ENOMEM; + goto channel_info_err; + } + } + + for (i = 0, j = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) { + if (!sensors_by_type[i]) + continue; + + channels_info[j]->type = i; + channels_info[j]->config = sensors_by_type[i]; + j++; + } + + hdev->hl_chip_info->info = + (const struct hwmon_channel_info **)channels_info; + + return 0; + +channel_info_err: + for (i = 0 ; i < num_active_sensor_types ; i++) + if (channels_info[i]) { + kfree(channels_info[i]->config); + kfree(channels_info[i]); + } + kfree(channels_info); +channels_info_array_err: +sensors_type_err: + for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) + kfree(sensors_by_type[i]); + + return rc; +} + +static int hl_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_max: + case hwmon_temp_crit: + case hwmon_temp_max_hyst: + case hwmon_temp_crit_hyst: + case hwmon_temp_offset: + case hwmon_temp_highest: + break; + default: + return -EINVAL; + } + + rc = hl_get_temperature(hdev, channel, attr, val); + break; + case hwmon_in: + switch (attr) { + case hwmon_in_input: + case hwmon_in_min: + case hwmon_in_max: + case hwmon_in_highest: + break; + default: + return -EINVAL; + } + + rc = hl_get_voltage(hdev, channel, attr, val); + break; + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + case hwmon_curr_min: + case hwmon_curr_max: + case hwmon_curr_highest: + break; + default: + return -EINVAL; + } + + rc = hl_get_current(hdev, channel, attr, val); + break; + case hwmon_fan: + switch (attr) { + case hwmon_fan_input: + case hwmon_fan_min: + 
case hwmon_fan_max: + break; + default: + return -EINVAL; + } + rc = hl_get_fan_speed(hdev, channel, attr, val); + break; + case hwmon_pwm: + switch (attr) { + case hwmon_pwm_input: + case hwmon_pwm_enable: + break; + default: + return -EINVAL; + } + rc = hl_get_pwm_info(hdev, channel, attr, val); + break; + default: + return -EINVAL; + } + return rc; +} + +static int hl_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_offset: + case hwmon_temp_reset_history: + break; + default: + return -EINVAL; + } + hl_set_temperature(hdev, channel, attr, val); + break; + case hwmon_pwm: + switch (attr) { + case hwmon_pwm_input: + case hwmon_pwm_enable: + break; + default: + return -EINVAL; + } + hl_set_pwm_info(hdev, channel, attr, val); + break; + case hwmon_in: + switch (attr) { + case hwmon_in_reset_history: + break; + default: + return -EINVAL; + } + hl_set_voltage(hdev, channel, attr, val); + break; + case hwmon_curr: + switch (attr) { + case hwmon_curr_reset_history: + break; + default: + return -EINVAL; + } + hl_set_current(hdev, channel, attr, val); + break; + default: + return -EINVAL; + } + return 0; +} + +static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_max: + case hwmon_temp_max_hyst: + case hwmon_temp_crit: + case hwmon_temp_crit_hyst: + case hwmon_temp_highest: + return 0444; + case hwmon_temp_offset: + return 0644; + case hwmon_temp_reset_history: + return 0200; + } + break; + case hwmon_in: + switch (attr) { + case hwmon_in_input: + case hwmon_in_min: + case hwmon_in_max: + case hwmon_in_highest: + return 0444; + case hwmon_in_reset_history: + return 0200; + } + break; + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + case hwmon_curr_min: + case hwmon_curr_max: + case hwmon_curr_highest: + return 0444; + case hwmon_curr_reset_history: + return 0200; + } + break; + case hwmon_fan: + switch (attr) { + case hwmon_fan_input: + case hwmon_fan_min: + case hwmon_fan_max: + return 0444; + } + break; + case hwmon_pwm: + switch (attr) { + case hwmon_pwm_input: + case hwmon_pwm_enable: + return 0644; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_ops hl_hwmon_ops = { + .is_visible = hl_is_visible, + .read = hl_read, + .write = hl_write +}; + +int hl_get_temperature(struct hl_device *hdev, + int sensor_index, u32 attr, long *value) +{ + struct cpucp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEMPERATURE_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, value); + + if (rc) { + dev_err(hdev->dev, + "Failed to get temperature from sensor %d, error %d\n", + sensor_index, rc); + *value = 0; + } + + return rc; +} + +int hl_set_temperature(struct hl_device *hdev, + int sensor_index, u32 attr, long value) +{ + struct cpucp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEMPERATURE_SET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + pkt.value = 
__cpu_to_le64(value); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, NULL); + + if (rc) + dev_err(hdev->dev, + "Failed to set temperature of sensor %d, error %d\n", + sensor_index, rc); + + return rc; +} + +int hl_get_voltage(struct hl_device *hdev, + int sensor_index, u32 attr, long *value) +{ + struct cpucp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_VOLTAGE_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, value); + + if (rc) { + dev_err(hdev->dev, + "Failed to get voltage from sensor %d, error %d\n", + sensor_index, rc); + *value = 0; + } + + return rc; +} + +int hl_get_current(struct hl_device *hdev, + int sensor_index, u32 attr, long *value) +{ + struct cpucp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_CURRENT_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, value); + + if (rc) { + dev_err(hdev->dev, + "Failed to get current from sensor %d, error %d\n", + sensor_index, rc); + *value = 0; + } + + return rc; +} + +int hl_get_fan_speed(struct hl_device *hdev, + int sensor_index, u32 attr, long *value) +{ + struct cpucp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_FAN_SPEED_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, value); + + if (rc) { + dev_err(hdev->dev, + "Failed to get fan speed from sensor %d, error %d\n", + sensor_index, rc); + *value = 0; + } + + return rc; +} + +int hl_get_pwm_info(struct hl_device *hdev, + int sensor_index, u32 attr, long *value) +{ + struct cpucp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_PWM_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, value); + + if (rc) { + dev_err(hdev->dev, + "Failed to get pwm info from sensor %d, error %d\n", + sensor_index, rc); + *value = 0; + } + + return rc; +} + +void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, + long value) +{ + struct cpucp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_PWM_SET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + pkt.value = cpu_to_le64(value); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, NULL); + + if (rc) + dev_err(hdev->dev, + "Failed to set pwm info to sensor %d, error %d\n", + sensor_index, rc); +} + +int hl_set_voltage(struct hl_device *hdev, + int sensor_index, u32 attr, long value) +{ + struct cpucp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_VOLTAGE_SET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + pkt.value = __cpu_to_le64(value); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, NULL); + + if (rc) + 
dev_err(hdev->dev, + "Failed to set voltage of sensor %d, error %d\n", + sensor_index, rc); + + return rc; +} + +int hl_set_current(struct hl_device *hdev, + int sensor_index, u32 attr, long value) +{ + struct cpucp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_CURRENT_SET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + pkt.value = __cpu_to_le64(value); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, NULL); + + if (rc) + dev_err(hdev->dev, + "Failed to set current of sensor %d, error %d\n", + sensor_index, rc); + + return rc; +} + +int hl_hwmon_init(struct hl_device *hdev) +{ + struct device *dev = hdev->pdev ? &hdev->pdev->dev : hdev->dev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + int rc; + + if ((hdev->hwmon_initialized) || !(hdev->fw_loading)) + return 0; + + if (hdev->hl_chip_info->info) { + hdev->hl_chip_info->ops = &hl_hwmon_ops; + + hdev->hwmon_dev = hwmon_device_register_with_info(dev, + prop->cpucp_info.card_name, hdev, + hdev->hl_chip_info, NULL); + if (IS_ERR(hdev->hwmon_dev)) { + rc = PTR_ERR(hdev->hwmon_dev); + dev_err(hdev->dev, + "Unable to register hwmon device: %d\n", rc); + return rc; + } + + dev_info(hdev->dev, "%s: add sensors information\n", + dev_name(hdev->hwmon_dev)); + + hdev->hwmon_initialized = true; + } else { + dev_info(hdev->dev, "no available sensors\n"); + } + + return 0; +} + +void hl_hwmon_fini(struct hl_device *hdev) +{ + if (!hdev->hwmon_initialized) + return; + + hwmon_device_unregister(hdev->hwmon_dev); +} diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c new file mode 100644 index 000000000..de53fb5f9 --- /dev/null +++ b/drivers/misc/habanalabs/common/irq.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" + +#include <linux/slab.h> + +/** + * struct hl_eqe_work - This structure is used to schedule work of EQ + * entry and cpucp_reset event + * + * @eq_work: workqueue object to run when EQ entry is received + * @hdev: pointer to device structure + * @eq_entry: copy of the EQ entry + */ +struct hl_eqe_work { + struct work_struct eq_work; + struct hl_device *hdev; + struct hl_eq_entry eq_entry; +}; + +/** + * hl_cq_inc_ptr - increment ci or pi of cq + * + * @ptr: the current ci or pi value of the completion queue + * + * Increment ptr by 1. If it reaches the number of completion queue + * entries, set it to 0 + */ +inline u32 hl_cq_inc_ptr(u32 ptr) +{ + ptr++; + if (unlikely(ptr == HL_CQ_LENGTH)) + ptr = 0; + return ptr; +} + +/** + * hl_eq_inc_ptr - increment ci of eq + * + * @ptr: the current ci value of the event queue + * + * Increment ptr by 1. 
If it reaches the number of event queue + * entries, set it to 0 + */ +inline u32 hl_eq_inc_ptr(u32 ptr) +{ + ptr++; + if (unlikely(ptr == HL_EQ_LENGTH)) + ptr = 0; + return ptr; +} + +static void irq_handle_eqe(struct work_struct *work) +{ + struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work, + eq_work); + struct hl_device *hdev = eqe_work->hdev; + + hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry); + + kfree(eqe_work); +} + +/** + * hl_irq_handler_cq - irq handler for completion queue + * + * @irq: irq number + * @arg: pointer to completion queue structure + * + */ +irqreturn_t hl_irq_handler_cq(int irq, void *arg) +{ + struct hl_cq *cq = arg; + struct hl_device *hdev = cq->hdev; + struct hl_hw_queue *queue; + struct hl_cs_job *job; + bool shadow_index_valid; + u16 shadow_index; + struct hl_cq_entry *cq_entry, *cq_base; + + if (hdev->disabled) { + dev_dbg(hdev->dev, + "Device disabled but received IRQ %d for CQ %d\n", + irq, cq->hw_queue_id); + return IRQ_HANDLED; + } + + cq_base = cq->kernel_address; + + while (1) { + bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) & + CQ_ENTRY_READY_MASK) + >> CQ_ENTRY_READY_SHIFT); + + if (!entry_ready) + break; + + cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci]; + + /* Make sure we read CQ entry contents after we've + * checked the ownership bit. + */ + dma_rmb(); + + shadow_index_valid = ((le32_to_cpu(cq_entry->data) & + CQ_ENTRY_SHADOW_INDEX_VALID_MASK) + >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT); + + shadow_index = (u16) ((le32_to_cpu(cq_entry->data) & + CQ_ENTRY_SHADOW_INDEX_MASK) + >> CQ_ENTRY_SHADOW_INDEX_SHIFT); + + queue = &hdev->kernel_queues[cq->hw_queue_id]; + + if ((shadow_index_valid) && (!hdev->disabled)) { + job = queue->shadow_queue[hl_pi_2_offset(shadow_index)]; + queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work); + } + + atomic_inc(&queue->ci); + + /* Clear CQ entry ready bit */ + cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) & + ~CQ_ENTRY_READY_MASK); + + cq->ci = hl_cq_inc_ptr(cq->ci); + + /* Increment free slots */ + atomic_inc(&cq->free_slots_cnt); + } + + return IRQ_HANDLED; +} + +/** + * hl_irq_handler_eq - irq handler for event queue + * + * @irq: irq number + * @arg: pointer to event queue structure + * + */ +irqreturn_t hl_irq_handler_eq(int irq, void *arg) +{ + struct hl_eq *eq = arg; + struct hl_device *hdev = eq->hdev; + struct hl_eq_entry *eq_entry; + struct hl_eq_entry *eq_base; + struct hl_eqe_work *handle_eqe_work; + + eq_base = eq->kernel_address; + + while (1) { + bool entry_ready = + ((le32_to_cpu(eq_base[eq->ci].hdr.ctl) & + EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT); + + if (!entry_ready) + break; + + eq_entry = &eq_base[eq->ci]; + + /* + * Make sure we read EQ entry contents after we've + * checked the ownership bit. 
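 + * + * The producer (the device's embedded CPU) is expected to publish the + * entry payload before setting EQ_CTL_READY, so the dma_rmb() below + * pairs with that ordering and the payload cannot be read ahead of the + * ready-bit check.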
+ */ + dma_rmb(); + + if (hdev->disabled) { + dev_warn(hdev->dev, + "Device disabled but received IRQ %d for EQ\n", + irq); + goto skip_irq; + } + + handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC); + if (handle_eqe_work) { + INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe); + handle_eqe_work->hdev = hdev; + + memcpy(&handle_eqe_work->eq_entry, eq_entry, + sizeof(*eq_entry)); + + queue_work(hdev->eq_wq, &handle_eqe_work->eq_work); + } +skip_irq: + /* Clear EQ entry ready bit */ + eq_entry->hdr.ctl = + cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) & + ~EQ_CTL_READY_MASK); + + eq->ci = hl_eq_inc_ptr(eq->ci); + + hdev->asic_funcs->update_eq_ci(hdev, eq->ci); + } + + return IRQ_HANDLED; +} + +/** + * hl_cq_init - main initialization function for an cq object + * + * @hdev: pointer to device structure + * @q: pointer to cq structure + * @hw_queue_id: The H/W queue ID this completion queue belongs to + * + * Allocate dma-able memory for the completion queue and initialize fields + * Returns 0 on success + */ +int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id) +{ + void *p; + + p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES, + &q->bus_address, GFP_KERNEL | __GFP_ZERO); + if (!p) + return -ENOMEM; + + q->hdev = hdev; + q->kernel_address = p; + q->hw_queue_id = hw_queue_id; + q->ci = 0; + q->pi = 0; + + atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH); + + return 0; +} + +/** + * hl_cq_fini - destroy completion queue + * + * @hdev: pointer to device structure + * @q: pointer to cq structure + * + * Free the completion queue memory + */ +void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q) +{ + hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES, + q->kernel_address, + q->bus_address); +} + +void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q) +{ + q->ci = 0; + q->pi = 0; + + atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH); + + /* + * It's not enough to just reset the PI/CI because the H/W may have + * written valid completion entries before it was halted and therefore + * we need to clean the actual queues so we won't process old entries + * when the device is operational again + */ + + memset(q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES); +} + +/** + * hl_eq_init - main initialization function for an event queue object + * + * @hdev: pointer to device structure + * @q: pointer to eq structure + * + * Allocate dma-able memory for the event queue and initialize fields + * Returns 0 on success + */ +int hl_eq_init(struct hl_device *hdev, struct hl_eq *q) +{ + void *p; + + p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, + HL_EQ_SIZE_IN_BYTES, + &q->bus_address); + if (!p) + return -ENOMEM; + + q->hdev = hdev; + q->kernel_address = p; + q->ci = 0; + + return 0; +} + +/** + * hl_eq_fini - destroy event queue + * + * @hdev: pointer to device structure + * @q: pointer to eq structure + * + * Free the event queue memory + */ +void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q) +{ + flush_workqueue(hdev->eq_wq); + + hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, + HL_EQ_SIZE_IN_BYTES, + q->kernel_address); +} + +void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q) +{ + q->ci = 0; + + /* + * It's not enough to just reset the PI/CI because the H/W may have + * written valid completion entries before it was halted and therefore + * we need to clean the actual queues so we won't process old entries + * when the device is operational again + */ + + memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES); +} diff 
--git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c new file mode 100644 index 000000000..bfe223abf --- /dev/null +++ b/drivers/misc/habanalabs/common/memory.c @@ -0,0 +1,1859 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include <uapi/misc/habanalabs.h> +#include "habanalabs.h" +#include "../include/hw_ip/mmu/mmu_general.h" + +#include <linux/uaccess.h> +#include <linux/slab.h> +#include <linux/genalloc.h> + +#define HL_MMU_DEBUG 0 + +/* + * The va ranges in context object contain a list with the available chunks of + * device virtual memory. + * There is one range for host allocations and one for DRAM allocations. + * + * On initialization each range contains one chunk of all of its available + * virtual range which is a half of the total device virtual range. + * + * On each mapping of physical pages, a suitable virtual range chunk (with a + * minimum size) is selected from the list. If the chunk size equals the + * requested size, the chunk is returned. Otherwise, the chunk is split into + * two chunks - one to return as result and a remainder to stay in the list. + * + * On each Unmapping of a virtual address, the relevant virtual chunk is + * returned to the list. The chunk is added to the list and if its edges match + * the edges of the adjacent chunks (means a contiguous chunk can be created), + * the chunks are merged. + * + * On finish, the list is checked to have only one chunk of all the relevant + * virtual range (which is a half of the device total virtual range). + * If not (means not all mappings were unmapped), a warning is printed. + */ + +/* + * alloc_device_memory - allocate device memory + * + * @ctx : current context + * @args : host parameters containing the requested size + * @ret_handle : result handle + * + * This function does the following: + * - Allocate the requested size rounded up to 2MB pages + * - Return unique handle + */ +static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, + u32 *ret_handle) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_pack; + u64 paddr = 0, total_size, num_pgs, i; + u32 num_curr_pgs, page_size, page_shift; + int handle, rc; + bool contiguous; + + num_curr_pgs = 0; + page_size = hdev->asic_prop.dram_page_size; + page_shift = __ffs(page_size); + num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift; + total_size = num_pgs << page_shift; + + if (!total_size) { + dev_err(hdev->dev, "Cannot allocate 0 bytes\n"); + return -EINVAL; + } + + contiguous = args->flags & HL_MEM_CONTIGUOUS; + + if (contiguous) { + paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size); + if (!paddr) { + dev_err(hdev->dev, + "failed to allocate %llu contiguous pages with total size of %llu\n", + num_pgs, total_size); + return -ENOMEM; + } + } + + phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL); + if (!phys_pg_pack) { + rc = -ENOMEM; + goto pages_pack_err; + } + + phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK; + phys_pg_pack->asid = ctx->asid; + phys_pg_pack->npages = num_pgs; + phys_pg_pack->page_size = page_size; + phys_pg_pack->total_size = total_size; + phys_pg_pack->flags = args->flags; + phys_pg_pack->contiguous = contiguous; + + phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) { + rc = -ENOMEM; + goto pages_arr_err; + } + + if (phys_pg_pack->contiguous) { + for (i = 0 ; i < 
num_pgs ; i++) + phys_pg_pack->pages[i] = paddr + i * page_size; + } else { + for (i = 0 ; i < num_pgs ; i++) { + phys_pg_pack->pages[i] = (u64) gen_pool_alloc( + vm->dram_pg_pool, + page_size); + if (!phys_pg_pack->pages[i]) { + dev_err(hdev->dev, + "Failed to allocate device memory (out of memory)\n"); + rc = -ENOMEM; + goto page_err; + } + + num_curr_pgs++; + } + } + + spin_lock(&vm->idr_lock); + handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0, + GFP_ATOMIC); + spin_unlock(&vm->idr_lock); + + if (handle < 0) { + dev_err(hdev->dev, "Failed to get handle for page\n"); + rc = -EFAULT; + goto idr_err; + } + + for (i = 0 ; i < num_pgs ; i++) + kref_get(&vm->dram_pg_pool_refcount); + + phys_pg_pack->handle = handle; + + atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem); + atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem); + + *ret_handle = handle; + + return 0; + +idr_err: +page_err: + if (!phys_pg_pack->contiguous) + for (i = 0 ; i < num_curr_pgs ; i++) + gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i], + page_size); + + kvfree(phys_pg_pack->pages); +pages_arr_err: + kfree(phys_pg_pack); +pages_pack_err: + if (contiguous) + gen_pool_free(vm->dram_pg_pool, paddr, total_size); + + return rc; +} + +/* + * dma_map_host_va - DMA mapping of the given host virtual address. + * @hdev: habanalabs device structure + * @addr: the host virtual address of the memory area + * @size: the size of the memory area + * @p_userptr: pointer to result userptr structure + * + * This function does the following: + * - Allocate userptr structure + * - Pin the given host memory using the userptr structure + * - Perform DMA mapping to have the DMA addresses of the pages + */ +static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size, + struct hl_userptr **p_userptr) +{ + struct hl_userptr *userptr; + int rc; + + userptr = kzalloc(sizeof(*userptr), GFP_KERNEL); + if (!userptr) { + rc = -ENOMEM; + goto userptr_err; + } + + rc = hl_pin_host_memory(hdev, addr, size, userptr); + if (rc) { + dev_err(hdev->dev, "Failed to pin host memory\n"); + goto pin_err; + } + + rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl, + userptr->sgt->nents, DMA_BIDIRECTIONAL); + if (rc) { + dev_err(hdev->dev, "failed to map sgt with DMA region\n"); + goto dma_map_err; + } + + userptr->dma_mapped = true; + userptr->dir = DMA_BIDIRECTIONAL; + userptr->vm_type = VM_TYPE_USERPTR; + + *p_userptr = userptr; + + return 0; + +dma_map_err: + hl_unpin_host_memory(hdev, userptr); +pin_err: + kfree(userptr); +userptr_err: + + return rc; +} + +/* + * dma_unmap_host_va - DMA unmapping of the given host virtual address. 
+ * @hdev: habanalabs device structure + * @userptr: userptr to free + * + * This function does the following: + * - Unpins the physical pages + * - Frees the userptr structure + */ +static void dma_unmap_host_va(struct hl_device *hdev, + struct hl_userptr *userptr) +{ + hl_unpin_host_memory(hdev, userptr); + kfree(userptr); +} + +/* + * dram_pg_pool_do_release - free DRAM pages pool + * + * @ref : pointer to reference object + * + * This function does the following: + * - Frees the idr structure of physical pages handles + * - Frees the generic pool of DRAM physical pages + */ +static void dram_pg_pool_do_release(struct kref *ref) +{ + struct hl_vm *vm = container_of(ref, struct hl_vm, + dram_pg_pool_refcount); + + /* + * free the idr here as only here we know for sure that there are no + * allocated physical pages and hence there are no handles in use + */ + idr_destroy(&vm->phys_pg_pack_handles); + gen_pool_destroy(vm->dram_pg_pool); +} + +/* + * free_phys_pg_pack - free physical page pack + * @hdev: habanalabs device structure + * @phys_pg_pack: physical page pack to free + * + * This function does the following: + * - For DRAM memory only, iterate over the pack and free each physical block + * structure by returning it to the general pool + * - Free the hl_vm_phys_pg_pack structure + */ +static void free_phys_pg_pack(struct hl_device *hdev, + struct hl_vm_phys_pg_pack *phys_pg_pack) +{ + struct hl_vm *vm = &hdev->vm; + u64 i; + + if (!phys_pg_pack->created_from_userptr) { + if (phys_pg_pack->contiguous) { + gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0], + phys_pg_pack->total_size); + + for (i = 0; i < phys_pg_pack->npages ; i++) + kref_put(&vm->dram_pg_pool_refcount, + dram_pg_pool_do_release); + } else { + for (i = 0 ; i < phys_pg_pack->npages ; i++) { + gen_pool_free(vm->dram_pg_pool, + phys_pg_pack->pages[i], + phys_pg_pack->page_size); + kref_put(&vm->dram_pg_pool_refcount, + dram_pg_pool_do_release); + } + } + } + + kvfree(phys_pg_pack->pages); + kfree(phys_pg_pack); +} + +/* + * free_device_memory - free device memory + * + * @ctx : current context + * @handle : handle of the memory chunk to free + * + * This function does the following: + * - Free the device memory related to the given handle + */ +static int free_device_memory(struct hl_ctx *ctx, u32 handle) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_pack; + + spin_lock(&vm->idr_lock); + phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); + if (phys_pg_pack) { + if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) { + dev_err(hdev->dev, "handle %u is mapped, cannot free\n", + handle); + spin_unlock(&vm->idr_lock); + return -EINVAL; + } + + /* + * must remove from idr before the freeing of the physical + * pages as the refcount of the pool is also the trigger of the + * idr destroy + */ + idr_remove(&vm->phys_pg_pack_handles, handle); + spin_unlock(&vm->idr_lock); + + atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem); + atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem); + + free_phys_pg_pack(hdev, phys_pg_pack); + } else { + spin_unlock(&vm->idr_lock); + dev_err(hdev->dev, + "free device memory failed, no match for handle %u\n", + handle); + return -EINVAL; + } + + return 0; +} + +/* + * clear_va_list_locked - free virtual addresses list + * + * @hdev : habanalabs device structure + * @va_list : list of virtual addresses to free + * + * This function does the following: + * - Iterate over the list and free each virtual addresses 
block + * + * This function should be called only when va_list lock is taken + */ +static void clear_va_list_locked(struct hl_device *hdev, + struct list_head *va_list) +{ + struct hl_vm_va_block *va_block, *tmp; + + list_for_each_entry_safe(va_block, tmp, va_list, node) { + list_del(&va_block->node); + kfree(va_block); + } +} + +/* + * print_va_list_locked - print virtual addresses list + * + * @hdev : habanalabs device structure + * @va_list : list of virtual addresses to print + * + * This function does the following: + * - Iterate over the list and print each virtual addresses block + * + * This function should be called only when va_list lock is taken + */ +static void print_va_list_locked(struct hl_device *hdev, + struct list_head *va_list) +{ +#if HL_MMU_DEBUG + struct hl_vm_va_block *va_block; + + dev_dbg(hdev->dev, "print va list:\n"); + + list_for_each_entry(va_block, va_list, node) + dev_dbg(hdev->dev, + "va block, start: 0x%llx, end: 0x%llx, size: %llu\n", + va_block->start, va_block->end, va_block->size); +#endif +} + +/* + * merge_va_blocks_locked - merge a virtual block if possible + * + * @hdev : pointer to the habanalabs device structure + * @va_list : pointer to the virtual addresses block list + * @va_block : virtual block to merge with adjacent blocks + * + * This function does the following: + * - Merge the given blocks with the adjacent blocks if their virtual ranges + * create a contiguous virtual range + * + * This Function should be called only when va_list lock is taken + */ +static void merge_va_blocks_locked(struct hl_device *hdev, + struct list_head *va_list, struct hl_vm_va_block *va_block) +{ + struct hl_vm_va_block *prev, *next; + + prev = list_prev_entry(va_block, node); + if (&prev->node != va_list && prev->end + 1 == va_block->start) { + prev->end = va_block->end; + prev->size = prev->end - prev->start; + list_del(&va_block->node); + kfree(va_block); + va_block = prev; + } + + next = list_next_entry(va_block, node); + if (&next->node != va_list && va_block->end + 1 == next->start) { + next->start = va_block->start; + next->size = next->end - next->start; + list_del(&va_block->node); + kfree(va_block); + } +} + +/* + * add_va_block_locked - add a virtual block to the virtual addresses list + * + * @hdev : pointer to the habanalabs device structure + * @va_list : pointer to the virtual addresses block list + * @start : start virtual address + * @end : end virtual address + * + * This function does the following: + * - Add the given block to the virtual blocks list and merge with other + * blocks if a contiguous virtual block can be created + * + * This Function should be called only when va_list lock is taken + */ +static int add_va_block_locked(struct hl_device *hdev, + struct list_head *va_list, u64 start, u64 end) +{ + struct hl_vm_va_block *va_block, *res = NULL; + u64 size = end - start; + + print_va_list_locked(hdev, va_list); + + list_for_each_entry(va_block, va_list, node) { + /* TODO: remove upon matureness */ + if (hl_mem_area_crosses_range(start, size, va_block->start, + va_block->end)) { + dev_err(hdev->dev, + "block crossing ranges at start 0x%llx, end 0x%llx\n", + va_block->start, va_block->end); + return -EINVAL; + } + + if (va_block->end < start) + res = va_block; + } + + va_block = kmalloc(sizeof(*va_block), GFP_KERNEL); + if (!va_block) + return -ENOMEM; + + va_block->start = start; + va_block->end = end; + va_block->size = size; + + if (!res) + list_add(&va_block->node, va_list); + else + list_add(&va_block->node, &res->node); + + 
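+ /* + * The inserted block may now be adjacent to its neighbours; e.g. + * returning [0x2000, 0x2fff] between free blocks [0x1000, 0x1fff] and + * [0x3000, 0x3fff] should leave one chunk [0x1000, 0x3fff] (addresses + * are illustrative only). + */ +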
merge_va_blocks_locked(hdev, va_list, va_block); + + print_va_list_locked(hdev, va_list); + + return 0; +} + +/* + * add_va_block - wrapper for add_va_block_locked + * + * @hdev : pointer to the habanalabs device structure + * @va_list : pointer to the virtual addresses block list + * @start : start virtual address + * @end : end virtual address + * + * This function does the following: + * - Takes the list lock and calls add_va_block_locked + */ +static inline int add_va_block(struct hl_device *hdev, + struct hl_va_range *va_range, u64 start, u64 end) +{ + int rc; + + mutex_lock(&va_range->lock); + rc = add_va_block_locked(hdev, &va_range->list, start, end); + mutex_unlock(&va_range->lock); + + return rc; +} + +/* + * get_va_block() - get a virtual block for the given size and alignment. + * @hdev: pointer to the habanalabs device structure. + * @va_range: pointer to the virtual addresses range. + * @size: requested block size. + * @hint_addr: hint for requested address by the user. + * @va_block_align: required alignment of the virtual block start address. + * + * This function does the following: + * - Iterate on the virtual block list to find a suitable virtual block for the + * given size and alignment. + * - Reserve the requested block and update the list. + * - Return the start address of the virtual block. + */ +static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range, + u64 size, u64 hint_addr, u32 va_block_align) +{ + struct hl_vm_va_block *va_block, *new_va_block = NULL; + u64 valid_start, valid_size, prev_start, prev_end, align_mask, + res_valid_start = 0, res_valid_size = 0; + bool add_prev = false; + + align_mask = ~((u64)va_block_align - 1); + + /* check if hint_addr is aligned */ + if (hint_addr & (va_block_align - 1)) + hint_addr = 0; + + mutex_lock(&va_range->lock); + + print_va_list_locked(hdev, &va_range->list); + + list_for_each_entry(va_block, &va_range->list, node) { + /* calc the first possible aligned addr */ + valid_start = va_block->start; + + if (valid_start & (va_block_align - 1)) { + valid_start &= align_mask; + valid_start += va_block_align; + if (valid_start > va_block->end) + continue; + } + + valid_size = va_block->end - valid_start; + + if (valid_size >= size && + (!new_va_block || valid_size < res_valid_size)) { + new_va_block = va_block; + res_valid_start = valid_start; + res_valid_size = valid_size; + } + + if (hint_addr && hint_addr >= valid_start && + ((hint_addr + size) <= va_block->end)) { + new_va_block = va_block; + res_valid_start = hint_addr; + res_valid_size = valid_size; + break; + } + } + + if (!new_va_block) { + dev_err(hdev->dev, "no available va block for size %llu\n", + size); + goto out; + } + + if (res_valid_start > new_va_block->start) { + prev_start = new_va_block->start; + prev_end = res_valid_start - 1; + + new_va_block->start = res_valid_start; + new_va_block->size = res_valid_size; + + add_prev = true; + } + + if (new_va_block->size > size) { + new_va_block->start += size; + new_va_block->size = new_va_block->end - new_va_block->start; + } else { + list_del(&new_va_block->node); + kfree(new_va_block); + } + + if (add_prev) + add_va_block_locked(hdev, &va_range->list, prev_start, + prev_end); + + print_va_list_locked(hdev, &va_range->list); +out: + mutex_unlock(&va_range->lock); + + return res_valid_start; +} + +/* + * get_sg_info - get number of pages and the DMA address from SG list + * + * @sg : the SG list + * @dma_addr : pointer to DMA address to return + * + * Calculate the number of consecutive pages 
described by the SG list. Take the + * offset of the address within the first page, add the segment length, and + * round the sum up to whole pages; e.g. with 4KB pages, offset 0x800 and + * length 0x1800 yield (0x800 + 0x1800 + 0xfff) >> PAGE_SHIFT = 2 pages. + */ +static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr) +{ + *dma_addr = sg_dma_address(sg); + + return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) + + (PAGE_SIZE - 1)) >> PAGE_SHIFT; +} + +/* + * init_phys_pg_pack_from_userptr - initialize physical page pack from host + * memory + * @ctx: current context + * @userptr: userptr to initialize from + * @pphys_pg_pack: result pointer + * + * This function does the following: + * - Pin the physical pages related to the given virtual block + * - Create a physical page pack from the physical pages related to the given + * virtual block + */ +static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, + struct hl_userptr *userptr, + struct hl_vm_phys_pg_pack **pphys_pg_pack) +{ + struct hl_vm_phys_pg_pack *phys_pg_pack; + struct scatterlist *sg; + dma_addr_t dma_addr; + u64 page_mask, total_npages; + u32 npages, page_size = PAGE_SIZE, + huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size; + bool first = true, is_huge_page_opt = true; + int rc, i, j; + u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size); + + phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL); + if (!phys_pg_pack) + return -ENOMEM; + + phys_pg_pack->vm_type = userptr->vm_type; + phys_pg_pack->created_from_userptr = true; + phys_pg_pack->asid = ctx->asid; + atomic_set(&phys_pg_pack->mapping_cnt, 1); + + /* We can use huge page mapping only if each dma_addr is aligned to + * 2MB and each segment size is a whole multiple of 2MB. + * We limit the 2MB optimization to this condition, + * since later on we acquire the related VA range as one + * consecutive block. + */ + total_npages = 0; + for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) { + npages = get_sg_info(sg, &dma_addr); + + total_npages += npages; + + if ((npages % pgs_in_huge_page) || + (dma_addr & (huge_page_size - 1))) + is_huge_page_opt = false; + } + + if (is_huge_page_opt) { + page_size = huge_page_size; + do_div(total_npages, pgs_in_huge_page); + } + + page_mask = ~(((u64) page_size) - 1); + + phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64), + GFP_KERNEL); + if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) { + rc = -ENOMEM; + goto page_pack_arr_mem_err; + } + + phys_pg_pack->npages = total_npages; + phys_pg_pack->page_size = page_size; + phys_pg_pack->total_size = total_npages * page_size; + + j = 0; + for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) { + npages = get_sg_info(sg, &dma_addr); + + /* align down to physical page size and save the offset */ + if (first) { + first = false; + phys_pg_pack->offset = dma_addr & (page_size - 1); + dma_addr &= page_mask; + } + + while (npages) { + phys_pg_pack->pages[j++] = dma_addr; + dma_addr += page_size; + + if (is_huge_page_opt) + npages -= pgs_in_huge_page; + else + npages--; + } + } + + *pphys_pg_pack = phys_pg_pack; + + return 0; + +page_pack_arr_mem_err: + kfree(phys_pg_pack); + + return rc; +} + +/* + * map_phys_pg_pack - maps the physical page pack.
+ * @ctx: current context + * @vaddr: start address of the virtual area to map from + * @phys_pg_pack: the pack of physical pages to map to + * + * This function does the following: + * - Maps each chunk of virtual memory to matching physical chunk + * - Stores number of successful mappings in the given argument + * - Returns 0 on success, error code otherwise + */ +static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr, + struct hl_vm_phys_pg_pack *phys_pg_pack) +{ + struct hl_device *hdev = ctx->hdev; + u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i; + u32 page_size = phys_pg_pack->page_size; + int rc = 0; + + for (i = 0 ; i < phys_pg_pack->npages ; i++) { + paddr = phys_pg_pack->pages[i]; + + rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size, + (i + 1) == phys_pg_pack->npages); + if (rc) { + dev_err(hdev->dev, + "map failed for handle %u, npages: %llu, mapped: %llu", + phys_pg_pack->handle, phys_pg_pack->npages, + mapped_pg_cnt); + goto err; + } + + mapped_pg_cnt++; + next_vaddr += page_size; + } + + return 0; + +err: + next_vaddr = vaddr; + for (i = 0 ; i < mapped_pg_cnt ; i++) { + if (hl_mmu_unmap(ctx, next_vaddr, page_size, + (i + 1) == mapped_pg_cnt)) + dev_warn_ratelimited(hdev->dev, + "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n", + phys_pg_pack->handle, next_vaddr, + phys_pg_pack->pages[i], page_size); + + next_vaddr += page_size; + } + + return rc; +} + +/* + * unmap_phys_pg_pack - unmaps the physical page pack + * @ctx: current context + * @vaddr: start address of the virtual area to unmap + * @phys_pg_pack: the pack of physical pages to unmap + */ +static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr, + struct hl_vm_phys_pg_pack *phys_pg_pack) +{ + struct hl_device *hdev = ctx->hdev; + u64 next_vaddr, i; + u32 page_size; + + page_size = phys_pg_pack->page_size; + next_vaddr = vaddr; + + for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) { + if (hl_mmu_unmap(ctx, next_vaddr, page_size, + (i + 1) == phys_pg_pack->npages)) + dev_warn_ratelimited(hdev->dev, + "unmap failed for vaddr: 0x%llx\n", next_vaddr); + + /* + * unmapping on Palladium can be really long, so avoid a CPU + * soft lockup bug by sleeping a little between unmapping pages + */ + if (hdev->pldm) + usleep_range(500, 1000); + } +} + +static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args, + u64 *paddr) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_pack; + u32 handle; + + handle = lower_32_bits(args->map_device.handle); + spin_lock(&vm->idr_lock); + phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); + if (!phys_pg_pack) { + spin_unlock(&vm->idr_lock); + dev_err(hdev->dev, "no match for handle %u\n", handle); + return -EINVAL; + } + + *paddr = phys_pg_pack->pages[0]; + + spin_unlock(&vm->idr_lock); + + return 0; +} + +/* + * map_device_va - map the given memory + * + * @ctx : current context + * @args : host parameters with handle/host virtual address + * @device_addr : pointer to result device virtual address + * + * This function does the following: + * - If given a physical device memory handle, map to a device virtual block + * and return the start address of this block + * - If given a host virtual address and size, find the related physical pages, + * map a device virtual block to this pages and return the start address of + * this block + */ +static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, + u64 *device_addr) +{ + struct hl_device *hdev = 
ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_pack; + struct hl_userptr *userptr = NULL; + struct hl_vm_hash_node *hnode; + struct hl_va_range *va_range; + enum vm_type_t *vm_type; + u64 ret_vaddr, hint_addr; + u32 handle = 0, va_block_align; + int rc; + bool is_userptr = args->flags & HL_MEM_USERPTR; + + /* Assume failure */ + *device_addr = 0; + + if (is_userptr) { + u64 addr = args->map_host.host_virt_addr, + size = args->map_host.mem_size; + u32 page_size = hdev->asic_prop.pmmu.page_size, + huge_page_size = hdev->asic_prop.pmmu_huge.page_size; + + rc = dma_map_host_va(hdev, addr, size, &userptr); + if (rc) { + dev_err(hdev->dev, "failed to get userptr from va\n"); + return rc; + } + + rc = init_phys_pg_pack_from_userptr(ctx, userptr, + &phys_pg_pack); + if (rc) { + dev_err(hdev->dev, + "unable to init page pack for vaddr 0x%llx\n", + addr); + goto init_page_pack_err; + } + + vm_type = (enum vm_type_t *) userptr; + hint_addr = args->map_host.hint_addr; + handle = phys_pg_pack->handle; + + /* get required alignment */ + if (phys_pg_pack->page_size == page_size) { + va_range = ctx->host_va_range; + + /* + * huge page alignment may be needed in case of regular + * page mapping, depending on the host VA alignment + */ + if (addr & (huge_page_size - 1)) + va_block_align = page_size; + else + va_block_align = huge_page_size; + } else { + /* + * huge page alignment is needed in case of huge page + * mapping + */ + va_range = ctx->host_huge_va_range; + va_block_align = huge_page_size; + } + } else { + handle = lower_32_bits(args->map_device.handle); + + spin_lock(&vm->idr_lock); + phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); + if (!phys_pg_pack) { + spin_unlock(&vm->idr_lock); + dev_err(hdev->dev, + "no match for handle %u\n", handle); + return -EINVAL; + } + + /* increment now to avoid freeing device memory while mapping */ + atomic_inc(&phys_pg_pack->mapping_cnt); + + spin_unlock(&vm->idr_lock); + + vm_type = (enum vm_type_t *) phys_pg_pack; + + hint_addr = args->map_device.hint_addr; + + /* DRAM VA alignment is the same as the DRAM page size */ + va_range = ctx->dram_va_range; + va_block_align = hdev->asic_prop.dmmu.page_size; + } + + /* + * relevant for mapping device physical memory only, as host memory is + * implicitly shared + */ + if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) && + phys_pg_pack->asid != ctx->asid) { + dev_err(hdev->dev, + "Failed to map memory, handle %u is not shared\n", + handle); + rc = -EPERM; + goto shared_err; + } + + hnode = kzalloc(sizeof(*hnode), GFP_KERNEL); + if (!hnode) { + rc = -ENOMEM; + goto hnode_err; + } + + ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size, + hint_addr, va_block_align); + if (!ret_vaddr) { + dev_err(hdev->dev, "no available va block for handle %u\n", + handle); + rc = -ENOMEM; + goto va_block_err; + } + + mutex_lock(&ctx->mmu_lock); + + rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack); + if (rc) { + mutex_unlock(&ctx->mmu_lock); + dev_err(hdev->dev, "mapping page pack failed for handle %u\n", + handle); + goto map_err; + } + + rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type); + + mutex_unlock(&ctx->mmu_lock); + + if (rc) { + dev_err(hdev->dev, + "mapping handle %u failed due to MMU cache invalidation\n", + handle); + goto map_err; + } + + ret_vaddr += phys_pg_pack->offset; + + hnode->ptr = vm_type; + hnode->vaddr = ret_vaddr; + + mutex_lock(&ctx->mem_hash_lock); + hash_add(ctx->mem_hash, &hnode->node, ret_vaddr); + 
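+ /* + * The node is keyed by the device VA returned to the user (including + * the intra-page offset), which is exactly the address that + * unmap_device_va() will look up later. + */ +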
mutex_unlock(&ctx->mem_hash_lock); + + *device_addr = ret_vaddr; + + if (is_userptr) + free_phys_pg_pack(hdev, phys_pg_pack); + + return 0; + +map_err: + if (add_va_block(hdev, va_range, ret_vaddr, + ret_vaddr + phys_pg_pack->total_size - 1)) + dev_warn(hdev->dev, + "release va block failed for handle 0x%x, vaddr: 0x%llx\n", + handle, ret_vaddr); + +va_block_err: + kfree(hnode); +hnode_err: +shared_err: + atomic_dec(&phys_pg_pack->mapping_cnt); + if (is_userptr) + free_phys_pg_pack(hdev, phys_pg_pack); +init_page_pack_err: + if (is_userptr) + dma_unmap_host_va(hdev, userptr); + + return rc; +} + +/* + * unmap_device_va - unmap the given device virtual address + * + * @ctx : current context + * @vaddr : device virtual address to unmap + * @ctx_free : true if in context free flow, false otherwise. + * + * This function does the following: + * - Unmap the physical pages related to the given virtual address + * - return the device virtual block to the virtual block list + */ +static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; + struct hl_vm_hash_node *hnode = NULL; + struct hl_userptr *userptr = NULL; + struct hl_va_range *va_range; + enum vm_type_t *vm_type; + bool is_userptr; + int rc = 0; + + /* protect from double entrance */ + mutex_lock(&ctx->mem_hash_lock); + hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr) + if (vaddr == hnode->vaddr) + break; + + if (!hnode) { + mutex_unlock(&ctx->mem_hash_lock); + dev_err(hdev->dev, + "unmap failed, no mem hnode for vaddr 0x%llx\n", + vaddr); + return -EINVAL; + } + + hash_del(&hnode->node); + mutex_unlock(&ctx->mem_hash_lock); + + vm_type = hnode->ptr; + + if (*vm_type == VM_TYPE_USERPTR) { + is_userptr = true; + userptr = hnode->ptr; + rc = init_phys_pg_pack_from_userptr(ctx, userptr, + &phys_pg_pack); + if (rc) { + dev_err(hdev->dev, + "unable to init page pack for vaddr 0x%llx\n", + vaddr); + goto vm_type_err; + } + + if (phys_pg_pack->page_size == + hdev->asic_prop.pmmu.page_size) + va_range = ctx->host_va_range; + else + va_range = ctx->host_huge_va_range; + } else if (*vm_type == VM_TYPE_PHYS_PACK) { + is_userptr = false; + va_range = ctx->dram_va_range; + phys_pg_pack = hnode->ptr; + } else { + dev_warn(hdev->dev, + "unmap failed, unknown vm desc for vaddr 0x%llx\n", + vaddr); + rc = -EFAULT; + goto vm_type_err; + } + + if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) { + dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr); + rc = -EINVAL; + goto mapping_cnt_err; + } + + vaddr &= ~(((u64) phys_pg_pack->page_size) - 1); + + mutex_lock(&ctx->mmu_lock); + + unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack); + + /* + * During context free this function is called in a loop to clean all + * the context mappings. Hence the cache invalidation can be called once + * at the loop end rather than for each iteration + */ + if (!ctx_free) + rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, true, + *vm_type); + + mutex_unlock(&ctx->mmu_lock); + + /* + * If the context is closing we don't need to check for the MMU cache + * invalidation return code and update the VA free list as in this flow + * we invalidate the MMU cache outside of this unmap function and the VA + * free list will be freed anyway. 
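 + * (In the context-free flow the caller instead performs a single cache + * invalidation after the whole unmap loop, as noted above.)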
+ */ + if (!ctx_free) { + int tmp_rc; + + if (rc) + dev_err(hdev->dev, + "unmapping vaddr 0x%llx failed due to MMU cache invalidation\n", + vaddr); + + tmp_rc = add_va_block(hdev, va_range, vaddr, + vaddr + phys_pg_pack->total_size - 1); + if (tmp_rc) { + dev_warn(hdev->dev, + "add va block failed for vaddr: 0x%llx\n", + vaddr); + if (!rc) + rc = tmp_rc; + } + } + + atomic_dec(&phys_pg_pack->mapping_cnt); + kfree(hnode); + + if (is_userptr) { + free_phys_pg_pack(hdev, phys_pg_pack); + dma_unmap_host_va(hdev, userptr); + } + + return rc; + +mapping_cnt_err: + if (is_userptr) + free_phys_pg_pack(hdev, phys_pg_pack); +vm_type_err: + mutex_lock(&ctx->mem_hash_lock); + hash_add(ctx->mem_hash, &hnode->node, vaddr); + mutex_unlock(&ctx->mem_hash_lock); + + return rc; +} + +static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_ctx *ctx = hpriv->ctx; + u64 device_addr = 0; + u32 handle = 0; + int rc; + + switch (args->in.op) { + case HL_MEM_OP_ALLOC: + if (args->in.alloc.mem_size == 0) { + dev_err(hdev->dev, + "alloc size must be larger than 0\n"); + rc = -EINVAL; + goto out; + } + + /* Force contiguous as there are no real MMU + * translations to overcome physical memory gaps + */ + args->in.flags |= HL_MEM_CONTIGUOUS; + rc = alloc_device_memory(ctx, &args->in, &handle); + + memset(args, 0, sizeof(*args)); + args->out.handle = (__u64) handle; + break; + + case HL_MEM_OP_FREE: + rc = free_device_memory(ctx, args->in.free.handle); + break; + + case HL_MEM_OP_MAP: + if (args->in.flags & HL_MEM_USERPTR) { + device_addr = args->in.map_host.host_virt_addr; + rc = 0; + } else { + rc = get_paddr_from_handle(ctx, &args->in, + &device_addr); + } + + memset(args, 0, sizeof(*args)); + args->out.device_virt_addr = device_addr; + break; + + case HL_MEM_OP_UNMAP: + rc = 0; + break; + + default: + dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); + rc = -ENOTTY; + break; + } + +out: + return rc; +} + +int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) +{ + union hl_mem_args *args = data; + struct hl_device *hdev = hpriv->hdev; + struct hl_ctx *ctx = hpriv->ctx; + u64 device_addr = 0; + u32 handle = 0; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_warn_ratelimited(hdev->dev, + "Device is %s. Can't execute MEMORY IOCTL\n", + atomic_read(&hdev->in_reset) ? 
"in_reset" : "disabled"); + return -EBUSY; + } + + if (!hdev->mmu_enable) + return mem_ioctl_no_mmu(hpriv, args); + + switch (args->in.op) { + case HL_MEM_OP_ALLOC: + if (!hdev->dram_supports_virtual_memory) { + dev_err(hdev->dev, "DRAM alloc is not supported\n"); + rc = -EINVAL; + goto out; + } + + if (args->in.alloc.mem_size == 0) { + dev_err(hdev->dev, + "alloc size must be larger than 0\n"); + rc = -EINVAL; + goto out; + } + rc = alloc_device_memory(ctx, &args->in, &handle); + + memset(args, 0, sizeof(*args)); + args->out.handle = (__u64) handle; + break; + + case HL_MEM_OP_FREE: + rc = free_device_memory(ctx, args->in.free.handle); + break; + + case HL_MEM_OP_MAP: + rc = map_device_va(ctx, &args->in, &device_addr); + + memset(args, 0, sizeof(*args)); + args->out.device_virt_addr = device_addr; + break; + + case HL_MEM_OP_UNMAP: + rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr, + false); + break; + + default: + dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); + rc = -ENOTTY; + break; + } + +out: + return rc; +} + +static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size, + u32 npages, u64 start, u32 offset, + struct hl_userptr *userptr) +{ + int rc; + + if (!access_ok((void __user *) (uintptr_t) addr, size)) { + dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr); + return -EFAULT; + } + + userptr->vec = frame_vector_create(npages); + if (!userptr->vec) { + dev_err(hdev->dev, "Failed to create frame vector\n"); + return -ENOMEM; + } + + rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE, + userptr->vec); + + if (rc != npages) { + dev_err(hdev->dev, + "Failed to map host memory, user ptr probably wrong\n"); + if (rc < 0) + goto destroy_framevec; + rc = -EFAULT; + goto put_framevec; + } + + if (frame_vector_to_pages(userptr->vec) < 0) { + dev_err(hdev->dev, + "Failed to translate frame vector to pages\n"); + rc = -EFAULT; + goto put_framevec; + } + + rc = sg_alloc_table_from_pages(userptr->sgt, + frame_vector_pages(userptr->vec), + npages, offset, size, GFP_ATOMIC); + if (rc < 0) { + dev_err(hdev->dev, "failed to create SG table from pages\n"); + goto put_framevec; + } + + return 0; + +put_framevec: + put_vaddr_frames(userptr->vec); +destroy_framevec: + frame_vector_destroy(userptr->vec); + return rc; +} + +/* + * hl_pin_host_memory - pins a chunk of host memory. + * @hdev: pointer to the habanalabs device structure + * @addr: the host virtual address of the memory area + * @size: the size of the memory area + * @userptr: pointer to hl_userptr structure + * + * This function does the following: + * - Pins the physical pages + * - Create an SG list from those pages + */ +int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, + struct hl_userptr *userptr) +{ + u64 start, end; + u32 npages, offset; + int rc; + + if (!size) { + dev_err(hdev->dev, "size to pin is invalid - %llu\n", size); + return -EINVAL; + } + + /* + * If the combination of the address and size requested for this memory + * region causes an integer overflow, return error. + */ + if (((addr + size) < addr) || + PAGE_ALIGN(addr + size) < (addr + size)) { + dev_err(hdev->dev, + "user pointer 0x%llx + %llu causes integer overflow\n", + addr, size); + return -EINVAL; + } + + /* + * This function can be called also from data path, hence use atomic + * always as it is not a big allocation. 
+ */ + userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC); + if (!userptr->sgt) + return -ENOMEM; + + start = addr & PAGE_MASK; + offset = addr & ~PAGE_MASK; + end = PAGE_ALIGN(addr + size); + npages = (end - start) >> PAGE_SHIFT; + + userptr->size = size; + userptr->addr = addr; + userptr->dma_mapped = false; + INIT_LIST_HEAD(&userptr->job_node); + + rc = get_user_memory(hdev, addr, size, npages, start, offset, + userptr); + if (rc) { + dev_err(hdev->dev, + "failed to get user memory for address 0x%llx\n", + addr); + goto free_sgt; + } + + hl_debugfs_add_userptr(hdev, userptr); + + return 0; + +free_sgt: + kfree(userptr->sgt); + return rc; +} + +/* + * hl_unpin_host_memory - unpins a chunk of host memory. + * @hdev: pointer to the habanalabs device structure + * @userptr: pointer to hl_userptr structure + * + * This function does the following: + * - Unpins the physical pages related to the host memory + * - Free the SG list + */ +void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr) +{ + struct page **pages; + + hl_debugfs_remove_userptr(hdev, userptr); + + if (userptr->dma_mapped) + hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl, + userptr->sgt->nents, + userptr->dir); + + pages = frame_vector_pages(userptr->vec); + if (!IS_ERR(pages)) { + int i; + + for (i = 0; i < frame_vector_count(userptr->vec); i++) + set_page_dirty_lock(pages[i]); + } + put_vaddr_frames(userptr->vec); + frame_vector_destroy(userptr->vec); + + list_del(&userptr->job_node); + + sg_free_table(userptr->sgt); + kfree(userptr->sgt); +} + +/* + * hl_userptr_delete_list - clear userptr list + * + * @hdev : pointer to the habanalabs device structure + * @userptr_list : pointer to the list to clear + * + * This function does the following: + * - Iterates over the list and unpins the host memory and frees the userptr + * structure. + */ +void hl_userptr_delete_list(struct hl_device *hdev, + struct list_head *userptr_list) +{ + struct hl_userptr *userptr, *tmp; + + list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) { + hl_unpin_host_memory(hdev, userptr); + kfree(userptr); + } + + INIT_LIST_HEAD(userptr_list); +} + +/* + * hl_userptr_is_pinned - returns whether the given userptr is pinned + * + * @hdev : pointer to the habanalabs device structure + * @userptr_list : pointer to the list to clear + * @userptr : pointer to userptr to check + * + * This function does the following: + * - Iterates over the list and checks if the given userptr is in it, means is + * pinned. If so, returns true, otherwise returns false. + */ +bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, + u32 size, struct list_head *userptr_list, + struct hl_userptr **userptr) +{ + list_for_each_entry((*userptr), userptr_list, job_node) { + if ((addr == (*userptr)->addr) && (size == (*userptr)->size)) + return true; + } + + return false; +} + +/* + * va_range_init - initialize virtual addresses range + * @hdev: pointer to the habanalabs device structure + * @va_range: pointer to the range to initialize + * @start: range start address + * @end: range end address + * + * This function does the following: + * - Initializes the virtual addresses list of the given range with the given + * addresses. 
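 + * + * The range is first narrowed to page boundaries: start is rounded up + * and end is rounded down. For example, assuming 4KB pages, a raw range + * of [0x10000800, 0x20000800) becomes [0x10001000, 0x20000000).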
+ */ +static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range, + u64 start, u64 end) +{ + int rc; + + INIT_LIST_HEAD(&va_range->list); + + /* PAGE_SIZE alignment */ + + if (start & (PAGE_SIZE - 1)) { + start &= PAGE_MASK; + start += PAGE_SIZE; + } + + if (end & (PAGE_SIZE - 1)) + end &= PAGE_MASK; + + if (start >= end) { + dev_err(hdev->dev, "too small vm range for va list\n"); + return -EFAULT; + } + + rc = add_va_block(hdev, va_range, start, end); + + if (rc) { + dev_err(hdev->dev, "Failed to init host va list\n"); + return rc; + } + + va_range->start_addr = start; + va_range->end_addr = end; + + return 0; +} + +/* + * va_range_fini() - clear a virtual addresses range + * @hdev: pointer to the habanalabs structure + * va_range: pointer to virtual addresses range + * + * This function does the following: + * - Frees the virtual addresses block list and its lock + */ +static void va_range_fini(struct hl_device *hdev, + struct hl_va_range *va_range) +{ + mutex_lock(&va_range->lock); + clear_va_list_locked(hdev, &va_range->list); + mutex_unlock(&va_range->lock); + + mutex_destroy(&va_range->lock); + kfree(va_range); +} + +/* + * vm_ctx_init_with_ranges() - initialize virtual memory for context + * @ctx: pointer to the habanalabs context structure + * @host_range_start: host virtual addresses range start. + * @host_range_end: host virtual addresses range end. + * @host_huge_range_start: host virtual addresses range start for memory + * allocated with huge pages. + * @host_huge_range_end: host virtual addresses range end for memory allocated + * with huge pages. + * @dram_range_start: dram virtual addresses range start. + * @dram_range_end: dram virtual addresses range end. + * + * This function initializes the following: + * - MMU for context + * - Virtual address to area descriptor hashtable + * - Virtual block list of available virtual memory + */ +static int vm_ctx_init_with_ranges(struct hl_ctx *ctx, + u64 host_range_start, + u64 host_range_end, + u64 host_huge_range_start, + u64 host_huge_range_end, + u64 dram_range_start, + u64 dram_range_end) +{ + struct hl_device *hdev = ctx->hdev; + int rc; + + ctx->host_va_range = kzalloc(sizeof(*ctx->host_va_range), GFP_KERNEL); + if (!ctx->host_va_range) + return -ENOMEM; + + ctx->host_huge_va_range = kzalloc(sizeof(*ctx->host_huge_va_range), + GFP_KERNEL); + if (!ctx->host_huge_va_range) { + rc = -ENOMEM; + goto host_huge_va_range_err; + } + + ctx->dram_va_range = kzalloc(sizeof(*ctx->dram_va_range), GFP_KERNEL); + if (!ctx->dram_va_range) { + rc = -ENOMEM; + goto dram_va_range_err; + } + + rc = hl_mmu_ctx_init(ctx); + if (rc) { + dev_err(hdev->dev, "failed to init context %d\n", ctx->asid); + goto mmu_ctx_err; + } + + mutex_init(&ctx->mem_hash_lock); + hash_init(ctx->mem_hash); + + mutex_init(&ctx->host_va_range->lock); + + rc = va_range_init(hdev, ctx->host_va_range, host_range_start, + host_range_end); + if (rc) { + dev_err(hdev->dev, "failed to init host vm range\n"); + goto host_page_range_err; + } + + if (hdev->pmmu_huge_range) { + mutex_init(&ctx->host_huge_va_range->lock); + + rc = va_range_init(hdev, ctx->host_huge_va_range, + host_huge_range_start, + host_huge_range_end); + if (rc) { + dev_err(hdev->dev, + "failed to init host huge vm range\n"); + goto host_hpage_range_err; + } + } else { + kfree(ctx->host_huge_va_range); + ctx->host_huge_va_range = ctx->host_va_range; + } + + mutex_init(&ctx->dram_va_range->lock); + + rc = va_range_init(hdev, ctx->dram_va_range, dram_range_start, + dram_range_end); + if (rc) { + 
dev_err(hdev->dev, "failed to init dram vm range\n"); + goto dram_vm_err; + } + + hl_debugfs_add_ctx_mem_hash(hdev, ctx); + + return 0; + +dram_vm_err: + mutex_destroy(&ctx->dram_va_range->lock); + + if (hdev->pmmu_huge_range) { + mutex_lock(&ctx->host_huge_va_range->lock); + clear_va_list_locked(hdev, &ctx->host_huge_va_range->list); + mutex_unlock(&ctx->host_huge_va_range->lock); + } +host_hpage_range_err: + if (hdev->pmmu_huge_range) + mutex_destroy(&ctx->host_huge_va_range->lock); + mutex_lock(&ctx->host_va_range->lock); + clear_va_list_locked(hdev, &ctx->host_va_range->list); + mutex_unlock(&ctx->host_va_range->lock); +host_page_range_err: + mutex_destroy(&ctx->host_va_range->lock); + mutex_destroy(&ctx->mem_hash_lock); + hl_mmu_ctx_fini(ctx); +mmu_ctx_err: + kfree(ctx->dram_va_range); +dram_va_range_err: + kfree(ctx->host_huge_va_range); +host_huge_va_range_err: + kfree(ctx->host_va_range); + + return rc; +} + +int hl_vm_ctx_init(struct hl_ctx *ctx) +{ + struct asic_fixed_properties *prop = &ctx->hdev->asic_prop; + u64 host_range_start, host_range_end, host_huge_range_start, + host_huge_range_end, dram_range_start, dram_range_end; + + atomic64_set(&ctx->dram_phys_mem, 0); + + /* + * - If MMU is enabled, init the ranges as usual. + * - If MMU is disabled, in case of host mapping, the returned address + * is the given one. + * In case of DRAM mapping, the returned address is the physical + * address of the memory related to the given handle. + */ + if (ctx->hdev->mmu_enable) { + dram_range_start = prop->dmmu.start_addr; + dram_range_end = prop->dmmu.end_addr; + host_range_start = prop->pmmu.start_addr; + host_range_end = prop->pmmu.end_addr; + host_huge_range_start = prop->pmmu_huge.start_addr; + host_huge_range_end = prop->pmmu_huge.end_addr; + } else { + dram_range_start = prop->dram_user_base_address; + dram_range_end = prop->dram_end_address; + host_range_start = prop->dram_user_base_address; + host_range_end = prop->dram_end_address; + host_huge_range_start = prop->dram_user_base_address; + host_huge_range_end = prop->dram_end_address; + } + + return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end, + host_huge_range_start, + host_huge_range_end, + dram_range_start, + dram_range_end); +} + +/* + * hl_vm_ctx_fini - virtual memory teardown of context + * + * @ctx : pointer to the habanalabs context structure + * + * This function perform teardown the following: + * - Virtual block list of available virtual memory + * - Virtual address to area descriptor hashtable + * - MMU for context + * + * In addition this function does the following: + * - Unmaps the existing hashtable nodes if the hashtable is not empty. The + * hashtable should be empty as no valid mappings should exist at this + * point. + * - Frees any existing physical page list from the idr which relates to the + * current context asid. + * - This function checks the virtual block list for correctness. At this point + * the list should contain one element which describes the whole virtual + * memory range of the context. Otherwise, a warning is printed. 
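+ * - When the ASIC has no separate huge-page host range (pmmu_huge_range is
+ *   not set), host_huge_va_range aliases host_va_range and is therefore
+ *   freed only once.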
+ */ +void hl_vm_ctx_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_list; + struct hl_vm_hash_node *hnode; + struct hlist_node *tmp_node; + int i; + + hl_debugfs_remove_ctx_mem_hash(hdev, ctx); + + /* + * Clearly something went wrong on hard reset so no point in printing + * another side effect error + */ + if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash)) + dev_notice(hdev->dev, + "user released device without removing its memory mappings\n"); + + hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) { + dev_dbg(hdev->dev, + "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n", + hnode->vaddr, ctx->asid); + unmap_device_va(ctx, hnode->vaddr, true); + } + + /* invalidate the cache once after the unmapping loop */ + hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); + hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK); + + spin_lock(&vm->idr_lock); + idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i) + if (phys_pg_list->asid == ctx->asid) { + dev_dbg(hdev->dev, + "page list 0x%px of asid %d is still alive\n", + phys_pg_list, ctx->asid); + atomic64_sub(phys_pg_list->total_size, + &hdev->dram_used_mem); + free_phys_pg_pack(hdev, phys_pg_list); + idr_remove(&vm->phys_pg_pack_handles, i); + } + spin_unlock(&vm->idr_lock); + + va_range_fini(hdev, ctx->dram_va_range); + if (hdev->pmmu_huge_range) + va_range_fini(hdev, ctx->host_huge_va_range); + va_range_fini(hdev, ctx->host_va_range); + + mutex_destroy(&ctx->mem_hash_lock); + hl_mmu_ctx_fini(ctx); +} + +/* + * hl_vm_init - initialize virtual memory module + * + * @hdev : pointer to the habanalabs device structure + * + * This function initializes the following: + * - MMU module + * - DRAM physical pages pool of 2MB + * - Idr for device memory allocation handles + */ +int hl_vm_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_vm *vm = &hdev->vm; + int rc; + + vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1); + if (!vm->dram_pg_pool) { + dev_err(hdev->dev, "Failed to create dram page pool\n"); + return -ENOMEM; + } + + kref_init(&vm->dram_pg_pool_refcount); + + rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address, + prop->dram_end_address - prop->dram_user_base_address, + -1); + + if (rc) { + dev_err(hdev->dev, + "Failed to add memory to dram page pool %d\n", rc); + goto pool_add_err; + } + + spin_lock_init(&vm->idr_lock); + idr_init(&vm->phys_pg_pack_handles); + + atomic64_set(&hdev->dram_used_mem, 0); + + vm->init_done = true; + + return 0; + +pool_add_err: + gen_pool_destroy(vm->dram_pg_pool); + + return rc; +} + +/* + * hl_vm_fini - virtual memory module teardown + * + * @hdev : pointer to the habanalabs device structure + * + * This function perform teardown to the following: + * - Idr for device memory allocation handles + * - DRAM physical pages pool of 2MB + * - MMU module + */ +void hl_vm_fini(struct hl_device *hdev) +{ + struct hl_vm *vm = &hdev->vm; + + if (!vm->init_done) + return; + + /* + * At this point all the contexts should be freed and hence no DRAM + * memory should be in use. Hence the DRAM pool should be freed here. 
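+ * The release itself goes through the kref below; if another user still
+ * holds a reference at this point, the put is not the final one and a
+ * warning is printed.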
+ */ + if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1) + dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n", + __func__); + + vm->init_done = false; +} diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c new file mode 100644 index 000000000..b5058798a --- /dev/null +++ b/drivers/misc/habanalabs/common/mmu.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2020 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include <linux/slab.h> + +#include "habanalabs.h" + +static bool is_dram_va(struct hl_device *hdev, u64 virt_addr) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + + return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, + prop->dmmu.start_addr, + prop->dmmu.end_addr); +} + +/** + * hl_mmu_init() - initialize the MMU module. + * @hdev: habanalabs device structure. + * + * This function does the following: + * - Create a pool of pages for pgt_infos. + * - Create a shadow table for pgt + * + * Return: 0 for success, non-zero for failure. + */ +int hl_mmu_init(struct hl_device *hdev) +{ + if (hdev->mmu_enable) + return hdev->mmu_func.init(hdev); + + return 0; +} + +/** + * hl_mmu_fini() - release the MMU module. + * @hdev: habanalabs device structure. + * + * This function does the following: + * - Disable MMU in H/W. + * - Free the pgt_infos pool. + * + * All contexts should be freed before calling this function. + */ +void hl_mmu_fini(struct hl_device *hdev) +{ + if (hdev->mmu_enable) + hdev->mmu_func.fini(hdev); +} + +/** + * hl_mmu_ctx_init() - initialize a context for using the MMU module. + * @ctx: pointer to the context structure to initialize. + * + * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all + * page tables hops related to this context. + * Return: 0 on success, non-zero otherwise. + */ +int hl_mmu_ctx_init(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + + if (hdev->mmu_enable) + return hdev->mmu_func.ctx_init(ctx); + + return 0; +} + +/* + * hl_mmu_ctx_fini - disable a ctx from using the mmu module + * + * @ctx: pointer to the context structure + * + * This function does the following: + * - Free any pgts which were not freed yet + * - Free the mutex + * - Free DRAM default page mapping hops + */ +void hl_mmu_ctx_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + + if (hdev->mmu_enable) + hdev->mmu_func.ctx_fini(ctx); +} + +/* + * hl_mmu_unmap - unmaps a virtual addr + * + * @ctx: pointer to the context structure + * @virt_addr: virt addr to map from + * @page_size: size of the page to unmap + * @flush_pte: whether to do a PCI flush + * + * This function does the following: + * - Check that the virt addr is mapped + * - Unmap the virt addr and frees pgts if possible + * - Returns 0 on success, -EINVAL if the given addr is not mapped + * + * Because this function changes the page tables in the device and because it + * changes the MMU hash, it must be protected by a lock. + * However, because it maps only a single page, the lock should be implemented + * in a higher level in order to protect the entire mapping of the memory area + * + * For optimization reasons PCI flush may be requested once after unmapping of + * large area. 
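+ *
+ * A typical caller therefore walks a large area page by page and asks for
+ * the flush only on the last iteration, e.g. (illustrative sketch only;
+ * the variable names are hypothetical and error handling is omitted):
+ *
+ *	for (off = 0 ; off < size ; off += page_size)
+ *		rc = hl_mmu_unmap(ctx, virt_addr + off, page_size,
+ *					off + page_size >= size);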
+ */ +int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, + bool flush_pte) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_mmu_properties *mmu_prop; + u64 real_virt_addr; + u32 real_page_size, npages; + int i, rc = 0; + bool is_dram_addr; + + if (!hdev->mmu_enable) + return 0; + + is_dram_addr = is_dram_va(hdev, virt_addr); + + if (is_dram_addr) + mmu_prop = &prop->dmmu; + else if ((page_size % prop->pmmu_huge.page_size) == 0) + mmu_prop = &prop->pmmu_huge; + else + mmu_prop = &prop->pmmu; + + /* + * The H/W handles mapping of specific page sizes. Hence if the page + * size is bigger, we break it to sub-pages and unmap them separately. + */ + if ((page_size % mmu_prop->page_size) == 0) { + real_page_size = mmu_prop->page_size; + } else { + dev_err(hdev->dev, + "page size of %u is not %uKB aligned, can't unmap\n", + page_size, mmu_prop->page_size >> 10); + + return -EFAULT; + } + + npages = page_size / real_page_size; + real_virt_addr = virt_addr; + + for (i = 0 ; i < npages ; i++) { + rc = hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr); + if (rc) + break; + + real_virt_addr += real_page_size; + } + + if (flush_pte) + hdev->mmu_func.flush(ctx); + + return rc; +} + +/* + * hl_mmu_map - maps a virtual addr to physical addr + * + * @ctx: pointer to the context structure + * @virt_addr: virt addr to map from + * @phys_addr: phys addr to map to + * @page_size: physical page size + * @flush_pte: whether to do a PCI flush + * + * This function does the following: + * - Check that the virt addr is not mapped + * - Allocate pgts as necessary in order to map the virt addr to the phys + * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM. + * + * Because this function changes the page tables in the device and because it + * changes the MMU hash, it must be protected by a lock. + * However, because it maps only a single page, the lock should be implemented + * in a higher level in order to protect the entire mapping of the memory area + * + * For optimization reasons PCI flush may be requested once after mapping of + * large area. + */ +int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, + bool flush_pte) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_mmu_properties *mmu_prop; + u64 real_virt_addr, real_phys_addr; + u32 real_page_size, npages; + int i, rc, mapped_cnt = 0; + bool is_dram_addr; + + if (!hdev->mmu_enable) + return 0; + + is_dram_addr = is_dram_va(hdev, virt_addr); + + if (is_dram_addr) + mmu_prop = &prop->dmmu; + else if ((page_size % prop->pmmu_huge.page_size) == 0) + mmu_prop = &prop->pmmu_huge; + else + mmu_prop = &prop->pmmu; + + /* + * The H/W handles mapping of specific page sizes. Hence if the page + * size is bigger, we break it to sub-pages and map them separately. + */ + if ((page_size % mmu_prop->page_size) == 0) { + real_page_size = mmu_prop->page_size; + } else { + dev_err(hdev->dev, + "page size of %u is not %uKB aligned, can't unmap\n", + page_size, mmu_prop->page_size >> 10); + + return -EFAULT; + } + + WARN_ONCE((phys_addr & (real_page_size - 1)), + "Mapping 0x%llx with page size of 0x%x is erroneous! 
Address must be divisible by page size", + phys_addr, real_page_size); + + npages = page_size / real_page_size; + real_virt_addr = virt_addr; + real_phys_addr = phys_addr; + + for (i = 0 ; i < npages ; i++) { + rc = hdev->mmu_func.map(ctx, real_virt_addr, real_phys_addr, + real_page_size, is_dram_addr); + if (rc) + goto err; + + real_virt_addr += real_page_size; + real_phys_addr += real_page_size; + mapped_cnt++; + } + + if (flush_pte) + hdev->mmu_func.flush(ctx); + + return 0; + +err: + real_virt_addr = virt_addr; + for (i = 0 ; i < mapped_cnt ; i++) { + if (hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr)) + dev_warn_ratelimited(hdev->dev, + "failed to unmap va: 0x%llx\n", real_virt_addr); + + real_virt_addr += real_page_size; + } + + hdev->mmu_func.flush(ctx); + + return rc; +} + +/* + * hl_mmu_swap_out - marks all mapping of the given ctx as swapped out + * + * @ctx: pointer to the context structure + * + */ +void hl_mmu_swap_out(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + + if (hdev->mmu_enable) + hdev->mmu_func.swap_out(ctx); +} + +/* + * hl_mmu_swap_in - marks all mapping of the given ctx as swapped in + * + * @ctx: pointer to the context structure + * + */ +void hl_mmu_swap_in(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + + if (hdev->mmu_enable) + hdev->mmu_func.swap_in(ctx); +} + +int hl_mmu_if_set_funcs(struct hl_device *hdev) +{ + if (!hdev->mmu_enable) + return 0; + + switch (hdev->asic_type) { + case ASIC_GOYA: + case ASIC_GAUDI: + hl_mmu_v1_set_funcs(hdev); + break; + default: + dev_err(hdev->dev, "Unrecognized ASIC type %d\n", + hdev->asic_type); + return -EOPNOTSUPP; + } + + return 0; +} diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu_v1.c new file mode 100644 index 000000000..8d1eb5265 --- /dev/null +++ b/drivers/misc/habanalabs/common/mmu_v1.c @@ -0,0 +1,863 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */ + +#include "habanalabs.h" +#include "../include/hw_ip/mmu/mmu_general.h" + +#include <linux/genalloc.h> +#include <linux/slab.h> + +static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr); + +static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr) +{ + struct pgt_info *pgt_info = NULL; + + hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node, + (unsigned long) hop_addr) + if (hop_addr == pgt_info->shadow_addr) + break; + + return pgt_info; +} + +static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info) +{ + struct hl_device *hdev = ctx->hdev; + + gen_pool_free(hdev->mmu_priv.mmu_pgt_pool, pgt_info->phys_addr, + hdev->asic_prop.mmu_hop_table_size); + hash_del(&pgt_info->node); + kfree((u64 *) (uintptr_t) pgt_info->shadow_addr); + kfree(pgt_info); +} + +static void free_hop(struct hl_ctx *ctx, u64 hop_addr) +{ + struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr); + + _free_hop(ctx, pgt_info); +} + +static u64 alloc_hop(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct pgt_info *pgt_info; + u64 phys_addr, shadow_addr; + + pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL); + if (!pgt_info) + return ULLONG_MAX; + + phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.mmu_pgt_pool, + prop->mmu_hop_table_size); + if (!phys_addr) { + dev_err(hdev->dev, "failed to allocate page\n"); + goto pool_add_err; + } + + shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size, + GFP_KERNEL); + if (!shadow_addr) + goto shadow_err; + + pgt_info->phys_addr = phys_addr; + pgt_info->shadow_addr = shadow_addr; + pgt_info->ctx = ctx; + pgt_info->num_of_ptes = 0; + hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr); + + return shadow_addr; + +shadow_err: + gen_pool_free(hdev->mmu_priv.mmu_pgt_pool, phys_addr, + prop->mmu_hop_table_size); +pool_add_err: + kfree(pgt_info); + + return ULLONG_MAX; +} + +static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx) +{ + return ctx->hdev->asic_prop.mmu_pgt_addr + + (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size); +} + +static inline u64 get_hop0_addr(struct hl_ctx *ctx) +{ + return (u64) (uintptr_t) ctx->hdev->mmu_priv.mmu_shadow_hop0 + + (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size); +} + +static void flush(struct hl_ctx *ctx) +{ + /* flush all writes from all cores to reach PCI */ + mb(); + ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx)); +} + +/* transform the value to physical address when writing to H/W */ +static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val) +{ + /* + * The value to write is actually the address of the next shadow hop + + * flags at the 12 LSBs. + * Hence in order to get the value to write to the physical PTE, we + * clear the 12 LSBs and translate the shadow hop to its associated + * physical hop, and add back the original 12 LSBs. 
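+ *
+ * For example (illustrative values): a shadow PTE value of
+ * 0xffff888012345003 splits into the shadow hop address 0xffff888012345000
+ * and the flags 0x003; the hop address is translated to its physical
+ * counterpart and the flags are OR-ed back in before the H/W write.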
+ */ + u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) | + (val & FLAGS_MASK); + + ctx->hdev->asic_funcs->write_pte(ctx->hdev, + get_phys_addr(ctx, shadow_pte_addr), + phys_val); + + *(u64 *) (uintptr_t) shadow_pte_addr = val; +} + +/* do not transform the value to physical address when writing to H/W */ +static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, + u64 val) +{ + ctx->hdev->asic_funcs->write_pte(ctx->hdev, + get_phys_addr(ctx, shadow_pte_addr), + val); + *(u64 *) (uintptr_t) shadow_pte_addr = val; +} + +/* clear the last and present bits */ +static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr) +{ + /* no need to transform the value to physical address */ + write_final_pte(ctx, pte_addr, 0); +} + +static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr) +{ + get_pgt_info(ctx, hop_addr)->num_of_ptes++; +} + +/* + * put_pte - decrement the num of ptes and free the hop if possible + * + * @ctx: pointer to the context structure + * @hop_addr: addr of the hop + * + * This function returns the number of ptes left on this hop. If the number is + * 0, it means the pte was freed. + */ +static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr) +{ + struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr); + int num_of_ptes_left; + + pgt_info->num_of_ptes--; + + /* + * Need to save the number of ptes left because free_hop might free + * the pgt_info + */ + num_of_ptes_left = pgt_info->num_of_ptes; + if (!num_of_ptes_left) + _free_hop(ctx, pgt_info); + + return num_of_ptes_left; +} + +static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr, + u64 virt_addr, u64 mask, u64 shift) +{ + return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * + ((virt_addr & mask) >> shift); +} + +static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_prop, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask, + mmu_prop->hop0_shift); +} + +static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_prop, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask, + mmu_prop->hop1_shift); +} + +static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_prop, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask, + mmu_prop->hop2_shift); +} + +static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_prop, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask, + mmu_prop->hop3_shift); +} + +static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_prop, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask, + mmu_prop->hop4_shift); +} + +static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte) +{ + if (curr_pte & PAGE_PRESENT_MASK) + return curr_pte & HOP_PHYS_ADDR_MASK; + else + return ULLONG_MAX; +} + +static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, + bool *is_new_hop) +{ + u64 hop_addr = get_next_hop_addr(ctx, curr_pte); + + if (hop_addr == ULLONG_MAX) { + hop_addr = alloc_hop(ctx); + *is_new_hop = (hop_addr != ULLONG_MAX); + } + + return hop_addr; +} + +/* translates shadow address inside hop to a physical address */ +static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr) +{ + u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size 
- 1); + u64 shadow_hop_addr = shadow_addr & ~page_mask; + u64 pte_offset = shadow_addr & page_mask; + u64 phys_hop_addr; + + if (shadow_hop_addr != get_hop0_addr(ctx)) + phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr; + else + phys_hop_addr = get_phys_hop0_addr(ctx); + + return phys_hop_addr + pte_offset; +} + +static int dram_default_mapping_init(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr, + hop2_pte_addr, hop3_pte_addr, pte_val; + int rc, i, j, hop3_allocated = 0; + + if ((!hdev->dram_supports_virtual_memory) || + (!hdev->dram_default_page_mapping) || + (ctx->asid == HL_KERNEL_ASID_ID)) + return 0; + + num_of_hop3 = prop->dram_size_for_default_page_mapping; + do_div(num_of_hop3, prop->dram_page_size); + do_div(num_of_hop3, PTE_ENTRIES_IN_HOP); + + /* add hop1 and hop2 */ + total_hops = num_of_hop3 + 2; + + ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL); + if (!ctx->dram_default_hops) + return -ENOMEM; + + hop0_addr = get_hop0_addr(ctx); + + hop1_addr = alloc_hop(ctx); + if (hop1_addr == ULLONG_MAX) { + dev_err(hdev->dev, "failed to alloc hop 1\n"); + rc = -ENOMEM; + goto hop1_err; + } + + ctx->dram_default_hops[total_hops - 1] = hop1_addr; + + hop2_addr = alloc_hop(ctx); + if (hop2_addr == ULLONG_MAX) { + dev_err(hdev->dev, "failed to alloc hop 2\n"); + rc = -ENOMEM; + goto hop2_err; + } + + ctx->dram_default_hops[total_hops - 2] = hop2_addr; + + for (i = 0 ; i < num_of_hop3 ; i++) { + ctx->dram_default_hops[i] = alloc_hop(ctx); + if (ctx->dram_default_hops[i] == ULLONG_MAX) { + dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i); + rc = -ENOMEM; + goto hop3_err; + } + hop3_allocated++; + } + + /* need only pte 0 in hops 0 and 1 */ + pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; + write_pte(ctx, hop0_addr, pte_val); + + pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; + write_pte(ctx, hop1_addr, pte_val); + get_pte(ctx, hop1_addr); + + hop2_pte_addr = hop2_addr; + for (i = 0 ; i < num_of_hop3 ; i++) { + pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) | + PAGE_PRESENT_MASK; + write_pte(ctx, hop2_pte_addr, pte_val); + get_pte(ctx, hop2_addr); + hop2_pte_addr += HL_PTE_SIZE; + } + + pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) | + LAST_MASK | PAGE_PRESENT_MASK; + + for (i = 0 ; i < num_of_hop3 ; i++) { + hop3_pte_addr = ctx->dram_default_hops[i]; + for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) { + write_final_pte(ctx, hop3_pte_addr, pte_val); + get_pte(ctx, ctx->dram_default_hops[i]); + hop3_pte_addr += HL_PTE_SIZE; + } + } + + flush(ctx); + + return 0; + +hop3_err: + for (i = 0 ; i < hop3_allocated ; i++) + free_hop(ctx, ctx->dram_default_hops[i]); + + free_hop(ctx, hop2_addr); +hop2_err: + free_hop(ctx, hop1_addr); +hop1_err: + kfree(ctx->dram_default_hops); + + return rc; +} + +static void dram_default_mapping_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr, + hop2_pte_addr, hop3_pte_addr; + int i, j; + + if ((!hdev->dram_supports_virtual_memory) || + (!hdev->dram_default_page_mapping) || + (ctx->asid == HL_KERNEL_ASID_ID)) + return; + + num_of_hop3 = prop->dram_size_for_default_page_mapping; + do_div(num_of_hop3, prop->dram_page_size); + do_div(num_of_hop3, PTE_ENTRIES_IN_HOP); + + hop0_addr = get_hop0_addr(ctx); 
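+
+	/*
+	 * Teardown mirrors dram_default_mapping_init(): the hop3 shadow
+	 * addresses occupy the start of the dram_default_hops array and
+	 * hop1/hop2 were saved at its end.
+	 */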
+	/* add hop1 and hop2 */
+	total_hops = num_of_hop3 + 2;
+	hop1_addr = ctx->dram_default_hops[total_hops - 1];
+	hop2_addr = ctx->dram_default_hops[total_hops - 2];
+
+	for (i = 0 ; i < num_of_hop3 ; i++) {
+		hop3_pte_addr = ctx->dram_default_hops[i];
+		for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
+			clear_pte(ctx, hop3_pte_addr);
+			put_pte(ctx, ctx->dram_default_hops[i]);
+			hop3_pte_addr += HL_PTE_SIZE;
+		}
+	}
+
+	hop2_pte_addr = hop2_addr;
+	for (i = 0 ; i < num_of_hop3 ; i++) {
+		clear_pte(ctx, hop2_pte_addr);
+		put_pte(ctx, hop2_addr);
+		hop2_pte_addr += HL_PTE_SIZE;
+	}
+
+	clear_pte(ctx, hop1_addr);
+	put_pte(ctx, hop1_addr);
+	clear_pte(ctx, hop0_addr);
+
+	kfree(ctx->dram_default_hops);
+
+	flush(ctx);
+}
+
+/**
+ * hl_mmu_v1_init() - initialize the MMU module.
+ * @hdev: habanalabs device structure.
+ *
+ * This function does the following:
+ * - Create a pool of pages for pgt_infos.
+ * - Create a shadow table for pgt
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int hl_mmu_v1_init(struct hl_device *hdev)
+{
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	int rc;
+
+	hdev->mmu_priv.mmu_pgt_pool =
+			gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
+
+	if (!hdev->mmu_priv.mmu_pgt_pool) {
+		dev_err(hdev->dev, "Failed to create page gen pool\n");
+		return -ENOMEM;
+	}
+
+	rc = gen_pool_add(hdev->mmu_priv.mmu_pgt_pool, prop->mmu_pgt_addr +
+			prop->mmu_hop0_tables_total_size,
+			prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
+			-1);
+	if (rc) {
+		dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
+		goto err_pool_add;
+	}
+
+	hdev->mmu_priv.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
+						prop->mmu_hop_table_size,
+						GFP_KERNEL | __GFP_ZERO);
+	if (ZERO_OR_NULL_PTR(hdev->mmu_priv.mmu_shadow_hop0)) {
+		rc = -ENOMEM;
+		goto err_pool_add;
+	}
+
+	/* MMU H/W init will be done in device hw_init() */
+
+	return 0;
+
+err_pool_add:
+	gen_pool_destroy(hdev->mmu_priv.mmu_pgt_pool);
+
+	return rc;
+}
+
+/**
+ * hl_mmu_v1_fini() - release the MMU module.
+ * @hdev: habanalabs device structure.
+ *
+ * This function does the following:
+ * - Disable MMU in H/W.
+ * - Free the pgt_infos pool.
+ *
+ * All contexts should be freed before calling this function.
+ */
+static void hl_mmu_v1_fini(struct hl_device *hdev)
+{
+	/* MMU H/W fini was already done in device hw_fini() */
+
+	kvfree(hdev->mmu_priv.mmu_shadow_hop0);
+	gen_pool_destroy(hdev->mmu_priv.mmu_pgt_pool);
+}
+
+/**
+ * hl_mmu_v1_ctx_init() - initialize a context for using the MMU module.
+ * @ctx: pointer to the context structure to initialize.
+ *
+ * Initialize a mutex to protect the concurrent mapping flow and a hash to
+ * hold all page table hops related to this context.
+ * Return: 0 on success, non-zero otherwise.
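+ *
+ * For contexts that use it, the DRAM default page mapping is also created
+ * here.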
+ */ +static int hl_mmu_v1_ctx_init(struct hl_ctx *ctx) +{ + mutex_init(&ctx->mmu_lock); + hash_init(ctx->mmu_shadow_hash); + + return dram_default_mapping_init(ctx); +} + +/* + * hl_mmu_ctx_fini - disable a ctx from using the mmu module + * + * @ctx: pointer to the context structure + * + * This function does the following: + * - Free any pgts which were not freed yet + * - Free the mutex + * - Free DRAM default page mapping hops + */ +static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct pgt_info *pgt_info; + struct hlist_node *tmp; + int i; + + dram_default_mapping_fini(ctx); + + if (!hash_empty(ctx->mmu_shadow_hash)) + dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n", + ctx->asid); + + hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) { + dev_err_ratelimited(hdev->dev, + "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n", + pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes); + _free_hop(ctx, pgt_info); + } + + mutex_destroy(&ctx->mmu_lock); +} + +static int _hl_mmu_v1_unmap(struct hl_ctx *ctx, + u64 virt_addr, bool is_dram_addr) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_mmu_properties *mmu_prop; + u64 hop0_addr = 0, hop0_pte_addr = 0, + hop1_addr = 0, hop1_pte_addr = 0, + hop2_addr = 0, hop2_pte_addr = 0, + hop3_addr = 0, hop3_pte_addr = 0, + hop4_addr = 0, hop4_pte_addr = 0, + curr_pte; + bool is_huge, clear_hop3 = true; + + /* shifts and masks are the same in PMMU and HPMMU, use one of them */ + mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu; + + hop0_addr = get_hop0_addr(ctx); + hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr); + + curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr; + + hop1_addr = get_next_hop_addr(ctx, curr_pte); + + if (hop1_addr == ULLONG_MAX) + goto not_mapped; + + hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr); + + curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr; + + hop2_addr = get_next_hop_addr(ctx, curr_pte); + + if (hop2_addr == ULLONG_MAX) + goto not_mapped; + + hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr); + + curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr; + + hop3_addr = get_next_hop_addr(ctx, curr_pte); + + if (hop3_addr == ULLONG_MAX) + goto not_mapped; + + hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr); + + curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr; + + is_huge = curr_pte & LAST_MASK; + + if (is_dram_addr && !is_huge) { + dev_err(hdev->dev, + "DRAM unmapping should use huge pages only\n"); + return -EFAULT; + } + + if (!is_huge) { + hop4_addr = get_next_hop_addr(ctx, curr_pte); + + if (hop4_addr == ULLONG_MAX) + goto not_mapped; + + hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr, + virt_addr); + + curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr; + + clear_hop3 = false; + } + + if (hdev->dram_default_page_mapping && is_dram_addr) { + u64 default_pte = (prop->mmu_dram_default_page_addr & + HOP_PHYS_ADDR_MASK) | LAST_MASK | + PAGE_PRESENT_MASK; + if (curr_pte == default_pte) { + dev_err(hdev->dev, + "DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n", + virt_addr); + goto not_mapped; + } + + if (!(curr_pte & PAGE_PRESENT_MASK)) { + dev_err(hdev->dev, + "DRAM: hop3 PTE is cleared! 
can't unmap, va: 0x%llx\n", + virt_addr); + goto not_mapped; + } + + write_final_pte(ctx, hop3_pte_addr, default_pte); + put_pte(ctx, hop3_addr); + } else { + if (!(curr_pte & PAGE_PRESENT_MASK)) + goto not_mapped; + + if (hop4_addr) + clear_pte(ctx, hop4_pte_addr); + else + clear_pte(ctx, hop3_pte_addr); + + if (hop4_addr && !put_pte(ctx, hop4_addr)) + clear_hop3 = true; + + if (!clear_hop3) + goto mapped; + + clear_pte(ctx, hop3_pte_addr); + + if (put_pte(ctx, hop3_addr)) + goto mapped; + + clear_pte(ctx, hop2_pte_addr); + + if (put_pte(ctx, hop2_addr)) + goto mapped; + + clear_pte(ctx, hop1_pte_addr); + + if (put_pte(ctx, hop1_addr)) + goto mapped; + + clear_pte(ctx, hop0_pte_addr); + } + +mapped: + return 0; + +not_mapped: + dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", + virt_addr); + + return -EINVAL; +} + +static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, + u32 page_size, bool is_dram_addr) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_mmu_properties *mmu_prop; + u64 hop0_addr = 0, hop0_pte_addr = 0, + hop1_addr = 0, hop1_pte_addr = 0, + hop2_addr = 0, hop2_pte_addr = 0, + hop3_addr = 0, hop3_pte_addr = 0, + hop4_addr = 0, hop4_pte_addr = 0, + curr_pte = 0; + bool hop1_new = false, hop2_new = false, hop3_new = false, + hop4_new = false, is_huge; + int rc = -ENOMEM; + + /* + * This mapping function can map a page or a huge page. For huge page + * there are only 3 hops rather than 4. Currently the DRAM allocation + * uses huge pages only but user memory could have been allocated with + * one of the two page sizes. Since this is a common code for all the + * three cases, we need this hugs page check. + */ + if (is_dram_addr) { + mmu_prop = &prop->dmmu; + is_huge = true; + } else if (page_size == prop->pmmu_huge.page_size) { + mmu_prop = &prop->pmmu_huge; + is_huge = true; + } else { + mmu_prop = &prop->pmmu; + is_huge = false; + } + + hop0_addr = get_hop0_addr(ctx); + hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr); + curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr; + + hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new); + if (hop1_addr == ULLONG_MAX) + goto err; + + hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr); + curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr; + + hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new); + if (hop2_addr == ULLONG_MAX) + goto err; + + hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr); + curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr; + + hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new); + if (hop3_addr == ULLONG_MAX) + goto err; + + hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr); + curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr; + + if (!is_huge) { + hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new); + if (hop4_addr == ULLONG_MAX) + goto err; + + hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr, + virt_addr); + curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr; + } + + if (hdev->dram_default_page_mapping && is_dram_addr) { + u64 default_pte = (prop->mmu_dram_default_page_addr & + HOP_PHYS_ADDR_MASK) | LAST_MASK | + PAGE_PRESENT_MASK; + + if (curr_pte != default_pte) { + dev_err(hdev->dev, + "DRAM: mapping already exists for virt_addr 0x%llx\n", + virt_addr); + rc = -EINVAL; + goto err; + } + + if (hop1_new || hop2_new || hop3_new || hop4_new) { + dev_err(hdev->dev, + "DRAM mapping should not allocate more 
hops\n"); + rc = -EFAULT; + goto err; + } + } else if (curr_pte & PAGE_PRESENT_MASK) { + dev_err(hdev->dev, + "mapping already exists for virt_addr 0x%llx\n", + virt_addr); + + dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n", + *(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr); + dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n", + *(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr); + dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n", + *(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr); + dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n", + *(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr); + + if (!is_huge) + dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n", + *(u64 *) (uintptr_t) hop4_pte_addr, + hop4_pte_addr); + + rc = -EINVAL; + goto err; + } + + curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK + | PAGE_PRESENT_MASK; + + if (is_huge) + write_final_pte(ctx, hop3_pte_addr, curr_pte); + else + write_final_pte(ctx, hop4_pte_addr, curr_pte); + + if (hop1_new) { + curr_pte = + (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; + write_pte(ctx, hop0_pte_addr, curr_pte); + } + if (hop2_new) { + curr_pte = + (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; + write_pte(ctx, hop1_pte_addr, curr_pte); + get_pte(ctx, hop1_addr); + } + if (hop3_new) { + curr_pte = + (hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; + write_pte(ctx, hop2_pte_addr, curr_pte); + get_pte(ctx, hop2_addr); + } + + if (!is_huge) { + if (hop4_new) { + curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) | + PAGE_PRESENT_MASK; + write_pte(ctx, hop3_pte_addr, curr_pte); + get_pte(ctx, hop3_addr); + } + + get_pte(ctx, hop4_addr); + } else { + get_pte(ctx, hop3_addr); + } + + return 0; + +err: + if (hop4_new) + free_hop(ctx, hop4_addr); + if (hop3_new) + free_hop(ctx, hop3_addr); + if (hop2_new) + free_hop(ctx, hop2_addr); + if (hop1_new) + free_hop(ctx, hop1_addr); + + return rc; +} + +/* + * hl_mmu_v1_swap_out - marks all mapping of the given ctx as swapped out + * + * @ctx: pointer to the context structure + * + */ +static void hl_mmu_v1_swap_out(struct hl_ctx *ctx) +{ + +} + +/* + * hl_mmu_v1_swap_in - marks all mapping of the given ctx as swapped in + * + * @ctx: pointer to the context structure + * + */ +static void hl_mmu_v1_swap_in(struct hl_ctx *ctx) +{ + +} + +/* + * hl_mmu_v1_prepare - prepare mmu for working with mmu v1 + * + * @hdev: pointer to the device structure + */ +void hl_mmu_v1_set_funcs(struct hl_device *hdev) +{ + struct hl_mmu_funcs *mmu = &hdev->mmu_func; + + mmu->init = hl_mmu_v1_init; + mmu->fini = hl_mmu_v1_fini; + mmu->ctx_init = hl_mmu_v1_ctx_init; + mmu->ctx_fini = hl_mmu_v1_ctx_fini; + mmu->map = _hl_mmu_v1_map; + mmu->unmap = _hl_mmu_v1_unmap; + mmu->flush = flush; + mmu->swap_out = hl_mmu_v1_swap_out; + mmu->swap_in = hl_mmu_v1_swap_in; +} diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c new file mode 100644 index 000000000..607f9a11f --- /dev/null +++ b/drivers/misc/habanalabs/common/pci.c @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" +#include "../include/hw_ip/pci/pci_general.h" + +#include <linux/pci.h> + +#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC (HL_PCI_ELBI_TIMEOUT_MSEC * 10) + +#define IATU_REGION_CTRL_REGION_EN_MASK BIT(31) +#define IATU_REGION_CTRL_MATCH_MODE_MASK BIT(30) +#define IATU_REGION_CTRL_NUM_MATCH_EN_MASK BIT(19) +#define IATU_REGION_CTRL_BAR_NUM_MASK GENMASK(10, 8) + +/** + * hl_pci_bars_map() - Map PCI BARs. 
+ * @hdev: Pointer to hl_device structure. + * @name: Array of BAR names. + * @is_wc: Array with flag per BAR whether a write-combined mapping is needed. + * + * Request PCI regions and map them to kernel virtual addresses. + * + * Return: 0 on success, non-zero for failure. + */ +int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3], + bool is_wc[3]) +{ + struct pci_dev *pdev = hdev->pdev; + int rc, i, bar; + + rc = pci_request_regions(pdev, HL_NAME); + if (rc) { + dev_err(hdev->dev, "Cannot obtain PCI resources\n"); + return rc; + } + + for (i = 0 ; i < 3 ; i++) { + bar = i * 2; /* 64-bit BARs */ + hdev->pcie_bar[bar] = is_wc[i] ? + pci_ioremap_wc_bar(pdev, bar) : + pci_ioremap_bar(pdev, bar); + if (!hdev->pcie_bar[bar]) { + dev_err(hdev->dev, "pci_ioremap%s_bar failed for %s\n", + is_wc[i] ? "_wc" : "", name[i]); + rc = -ENODEV; + goto err; + } + } + + return 0; + +err: + for (i = 2 ; i >= 0 ; i--) { + bar = i * 2; /* 64-bit BARs */ + if (hdev->pcie_bar[bar]) + iounmap(hdev->pcie_bar[bar]); + } + + pci_release_regions(pdev); + + return rc; +} + +/** + * hl_pci_bars_unmap() - Unmap PCI BARS. + * @hdev: Pointer to hl_device structure. + * + * Release all PCI BARs and unmap their virtual addresses. + */ +static void hl_pci_bars_unmap(struct hl_device *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int i, bar; + + for (i = 2 ; i >= 0 ; i--) { + bar = i * 2; /* 64-bit BARs */ + iounmap(hdev->pcie_bar[bar]); + } + + pci_release_regions(pdev); +} + +/** + * hl_pci_elbi_write() - Write through the ELBI interface. + * @hdev: Pointer to hl_device structure. + * @addr: Address to write to + * @data: Data to write + * + * Return: 0 on success, negative value for failure. + */ +static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data) +{ + struct pci_dev *pdev = hdev->pdev; + ktime_t timeout; + u64 msec; + u32 val; + + if (hdev->pldm) + msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC; + else + msec = HL_PCI_ELBI_TIMEOUT_MSEC; + + /* Clear previous status */ + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0); + + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr); + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data); + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL, + PCI_CONFIG_ELBI_CTRL_WRITE); + + timeout = ktime_add_ms(ktime_get(), msec); + for (;;) { + pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val); + if (val & PCI_CONFIG_ELBI_STS_MASK) + break; + if (ktime_compare(ktime_get(), timeout) > 0) { + pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, + &val); + break; + } + + usleep_range(300, 500); + } + + if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) + return 0; + + if (val & PCI_CONFIG_ELBI_STS_ERR) + return -EIO; + + if (!(val & PCI_CONFIG_ELBI_STS_MASK)) { + dev_err(hdev->dev, "ELBI write didn't finish in time\n"); + return -EIO; + } + + dev_err(hdev->dev, "ELBI write has undefined bits in status\n"); + return -EIO; +} + +/** + * hl_pci_iatu_write() - iatu write routine. + * @hdev: Pointer to hl_device structure. + * @addr: Address to write to + * @data: Data to write + * + * Return: 0 on success, negative value for failure. 
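+ *
+ * The iATU registers are reached through the DBI window: the window is
+ * first moved to the iATU space (the 0x00300000 write below), the register
+ * at @addr's 12-bit offset is then written, and the caller is expected to
+ * move the window back to its default location afterwards.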
+ */ +int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u32 dbi_offset; + int rc; + + dbi_offset = addr & 0xFFF; + + /* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail + * in case the firmware security is enabled + */ + hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000); + + rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset, + data); + + if (rc) + return -EIO; + + return 0; +} + +/** + * hl_pci_reset_link_through_bridge() - Reset PCI link. + * @hdev: Pointer to hl_device structure. + */ +static void hl_pci_reset_link_through_bridge(struct hl_device *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct pci_dev *parent_port; + u16 val; + + parent_port = pdev->bus->self; + pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val); + val |= PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val); + ssleep(1); + + val &= ~(PCI_BRIDGE_CTL_BUS_RESET); + pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val); + ssleep(3); +} + +/** + * hl_pci_set_inbound_region() - Configure inbound region + * @hdev: Pointer to hl_device structure. + * @region: Inbound region number. + * @pci_region: Inbound region parameters. + * + * Configure the iATU inbound region. + * + * Return: 0 on success, negative value for failure. + */ +int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region, + struct hl_inbound_pci_region *pci_region) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 bar_phys_base, region_base, region_end_address; + u32 offset, ctrl_reg_val; + int rc = 0; + + /* region offset */ + offset = (0x200 * region) + 0x100; + + if (pci_region->mode == PCI_ADDRESS_MATCH_MODE) { + bar_phys_base = hdev->pcie_bar_phys[pci_region->bar]; + region_base = bar_phys_base + pci_region->offset_in_bar; + region_end_address = region_base + pci_region->size - 1; + + rc |= hl_pci_iatu_write(hdev, offset + 0x8, + lower_32_bits(region_base)); + rc |= hl_pci_iatu_write(hdev, offset + 0xC, + upper_32_bits(region_base)); + rc |= hl_pci_iatu_write(hdev, offset + 0x10, + lower_32_bits(region_end_address)); + } + + /* Point to the specified address */ + rc |= hl_pci_iatu_write(hdev, offset + 0x14, + lower_32_bits(pci_region->addr)); + rc |= hl_pci_iatu_write(hdev, offset + 0x18, + upper_32_bits(pci_region->addr)); + rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0); + + /* Enable + bar/address match + match enable + bar number */ + ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1); + ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK, + pci_region->mode); + ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1); + + if (pci_region->mode == PCI_BAR_MATCH_MODE) + ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK, + pci_region->bar); + + rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val); + + /* Return the DBI window to the default location + * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail + * in case the firmware security is enabled + */ + hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0); + + if (rc) + dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n", + pci_region->bar, pci_region->addr); + + return rc; +} + +/** + * hl_pci_set_outbound_region() - Configure outbound region 0 + * @hdev: Pointer to hl_device structure. + * @pci_region: Outbound region parameters. + * + * Configure the iATU outbound region 0. + * + * Return: 0 on success, negative value for failure. 
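+ *
+ * A minimal usage sketch (the values are hypothetical; the ASIC-specific
+ * code picks the real ones):
+ *
+ *	struct hl_outbound_pci_region region = {
+ *		.addr = HOST_PHYS_BASE,
+ *		.size = HOST_PHYS_SIZE,
+ *	};
+ *
+ *	rc = hl_pci_set_outbound_region(hdev, &region);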
+ */ +int hl_pci_set_outbound_region(struct hl_device *hdev, + struct hl_outbound_pci_region *pci_region) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 outbound_region_end_address; + int rc = 0; + + /* Outbound Region 0 */ + outbound_region_end_address = + pci_region->addr + pci_region->size - 1; + rc |= hl_pci_iatu_write(hdev, 0x008, + lower_32_bits(pci_region->addr)); + rc |= hl_pci_iatu_write(hdev, 0x00C, + upper_32_bits(pci_region->addr)); + rc |= hl_pci_iatu_write(hdev, 0x010, + lower_32_bits(outbound_region_end_address)); + rc |= hl_pci_iatu_write(hdev, 0x014, 0); + + if ((hdev->power9_64bit_dma_enable) && (hdev->dma_mask == 64)) + rc |= hl_pci_iatu_write(hdev, 0x018, 0x08000000); + else + rc |= hl_pci_iatu_write(hdev, 0x018, 0); + + rc |= hl_pci_iatu_write(hdev, 0x020, + upper_32_bits(outbound_region_end_address)); + /* Increase region size */ + rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000); + /* Enable */ + rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000); + + /* Return the DBI window to the default location + * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail + * in case the firmware security is enabled + */ + hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0); + + return rc; +} + +/** + * hl_pci_set_dma_mask() - Set DMA masks for the device. + * @hdev: Pointer to hl_device structure. + * + * This function sets the DMA masks (regular and consistent) for a specified + * value. If it doesn't succeed, it tries to set it to a fall-back value + * + * Return: 0 on success, non-zero for failure. + */ +static int hl_pci_set_dma_mask(struct hl_device *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int rc; + + /* set DMA mask */ + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask)); + if (rc) { + dev_err(hdev->dev, + "Failed to set pci dma mask to %d bits, error %d\n", + hdev->dma_mask, rc); + return rc; + } + + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask)); + if (rc) { + dev_err(hdev->dev, + "Failed to set pci consistent dma mask to %d bits, error %d\n", + hdev->dma_mask, rc); + return rc; + } + + return 0; +} + +/** + * hl_pci_init() - PCI initialization code. + * @hdev: Pointer to hl_device structure. + * @cpu_boot_status_reg: status register of the device's CPU + * @boot_err0_reg: boot error register of the device's CPU + * @preboot_ver_timeout: how much to wait before bailing out on reading + * the preboot version + * + * Set DMA masks, initialize the PCI controller and map the PCI BARs. + * + * Return: 0 on success, non-zero for failure. 
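+ *
+ * On failure the function unwinds what it has already set up (BAR mappings,
+ * bus mastering, the enabled device), so the caller must not call
+ * hl_pci_fini() for a failed hl_pci_init().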
+ */
+int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg,
+		u32 boot_err0_reg, u32 preboot_ver_timeout)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	int rc;
+
+	if (hdev->reset_pcilink)
+		hl_pci_reset_link_through_bridge(hdev);
+
+	rc = pci_enable_device_mem(pdev);
+	if (rc) {
+		dev_err(hdev->dev, "can't enable PCI device\n");
+		return rc;
+	}
+
+	pci_set_master(pdev);
+
+	rc = hdev->asic_funcs->pci_bars_map(hdev);
+	if (rc) {
+		dev_err(hdev->dev, "Failed to initialize PCI BARs\n");
+		goto disable_device;
+	}
+
+	rc = hdev->asic_funcs->init_iatu(hdev);
+	if (rc) {
+		dev_err(hdev->dev, "Failed to initialize iATU\n");
+		goto unmap_pci_bars;
+	}
+
+	rc = hl_pci_set_dma_mask(hdev);
+	if (rc)
+		goto unmap_pci_bars;
+
+	/* Before continuing in the initialization, we need to read the preboot
+	 * version to determine whether we run with a security-enabled firmware.
+	 * The check will be done in each ASIC's specific code.
+	 */
+	rc = hl_fw_read_preboot_ver(hdev, cpu_boot_status_reg, boot_err0_reg,
+					preboot_ver_timeout);
+	if (rc)
+		goto unmap_pci_bars;
+
+	return 0;
+
+unmap_pci_bars:
+	hl_pci_bars_unmap(hdev);
+disable_device:
+	pci_clear_master(pdev);
+	pci_disable_device(pdev);
+
+	return rc;
+}
+
+/**
+ * hl_pci_fini() - PCI finalization code.
+ * @hdev: Pointer to hl_device structure
+ *
+ * Unmap PCI BARs and disable PCI device.
+ */
+void hl_pci_fini(struct hl_device *hdev)
+{
+	hl_pci_bars_unmap(hdev);
+
+	pci_clear_master(hdev->pdev);
+	pci_disable_device(hdev->pdev);
+}
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
new file mode 100644
index 000000000..3ceae8701
--- /dev/null
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */ + +#include "habanalabs.h" + +#include <linux/pci.h> + +long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr) +{ + struct cpucp_packet pkt; + long result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + if (curr) + pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_CURR_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + else + pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.pll_index = cpu_to_le32(pll_index); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, &result); + + if (rc) { + dev_err(hdev->dev, + "Failed to get frequency of PLL %d, error %d\n", + pll_index, rc); + result = rc; + } + + return result; +} + +void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq) +{ + struct cpucp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_SET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.pll_index = cpu_to_le32(pll_index); + pkt.value = cpu_to_le64(freq); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, NULL); + + if (rc) + dev_err(hdev->dev, + "Failed to set frequency to PLL %d, error %d\n", + pll_index, rc); +} + +u64 hl_get_max_power(struct hl_device *hdev) +{ + struct cpucp_packet pkt; + long result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, &result); + + if (rc) { + dev_err(hdev->dev, "Failed to get max power, error %d\n", rc); + result = rc; + } + + return result; +} + +void hl_set_max_power(struct hl_device *hdev) +{ + struct cpucp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_SET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.value = cpu_to_le64(hdev->max_power); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, NULL); + + if (rc) + dev_err(hdev->dev, "Failed to set max power, error %d\n", rc); +} + +static ssize_t uboot_ver_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", hdev->asic_prop.uboot_ver); +} + +static ssize_t armcp_kernel_ver_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s", hdev->asic_prop.cpucp_info.kernel_version); +} + +static ssize_t armcp_ver_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", hdev->asic_prop.cpucp_info.cpucp_version); +} + +static ssize_t cpld_ver_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "0x%08x\n", + hdev->asic_prop.cpucp_info.cpld_version); +} + +static ssize_t cpucp_kernel_ver_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s", hdev->asic_prop.cpucp_info.kernel_version); +} + +static ssize_t cpucp_ver_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", hdev->asic_prop.cpucp_info.cpucp_version); +} + +static ssize_t infineon_ver_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = 
dev_get_drvdata(dev); + + return sprintf(buf, "0x%04x\n", + hdev->asic_prop.cpucp_info.infineon_version); +} + +static ssize_t fuse_ver_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", hdev->asic_prop.cpucp_info.fuse_version); +} + +static ssize_t thermal_ver_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s", hdev->asic_prop.cpucp_info.thermal_version); +} + +static ssize_t preboot_btl_ver_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", hdev->asic_prop.preboot_ver); +} + +static ssize_t soft_reset_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + int rc; + + rc = kstrtoul(buf, 0, &value); + + if (rc) { + count = -EINVAL; + goto out; + } + + if (!hdev->supports_soft_reset) { + dev_err(hdev->dev, "Device does not support soft-reset\n"); + goto out; + } + + dev_warn(hdev->dev, "Soft-Reset requested through sysfs\n"); + + hl_device_reset(hdev, false, false); + +out: + return count; +} + +static ssize_t hard_reset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + int rc; + + rc = kstrtoul(buf, 0, &value); + + if (rc) { + count = -EINVAL; + goto out; + } + + dev_warn(hdev->dev, "Hard-Reset requested through sysfs\n"); + + hl_device_reset(hdev, true, false); + +out: + return count; +} + +static ssize_t device_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + char *str; + + switch (hdev->asic_type) { + case ASIC_GOYA: + str = "GOYA"; + break; + case ASIC_GAUDI: + str = "GAUDI"; + break; + default: + dev_err(hdev->dev, "Unrecognized ASIC type %d\n", + hdev->asic_type); + return -EINVAL; + } + + return sprintf(buf, "%s\n", str); +} + +static ssize_t pci_addr_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%04x:%02x:%02x.%x\n", + pci_domain_nr(hdev->pdev->bus), + hdev->pdev->bus->number, + PCI_SLOT(hdev->pdev->devfn), + PCI_FUNC(hdev->pdev->devfn)); +} + +static ssize_t status_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + char *str; + + if (atomic_read(&hdev->in_reset)) + str = "In reset"; + else if (hdev->disabled) + str = "Malfunction"; + else + str = "Operational"; + + return sprintf(buf, "%s\n", str); +} + +static ssize_t soft_reset_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", hdev->soft_reset_cnt); +} + +static ssize_t hard_reset_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", hdev->hard_reset_cnt); +} + +static ssize_t max_power_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long val; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + val = hl_get_max_power(hdev); + + return sprintf(buf, "%lu\n", val); +} + +static 
ssize_t max_power_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + unsigned long value; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) { + count = -ENODEV; + goto out; + } + + rc = kstrtoul(buf, 0, &value); + + if (rc) { + count = -EINVAL; + goto out; + } + + hdev->max_power = value; + hl_set_max_power(hdev); + +out: + return count; +} + +static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, + size_t max_size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct hl_device *hdev = dev_get_drvdata(dev); + char *data; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + if (!max_size) + return -EINVAL; + + data = kzalloc(max_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + rc = hdev->asic_funcs->get_eeprom_data(hdev, data, max_size); + if (rc) + goto out; + + memcpy(buf, data, max_size); + +out: + kfree(data); + + return max_size; +} + +static DEVICE_ATTR_RO(armcp_kernel_ver); +static DEVICE_ATTR_RO(armcp_ver); +static DEVICE_ATTR_RO(cpld_ver); +static DEVICE_ATTR_RO(cpucp_kernel_ver); +static DEVICE_ATTR_RO(cpucp_ver); +static DEVICE_ATTR_RO(device_type); +static DEVICE_ATTR_RO(fuse_ver); +static DEVICE_ATTR_WO(hard_reset); +static DEVICE_ATTR_RO(hard_reset_cnt); +static DEVICE_ATTR_RO(infineon_ver); +static DEVICE_ATTR_RW(max_power); +static DEVICE_ATTR_RO(pci_addr); +static DEVICE_ATTR_RO(preboot_btl_ver); +static DEVICE_ATTR_WO(soft_reset); +static DEVICE_ATTR_RO(soft_reset_cnt); +static DEVICE_ATTR_RO(status); +static DEVICE_ATTR_RO(thermal_ver); +static DEVICE_ATTR_RO(uboot_ver); + +static struct bin_attribute bin_attr_eeprom = { + .attr = {.name = "eeprom", .mode = (0444)}, + .size = PAGE_SIZE, + .read = eeprom_read_handler +}; + +static struct attribute *hl_dev_attrs[] = { + &dev_attr_armcp_kernel_ver.attr, + &dev_attr_armcp_ver.attr, + &dev_attr_cpld_ver.attr, + &dev_attr_cpucp_kernel_ver.attr, + &dev_attr_cpucp_ver.attr, + &dev_attr_device_type.attr, + &dev_attr_fuse_ver.attr, + &dev_attr_hard_reset.attr, + &dev_attr_hard_reset_cnt.attr, + &dev_attr_infineon_ver.attr, + &dev_attr_max_power.attr, + &dev_attr_pci_addr.attr, + &dev_attr_preboot_btl_ver.attr, + &dev_attr_soft_reset.attr, + &dev_attr_soft_reset_cnt.attr, + &dev_attr_status.attr, + &dev_attr_thermal_ver.attr, + &dev_attr_uboot_ver.attr, + NULL, +}; + +static struct bin_attribute *hl_dev_bin_attrs[] = { + &bin_attr_eeprom, + NULL +}; + +static struct attribute_group hl_dev_attr_group = { + .attrs = hl_dev_attrs, + .bin_attrs = hl_dev_bin_attrs, +}; + +static struct attribute_group hl_dev_clks_attr_group; + +static const struct attribute_group *hl_dev_attr_groups[] = { + &hl_dev_attr_group, + &hl_dev_clks_attr_group, + NULL, +}; + +int hl_sysfs_init(struct hl_device *hdev) +{ + int rc; + + if (hdev->asic_type == ASIC_GOYA) + hdev->pm_mng_profile = PM_AUTO; + else + hdev->pm_mng_profile = PM_MANUAL; + + hdev->max_power = hdev->asic_prop.max_power_default; + + hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group); + + rc = device_add_groups(hdev->dev, hl_dev_attr_groups); + if (rc) { + dev_err(hdev->dev, + "Failed to add groups to device, error %d\n", rc); + return rc; + } + + return 0; +} + +void hl_sysfs_fini(struct hl_device *hdev) +{ + device_remove_groups(hdev->dev, hl_dev_attr_groups); +} diff --git a/drivers/misc/habanalabs/gaudi/Makefile 
b/drivers/misc/habanalabs/gaudi/Makefile
new file mode 100644
index 000000000..c9f4703cf
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+HL_GAUDI_FILES := gaudi/gaudi.o gaudi/gaudi_hwmgr.o gaudi/gaudi_security.o \
+		gaudi/gaudi_coresight.o
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
new file mode 100644
index 000000000..ebac53a73
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -0,0 +1,6699 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "gaudiP.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_v1_1.h"
+#include "../include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_fw_if.h"
+#include "../include/gaudi/gaudi_reg_map.h"
+#include "../include/gaudi/gaudi_async_ids_map_extended.h"
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/hwmon.h>
+#include <linux/genalloc.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iommu.h>
+#include <linux/seq_file.h>
+
+/*
+ * Gaudi security scheme:
+ *
+ * 1. Host is protected by:
+ *        - Range registers
+ *        - MMU
+ *
+ * 2. DDR is protected by:
+ *        - Range registers (protect the first 512MB)
+ *
+ * 3. Configuration is protected by:
+ *        - Range registers
+ *        - Protection bits
+ *
+ * MMU is always enabled.
+ *
+ * QMAN DMA channels 0,1,5 (PCI DMA):
+ *     - DMA is not secured.
+ *     - PQ and CQ are secured.
+ *     - CP is secured: The driver needs to parse CB but WREG should be allowed
+ *                      because of TDMA (tensor DMA). Hence, WREG is never
+ *                      secured.
+ *
+ * When the driver needs to use DMA it will check that Gaudi is idle, set DMA
+ * channel 0 to be secured, execute the DMA and change it back to not secured.
+ * Currently, the driver doesn't use the DMA while there are compute jobs
+ * running.
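+ *
+ * A minimal sketch of that flow (illustrative only; apart from the
+ * is_device_idle ASIC callback, the helper names here are hypothetical,
+ * not the driver's actual functions):
+ *
+ *	if (hdev->asic_funcs->is_device_idle(hdev, &busy_mask, NULL)) {
+ *		dma_channel_set_secured(hdev, 0, true);	// secure channel 0
+ *		do_dma_transfer(hdev, job);		// run the transfer
+ *		dma_channel_set_secured(hdev, 0, false);// restore it
+ *	}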
+ * + * The current use cases for the driver to use the DMA are: + * - Clear SRAM on context switch (happens on context switch when device is + * idle) + * - MMU page tables area clear (happens on init) + * + * QMAN DMA 2-4,6,7, TPC, MME, NIC: + * PQ is secured and is located on the Host (HBM CON TPC3 bug) + * CQ, CP and the engine are not secured + * + */ + +#define GAUDI_BOOT_FIT_FILE "habanalabs/gaudi/gaudi-boot-fit.itb" +#define GAUDI_LINUX_FW_FILE "habanalabs/gaudi/gaudi-fit.itb" +#define GAUDI_TPC_FW_FILE "habanalabs/gaudi/gaudi_tpc.bin" + +#define GAUDI_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */ + +#define GAUDI_RESET_TIMEOUT_MSEC 1000 /* 1000ms */ +#define GAUDI_RESET_WAIT_MSEC 1 /* 1ms */ +#define GAUDI_CPU_RESET_WAIT_MSEC 200 /* 200ms */ +#define GAUDI_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */ + +#define GAUDI_PLDM_RESET_WAIT_MSEC 1000 /* 1s */ +#define GAUDI_PLDM_HRESET_TIMEOUT_MSEC 20000 /* 20s */ +#define GAUDI_PLDM_TEST_QUEUE_WAIT_USEC 1000000 /* 1s */ +#define GAUDI_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100) +#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) +#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) +#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */ +#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */ + +#define GAUDI_QMAN0_FENCE_VAL 0x72E91AB9 + +#define GAUDI_MAX_STRING_LEN 20 + +#define GAUDI_CB_POOL_CB_CNT 512 +#define GAUDI_CB_POOL_CB_SIZE 0x20000 /* 128KB */ + +#define GAUDI_ALLOC_CPU_MEM_RETRY_CNT 3 + +#define GAUDI_NUM_OF_TPC_INTR_CAUSE 20 + +#define GAUDI_NUM_OF_QM_ERR_CAUSE 16 + +#define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE 3 + +#define GAUDI_ARB_WDT_TIMEOUT 0x1000000 + +#define GAUDI_CLK_GATE_DEBUGFS_MASK (\ + BIT(GAUDI_ENGINE_ID_MME_0) |\ + BIT(GAUDI_ENGINE_ID_MME_2) |\ + GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0)) + +static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { + "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3", + "gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3", + "gaudi cq 5_0", "gaudi cq 5_1", "gaudi cq 5_2", "gaudi cq 5_3", + "gaudi cpu eq" +}; + +static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = { + [GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0, + [GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1, + [GAUDI_PCI_DMA_3] = GAUDI_ENGINE_ID_DMA_5, + [GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2, + [GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3, + [GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4, + [GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_6, + [GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_7 +}; + +static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = { + [0] = GAUDI_QUEUE_ID_DMA_0_0, + [1] = GAUDI_QUEUE_ID_DMA_0_1, + [2] = GAUDI_QUEUE_ID_DMA_0_2, + [3] = GAUDI_QUEUE_ID_DMA_0_3, + [4] = GAUDI_QUEUE_ID_DMA_1_0, + [5] = GAUDI_QUEUE_ID_DMA_1_1, + [6] = GAUDI_QUEUE_ID_DMA_1_2, + [7] = GAUDI_QUEUE_ID_DMA_1_3, + [8] = GAUDI_QUEUE_ID_DMA_5_0, + [9] = GAUDI_QUEUE_ID_DMA_5_1, + [10] = GAUDI_QUEUE_ID_DMA_5_2, + [11] = GAUDI_QUEUE_ID_DMA_5_3 +}; + +static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = { + [PACKET_WREG_32] = sizeof(struct packet_wreg32), + [PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk), + [PACKET_MSG_LONG] = sizeof(struct packet_msg_long), + [PACKET_MSG_SHORT] = sizeof(struct packet_msg_short), + [PACKET_CP_DMA] = sizeof(struct packet_cp_dma), + [PACKET_REPEAT] = sizeof(struct packet_repeat), + [PACKET_MSG_PROT] = sizeof(struct packet_msg_prot), + [PACKET_FENCE] = sizeof(struct packet_fence), + [PACKET_LIN_DMA] = sizeof(struct packet_lin_dma), + 
[PACKET_NOP] = sizeof(struct packet_nop), + [PACKET_STOP] = sizeof(struct packet_stop), + [PACKET_ARB_POINT] = sizeof(struct packet_arb_point), + [PACKET_WAIT] = sizeof(struct packet_wait), + [PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe) +}; + +static inline bool validate_packet_id(enum packet_id id) +{ + switch (id) { + case PACKET_WREG_32: + case PACKET_WREG_BULK: + case PACKET_MSG_LONG: + case PACKET_MSG_SHORT: + case PACKET_CP_DMA: + case PACKET_REPEAT: + case PACKET_MSG_PROT: + case PACKET_FENCE: + case PACKET_LIN_DMA: + case PACKET_NOP: + case PACKET_STOP: + case PACKET_ARB_POINT: + case PACKET_WAIT: + case PACKET_LOAD_AND_EXE: + return true; + default: + return false; + } +} + +static const char * const +gaudi_tpc_interrupts_cause[GAUDI_NUM_OF_TPC_INTR_CAUSE] = { + "tpc_address_exceed_slm", + "tpc_div_by_0", + "tpc_spu_mac_overflow", + "tpc_spu_addsub_overflow", + "tpc_spu_abs_overflow", + "tpc_spu_fp_dst_nan_inf", + "tpc_spu_fp_dst_denorm", + "tpc_vpu_mac_overflow", + "tpc_vpu_addsub_overflow", + "tpc_vpu_abs_overflow", + "tpc_vpu_fp_dst_nan_inf", + "tpc_vpu_fp_dst_denorm", + "tpc_assertions", + "tpc_illegal_instruction", + "tpc_pc_wrap_around", + "tpc_qm_sw_err", + "tpc_hbw_rresp_err", + "tpc_hbw_bresp_err", + "tpc_lbw_rresp_err", + "tpc_lbw_bresp_err" +}; + +static const char * const +gaudi_qman_error_cause[GAUDI_NUM_OF_QM_ERR_CAUSE] = { + "PQ AXI HBW error", + "CQ AXI HBW error", + "CP AXI HBW error", + "CP error due to undefined OPCODE", + "CP encountered STOP OPCODE", + "CP AXI LBW error", + "CP WRREG32 or WRBULK returned error", + "N/A", + "FENCE 0 inc over max value and clipped", + "FENCE 1 inc over max value and clipped", + "FENCE 2 inc over max value and clipped", + "FENCE 3 inc over max value and clipped", + "FENCE 0 dec under min value and clipped", + "FENCE 1 dec under min value and clipped", + "FENCE 2 dec under min value and clipped", + "FENCE 3 dec under min value and clipped" +}; + +static const char * const +gaudi_qman_arb_error_cause[GAUDI_NUM_OF_QM_ARB_ERR_CAUSE] = { + "Choice push while full error", + "Choice Q watchdog error", + "MSG AXI LBW returned with error" +}; + +static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = { + QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_0 */ + QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_1 */ + QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_2 */ + QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_3 */ + QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_0 */ + QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_1 */ + QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_2 */ + QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_3 */ + QUEUE_TYPE_CPU, /* GAUDI_QUEUE_ID_CPU_PQ */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_3 */ + QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_0 */ + QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_1 */ + QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_2 */ + QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_2 */ + QUEUE_TYPE_INT, /* 
GAUDI_QUEUE_ID_DMA_6_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_3 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_0 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_1 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_2 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_3 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_0 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_1 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_2 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_3 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_0 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_1 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_2 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_3 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_0 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_1 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_2 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_3 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_0 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_1 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_2 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_3 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_0 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_1 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_2 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_3 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_0 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_1 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_2 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_3 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_0 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_1 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_2 */ + QUEUE_TYPE_NA, /* 
GAUDI_QUEUE_ID_NIC_7_3 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_0 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_1 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_2 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_3 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_0 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_1 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_2 */ + QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_3 */ +}; + +struct ecc_info_extract_params { + u64 block_address; + u32 num_memories; + bool derr; + bool disable_clock_gating; +}; + +static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid, + u64 phys_addr); +static int gaudi_send_job_on_qman0(struct hl_device *hdev, + struct hl_cs_job *job); +static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr, + u32 size, u64 val); +static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel, + u32 tpc_id); +static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev); +static int gaudi_cpucp_info_get(struct hl_device *hdev); +static void gaudi_disable_clock_gating(struct hl_device *hdev); +static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid); + +static int gaudi_get_fixed_properties(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u32 num_sync_stream_queues = 0; + int i; + + prop->max_queues = GAUDI_QUEUE_ID_SIZE; + prop->hw_queues_props = kcalloc(prop->max_queues, + sizeof(struct hw_queue_properties), + GFP_KERNEL); + + if (!prop->hw_queues_props) + return -ENOMEM; + + for (i = 0 ; i < prop->max_queues ; i++) { + if (gaudi_queue_type[i] == QUEUE_TYPE_EXT) { + prop->hw_queues_props[i].type = QUEUE_TYPE_EXT; + prop->hw_queues_props[i].driver_only = 0; + prop->hw_queues_props[i].requires_kernel_cb = 1; + prop->hw_queues_props[i].supports_sync_stream = 1; + num_sync_stream_queues++; + } else if (gaudi_queue_type[i] == QUEUE_TYPE_CPU) { + prop->hw_queues_props[i].type = QUEUE_TYPE_CPU; + prop->hw_queues_props[i].driver_only = 1; + prop->hw_queues_props[i].requires_kernel_cb = 0; + prop->hw_queues_props[i].supports_sync_stream = 0; + } else if (gaudi_queue_type[i] == QUEUE_TYPE_INT) { + prop->hw_queues_props[i].type = QUEUE_TYPE_INT; + prop->hw_queues_props[i].driver_only = 0; + prop->hw_queues_props[i].requires_kernel_cb = 0; + } else if (gaudi_queue_type[i] == QUEUE_TYPE_NA) { + prop->hw_queues_props[i].type = QUEUE_TYPE_NA; + prop->hw_queues_props[i].driver_only = 0; + prop->hw_queues_props[i].requires_kernel_cb = 0; + prop->hw_queues_props[i].supports_sync_stream = 0; + } + } + + prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES; + prop->sync_stream_first_sob = 0; + prop->sync_stream_first_mon = 0; + prop->dram_base_address = DRAM_PHYS_BASE; + prop->dram_size = GAUDI_HBM_SIZE_32GB; + prop->dram_end_address = prop->dram_base_address + + prop->dram_size; + prop->dram_user_base_address = DRAM_BASE_ADDR_USER; + + prop->sram_base_address = SRAM_BASE_ADDR; + prop->sram_size = SRAM_SIZE; + prop->sram_end_address = prop->sram_base_address + + prop->sram_size; + prop->sram_user_base_address = prop->sram_base_address + + SRAM_USER_BASE_OFFSET; + + prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR; + if (hdev->pldm) + prop->mmu_pgt_size = 0x800000; /* 8MB */ + else + prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE; + prop->mmu_pte_size = HL_PTE_SIZE; + prop->mmu_hop_table_size = HOP_TABLE_SIZE; + prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE; + prop->dram_page_size = PAGE_SIZE_2MB; + + prop->pmmu.hop0_shift = HOP0_SHIFT; + prop->pmmu.hop1_shift = HOP1_SHIFT; + prop->pmmu.hop2_shift 
= HOP2_SHIFT; + prop->pmmu.hop3_shift = HOP3_SHIFT; + prop->pmmu.hop4_shift = HOP4_SHIFT; + prop->pmmu.hop0_mask = HOP0_MASK; + prop->pmmu.hop1_mask = HOP1_MASK; + prop->pmmu.hop2_mask = HOP2_MASK; + prop->pmmu.hop3_mask = HOP3_MASK; + prop->pmmu.hop4_mask = HOP4_MASK; + prop->pmmu.start_addr = VA_HOST_SPACE_START; + prop->pmmu.end_addr = + (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1; + prop->pmmu.page_size = PAGE_SIZE_4KB; + prop->pmmu.num_hops = MMU_ARCH_5_HOPS; + + /* PMMU and HPMMU are the same except of page size */ + memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu)); + prop->pmmu_huge.page_size = PAGE_SIZE_2MB; + + /* shifts and masks are the same in PMMU and DMMU */ + memcpy(&prop->dmmu, &prop->pmmu, sizeof(prop->pmmu)); + prop->dmmu.start_addr = (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2); + prop->dmmu.end_addr = VA_HOST_SPACE_END; + prop->dmmu.page_size = PAGE_SIZE_2MB; + + prop->cfg_size = CFG_SIZE; + prop->max_asid = MAX_ASID; + prop->num_of_events = GAUDI_EVENT_SIZE; + prop->tpc_enabled_mask = TPC_ENABLED_MASK; + + prop->max_power_default = MAX_POWER_DEFAULT_PCI; + + prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT; + prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE; + + prop->pcie_dbi_base_address = mmPCIE_DBI_BASE; + prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI; + + strncpy(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME, + CARD_NAME_MAX_LEN); + + prop->max_pending_cs = GAUDI_MAX_PENDING_CS; + + prop->first_available_user_sob[HL_GAUDI_WS_DCORE] = + num_sync_stream_queues * HL_RSVD_SOBS; + prop->first_available_user_mon[HL_GAUDI_WS_DCORE] = + num_sync_stream_queues * HL_RSVD_MONS; + + return 0; +} + +static int gaudi_pci_bars_map(struct hl_device *hdev) +{ + static const char * const name[] = {"SRAM", "CFG", "HBM"}; + bool is_wc[3] = {false, false, true}; + int rc; + + rc = hl_pci_bars_map(hdev, name, is_wc); + if (rc) + return rc; + + hdev->rmmio = hdev->pcie_bar[CFG_BAR_ID] + + (CFG_BASE - SPI_FLASH_BASE_ADDR); + + return 0; +} + +static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct hl_inbound_pci_region pci_region; + u64 old_addr = addr; + int rc; + + if ((gaudi) && (gaudi->hbm_bar_cur_addr == addr)) + return old_addr; + + /* Inbound Region 2 - Bar 4 - Point to HBM */ + pci_region.mode = PCI_BAR_MATCH_MODE; + pci_region.bar = HBM_BAR_ID; + pci_region.addr = addr; + rc = hl_pci_set_inbound_region(hdev, 2, &pci_region); + if (rc) + return U64_MAX; + + if (gaudi) { + old_addr = gaudi->hbm_bar_cur_addr; + gaudi->hbm_bar_cur_addr = addr; + } + + return old_addr; +} + +static int gaudi_init_iatu(struct hl_device *hdev) +{ + struct hl_inbound_pci_region inbound_region; + struct hl_outbound_pci_region outbound_region; + int rc; + + /* Inbound Region 0 - Bar 0 - Point to SRAM + CFG */ + inbound_region.mode = PCI_BAR_MATCH_MODE; + inbound_region.bar = SRAM_BAR_ID; + inbound_region.addr = SRAM_BASE_ADDR; + rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region); + if (rc) + goto done; + + /* Inbound Region 1 - Bar 2 - Point to SPI FLASH */ + inbound_region.mode = PCI_BAR_MATCH_MODE; + inbound_region.bar = CFG_BAR_ID; + inbound_region.addr = SPI_FLASH_BASE_ADDR; + rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region); + if (rc) + goto done; + + /* Inbound Region 2 - Bar 4 - Point to HBM */ + inbound_region.mode = PCI_BAR_MATCH_MODE; + inbound_region.bar = HBM_BAR_ID; + inbound_region.addr = DRAM_PHYS_BASE; + rc = hl_pci_set_inbound_region(hdev, 2, &inbound_region); + if (rc) + 
goto done; + + hdev->asic_funcs->set_dma_mask_from_fw(hdev); + + /* Outbound Region 0 - Point to Host */ + outbound_region.addr = HOST_PHYS_BASE; + outbound_region.size = HOST_PHYS_SIZE; + rc = hl_pci_set_outbound_region(hdev, &outbound_region); + +done: + return rc; +} + +static int gaudi_early_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct pci_dev *pdev = hdev->pdev; + int rc; + + rc = gaudi_get_fixed_properties(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to get fixed properties\n"); + return rc; + } + + /* Check BAR sizes */ + if (pci_resource_len(pdev, SRAM_BAR_ID) != SRAM_BAR_SIZE) { + dev_err(hdev->dev, + "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n", + SRAM_BAR_ID, + (unsigned long long) pci_resource_len(pdev, + SRAM_BAR_ID), + SRAM_BAR_SIZE); + rc = -ENODEV; + goto free_queue_props; + } + + if (pci_resource_len(pdev, CFG_BAR_ID) != CFG_BAR_SIZE) { + dev_err(hdev->dev, + "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n", + CFG_BAR_ID, + (unsigned long long) pci_resource_len(pdev, + CFG_BAR_ID), + CFG_BAR_SIZE); + rc = -ENODEV; + goto free_queue_props; + } + + prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID); + + rc = hl_pci_init(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, + mmCPU_BOOT_ERR0, GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC); + if (rc) + goto free_queue_props; + + /* GAUDI Firmware does not yet support security */ + prop->fw_security_disabled = true; + dev_info(hdev->dev, "firmware-level security is disabled\n"); + + return 0; + +free_queue_props: + kfree(hdev->asic_prop.hw_queues_props); + return rc; +} + +static int gaudi_early_fini(struct hl_device *hdev) +{ + kfree(hdev->asic_prop.hw_queues_props); + hl_pci_fini(hdev); + + return 0; +} + +/** + * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values + * + * @hdev: pointer to hl_device structure + * + */ +static void gaudi_fetch_psoc_frequency(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u32 trace_freq = 0; + u32 pll_clk = 0; + u32 div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2); + u32 div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2); + u32 nr = RREG32(mmPSOC_CPU_PLL_NR); + u32 nf = RREG32(mmPSOC_CPU_PLL_NF); + u32 od = RREG32(mmPSOC_CPU_PLL_OD); + + if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) { + if (div_sel == DIV_SEL_REF_CLK) + trace_freq = PLL_REF_CLK; + else + trace_freq = PLL_REF_CLK / (div_fctr + 1); + } else if (div_sel == DIV_SEL_PLL_CLK || + div_sel == DIV_SEL_DIVIDED_PLL) { + pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1)); + if (div_sel == DIV_SEL_PLL_CLK) + trace_freq = pll_clk; + else + trace_freq = pll_clk / (div_fctr + 1); + } else { + dev_warn(hdev->dev, + "Received invalid div select value: %d", div_sel); + } + + prop->psoc_timestamp_frequency = trace_freq; + prop->psoc_pci_pll_nr = nr; + prop->psoc_pci_pll_nf = nf; + prop->psoc_pci_pll_od = od; + prop->psoc_pci_pll_div_factor = div_fctr; +} + +static int _gaudi_init_tpc_mem(struct hl_device *hdev, + dma_addr_t tpc_kernel_src_addr, u32 tpc_kernel_size) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct packet_lin_dma *init_tpc_mem_pkt; + struct hl_cs_job *job; + struct hl_cb *cb; + u64 dst_addr; + u32 cb_size, ctl; + u8 tpc_id; + int rc; + + cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false); + if (!cb) + return -EFAULT; + + init_tpc_mem_pkt = cb->kernel_address; + cb_size = sizeof(*init_tpc_mem_pkt); + memset(init_tpc_mem_pkt, 0, cb_size); + + init_tpc_mem_pkt->tsize = cpu_to_le32(tpc_kernel_size); + + 
ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA); + ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_LIN_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1); + + init_tpc_mem_pkt->ctl = cpu_to_le32(ctl); + + init_tpc_mem_pkt->src_addr = cpu_to_le64(tpc_kernel_src_addr); + dst_addr = (prop->sram_user_base_address & + GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >> + GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT; + init_tpc_mem_pkt->dst_addr |= cpu_to_le64(dst_addr); + + job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true); + if (!job) { + dev_err(hdev->dev, "Failed to allocate a new job\n"); + rc = -ENOMEM; + goto release_cb; + } + + job->id = 0; + job->user_cb = cb; + job->user_cb->cs_cnt++; + job->user_cb_size = cb_size; + job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; + job->patched_cb = job->user_cb; + job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot); + + hl_debugfs_add_job(hdev, job); + + rc = gaudi_send_job_on_qman0(hdev, job); + + if (rc) + goto free_job; + + for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) { + rc = gaudi_run_tpc_kernel(hdev, dst_addr, tpc_id); + if (rc) + break; + } + +free_job: + hl_userptr_delete_list(hdev, &job->userptr_list); + hl_debugfs_remove_job(hdev, job); + kfree(job); + cb->cs_cnt--; + +release_cb: + hl_cb_put(cb); + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + + return rc; +} + +/* + * gaudi_init_tpc_mem() - Initialize TPC memories. + * @hdev: Pointer to hl_device structure. + * + * Copy TPC kernel fw from firmware file and run it to initialize TPC memories. + * + * Return: 0 for success, negative value for error. + */ +static int gaudi_init_tpc_mem(struct hl_device *hdev) +{ + const struct firmware *fw; + size_t fw_size; + void *cpu_addr; + dma_addr_t dma_handle; + int rc, count = 5; + +again: + rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev); + if (rc == -EINTR && count-- > 0) { + msleep(50); + goto again; + } + + if (rc) { + dev_err(hdev->dev, "Failed to load firmware file %s\n", + GAUDI_TPC_FW_FILE); + goto out; + } + + fw_size = fw->size; + cpu_addr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, fw_size, + &dma_handle, GFP_KERNEL | __GFP_ZERO); + if (!cpu_addr) { + dev_err(hdev->dev, + "Failed to allocate %zu of dma memory for TPC kernel\n", + fw_size); + rc = -ENOMEM; + goto out; + } + + memcpy(cpu_addr, fw->data, fw_size); + + rc = _gaudi_init_tpc_mem(hdev, dma_handle, fw_size); + + hdev->asic_funcs->asic_dma_free_coherent(hdev, fw->size, cpu_addr, + dma_handle); + +out: + release_firmware(fw); + return rc; +} + +static int gaudi_late_init(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + int rc; + + rc = gaudi->cpucp_info_get(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to get cpucp info\n"); + return rc; + } + + rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS); + if (rc) { + dev_err(hdev->dev, "Failed to enable PCI access from CPU\n"); + return rc; + } + + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER); + + gaudi_fetch_psoc_frequency(hdev); + + rc = gaudi_mmu_clear_pgt_range(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to clear MMU page tables range\n"); + goto disable_pci_access; + } + + rc = gaudi_init_tpc_mem(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to initialize TPC memories\n"); + goto disable_pci_access; + } + + return 0; + +disable_pci_access: + hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS); + + return rc; +} + +static void 
gaudi_late_fini(struct hl_device *hdev)
+{
+	const struct hwmon_channel_info **channel_info_arr;
+	int i = 0;
+
+	if (!hdev->hl_chip_info->info)
+		return;
+
+	channel_info_arr = hdev->hl_chip_info->info;
+
+	while (channel_info_arr[i]) {
+		kfree(channel_info_arr[i]->config);
+		kfree(channel_info_arr[i]);
+		i++;
+	}
+
+	kfree(channel_info_arr);
+
+	hdev->hl_chip_info->info = NULL;
+}
+
+static int gaudi_alloc_cpu_accessible_dma_mem(struct hl_device *hdev)
+{
+	dma_addr_t dma_addr_arr[GAUDI_ALLOC_CPU_MEM_RETRY_CNT] = {}, end_addr;
+	void *virt_addr_arr[GAUDI_ALLOC_CPU_MEM_RETRY_CNT] = {};
+	int i, j, rc = 0;
+
+	/*
+	 * The device CPU works with 40-bit addresses, and bit 39 must be set
+	 * to '1' when accessing the host.
+	 * Bits 49:39 of the full host address are saved for a later
+	 * configuration of the HW to perform extension to 50 bits.
+	 * Because there is a single HW register that holds the extension bits,
+	 * these bits must be identical across the entire allocated range.
+	 */
+
+	for (i = 0 ; i < GAUDI_ALLOC_CPU_MEM_RETRY_CNT ; i++) {
+		virt_addr_arr[i] =
+			hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
+						HL_CPU_ACCESSIBLE_MEM_SIZE,
+						&dma_addr_arr[i],
+						GFP_KERNEL | __GFP_ZERO);
+		if (!virt_addr_arr[i]) {
+			rc = -ENOMEM;
+			goto free_dma_mem_arr;
+		}
+
+		end_addr = dma_addr_arr[i] + HL_CPU_ACCESSIBLE_MEM_SIZE - 1;
+		if (GAUDI_CPU_PCI_MSB_ADDR(dma_addr_arr[i]) ==
+				GAUDI_CPU_PCI_MSB_ADDR(end_addr))
+			break;
+	}
+
+	if (i == GAUDI_ALLOC_CPU_MEM_RETRY_CNT) {
+		dev_err(hdev->dev,
+			"MSBs of CPU accessible DMA memory are not identical across the allocated range\n");
+		rc = -EFAULT;
+		goto free_dma_mem_arr;
+	}
+
+	hdev->cpu_accessible_dma_mem = virt_addr_arr[i];
+	hdev->cpu_accessible_dma_address = dma_addr_arr[i];
+	hdev->cpu_pci_msb_addr =
+		GAUDI_CPU_PCI_MSB_ADDR(hdev->cpu_accessible_dma_address);
+
+	GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address);
+
+free_dma_mem_arr:
+	for (j = 0 ; j < i ; j++)
+		hdev->asic_funcs->asic_dma_free_coherent(hdev,
+						HL_CPU_ACCESSIBLE_MEM_SIZE,
+						virt_addr_arr[j],
+						dma_addr_arr[j]);
+
+	return rc;
+}
+
+static void gaudi_free_internal_qmans_pq_mem(struct hl_device *hdev)
+{
+	struct gaudi_device *gaudi = hdev->asic_specific;
+	struct gaudi_internal_qman_info *q;
+	u32 i;
+
+	for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
+		q = &gaudi->internal_qmans[i];
+		if (!q->pq_kernel_addr)
+			continue;
+		hdev->asic_funcs->asic_dma_free_coherent(hdev, q->pq_size,
+							q->pq_kernel_addr,
+							q->pq_dma_addr);
+	}
+}
+
+static int gaudi_alloc_internal_qmans_pq_mem(struct hl_device *hdev)
+{
+	struct gaudi_device *gaudi = hdev->asic_specific;
+	struct gaudi_internal_qman_info *q;
+	int rc, i;
+
+	for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
+		if (gaudi_queue_type[i] != QUEUE_TYPE_INT)
+			continue;
+
+		q = &gaudi->internal_qmans[i];
+
+		switch (i) {
+		case GAUDI_QUEUE_ID_DMA_2_0 ... GAUDI_QUEUE_ID_DMA_4_3:
+		case GAUDI_QUEUE_ID_DMA_6_0 ... GAUDI_QUEUE_ID_DMA_7_3:
+			q->pq_size = HBM_DMA_QMAN_SIZE_IN_BYTES;
+			break;
+		case GAUDI_QUEUE_ID_MME_0_0 ... GAUDI_QUEUE_ID_MME_1_3:
+			q->pq_size = MME_QMAN_SIZE_IN_BYTES;
+			break;
+		case GAUDI_QUEUE_ID_TPC_0_0 ...
GAUDI_QUEUE_ID_TPC_7_3: + q->pq_size = TPC_QMAN_SIZE_IN_BYTES; + break; + default: + dev_err(hdev->dev, "Bad internal queue index %d", i); + rc = -EINVAL; + goto free_internal_qmans_pq_mem; + } + + q->pq_kernel_addr = hdev->asic_funcs->asic_dma_alloc_coherent( + hdev, q->pq_size, + &q->pq_dma_addr, + GFP_KERNEL | __GFP_ZERO); + if (!q->pq_kernel_addr) { + rc = -ENOMEM; + goto free_internal_qmans_pq_mem; + } + } + + return 0; + +free_internal_qmans_pq_mem: + gaudi_free_internal_qmans_pq_mem(hdev); + return rc; +} + +static int gaudi_sw_init(struct hl_device *hdev) +{ + struct gaudi_device *gaudi; + u32 i, event_id = 0; + int rc; + + /* Allocate device structure */ + gaudi = kzalloc(sizeof(*gaudi), GFP_KERNEL); + if (!gaudi) + return -ENOMEM; + + for (i = 0 ; i < ARRAY_SIZE(gaudi_irq_map_table) ; i++) { + if (gaudi_irq_map_table[i].valid) { + if (event_id == GAUDI_EVENT_SIZE) { + dev_err(hdev->dev, + "Event array exceeds the limit of %u events\n", + GAUDI_EVENT_SIZE); + rc = -EINVAL; + goto free_gaudi_device; + } + + gaudi->events[event_id++] = + gaudi_irq_map_table[i].fc_id; + } + } + + gaudi->cpucp_info_get = gaudi_cpucp_info_get; + + gaudi->max_freq_value = GAUDI_MAX_CLK_FREQ; + + hdev->asic_specific = gaudi; + + /* Create DMA pool for small allocations */ + hdev->dma_pool = dma_pool_create(dev_name(hdev->dev), + &hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0); + if (!hdev->dma_pool) { + dev_err(hdev->dev, "failed to create DMA pool\n"); + rc = -ENOMEM; + goto free_gaudi_device; + } + + rc = gaudi_alloc_cpu_accessible_dma_mem(hdev); + if (rc) + goto free_dma_pool; + + hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1); + if (!hdev->cpu_accessible_dma_pool) { + dev_err(hdev->dev, + "Failed to create CPU accessible DMA pool\n"); + rc = -ENOMEM; + goto free_cpu_dma_mem; + } + + rc = gen_pool_add(hdev->cpu_accessible_dma_pool, + (uintptr_t) hdev->cpu_accessible_dma_mem, + HL_CPU_ACCESSIBLE_MEM_SIZE, -1); + if (rc) { + dev_err(hdev->dev, + "Failed to add memory to CPU accessible DMA pool\n"); + rc = -EFAULT; + goto free_cpu_accessible_dma_pool; + } + + rc = gaudi_alloc_internal_qmans_pq_mem(hdev); + if (rc) + goto free_cpu_accessible_dma_pool; + + spin_lock_init(&gaudi->hw_queues_lock); + mutex_init(&gaudi->clk_gate_mutex); + + hdev->supports_sync_stream = true; + hdev->supports_coresight = true; + + return 0; + +free_cpu_accessible_dma_pool: + gen_pool_destroy(hdev->cpu_accessible_dma_pool); +free_cpu_dma_mem: + GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address, + hdev->cpu_pci_msb_addr); + hdev->asic_funcs->asic_dma_free_coherent(hdev, + HL_CPU_ACCESSIBLE_MEM_SIZE, + hdev->cpu_accessible_dma_mem, + hdev->cpu_accessible_dma_address); +free_dma_pool: + dma_pool_destroy(hdev->dma_pool); +free_gaudi_device: + kfree(gaudi); + return rc; +} + +static int gaudi_sw_fini(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + gaudi_free_internal_qmans_pq_mem(hdev); + + gen_pool_destroy(hdev->cpu_accessible_dma_pool); + + GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address, + hdev->cpu_pci_msb_addr); + hdev->asic_funcs->asic_dma_free_coherent(hdev, + HL_CPU_ACCESSIBLE_MEM_SIZE, + hdev->cpu_accessible_dma_mem, + hdev->cpu_accessible_dma_address); + + dma_pool_destroy(hdev->dma_pool); + + mutex_destroy(&gaudi->clk_gate_mutex); + + kfree(gaudi); + + return 0; +} + +static irqreturn_t gaudi_irq_handler_single(int irq, void *arg) +{ + struct hl_device *hdev = arg; + int i; + + if (hdev->disabled) + return IRQ_HANDLED; + + for (i = 0 ; i < 
hdev->asic_prop.completion_queues_count ; i++) + hl_irq_handler_cq(irq, &hdev->completion_queue[i]); + + hl_irq_handler_eq(irq, &hdev->event_queue); + + return IRQ_HANDLED; +} + +/* + * For backward compatibility, new MSI interrupts should be set after the + * existing CPU and NIC interrupts. + */ +static int gaudi_pci_irq_vector(struct hl_device *hdev, unsigned int nr, + bool cpu_eq) +{ + int msi_vec; + + if ((nr != GAUDI_EVENT_QUEUE_MSI_IDX) && (cpu_eq)) + dev_crit(hdev->dev, "CPU EQ must use IRQ %d\n", + GAUDI_EVENT_QUEUE_MSI_IDX); + + msi_vec = ((nr < GAUDI_EVENT_QUEUE_MSI_IDX) || (cpu_eq)) ? nr : + (nr + NIC_NUMBER_OF_ENGINES + 1); + + return pci_irq_vector(hdev->pdev, msi_vec); +} + +static int gaudi_enable_msi_single(struct hl_device *hdev) +{ + int rc, irq; + + dev_info(hdev->dev, "Working in single MSI IRQ mode\n"); + + irq = gaudi_pci_irq_vector(hdev, 0, false); + rc = request_irq(irq, gaudi_irq_handler_single, 0, + "gaudi single msi", hdev); + if (rc) + dev_err(hdev->dev, + "Failed to request single MSI IRQ\n"); + + return rc; +} + +static int gaudi_enable_msi_multi(struct hl_device *hdev) +{ + int cq_cnt = hdev->asic_prop.completion_queues_count; + int rc, i, irq_cnt_init, irq; + + for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) { + irq = gaudi_pci_irq_vector(hdev, i, false); + rc = request_irq(irq, hl_irq_handler_cq, 0, gaudi_irq_name[i], + &hdev->completion_queue[i]); + if (rc) { + dev_err(hdev->dev, "Failed to request IRQ %d", irq); + goto free_irqs; + } + } + + irq = gaudi_pci_irq_vector(hdev, GAUDI_EVENT_QUEUE_MSI_IDX, true); + rc = request_irq(irq, hl_irq_handler_eq, 0, gaudi_irq_name[cq_cnt], + &hdev->event_queue); + if (rc) { + dev_err(hdev->dev, "Failed to request IRQ %d", irq); + goto free_irqs; + } + + return 0; + +free_irqs: + for (i = 0 ; i < irq_cnt_init ; i++) + free_irq(gaudi_pci_irq_vector(hdev, i, false), + &hdev->completion_queue[i]); + return rc; +} + +static int gaudi_enable_msi(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + int rc; + + if (gaudi->hw_cap_initialized & HW_CAP_MSI) + return 0; + + rc = pci_alloc_irq_vectors(hdev->pdev, 1, GAUDI_MSI_ENTRIES, + PCI_IRQ_MSI); + if (rc < 0) { + dev_err(hdev->dev, "MSI: Failed to enable support %d\n", rc); + return rc; + } + + if (rc < NUMBER_OF_INTERRUPTS) { + gaudi->multi_msi_mode = false; + rc = gaudi_enable_msi_single(hdev); + } else { + gaudi->multi_msi_mode = true; + rc = gaudi_enable_msi_multi(hdev); + } + + if (rc) + goto free_pci_irq_vectors; + + gaudi->hw_cap_initialized |= HW_CAP_MSI; + + return 0; + +free_pci_irq_vectors: + pci_free_irq_vectors(hdev->pdev); + return rc; +} + +static void gaudi_sync_irqs(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + int i, cq_cnt = hdev->asic_prop.completion_queues_count; + + if (!(gaudi->hw_cap_initialized & HW_CAP_MSI)) + return; + + /* Wait for all pending IRQs to be finished */ + if (gaudi->multi_msi_mode) { + for (i = 0 ; i < cq_cnt ; i++) + synchronize_irq(gaudi_pci_irq_vector(hdev, i, false)); + + synchronize_irq(gaudi_pci_irq_vector(hdev, + GAUDI_EVENT_QUEUE_MSI_IDX, + true)); + } else { + synchronize_irq(gaudi_pci_irq_vector(hdev, 0, false)); + } +} + +static void gaudi_disable_msi(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + int i, irq, cq_cnt = hdev->asic_prop.completion_queues_count; + + if (!(gaudi->hw_cap_initialized & HW_CAP_MSI)) + return; + + gaudi_sync_irqs(hdev); + + if (gaudi->multi_msi_mode) { + irq = gaudi_pci_irq_vector(hdev, 
GAUDI_EVENT_QUEUE_MSI_IDX, + true); + free_irq(irq, &hdev->event_queue); + + for (i = 0 ; i < cq_cnt ; i++) { + irq = gaudi_pci_irq_vector(hdev, i, false); + free_irq(irq, &hdev->completion_queue[i]); + } + } else { + free_irq(gaudi_pci_irq_vector(hdev, 0, false), hdev); + } + + pci_free_irq_vectors(hdev->pdev); + + gaudi->hw_cap_initialized &= ~HW_CAP_MSI; +} + +static void gaudi_init_scrambler_sram(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (gaudi->hw_cap_initialized & HW_CAP_SRAM_SCRAMBLER) + return; + + if (!hdev->sram_scrambler_enable) + return; + + WREG32(mmNIF_RTR_CTRL_0_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_1_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_2_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_3_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_4_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_5_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_6_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_7_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + + WREG32(mmSIF_RTR_CTRL_0_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_1_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_2_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_3_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_4_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_5_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_6_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_7_SCRAM_SRAM_EN, + 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT); + + WREG32(mmDMA_IF_E_N_DOWN_CH0_SCRAM_SRAM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_E_N_DOWN_CH1_SCRAM_SRAM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_E_S_DOWN_CH0_SCRAM_SRAM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_E_S_DOWN_CH1_SCRAM_SRAM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_W_N_DOWN_CH0_SCRAM_SRAM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_W_N_DOWN_CH1_SCRAM_SRAM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_W_S_DOWN_CH0_SCRAM_SRAM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_W_S_DOWN_CH1_SCRAM_SRAM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT); + + gaudi->hw_cap_initialized |= HW_CAP_SRAM_SCRAMBLER; +} + +static void gaudi_init_scrambler_hbm(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (gaudi->hw_cap_initialized & HW_CAP_HBM_SCRAMBLER) + return; + + if (!hdev->dram_scrambler_enable) + return; + + WREG32(mmNIF_RTR_CTRL_0_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_1_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_2_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_3_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_4_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_5_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + 
WREG32(mmNIF_RTR_CTRL_6_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_7_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + + WREG32(mmSIF_RTR_CTRL_0_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_1_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_2_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_3_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_4_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_5_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_6_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_7_SCRAM_HBM_EN, + 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT); + + WREG32(mmDMA_IF_E_N_DOWN_CH0_SCRAM_HBM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_E_N_DOWN_CH1_SCRAM_HBM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_E_S_DOWN_CH0_SCRAM_HBM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_E_S_DOWN_CH1_SCRAM_HBM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_W_N_DOWN_CH0_SCRAM_HBM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_W_N_DOWN_CH1_SCRAM_HBM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_W_S_DOWN_CH0_SCRAM_HBM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_W_S_DOWN_CH1_SCRAM_HBM_EN, + 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT); + + gaudi->hw_cap_initialized |= HW_CAP_HBM_SCRAMBLER; +} + +static void gaudi_init_e2e(struct hl_device *hdev) +{ + WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 247 >> 3); + WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 785 >> 3); + WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 49); + WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_RD_SIZE, 101); + + WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_WR_SIZE, 275 >> 3); + WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_RD_SIZE, 614 >> 3); + WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_WR_SIZE, 1); + WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_RD_SIZE, 39); + + WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_WR_SIZE, 1); + WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_RD_SIZE, 1); + WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_WR_SIZE, 1); + WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_RD_SIZE, 32); + + WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_WR_SIZE, 176 >> 3); + WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_RD_SIZE, 32 >> 3); + WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_WR_SIZE, 19); + WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_RD_SIZE, 32); + + WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_WR_SIZE, 176 >> 3); + WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_RD_SIZE, 32 >> 3); + WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_WR_SIZE, 19); + WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_RD_SIZE, 32); + + WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_WR_SIZE, 1); + WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_RD_SIZE, 1); + WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_WR_SIZE, 1); + WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_RD_SIZE, 32); + + WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_WR_SIZE, 275 >> 3); + WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_RD_SIZE, 614 >> 3); + WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_WR_SIZE, 1); + WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_RD_SIZE, 39); + + WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_WR_SIZE, 297 >> 3); + WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_RD_SIZE, 908 >> 3); + WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_WR_SIZE, 19); + WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_RD_SIZE, 19); + + WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 318 >> 3); + WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 956 >> 3); + WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 79); + WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_RD_SIZE, 163); + + 
WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_WR_SIZE, 275 >> 3); + WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_RD_SIZE, 614 >> 3); + WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_WR_SIZE, 1); + WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_RD_SIZE, 39); + + WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_WR_SIZE, 1); + WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_RD_SIZE, 1); + WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_WR_SIZE, 1); + WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_RD_SIZE, 32); + + WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_WR_SIZE, 176 >> 3); + WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_RD_SIZE, 32 >> 3); + WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_WR_SIZE, 19); + WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_RD_SIZE, 32); + + WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_WR_SIZE, 176 >> 3); + WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_RD_SIZE, 32 >> 3); + WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_WR_SIZE, 19); + WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_RD_SIZE, 32); + + WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_WR_SIZE, 1); + WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_RD_SIZE, 1); + WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_WR_SIZE, 1); + WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_RD_SIZE, 32); + + WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_WR_SIZE, 275 >> 3); + WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_RD_SIZE, 614 >> 3); + WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_WR_SIZE, 1); + WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_RD_SIZE, 39); + + WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_WR_SIZE, 318 >> 3); + WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_RD_SIZE, 956 >> 3); + WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_WR_SIZE, 79); + WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_RD_SIZE, 79); + + WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3); + WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3); + WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_WR_SIZE, 162); + WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_RD_SIZE, 338); + + WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3); + WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3); + WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_WR_SIZE, 162); + WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_RD_SIZE, 338); + + WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3); + WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3); + WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_WR_SIZE, 162); + WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_RD_SIZE, 338); + + WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3); + WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3); + WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_WR_SIZE, 162); + WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_RD_SIZE, 338); + + WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3); + WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3); + WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_WR_SIZE, 162); + WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_RD_SIZE, 338); + + WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3); + WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3); + WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_WR_SIZE, 162); + WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_RD_SIZE, 338); + + WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3); + WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3); + WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_WR_SIZE, 162); + WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_RD_SIZE, 338); + + WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3); + WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3); + WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_WR_SIZE, 162); + WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_RD_SIZE, 338); + + if (!hdev->dram_scrambler_enable) { + WREG32(mmSIF_RTR_CTRL_0_NL_HBM_SEL_0, 0x21); + WREG32(mmSIF_RTR_CTRL_0_NL_HBM_SEL_1, 0x22); + WREG32(mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmSIF_RTR_CTRL_1_NL_HBM_SEL_0, 0x21); + 
WREG32(mmSIF_RTR_CTRL_1_NL_HBM_SEL_1, 0x22); + WREG32(mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmSIF_RTR_CTRL_2_NL_HBM_SEL_0, 0x21); + WREG32(mmSIF_RTR_CTRL_2_NL_HBM_SEL_1, 0x22); + WREG32(mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmSIF_RTR_CTRL_3_NL_HBM_SEL_0, 0x21); + WREG32(mmSIF_RTR_CTRL_3_NL_HBM_SEL_1, 0x22); + WREG32(mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmSIF_RTR_CTRL_4_NL_HBM_SEL_0, 0x21); + WREG32(mmSIF_RTR_CTRL_4_NL_HBM_SEL_1, 0x22); + WREG32(mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmSIF_RTR_CTRL_5_NL_HBM_SEL_0, 0x21); + WREG32(mmSIF_RTR_CTRL_5_NL_HBM_SEL_1, 0x22); + WREG32(mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmSIF_RTR_CTRL_6_NL_HBM_SEL_0, 0x21); + WREG32(mmSIF_RTR_CTRL_6_NL_HBM_SEL_1, 0x22); + WREG32(mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmSIF_RTR_CTRL_7_NL_HBM_SEL_0, 0x21); + WREG32(mmSIF_RTR_CTRL_7_NL_HBM_SEL_1, 0x22); + WREG32(mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmNIF_RTR_CTRL_0_NL_HBM_SEL_0, 0x21); + WREG32(mmNIF_RTR_CTRL_0_NL_HBM_SEL_1, 0x22); + WREG32(mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmNIF_RTR_CTRL_1_NL_HBM_SEL_0, 0x21); + WREG32(mmNIF_RTR_CTRL_1_NL_HBM_SEL_1, 0x22); + WREG32(mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmNIF_RTR_CTRL_2_NL_HBM_SEL_0, 0x21); + WREG32(mmNIF_RTR_CTRL_2_NL_HBM_SEL_1, 0x22); + WREG32(mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmNIF_RTR_CTRL_3_NL_HBM_SEL_0, 0x21); + WREG32(mmNIF_RTR_CTRL_3_NL_HBM_SEL_1, 0x22); + WREG32(mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmNIF_RTR_CTRL_4_NL_HBM_SEL_0, 0x21); + WREG32(mmNIF_RTR_CTRL_4_NL_HBM_SEL_1, 0x22); + WREG32(mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmNIF_RTR_CTRL_5_NL_HBM_SEL_0, 0x21); + WREG32(mmNIF_RTR_CTRL_5_NL_HBM_SEL_1, 0x22); + WREG32(mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmNIF_RTR_CTRL_6_NL_HBM_SEL_0, 0x21); + WREG32(mmNIF_RTR_CTRL_6_NL_HBM_SEL_1, 0x22); + WREG32(mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmNIF_RTR_CTRL_7_NL_HBM_SEL_0, 0x21); + WREG32(mmNIF_RTR_CTRL_7_NL_HBM_SEL_1, 0x22); + WREG32(mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_0, 0x21); + WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_1, 0x22); + WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_0, 0x21); + WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_1, 0x22); + WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_0, 0x21); + WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_1, 0x22); + WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20); 
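+
+		/*
+		 * The same non-scrambled HBM address-decoding values
+		 * (NL_HBM_SEL_0/1, OFFSET_18 and PC_SEL_3) are programmed
+		 * into each remaining DMA_IF down channel below; this whole
+		 * block runs only when the HBM (DRAM) scrambler is left
+		 * disabled.
+		 */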
+ + WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_0, 0x21); + WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_1, 0x22); + WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_0, 0x21); + WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_1, 0x22); + WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_0, 0x21); + WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_1, 0x22); + WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_0, 0x21); + WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_1, 0x22); + WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20); + + WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_0, 0x21); + WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_1, 0x22); + WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F); + WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20); + } + + WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + 
WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_EN, + 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_EN, + 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_EN, + 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_EN, + 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_EN, + 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_EN, + 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_EN, + 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_EN, + 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_EN, + 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_EN, + 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_EN, + 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_EN, + 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_EN, + 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_EN, + 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_EN, + 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_EN, + 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT); + + WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_EN, + 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT); + WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_EN, + 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT); +} + +static void gaudi_init_hbm_cred(struct hl_device *hdev) +{ + uint32_t hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd; + + hbm0_wr = 0x33333333; + hbm0_rd = 0x77777777; + hbm1_wr = 0x55555555; + hbm1_rd = 0xDDDDDDDD; + + WREG32(mmDMA_IF_E_N_HBM0_WR_CRED_CNT, hbm0_wr); + WREG32(mmDMA_IF_E_N_HBM1_WR_CRED_CNT, hbm1_wr); + WREG32(mmDMA_IF_E_N_HBM0_RD_CRED_CNT, hbm0_rd); + WREG32(mmDMA_IF_E_N_HBM1_RD_CRED_CNT, hbm1_rd); + + WREG32(mmDMA_IF_E_S_HBM0_WR_CRED_CNT, hbm0_wr); + WREG32(mmDMA_IF_E_S_HBM1_WR_CRED_CNT, hbm1_wr); + WREG32(mmDMA_IF_E_S_HBM0_RD_CRED_CNT, hbm0_rd); + WREG32(mmDMA_IF_E_S_HBM1_RD_CRED_CNT, hbm1_rd); + + WREG32(mmDMA_IF_W_N_HBM0_WR_CRED_CNT, hbm0_wr); + WREG32(mmDMA_IF_W_N_HBM1_WR_CRED_CNT, hbm1_wr); + WREG32(mmDMA_IF_W_N_HBM0_RD_CRED_CNT, hbm0_rd); + WREG32(mmDMA_IF_W_N_HBM1_RD_CRED_CNT, hbm1_rd); + + WREG32(mmDMA_IF_W_S_HBM0_WR_CRED_CNT, hbm0_wr); + WREG32(mmDMA_IF_W_S_HBM1_WR_CRED_CNT, hbm1_wr); + WREG32(mmDMA_IF_W_S_HBM0_RD_CRED_CNT, hbm0_rd); + WREG32(mmDMA_IF_W_S_HBM1_RD_CRED_CNT, hbm1_rd); + + WREG32(mmDMA_IF_E_N_HBM_CRED_EN_0, + (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) | + (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT)); + WREG32(mmDMA_IF_E_S_HBM_CRED_EN_0, + (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) | + (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT)); + WREG32(mmDMA_IF_W_N_HBM_CRED_EN_0, + (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) | + (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT)); + WREG32(mmDMA_IF_W_S_HBM_CRED_EN_0, + (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) | + (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT)); + + WREG32(mmDMA_IF_E_N_HBM_CRED_EN_1, + (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) | + (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT)); + WREG32(mmDMA_IF_E_S_HBM_CRED_EN_1, + (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) | + (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT)); + WREG32(mmDMA_IF_W_N_HBM_CRED_EN_1, + (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) | + (1 
<< DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT)); + WREG32(mmDMA_IF_W_S_HBM_CRED_EN_1, + (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) | + (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT)); +} + +static void gaudi_init_golden_registers(struct hl_device *hdev) +{ + u32 tpc_offset; + int tpc_id, i; + + gaudi_init_e2e(hdev); + + gaudi_init_hbm_cred(hdev); + + hdev->asic_funcs->disable_clock_gating(hdev); + + for (tpc_id = 0, tpc_offset = 0; + tpc_id < TPC_NUMBER_OF_ENGINES; + tpc_id++, tpc_offset += TPC_CFG_OFFSET) { + /* Mask all arithmetic interrupts from TPC */ + WREG32(mmTPC0_CFG_TPC_INTR_MASK + tpc_offset, 0x8FFF); + /* Set 16 cache lines */ + WREG32_FIELD(TPC0_CFG_MSS_CONFIG, tpc_offset, + ICACHE_FETCH_LINE_NUM, 2); + } + + /* Make sure 1st 128 bytes in SRAM are 0 for Tensor DMA */ + for (i = 0 ; i < 128 ; i += 8) + writeq(0, hdev->pcie_bar[SRAM_BAR_ID] + i); + + WREG32(mmMME0_CTRL_EUS_ROLLUP_CNT_ADD, 3); + WREG32(mmMME1_CTRL_EUS_ROLLUP_CNT_ADD, 3); + WREG32(mmMME2_CTRL_EUS_ROLLUP_CNT_ADD, 3); + WREG32(mmMME3_CTRL_EUS_ROLLUP_CNT_ADD, 3); +} + +static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id, + int qman_id, dma_addr_t qman_pq_addr) +{ + u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi; + u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi; + u32 q_off, dma_qm_offset; + u32 dma_qm_err_cfg; + + dma_qm_offset = dma_id * DMA_QMAN_OFFSET; + + mtr_base_en_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_en_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + so_base_en_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + so_base_en_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + mtr_base_ws_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_ws_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + so_base_ws_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0); + so_base_ws_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0); + + q_off = dma_qm_offset + qman_id * 4; + + WREG32(mmDMA0_QM_PQ_BASE_LO_0 + q_off, lower_32_bits(qman_pq_addr)); + WREG32(mmDMA0_QM_PQ_BASE_HI_0 + q_off, upper_32_bits(qman_pq_addr)); + + WREG32(mmDMA0_QM_PQ_SIZE_0 + q_off, ilog2(HL_QUEUE_LENGTH)); + WREG32(mmDMA0_QM_PQ_PI_0 + q_off, 0); + WREG32(mmDMA0_QM_PQ_CI_0 + q_off, 0); + + WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, QMAN_LDMA_SIZE_OFFSET); + WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, + QMAN_LDMA_SRC_OFFSET); + WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, + QMAN_LDMA_DST_OFFSET); + + WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo); + WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi); + WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo); + WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi); + WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off, mtr_base_ws_lo); + WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off, mtr_base_ws_hi); + WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo); + WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi); + + WREG32(mmDMA0_QM_CP_BARRIER_CFG_0 + q_off, 0x100); + + /* The following configuration is needed only once per QMAN */ + if (qman_id == 0) { + /* Configure RAZWI IRQ */ + dma_qm_err_cfg = PCI_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK; + if (hdev->stop_on_err) { + dma_qm_err_cfg |= + 
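/*
 * PQ sizing note: the PQ_SIZE register takes the queue length as a
 * power-of-two exponent, hence the ilog2(HL_QUEUE_LENGTH) write
 * above; for example, a 4096-entry queue would program the value
 * 12. The PI and CI registers are reset to zero and wrap modulo
 * that length as work is submitted and consumed.
 */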
PCI_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK; + } + + WREG32(mmDMA0_QM_GLBL_ERR_CFG + dma_qm_offset, dma_qm_err_cfg); + WREG32(mmDMA0_QM_GLBL_ERR_ADDR_LO + dma_qm_offset, + lower_32_bits(CFG_BASE + + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR)); + WREG32(mmDMA0_QM_GLBL_ERR_ADDR_HI + dma_qm_offset, + upper_32_bits(CFG_BASE + + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR)); + WREG32(mmDMA0_QM_GLBL_ERR_WDATA + dma_qm_offset, + gaudi_irq_map_table[GAUDI_EVENT_DMA0_QM].cpu_id + + dma_id); + + WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset, + QM_ARB_ERR_MSG_EN_MASK); + + /* Increase ARB WDT to support streams architecture */ + WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset, + GAUDI_ARB_WDT_TIMEOUT); + + WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset, + QMAN_EXTERNAL_MAKE_TRUSTED); + + WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0); + } +} + +static void gaudi_init_dma_core(struct hl_device *hdev, int dma_id) +{ + u32 dma_offset = dma_id * DMA_CORE_OFFSET; + u32 dma_err_cfg = 1 << DMA0_CORE_ERR_CFG_ERR_MSG_EN_SHIFT; + + /* Set to maximum possible according to physical size */ + WREG32(mmDMA0_CORE_RD_MAX_OUTSTAND + dma_offset, 0); + WREG32(mmDMA0_CORE_RD_MAX_SIZE + dma_offset, 0); + + /* WA for H/W bug H3-2116 */ + WREG32(mmDMA0_CORE_LBW_MAX_OUTSTAND + dma_offset, 15); + + /* STOP_ON bit implies no completion to operation in case of RAZWI */ + if (hdev->stop_on_err) + dma_err_cfg |= 1 << DMA0_CORE_ERR_CFG_STOP_ON_ERR_SHIFT; + + WREG32(mmDMA0_CORE_ERR_CFG + dma_offset, dma_err_cfg); + WREG32(mmDMA0_CORE_ERRMSG_ADDR_LO + dma_offset, + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR)); + WREG32(mmDMA0_CORE_ERRMSG_ADDR_HI + dma_offset, + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR)); + WREG32(mmDMA0_CORE_ERRMSG_WDATA + dma_offset, + gaudi_irq_map_table[GAUDI_EVENT_DMA0_CORE].cpu_id + dma_id); + WREG32(mmDMA0_CORE_PROT + dma_offset, + 1 << DMA0_CORE_PROT_ERR_VAL_SHIFT); + /* If the channel is secured, it should be in MMU bypass mode */ + WREG32(mmDMA0_CORE_SECURE_PROPS + dma_offset, + 1 << DMA0_CORE_SECURE_PROPS_MMBP_SHIFT); + WREG32(mmDMA0_CORE_CFG_0 + dma_offset, 1 << DMA0_CORE_CFG_0_EN_SHIFT); +} + +static void gaudi_enable_qman(struct hl_device *hdev, int dma_id, + u32 enable_mask) +{ + u32 dma_qm_offset = dma_id * DMA_QMAN_OFFSET; + + WREG32(mmDMA0_QM_GLBL_CFG0 + dma_qm_offset, enable_mask); +} + +static void gaudi_init_pci_dma_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct hl_hw_queue *q; + int i, j, dma_id, cpu_skip, nic_skip, cq_id = 0, q_idx, msi_vec = 0; + + if (gaudi->hw_cap_initialized & HW_CAP_PCI_DMA) + return; + + for (i = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) { + dma_id = gaudi_dma_assignment[i]; + /* + * For queues after the CPU Q need to add 1 to get the correct + * queue. In addition, need to add the CPU EQ and NIC IRQs in + * order to get the correct MSI register. 
+ */ + if (dma_id > 1) { + cpu_skip = 1; + nic_skip = NIC_NUMBER_OF_ENGINES; + } else { + cpu_skip = 0; + nic_skip = 0; + } + + for (j = 0 ; j < QMAN_STREAMS ; j++) { + q_idx = 4 * dma_id + j + cpu_skip; + q = &hdev->kernel_queues[q_idx]; + q->cq_id = cq_id++; + q->msi_vec = nic_skip + cpu_skip + msi_vec++; + gaudi_init_pci_dma_qman(hdev, dma_id, j, + q->bus_address); + } + + gaudi_init_dma_core(hdev, dma_id); + + gaudi_enable_qman(hdev, dma_id, PCI_DMA_QMAN_ENABLE); + } + + gaudi->hw_cap_initialized |= HW_CAP_PCI_DMA; +} + +static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id, + int qman_id, u64 qman_base_addr) +{ + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 q_off, dma_qm_offset; + u32 dma_qm_err_cfg; + + dma_qm_offset = dma_id * DMA_QMAN_OFFSET; + + mtr_base_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + + q_off = dma_qm_offset + qman_id * 4; + + if (qman_id < 4) { + WREG32(mmDMA0_QM_PQ_BASE_LO_0 + q_off, + lower_32_bits(qman_base_addr)); + WREG32(mmDMA0_QM_PQ_BASE_HI_0 + q_off, + upper_32_bits(qman_base_addr)); + + WREG32(mmDMA0_QM_PQ_SIZE_0 + q_off, ilog2(HBM_DMA_QMAN_LENGTH)); + WREG32(mmDMA0_QM_PQ_PI_0 + q_off, 0); + WREG32(mmDMA0_QM_PQ_CI_0 + q_off, 0); + + WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, + QMAN_CPDMA_SIZE_OFFSET); + WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, + QMAN_CPDMA_SRC_OFFSET); + WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, + QMAN_CPDMA_DST_OFFSET); + } else { + WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, + QMAN_LDMA_SIZE_OFFSET); + WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, + QMAN_LDMA_SRC_OFFSET); + WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, + QMAN_LDMA_DST_OFFSET); + + /* Configure RAZWI IRQ */ + dma_qm_err_cfg = HBM_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK; + if (hdev->stop_on_err) { + dma_qm_err_cfg |= + HBM_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK; + } + WREG32(mmDMA0_QM_GLBL_ERR_CFG + dma_qm_offset, dma_qm_err_cfg); + + WREG32(mmDMA0_QM_GLBL_ERR_ADDR_LO + dma_qm_offset, + lower_32_bits(CFG_BASE + + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR)); + WREG32(mmDMA0_QM_GLBL_ERR_ADDR_HI + dma_qm_offset, + upper_32_bits(CFG_BASE + + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR)); + WREG32(mmDMA0_QM_GLBL_ERR_WDATA + dma_qm_offset, + gaudi_irq_map_table[GAUDI_EVENT_DMA0_QM].cpu_id + + dma_id); + + WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset, + QM_ARB_ERR_MSG_EN_MASK); + + /* Increase ARB WDT to support streams architecture */ + WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset, + GAUDI_ARB_WDT_TIMEOUT); + + WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0); + WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset, + QMAN_INTERNAL_MAKE_TRUSTED); + } + + WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo); + WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi); + WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo); + WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi); +} + +static void gaudi_init_hbm_dma_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct gaudi_internal_qman_info *q; + u64 qman_base_addr; + int i, j, dma_id, internal_q_index; + + if (gaudi->hw_cap_initialized & HW_CAP_HBM_DMA) + return; + + for (i = 0 ; i < 
HBM_DMA_NUMBER_OF_CHNLS ; i++) { + dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_1 + i]; + + for (j = 0 ; j < QMAN_STREAMS ; j++) { + /* + * Add the CPU queue in order to get the correct queue + * number as all internal queue are placed after it + */ + internal_q_index = dma_id * QMAN_STREAMS + j + 1; + + q = &gaudi->internal_qmans[internal_q_index]; + qman_base_addr = (u64) q->pq_dma_addr; + gaudi_init_hbm_dma_qman(hdev, dma_id, j, + qman_base_addr); + } + + /* Initializing lower CP for HBM DMA QMAN */ + gaudi_init_hbm_dma_qman(hdev, dma_id, 4, 0); + + gaudi_init_dma_core(hdev, dma_id); + + gaudi_enable_qman(hdev, dma_id, HBM_DMA_QMAN_ENABLE); + } + + gaudi->hw_cap_initialized |= HW_CAP_HBM_DMA; +} + +static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset, + int qman_id, u64 qman_base_addr) +{ + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 q_off, mme_id; + u32 mme_qm_err_cfg; + + mtr_base_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + + q_off = mme_offset + qman_id * 4; + + if (qman_id < 4) { + WREG32(mmMME0_QM_PQ_BASE_LO_0 + q_off, + lower_32_bits(qman_base_addr)); + WREG32(mmMME0_QM_PQ_BASE_HI_0 + q_off, + upper_32_bits(qman_base_addr)); + + WREG32(mmMME0_QM_PQ_SIZE_0 + q_off, ilog2(MME_QMAN_LENGTH)); + WREG32(mmMME0_QM_PQ_PI_0 + q_off, 0); + WREG32(mmMME0_QM_PQ_CI_0 + q_off, 0); + + WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, + QMAN_CPDMA_SIZE_OFFSET); + WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, + QMAN_CPDMA_SRC_OFFSET); + WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, + QMAN_CPDMA_DST_OFFSET); + } else { + WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, + QMAN_LDMA_SIZE_OFFSET); + WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, + QMAN_LDMA_SRC_OFFSET); + WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, + QMAN_LDMA_DST_OFFSET); + + /* Configure RAZWI IRQ */ + mme_id = mme_offset / + (mmMME1_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0) / 2; + + mme_qm_err_cfg = MME_QMAN_GLBL_ERR_CFG_MSG_EN_MASK; + if (hdev->stop_on_err) { + mme_qm_err_cfg |= + MME_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK; + } + WREG32(mmMME0_QM_GLBL_ERR_CFG + mme_offset, mme_qm_err_cfg); + WREG32(mmMME0_QM_GLBL_ERR_ADDR_LO + mme_offset, + lower_32_bits(CFG_BASE + + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR)); + WREG32(mmMME0_QM_GLBL_ERR_ADDR_HI + mme_offset, + upper_32_bits(CFG_BASE + + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR)); + WREG32(mmMME0_QM_GLBL_ERR_WDATA + mme_offset, + gaudi_irq_map_table[GAUDI_EVENT_MME0_QM].cpu_id + + mme_id); + + WREG32(mmMME0_QM_ARB_ERR_MSG_EN + mme_offset, + QM_ARB_ERR_MSG_EN_MASK); + + /* Increase ARB WDT to support streams architecture */ + WREG32(mmMME0_QM_ARB_SLV_CHOISE_WDT + mme_offset, + GAUDI_ARB_WDT_TIMEOUT); + + WREG32(mmMME0_QM_GLBL_CFG1 + mme_offset, 0); + WREG32(mmMME0_QM_GLBL_PROT + mme_offset, + QMAN_INTERNAL_MAKE_TRUSTED); + } + + WREG32(mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo); + WREG32(mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi); + WREG32(mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo); + WREG32(mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi); +} + +static void gaudi_init_mme_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct 
gaudi_internal_qman_info *q; + u64 qman_base_addr; + u32 mme_offset; + int i, internal_q_index; + + if (gaudi->hw_cap_initialized & HW_CAP_MME) + return; + + /* + * map GAUDI_QUEUE_ID_MME_0_X to the N_W_MME (mmMME2_QM_BASE) + * and GAUDI_QUEUE_ID_MME_1_X to the S_W_MME (mmMME0_QM_BASE) + */ + + mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0; + + for (i = 0 ; i < MME_NUMBER_OF_QMANS ; i++) { + internal_q_index = GAUDI_QUEUE_ID_MME_0_0 + i; + q = &gaudi->internal_qmans[internal_q_index]; + qman_base_addr = (u64) q->pq_dma_addr; + gaudi_init_mme_qman(hdev, mme_offset, (i & 0x3), + qman_base_addr); + if (i == 3) + mme_offset = 0; + } + + /* Initializing lower CP for MME QMANs */ + mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0; + gaudi_init_mme_qman(hdev, mme_offset, 4, 0); + gaudi_init_mme_qman(hdev, 0, 4, 0); + + WREG32(mmMME2_QM_GLBL_CFG0, QMAN_MME_ENABLE); + WREG32(mmMME0_QM_GLBL_CFG0, QMAN_MME_ENABLE); + + gaudi->hw_cap_initialized |= HW_CAP_MME; +} + +static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset, + int qman_id, u64 qman_base_addr) +{ + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 q_off, tpc_id; + u32 tpc_qm_err_cfg; + + mtr_base_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + + q_off = tpc_offset + qman_id * 4; + + if (qman_id < 4) { + WREG32(mmTPC0_QM_PQ_BASE_LO_0 + q_off, + lower_32_bits(qman_base_addr)); + WREG32(mmTPC0_QM_PQ_BASE_HI_0 + q_off, + upper_32_bits(qman_base_addr)); + + WREG32(mmTPC0_QM_PQ_SIZE_0 + q_off, ilog2(TPC_QMAN_LENGTH)); + WREG32(mmTPC0_QM_PQ_PI_0 + q_off, 0); + WREG32(mmTPC0_QM_PQ_CI_0 + q_off, 0); + + WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, + QMAN_CPDMA_SIZE_OFFSET); + WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, + QMAN_CPDMA_SRC_OFFSET); + WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, + QMAN_CPDMA_DST_OFFSET); + } else { + WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, + QMAN_LDMA_SIZE_OFFSET); + WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, + QMAN_LDMA_SRC_OFFSET); + WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, + QMAN_LDMA_DST_OFFSET); + + /* Configure RAZWI IRQ */ + tpc_id = tpc_offset / + (mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0); + + tpc_qm_err_cfg = TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK; + if (hdev->stop_on_err) { + tpc_qm_err_cfg |= + TPC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK; + } + + WREG32(mmTPC0_QM_GLBL_ERR_CFG + tpc_offset, tpc_qm_err_cfg); + WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + tpc_offset, + lower_32_bits(CFG_BASE + + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR)); + WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + tpc_offset, + upper_32_bits(CFG_BASE + + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR)); + WREG32(mmTPC0_QM_GLBL_ERR_WDATA + tpc_offset, + gaudi_irq_map_table[GAUDI_EVENT_TPC0_QM].cpu_id + + tpc_id); + + WREG32(mmTPC0_QM_ARB_ERR_MSG_EN + tpc_offset, + QM_ARB_ERR_MSG_EN_MASK); + + /* Increase ARB WDT to support streams architecture */ + WREG32(mmTPC0_QM_ARB_SLV_CHOISE_WDT + tpc_offset, + GAUDI_ARB_WDT_TIMEOUT); + + WREG32(mmTPC0_QM_GLBL_CFG1 + tpc_offset, 0); + WREG32(mmTPC0_QM_GLBL_PROT + tpc_offset, + QMAN_INTERNAL_MAKE_TRUSTED); + } + + WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi); + 
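/*
 * Recurring pattern in these *_init_*_qman() helpers: qman_id 0-3
 * are the four stream (upper) CPs, each backed by its own PQ, so
 * they get the PQ base/size/PI/CI writes plus the CPDMA offsets;
 * qman_id 4 is the lower CP, which has no PQ of its own (callers
 * pass a base address of 0) and instead gets the LDMA offsets along
 * with the error, arbitration and protection registers that exist
 * once per QMAN.
 */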
WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi); +} + +static void gaudi_init_tpc_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct gaudi_internal_qman_info *q; + u64 qman_base_addr; + u32 so_base_hi, tpc_offset = 0; + u32 tpc_delta = mmTPC1_CFG_SM_BASE_ADDRESS_HIGH - + mmTPC0_CFG_SM_BASE_ADDRESS_HIGH; + int i, tpc_id, internal_q_index; + + if (gaudi->hw_cap_initialized & HW_CAP_TPC_MASK) + return; + + so_base_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + + for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) { + for (i = 0 ; i < QMAN_STREAMS ; i++) { + internal_q_index = GAUDI_QUEUE_ID_TPC_0_0 + + tpc_id * QMAN_STREAMS + i; + q = &gaudi->internal_qmans[internal_q_index]; + qman_base_addr = (u64) q->pq_dma_addr; + gaudi_init_tpc_qman(hdev, tpc_offset, i, + qman_base_addr); + + if (i == 3) { + /* Initializing lower CP for TPC QMAN */ + gaudi_init_tpc_qman(hdev, tpc_offset, 4, 0); + + /* Enable the QMAN and TPC channel */ + WREG32(mmTPC0_QM_GLBL_CFG0 + tpc_offset, + QMAN_TPC_ENABLE); + } + } + + WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + tpc_id * tpc_delta, + so_base_hi); + + tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0; + + gaudi->hw_cap_initialized |= + FIELD_PREP(HW_CAP_TPC_MASK, 1 << tpc_id); + } +} + +static void gaudi_disable_pci_dma_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)) + return; + + WREG32(mmDMA0_QM_GLBL_CFG0, 0); + WREG32(mmDMA1_QM_GLBL_CFG0, 0); + WREG32(mmDMA5_QM_GLBL_CFG0, 0); +} + +static void gaudi_disable_hbm_dma_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)) + return; + + WREG32(mmDMA2_QM_GLBL_CFG0, 0); + WREG32(mmDMA3_QM_GLBL_CFG0, 0); + WREG32(mmDMA4_QM_GLBL_CFG0, 0); + WREG32(mmDMA6_QM_GLBL_CFG0, 0); + WREG32(mmDMA7_QM_GLBL_CFG0, 0); +} + +static void gaudi_disable_mme_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_MME)) + return; + + WREG32(mmMME2_QM_GLBL_CFG0, 0); + WREG32(mmMME0_QM_GLBL_CFG0, 0); +} + +static void gaudi_disable_tpc_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + u32 tpc_offset = 0; + int tpc_id; + + if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)) + return; + + for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) { + WREG32(mmTPC0_QM_GLBL_CFG0 + tpc_offset, 0); + tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0; + } +} + +static void gaudi_stop_pci_dma_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)) + return; + + /* Stop upper CPs of QMANs 0.0 to 1.3 and 5.0 to 5.3 */ + WREG32(mmDMA0_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT); + WREG32(mmDMA1_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT); + WREG32(mmDMA5_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT); +} + +static void gaudi_stop_hbm_dma_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)) + return; + + /* Stop CPs of HBM DMA QMANs */ + + WREG32(mmDMA2_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT); + WREG32(mmDMA3_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT); + WREG32(mmDMA4_QM_GLBL_CFG1, 
0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT); + WREG32(mmDMA6_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT); + WREG32(mmDMA7_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT); +} + +static void gaudi_stop_mme_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_MME)) + return; + + /* Stop CPs of MME QMANs */ + WREG32(mmMME2_QM_GLBL_CFG1, 0x1F << MME0_QM_GLBL_CFG1_CP_STOP_SHIFT); + WREG32(mmMME0_QM_GLBL_CFG1, 0x1F << MME0_QM_GLBL_CFG1_CP_STOP_SHIFT); +} + +static void gaudi_stop_tpc_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)) + return; + + WREG32(mmTPC0_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT); + WREG32(mmTPC1_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT); + WREG32(mmTPC2_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT); + WREG32(mmTPC3_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT); + WREG32(mmTPC4_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT); + WREG32(mmTPC5_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT); + WREG32(mmTPC6_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT); + WREG32(mmTPC7_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT); +} + +static void gaudi_pci_dma_stall(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)) + return; + + WREG32(mmDMA0_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT); + WREG32(mmDMA1_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT); + WREG32(mmDMA5_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT); +} + +static void gaudi_hbm_dma_stall(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)) + return; + + WREG32(mmDMA2_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT); + WREG32(mmDMA3_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT); + WREG32(mmDMA4_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT); + WREG32(mmDMA6_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT); + WREG32(mmDMA7_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT); +} + +static void gaudi_mme_stall(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_MME)) + return; + + /* WA for H3-1800 bug: do ACC and SBAB writes twice */ + WREG32(mmMME0_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT); + WREG32(mmMME0_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT); + WREG32(mmMME0_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT); + WREG32(mmMME0_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT); + WREG32(mmMME1_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT); + WREG32(mmMME1_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT); + WREG32(mmMME1_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT); + WREG32(mmMME1_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT); + WREG32(mmMME2_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT); + WREG32(mmMME2_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT); + WREG32(mmMME2_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT); + WREG32(mmMME2_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT); + WREG32(mmMME3_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT); + WREG32(mmMME3_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT); + WREG32(mmMME3_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT); + WREG32(mmMME3_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT); +} + +static void gaudi_tpc_stall(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if 
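/*
 * CP_STOP is a per-CP bitmask: 0xF (used for the PCI DMA QMANs
 * above) stops only the four stream CPs, while 0x1F, with bit 4 set
 * as well, also stops the lower CP of the internal HBM DMA, MME and
 * TPC QMANs. The *_stall() helpers that follow then halt the engine
 * cores themselves, after their queues have been stopped.
 */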
(!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)) + return; + + WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); +} + +static void gaudi_set_clock_gating(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + u32 qman_offset; + bool enable; + int i; + + /* In case we are during debug session, don't enable the clock gate + * as it may interfere + */ + if (hdev->in_debug) + return; + + for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) { + enable = !!(hdev->clock_gating_mask & + (BIT_ULL(gaudi_dma_assignment[i]))); + + qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET; + WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, + enable ? QMAN_CGM1_PWR_GATE_EN : 0); + WREG32(mmDMA0_QM_CGM_CFG + qman_offset, + enable ? QMAN_UPPER_CP_CGM_PWR_GATE_EN : 0); + } + + for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) { + enable = !!(hdev->clock_gating_mask & + (BIT_ULL(gaudi_dma_assignment[i]))); + + qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET; + WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, + enable ? QMAN_CGM1_PWR_GATE_EN : 0); + WREG32(mmDMA0_QM_CGM_CFG + qman_offset, + enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0); + } + + enable = !!(hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))); + WREG32(mmMME0_QM_CGM_CFG1, enable ? QMAN_CGM1_PWR_GATE_EN : 0); + WREG32(mmMME0_QM_CGM_CFG, enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0); + + enable = !!(hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))); + WREG32(mmMME2_QM_CGM_CFG1, enable ? QMAN_CGM1_PWR_GATE_EN : 0); + WREG32(mmMME2_QM_CGM_CFG, enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0); + + for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) { + enable = !!(hdev->clock_gating_mask & + (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i))); + + WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset, + enable ? QMAN_CGM1_PWR_GATE_EN : 0); + WREG32(mmTPC0_QM_CGM_CFG + qman_offset, + enable ? 
QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0); + + qman_offset += TPC_QMAN_OFFSET; + } + + gaudi->hw_cap_initialized |= HW_CAP_CLK_GATE; +} + +static void gaudi_disable_clock_gating(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + u32 qman_offset; + int i; + + if (!(gaudi->hw_cap_initialized & HW_CAP_CLK_GATE)) + return; + + for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) { + WREG32(mmDMA0_QM_CGM_CFG + qman_offset, 0); + WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, 0); + + qman_offset += (mmDMA1_QM_CGM_CFG - mmDMA0_QM_CGM_CFG); + } + + WREG32(mmMME0_QM_CGM_CFG, 0); + WREG32(mmMME0_QM_CGM_CFG1, 0); + WREG32(mmMME2_QM_CGM_CFG, 0); + WREG32(mmMME2_QM_CGM_CFG1, 0); + + for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) { + WREG32(mmTPC0_QM_CGM_CFG + qman_offset, 0); + WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset, 0); + + qman_offset += (mmTPC1_QM_CGM_CFG - mmTPC0_QM_CGM_CFG); + } + + gaudi->hw_cap_initialized &= ~(HW_CAP_CLK_GATE); +} + +static void gaudi_enable_timestamp(struct hl_device *hdev) +{ + /* Disable the timestamp counter */ + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0); + + /* Zero the lower/upper parts of the 64-bit counter */ + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0); + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0); + + /* Enable the counter */ + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1); +} + +static void gaudi_disable_timestamp(struct hl_device *hdev) +{ + /* Disable the timestamp counter */ + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0); +} + +static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset) +{ + u32 wait_timeout_ms; + + dev_info(hdev->dev, + "Halting compute engines and disabling interrupts\n"); + + if (hdev->pldm) + wait_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC; + else + wait_timeout_ms = GAUDI_RESET_WAIT_MSEC; + + + gaudi_stop_mme_qmans(hdev); + gaudi_stop_tpc_qmans(hdev); + gaudi_stop_hbm_dma_qmans(hdev); + gaudi_stop_pci_dma_qmans(hdev); + + hdev->asic_funcs->disable_clock_gating(hdev); + + msleep(wait_timeout_ms); + + gaudi_pci_dma_stall(hdev); + gaudi_hbm_dma_stall(hdev); + gaudi_tpc_stall(hdev); + gaudi_mme_stall(hdev); + + msleep(wait_timeout_ms); + + gaudi_disable_mme_qmans(hdev); + gaudi_disable_tpc_qmans(hdev); + gaudi_disable_hbm_dma_qmans(hdev); + gaudi_disable_pci_dma_qmans(hdev); + + gaudi_disable_timestamp(hdev); + + gaudi_disable_msi(hdev); +} + +static int gaudi_mmu_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct gaudi_device *gaudi = hdev->asic_specific; + u64 hop0_addr; + int rc, i; + + if (!hdev->mmu_enable) + return 0; + + if (gaudi->hw_cap_initialized & HW_CAP_MMU) + return 0; + + hdev->dram_supports_virtual_memory = false; + + for (i = 0 ; i < prop->max_asid ; i++) { + hop0_addr = prop->mmu_pgt_addr + + (i * prop->mmu_hop_table_size); + + rc = gaudi_mmu_update_asid_hop0_addr(hdev, i, hop0_addr); + if (rc) { + dev_err(hdev->dev, + "failed to set hop0 addr for asid %d\n", i); + goto err; + } + } + + /* init MMU cache manage page */ + WREG32(mmSTLB_CACHE_INV_BASE_39_8, MMU_CACHE_MNG_ADDR >> 8); + WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40); + + hdev->asic_funcs->mmu_invalidate_cache(hdev, true, 0); + + WREG32(mmMMU_UP_MMU_ENABLE, 1); + WREG32(mmMMU_UP_SPI_MASK, 0xF); + + WREG32(mmSTLB_HOP_CONFIGURATION, + hdev->mmu_huge_page_opt ? 0x30440 : 0x40440); + + /* + * The H/W expects the first PI after init to be 1. After wraparound + * we'll write 0. 
+ */ + gaudi->mmu_cache_inv_pi = 1; + + gaudi->hw_cap_initialized |= HW_CAP_MMU; + + return 0; + +err: + return rc; +} + +static int gaudi_load_firmware_to_device(struct hl_device *hdev) +{ + void __iomem *dst; + + /* HBM scrambler must be initialized before pushing F/W to HBM */ + gaudi_init_scrambler_hbm(hdev); + + dst = hdev->pcie_bar[HBM_BAR_ID] + LINUX_FW_OFFSET; + + return hl_fw_load_fw_to_device(hdev, GAUDI_LINUX_FW_FILE, dst); +} + +static int gaudi_load_boot_fit_to_device(struct hl_device *hdev) +{ + void __iomem *dst; + + dst = hdev->pcie_bar[SRAM_BAR_ID] + BOOT_FIT_SRAM_OFFSET; + + return hl_fw_load_fw_to_device(hdev, GAUDI_BOOT_FIT_FILE, dst); +} + +static void gaudi_read_device_fw_version(struct hl_device *hdev, + enum hl_fw_component fwc) +{ + const char *name; + u32 ver_off; + char *dest; + + switch (fwc) { + case FW_COMP_UBOOT: + ver_off = RREG32(mmUBOOT_VER_OFFSET); + dest = hdev->asic_prop.uboot_ver; + name = "U-Boot"; + break; + case FW_COMP_PREBOOT: + ver_off = RREG32(mmPREBOOT_VER_OFFSET); + dest = hdev->asic_prop.preboot_ver; + name = "Preboot"; + break; + default: + dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc); + return; + } + + ver_off &= ~((u32)SRAM_BASE_ADDR); + + if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) { + memcpy_fromio(dest, hdev->pcie_bar[SRAM_BAR_ID] + ver_off, + VERSION_MAX_LEN); + } else { + dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n", + name, ver_off); + strcpy(dest, "unavailable"); + } +} + +static int gaudi_init_cpu(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + int rc; + + if (!hdev->cpu_enable) + return 0; + + if (gaudi->hw_cap_initialized & HW_CAP_CPU) + return 0; + + /* + * The device CPU works with 40 bits addresses. + * This register sets the extension to 50 bits. 
+ */ + WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr); + + rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, + mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, + mmCPU_CMD_STATUS_TO_HOST, + mmCPU_BOOT_ERR0, + !hdev->bmc_enable, GAUDI_CPU_TIMEOUT_USEC, + GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC); + + if (rc) + return rc; + + gaudi->hw_cap_initialized |= HW_CAP_CPU; + + return 0; +} + +static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct hl_eq *eq; + u32 status; + struct hl_hw_queue *cpu_pq = + &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ]; + int err; + + if (!hdev->cpu_queues_enable) + return 0; + + if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q) + return 0; + + eq = &hdev->event_queue; + + WREG32(mmCPU_IF_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address)); + WREG32(mmCPU_IF_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address)); + + WREG32(mmCPU_IF_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address)); + WREG32(mmCPU_IF_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address)); + + WREG32(mmCPU_IF_CQ_BASE_ADDR_LOW, + lower_32_bits(hdev->cpu_accessible_dma_address)); + WREG32(mmCPU_IF_CQ_BASE_ADDR_HIGH, + upper_32_bits(hdev->cpu_accessible_dma_address)); + + WREG32(mmCPU_IF_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES); + WREG32(mmCPU_IF_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES); + WREG32(mmCPU_IF_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE); + + /* Used for EQ CI */ + WREG32(mmCPU_IF_EQ_RD_OFFS, 0); + + WREG32(mmCPU_IF_PF_PQ_PI, 0); + + if (gaudi->multi_msi_mode) + WREG32(mmCPU_IF_QUEUE_INIT, PQ_INIT_STATUS_READY_FOR_CP); + else + WREG32(mmCPU_IF_QUEUE_INIT, + PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI); + + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_PI_UPDATE); + + err = hl_poll_timeout( + hdev, + mmCPU_IF_QUEUE_INIT, + status, + (status == PQ_INIT_STATUS_READY_FOR_HOST), + 1000, + cpu_timeout); + + if (err) { + dev_err(hdev->dev, + "Failed to communicate with Device CPU (CPU-CP timeout)\n"); + return -EIO; + } + + gaudi->hw_cap_initialized |= HW_CAP_CPU_Q; + return 0; +} + +static void gaudi_pre_hw_init(struct hl_device *hdev) +{ + /* Perform read from the device to make sure device is up */ + RREG32(mmHW_STATE); + + /* Set the access through PCI bars (Linux driver only) as + * secured + */ + WREG32(mmPCIE_WRAP_LBW_PROT_OVR, + (PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK | + PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK)); + + /* Perform read to flush the waiting writes to ensure + * configuration was set in the device + */ + RREG32(mmPCIE_WRAP_LBW_PROT_OVR); + + /* + * Let's mark in the H/W that we have reached this point. We check + * this value in the reset_before_init function to understand whether + * we need to reset the chip before doing H/W init. This register is + * cleared by the H/W upon H/W reset + */ + WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY); + + /* Configure the reset registers. 
Must be done as early as possible + * in case we fail during H/W initialization + */ + WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H, + (CFG_RST_H_DMA_MASK | + CFG_RST_H_MME_MASK | + CFG_RST_H_SM_MASK | + CFG_RST_H_TPC_7_MASK)); + + WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK); + + WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H, + (CFG_RST_H_HBM_MASK | + CFG_RST_H_TPC_7_MASK | + CFG_RST_H_NIC_MASK | + CFG_RST_H_SM_MASK | + CFG_RST_H_DMA_MASK | + CFG_RST_H_MME_MASK | + CFG_RST_H_CPU_MASK | + CFG_RST_H_MMU_MASK)); + + WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L, + (CFG_RST_L_IF_MASK | + CFG_RST_L_PSOC_MASK | + CFG_RST_L_TPC_MASK)); +} + +static int gaudi_hw_init(struct hl_device *hdev) +{ + int rc; + + dev_info(hdev->dev, "Starting initialization of H/W\n"); + + gaudi_pre_hw_init(hdev); + + gaudi_init_pci_dma_qmans(hdev); + + gaudi_init_hbm_dma_qmans(hdev); + + rc = gaudi_init_cpu(hdev); + if (rc) { + dev_err(hdev->dev, "failed to initialize CPU\n"); + return rc; + } + + /* SRAM scrambler must be initialized after CPU is running from HBM */ + gaudi_init_scrambler_sram(hdev); + + /* This is here just in case we are working without CPU */ + gaudi_init_scrambler_hbm(hdev); + + gaudi_init_golden_registers(hdev); + + rc = gaudi_mmu_init(hdev); + if (rc) + return rc; + + gaudi_init_security(hdev); + + gaudi_init_mme_qmans(hdev); + + gaudi_init_tpc_qmans(hdev); + + hdev->asic_funcs->set_clock_gating(hdev); + + gaudi_enable_timestamp(hdev); + + /* MSI must be enabled before CPU queues are initialized */ + rc = gaudi_enable_msi(hdev); + if (rc) + goto disable_queues; + + /* must be called after MSI was enabled */ + rc = gaudi_init_cpu_queues(hdev, GAUDI_CPU_TIMEOUT_USEC); + if (rc) { + dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n", + rc); + goto disable_msi; + } + + /* Perform read from the device to flush all configuration */ + RREG32(mmHW_STATE); + + return 0; + +disable_msi: + gaudi_disable_msi(hdev); +disable_queues: + gaudi_disable_mme_qmans(hdev); + gaudi_disable_pci_dma_qmans(hdev); + + return rc; +} + +static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + u32 status, reset_timeout_ms, cpu_timeout_ms, boot_strap = 0; + + if (!hard_reset) { + dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n"); + return; + } + + if (hdev->pldm) { + reset_timeout_ms = GAUDI_PLDM_HRESET_TIMEOUT_MSEC; + cpu_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC; + } else { + reset_timeout_ms = GAUDI_RESET_TIMEOUT_MSEC; + cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC; + } + + /* Set device to handle FLR by H/W as we will put the device CPU to + * halt mode + */ + WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK | + PCIE_AUX_FLR_CTRL_INT_MASK_MASK)); + + /* I don't know what is the state of the CPU so make sure it is + * stopped in any means necessary + */ + WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE); + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE); + + msleep(cpu_timeout_ms); + + /* Tell ASIC not to re-initialize PCIe */ + WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC); + + boot_strap = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS); + + /* H/W bug WA: + * rdata[31:0] = strap_read_val; + * wdata[31:0] = rdata[30:21],1'b0,rdata[20:0] + */ + boot_strap = (((boot_strap & 0x7FE00000) << 1) | + (boot_strap & 0x001FFFFF)); + WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap & ~0x2); + + /* Restart BTL/BLR upon hard-reset */ + WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1); + + 
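/*
 * Worked example of the boot-strap H/W bug WA above (value chosen
 * for illustration): bits [30:21] of the strap value are shifted up
 * by one and a 0 is inserted at bit 21, i.e.
 *
 *	boot_strap = ((boot_strap & 0x7FE00000) << 1) |
 *			(boot_strap & 0x001FFFFF);
 *
 * so 0x00300003 becomes 0x00500003, and clearing bit 1 (& ~0x2)
 * before the write yields 0x00500001. The original strap value is
 * written back once the reset poll below completes.
 */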
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST, + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT); + dev_info(hdev->dev, + "Issued HARD reset command, going to wait %dms\n", + reset_timeout_ms); + + /* + * After hard reset, we can't poll the BTM_FSM register because the PSOC + * itself is in reset. Need to wait until the reset is deasserted + */ + msleep(reset_timeout_ms); + + status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM); + if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK) + dev_err(hdev->dev, + "Timeout while waiting for device to reset 0x%x\n", + status); + + WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap); + + gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | + HW_CAP_HBM | HW_CAP_PCI_DMA | + HW_CAP_MME | HW_CAP_TPC_MASK | + HW_CAP_HBM_DMA | HW_CAP_PLL | + HW_CAP_MMU | + HW_CAP_SRAM_SCRAMBLER | + HW_CAP_HBM_SCRAMBLER | + HW_CAP_CLK_GATE); + + memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat)); +} + +static int gaudi_suspend(struct hl_device *hdev) +{ + int rc; + + rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS); + if (rc) + dev_err(hdev->dev, "Failed to disable PCI access from CPU\n"); + + return rc; +} + +static int gaudi_resume(struct hl_device *hdev) +{ + return gaudi_init_iatu(hdev); +} + +static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size) +{ + int rc; + + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | + VM_DONTCOPY | VM_NORESERVE; + + rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, + (dma_addr - HOST_PHYS_BASE), size); + if (rc) + dev_err(hdev->dev, "dma_mmap_coherent error %d", rc); + + return rc; +} + +static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + u32 db_reg_offset, db_value, dma_qm_offset, q_off; + int dma_id; + bool invalid_queue = false; + + switch (hw_queue_id) { + case GAUDI_QUEUE_ID_DMA_0_0...GAUDI_QUEUE_ID_DMA_0_3: + dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_1]; + dma_qm_offset = dma_id * DMA_QMAN_OFFSET; + q_off = dma_qm_offset + (hw_queue_id & 0x3) * 4; + db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off; + break; + + case GAUDI_QUEUE_ID_DMA_1_0...GAUDI_QUEUE_ID_DMA_1_3: + dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_2]; + dma_qm_offset = dma_id * DMA_QMAN_OFFSET; + q_off = dma_qm_offset + (hw_queue_id & 0x3) * 4; + db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off; + break; + + case GAUDI_QUEUE_ID_DMA_2_0...GAUDI_QUEUE_ID_DMA_2_3: + dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_1]; + dma_qm_offset = dma_id * DMA_QMAN_OFFSET; + q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; + db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off; + break; + + case GAUDI_QUEUE_ID_DMA_3_0...GAUDI_QUEUE_ID_DMA_3_3: + dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_2]; + dma_qm_offset = dma_id * DMA_QMAN_OFFSET; + q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; + db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off; + break; + + case GAUDI_QUEUE_ID_DMA_4_0...GAUDI_QUEUE_ID_DMA_4_3: + dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_3]; + dma_qm_offset = dma_id * DMA_QMAN_OFFSET; + q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; + db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off; + break; + + case GAUDI_QUEUE_ID_DMA_5_0...GAUDI_QUEUE_ID_DMA_5_3: + dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_3]; + dma_qm_offset = dma_id * DMA_QMAN_OFFSET; + q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; + db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off; + break; + + case GAUDI_QUEUE_ID_DMA_6_0...GAUDI_QUEUE_ID_DMA_6_3: + 
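/*
 * Note on the stream index math in these cases: the DMA_0/DMA_1
 * queues use (hw_queue_id & 0x3) directly, while DMA_2 onwards use
 * ((hw_queue_id - 1) & 0x3), because the CPU PQ sits between the
 * DMA_1 and DMA_2 queue IDs in the enum and shifts the alignment by
 * one. For instance, if GAUDI_QUEUE_ID_DMA_2_0 had the value 9 (a
 * hypothetical number), (9 - 1) & 0x3 == 0 correctly selects
 * stream 0.
 */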
dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_4]; + dma_qm_offset = dma_id * DMA_QMAN_OFFSET; + q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; + db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off; + break; + + case GAUDI_QUEUE_ID_DMA_7_0...GAUDI_QUEUE_ID_DMA_7_3: + dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_5]; + dma_qm_offset = dma_id * DMA_QMAN_OFFSET; + q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; + db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off; + break; + + case GAUDI_QUEUE_ID_CPU_PQ: + if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q) + db_reg_offset = mmCPU_IF_PF_PQ_PI; + else + invalid_queue = true; + break; + + case GAUDI_QUEUE_ID_MME_0_0: + db_reg_offset = mmMME2_QM_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_MME_0_1: + db_reg_offset = mmMME2_QM_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_MME_0_2: + db_reg_offset = mmMME2_QM_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_MME_0_3: + db_reg_offset = mmMME2_QM_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_MME_1_0: + db_reg_offset = mmMME0_QM_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_MME_1_1: + db_reg_offset = mmMME0_QM_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_MME_1_2: + db_reg_offset = mmMME0_QM_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_MME_1_3: + db_reg_offset = mmMME0_QM_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_TPC_0_0: + db_reg_offset = mmTPC0_QM_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_TPC_0_1: + db_reg_offset = mmTPC0_QM_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_TPC_0_2: + db_reg_offset = mmTPC0_QM_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_TPC_0_3: + db_reg_offset = mmTPC0_QM_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_TPC_1_0: + db_reg_offset = mmTPC1_QM_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_TPC_1_1: + db_reg_offset = mmTPC1_QM_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_TPC_1_2: + db_reg_offset = mmTPC1_QM_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_TPC_1_3: + db_reg_offset = mmTPC1_QM_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_TPC_2_0: + db_reg_offset = mmTPC2_QM_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_TPC_2_1: + db_reg_offset = mmTPC2_QM_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_TPC_2_2: + db_reg_offset = mmTPC2_QM_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_TPC_2_3: + db_reg_offset = mmTPC2_QM_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_TPC_3_0: + db_reg_offset = mmTPC3_QM_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_TPC_3_1: + db_reg_offset = mmTPC3_QM_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_TPC_3_2: + db_reg_offset = mmTPC3_QM_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_TPC_3_3: + db_reg_offset = mmTPC3_QM_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_TPC_4_0: + db_reg_offset = mmTPC4_QM_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_TPC_4_1: + db_reg_offset = mmTPC4_QM_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_TPC_4_2: + db_reg_offset = mmTPC4_QM_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_TPC_4_3: + db_reg_offset = mmTPC4_QM_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_TPC_5_0: + db_reg_offset = mmTPC5_QM_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_TPC_5_1: + db_reg_offset = mmTPC5_QM_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_TPC_5_2: + db_reg_offset = mmTPC5_QM_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_TPC_5_3: + db_reg_offset = mmTPC5_QM_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_TPC_6_0: + db_reg_offset = mmTPC6_QM_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_TPC_6_1: + db_reg_offset = mmTPC6_QM_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_TPC_6_2: + db_reg_offset = mmTPC6_QM_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_TPC_6_3: + db_reg_offset = mmTPC6_QM_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_TPC_7_0: + db_reg_offset = mmTPC7_QM_PQ_PI_0; + break; + + case 
GAUDI_QUEUE_ID_TPC_7_1: + db_reg_offset = mmTPC7_QM_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_TPC_7_2: + db_reg_offset = mmTPC7_QM_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_TPC_7_3: + db_reg_offset = mmTPC7_QM_PQ_PI_3; + break; + + default: + invalid_queue = true; + } + + if (invalid_queue) { + /* Should never get here */ + dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n", + hw_queue_id); + return; + } + + db_value = pi; + + /* ring the doorbell */ + WREG32(db_reg_offset, db_value); + + if (hw_queue_id == GAUDI_QUEUE_ID_CPU_PQ) + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, + GAUDI_EVENT_PI_UPDATE); +} + +static void gaudi_pqe_write(struct hl_device *hdev, __le64 *pqe, + struct hl_bd *bd) +{ + __le64 *pbd = (__le64 *) bd; + + /* The QMANs are on the host memory so a simple copy suffice */ + pqe[0] = pbd[0]; + pqe[1] = pbd[1]; +} + +static void *gaudi_dma_alloc_coherent(struct hl_device *hdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags) +{ + void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size, + dma_handle, flags); + + /* Shift to the device's base physical address of host memory */ + if (kernel_addr) + *dma_handle += HOST_PHYS_BASE; + + return kernel_addr; +} + +static void gaudi_dma_free_coherent(struct hl_device *hdev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + /* Cancel the device's base physical address of host memory */ + dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE; + + dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle); +} + +static void *gaudi_get_int_queue_base(struct hl_device *hdev, + u32 queue_id, dma_addr_t *dma_handle, + u16 *queue_len) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct gaudi_internal_qman_info *q; + + if (queue_id >= GAUDI_QUEUE_ID_SIZE || + gaudi_queue_type[queue_id] != QUEUE_TYPE_INT) { + dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id); + return NULL; + } + + q = &gaudi->internal_qmans[queue_id]; + *dma_handle = q->pq_dma_addr; + *queue_len = q->pq_size / QMAN_PQ_ENTRY_SIZE; + + return q->pq_kernel_addr; +} + +static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg, + u16 len, u32 timeout, long *result) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) { + if (result) + *result = 0; + return 0; + } + + if (!timeout) + timeout = GAUDI_MSG_TO_CPU_TIMEOUT_USEC; + + return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len, + timeout, result); +} + +static int gaudi_test_queue(struct hl_device *hdev, u32 hw_queue_id) +{ + struct packet_msg_prot *fence_pkt; + dma_addr_t pkt_dma_addr; + u32 fence_val, tmp, timeout_usec; + dma_addr_t fence_dma_addr; + u32 *fence_ptr; + int rc; + + if (hdev->pldm) + timeout_usec = GAUDI_PLDM_TEST_QUEUE_WAIT_USEC; + else + timeout_usec = GAUDI_TEST_QUEUE_WAIT_USEC; + + fence_val = GAUDI_QMAN0_FENCE_VAL; + + fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, + &fence_dma_addr); + if (!fence_ptr) { + dev_err(hdev->dev, + "Failed to allocate memory for H/W queue %d testing\n", + hw_queue_id); + return -ENOMEM; + } + + *fence_ptr = 0; + + fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, + sizeof(struct packet_msg_prot), + GFP_KERNEL, &pkt_dma_addr); + if (!fence_pkt) { + dev_err(hdev->dev, + "Failed to allocate packet for H/W queue %d testing\n", + hw_queue_id); + rc = -ENOMEM; + goto free_fence_ptr; + } + + tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT); + tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1); + tmp |= 
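/*
 * Queue self-test mechanism: the MSG_PROT packet assembled here
 * (opcode plus the EB/MB barrier bits composed via FIELD_PREP)
 * instructs the QMAN to write GAUDI_QMAN0_FENCE_VAL to
 * fence_dma_addr in host memory once the packet is processed; the
 * driver then polls *fence_ptr until the value shows up or the
 * timeout expires, proving the queue can fetch and execute work end
 * to end.
 */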
FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1); + + fence_pkt->ctl = cpu_to_le32(tmp); + fence_pkt->value = cpu_to_le32(fence_val); + fence_pkt->addr = cpu_to_le64(fence_dma_addr); + + rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, + sizeof(struct packet_msg_prot), + pkt_dma_addr); + if (rc) { + dev_err(hdev->dev, + "Failed to send fence packet to H/W queue %d\n", + hw_queue_id); + goto free_pkt; + } + + rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val), + 1000, timeout_usec, true); + + hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id); + + if (rc == -ETIMEDOUT) { + dev_err(hdev->dev, + "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n", + hw_queue_id, (unsigned long long) fence_dma_addr, tmp); + rc = -EIO; + } + +free_pkt: + hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt, + pkt_dma_addr); +free_fence_ptr: + hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr, + fence_dma_addr); + return rc; +} + +static int gaudi_test_cpu_queue(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + /* + * check capability here as send_cpu_message() won't update the result + * value if no capability + */ + if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) + return 0; + + return hl_fw_test_cpu_queue(hdev); +} + +static int gaudi_test_queues(struct hl_device *hdev) +{ + int i, rc, ret_val = 0; + + for (i = 0 ; i < hdev->asic_prop.max_queues ; i++) { + if (hdev->asic_prop.hw_queues_props[i].type == QUEUE_TYPE_EXT) { + rc = gaudi_test_queue(hdev, i); + if (rc) + ret_val = -EINVAL; + } + } + + rc = gaudi_test_cpu_queue(hdev); + if (rc) + ret_val = -EINVAL; + + return ret_val; +} + +static void *gaudi_dma_pool_zalloc(struct hl_device *hdev, size_t size, + gfp_t mem_flags, dma_addr_t *dma_handle) +{ + void *kernel_addr; + + if (size > GAUDI_DMA_POOL_BLK_SIZE) + return NULL; + + kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle); + + /* Shift to the device's base physical address of host memory */ + if (kernel_addr) + *dma_handle += HOST_PHYS_BASE; + + return kernel_addr; +} + +static void gaudi_dma_pool_free(struct hl_device *hdev, void *vaddr, + dma_addr_t dma_addr) +{ + /* Cancel the device's base physical address of host memory */ + dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE; + + dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr); +} + +static void *gaudi_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, + size_t size, dma_addr_t *dma_handle) +{ + return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle); +} + +static void gaudi_cpu_accessible_dma_pool_free(struct hl_device *hdev, + size_t size, void *vaddr) +{ + hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr); +} + +static int gaudi_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir)) + return -ENOMEM; + + /* Shift to the device's base physical address of host memory */ + for_each_sg(sgl, sg, nents, i) + sg->dma_address += HOST_PHYS_BASE; + + return 0; +} + +static void gaudi_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + /* Cancel the device's base physical address of host memory */ + for_each_sg(sgl, sg, nents, i) + sg->dma_address -= HOST_PHYS_BASE; + + dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir); +} + +static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev, + struct sg_table *sgt) +{ + 
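/*
 * This helper sizes the patched CB by walking the SG list exactly
 * the way gaudi_patch_dma_packet() later does: DMA-contiguous
 * neighbours are merged as long as the combined length stays within
 * DMA_MAX_TRANSFER_SIZE, and each merged run costs one LIN_DMA
 * packet. Illustrative example (hypothetical addresses): entries
 * (0x1000, len 0x1000), (0x2000, len 0x1000) and (0x5000,
 * len 0x1000) coalesce into two descriptors, since the first two
 * are adjacent but the third is not.
 */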
struct scatterlist *sg, *sg_next_iter; + u32 count, dma_desc_cnt; + u64 len, len_next; + dma_addr_t addr, addr_next; + + dma_desc_cnt = 0; + + for_each_sg(sgt->sgl, sg, sgt->nents, count) { + + len = sg_dma_len(sg); + addr = sg_dma_address(sg); + + if (len == 0) + break; + + while ((count + 1) < sgt->nents) { + sg_next_iter = sg_next(sg); + len_next = sg_dma_len(sg_next_iter); + addr_next = sg_dma_address(sg_next_iter); + + if (len_next == 0) + break; + + if ((addr + len == addr_next) && + (len + len_next <= DMA_MAX_TRANSFER_SIZE)) { + len += len_next; + count++; + sg = sg_next_iter; + } else { + break; + } + } + + dma_desc_cnt++; + } + + return dma_desc_cnt * sizeof(struct packet_lin_dma); +} + +static int gaudi_pin_memory_before_cs(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt, + u64 addr, enum dma_data_direction dir) +{ + struct hl_userptr *userptr; + int rc; + + if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize), + parser->job_userptr_list, &userptr)) + goto already_pinned; + + userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC); + if (!userptr) + return -ENOMEM; + + rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize), + userptr); + if (rc) + goto free_userptr; + + list_add_tail(&userptr->job_node, parser->job_userptr_list); + + rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl, + userptr->sgt->nents, dir); + if (rc) { + dev_err(hdev->dev, "failed to map sgt with DMA region\n"); + goto unpin_memory; + } + + userptr->dma_mapped = true; + userptr->dir = dir; + +already_pinned: + parser->patched_cb_size += + gaudi_get_dma_desc_list_size(hdev, userptr->sgt); + + return 0; + +unpin_memory: + list_del(&userptr->job_node); + hl_unpin_host_memory(hdev, userptr); +free_userptr: + kfree(userptr); + return rc; +} + +static int gaudi_validate_dma_pkt_host(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt, + bool src_in_host) +{ + enum dma_data_direction dir; + bool skip_host_mem_pin = false, user_memset; + u64 addr; + int rc = 0; + + user_memset = (le32_to_cpu(user_dma_pkt->ctl) & + GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK) >> + GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT; + + if (src_in_host) { + if (user_memset) + skip_host_mem_pin = true; + + dev_dbg(hdev->dev, "DMA direction is HOST --> DEVICE\n"); + dir = DMA_TO_DEVICE; + addr = le64_to_cpu(user_dma_pkt->src_addr); + } else { + dev_dbg(hdev->dev, "DMA direction is DEVICE --> HOST\n"); + dir = DMA_FROM_DEVICE; + addr = (le64_to_cpu(user_dma_pkt->dst_addr) & + GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >> + GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT; + } + + if (skip_host_mem_pin) + parser->patched_cb_size += sizeof(*user_dma_pkt); + else + rc = gaudi_pin_memory_before_cs(hdev, parser, user_dma_pkt, + addr, dir); + + return rc; +} + +static int gaudi_validate_dma_pkt_no_mmu(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt) +{ + bool src_in_host = false; + u64 dst_addr = (le64_to_cpu(user_dma_pkt->dst_addr) & + GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >> + GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT; + + dev_dbg(hdev->dev, "DMA packet details:\n"); + dev_dbg(hdev->dev, "source == 0x%llx\n", + le64_to_cpu(user_dma_pkt->src_addr)); + dev_dbg(hdev->dev, "destination == 0x%llx\n", dst_addr); + dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize)); + + /* + * Special handling for DMA with size 0. 
Bypass all validations + * because no transactions will be done except for WR_COMP, which + * is not a security issue + */ + if (!le32_to_cpu(user_dma_pkt->tsize)) { + parser->patched_cb_size += sizeof(*user_dma_pkt); + return 0; + } + + if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3) + src_in_host = true; + + return gaudi_validate_dma_pkt_host(hdev, parser, user_dma_pkt, + src_in_host); +} + +static int gaudi_validate_load_and_exe_pkt(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_load_and_exe *user_pkt) +{ + u32 cfg; + + cfg = le32_to_cpu(user_pkt->cfg); + + if (cfg & GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK) { + dev_err(hdev->dev, + "User not allowed to use Load and Execute\n"); + return -EPERM; + } + + parser->patched_cb_size += sizeof(struct packet_load_and_exe); + + return 0; +} + +static int gaudi_validate_cb(struct hl_device *hdev, + struct hl_cs_parser *parser, bool is_mmu) +{ + u32 cb_parsed_length = 0; + int rc = 0; + + parser->patched_cb_size = 0; + + /* cb_user_size is more than 0 so loop will always be executed */ + while (cb_parsed_length < parser->user_cb_size) { + enum packet_id pkt_id; + u16 pkt_size; + struct gaudi_packet *user_pkt; + + user_pkt = parser->user_cb->kernel_address + cb_parsed_length; + + pkt_id = (enum packet_id) ( + (le64_to_cpu(user_pkt->header) & + PACKET_HEADER_PACKET_ID_MASK) >> + PACKET_HEADER_PACKET_ID_SHIFT); + + if (!validate_packet_id(pkt_id)) { + dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id); + rc = -EINVAL; + break; + } + + pkt_size = gaudi_packet_sizes[pkt_id]; + cb_parsed_length += pkt_size; + if (cb_parsed_length > parser->user_cb_size) { + dev_err(hdev->dev, + "packet 0x%x is out of CB boundary\n", pkt_id); + rc = -EINVAL; + break; + } + + switch (pkt_id) { + case PACKET_MSG_PROT: + dev_err(hdev->dev, + "User not allowed to use MSG_PROT\n"); + rc = -EPERM; + break; + + case PACKET_CP_DMA: + dev_err(hdev->dev, "User not allowed to use CP_DMA\n"); + rc = -EPERM; + break; + + case PACKET_STOP: + dev_err(hdev->dev, "User not allowed to use STOP\n"); + rc = -EPERM; + break; + + case PACKET_WREG_BULK: + dev_err(hdev->dev, + "User not allowed to use WREG_BULK\n"); + rc = -EPERM; + break; + + case PACKET_LOAD_AND_EXE: + rc = gaudi_validate_load_and_exe_pkt(hdev, parser, + (struct packet_load_and_exe *) user_pkt); + break; + + case PACKET_LIN_DMA: + parser->contains_dma_pkt = true; + if (is_mmu) + parser->patched_cb_size += pkt_size; + else + rc = gaudi_validate_dma_pkt_no_mmu(hdev, parser, + (struct packet_lin_dma *) user_pkt); + break; + + case PACKET_WREG_32: + case PACKET_MSG_LONG: + case PACKET_MSG_SHORT: + case PACKET_REPEAT: + case PACKET_FENCE: + case PACKET_NOP: + case PACKET_ARB_POINT: + parser->patched_cb_size += pkt_size; + break; + + default: + dev_err(hdev->dev, "Invalid packet header 0x%x\n", + pkt_id); + rc = -EINVAL; + break; + } + + if (rc) + break; + } + + /* + * The new CB should have space at the end for two MSG_PROT packets: + * 1. A packet that will act as a completion packet + * 2. 
A packet that will generate MSI-X interrupt + */ + parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2; + + return rc; +} + +static int gaudi_patch_dma_packet(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt, + struct packet_lin_dma *new_dma_pkt, + u32 *new_dma_pkt_size) +{ + struct hl_userptr *userptr; + struct scatterlist *sg, *sg_next_iter; + u32 count, dma_desc_cnt, user_wrcomp_en_mask, ctl; + u64 len, len_next; + dma_addr_t dma_addr, dma_addr_next; + u64 device_memory_addr, addr; + enum dma_data_direction dir; + struct sg_table *sgt; + bool src_in_host = false; + bool skip_host_mem_pin = false; + bool user_memset; + + ctl = le32_to_cpu(user_dma_pkt->ctl); + + if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3) + src_in_host = true; + + user_memset = (ctl & GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK) >> + GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT; + + if (src_in_host) { + addr = le64_to_cpu(user_dma_pkt->src_addr); + device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr); + dir = DMA_TO_DEVICE; + if (user_memset) + skip_host_mem_pin = true; + } else { + addr = le64_to_cpu(user_dma_pkt->dst_addr); + device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr); + dir = DMA_FROM_DEVICE; + } + + if ((!skip_host_mem_pin) && + (!hl_userptr_is_pinned(hdev, addr, + le32_to_cpu(user_dma_pkt->tsize), + parser->job_userptr_list, &userptr))) { + dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n", + addr, user_dma_pkt->tsize); + return -EFAULT; + } + + if ((user_memset) && (dir == DMA_TO_DEVICE)) { + memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt)); + *new_dma_pkt_size = sizeof(*user_dma_pkt); + return 0; + } + + user_wrcomp_en_mask = ctl & GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK; + + sgt = userptr->sgt; + dma_desc_cnt = 0; + + for_each_sg(sgt->sgl, sg, sgt->nents, count) { + len = sg_dma_len(sg); + dma_addr = sg_dma_address(sg); + + if (len == 0) + break; + + while ((count + 1) < sgt->nents) { + sg_next_iter = sg_next(sg); + len_next = sg_dma_len(sg_next_iter); + dma_addr_next = sg_dma_address(sg_next_iter); + + if (len_next == 0) + break; + + if ((dma_addr + len == dma_addr_next) && + (len + len_next <= DMA_MAX_TRANSFER_SIZE)) { + len += len_next; + count++; + sg = sg_next_iter; + } else { + break; + } + } + + ctl = le32_to_cpu(user_dma_pkt->ctl); + if (likely(dma_desc_cnt)) + ctl &= ~GAUDI_PKT_CTL_EB_MASK; + ctl &= ~GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK; + new_dma_pkt->ctl = cpu_to_le32(ctl); + new_dma_pkt->tsize = cpu_to_le32(len); + + if (dir == DMA_TO_DEVICE) { + new_dma_pkt->src_addr = cpu_to_le64(dma_addr); + new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr); + } else { + new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr); + new_dma_pkt->dst_addr = cpu_to_le64(dma_addr); + } + + if (!user_memset) + device_memory_addr += len; + dma_desc_cnt++; + new_dma_pkt++; + } + + if (!dma_desc_cnt) { + dev_err(hdev->dev, + "Error of 0 SG entries when patching DMA packet\n"); + return -EFAULT; + } + + /* Fix the last dma packet - wrcomp must be as user set it */ + new_dma_pkt--; + new_dma_pkt->ctl |= cpu_to_le32(user_wrcomp_en_mask); + + *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma); + + return 0; +} + +static int gaudi_patch_cb(struct hl_device *hdev, + struct hl_cs_parser *parser) +{ + u32 cb_parsed_length = 0; + u32 cb_patched_cur_length = 0; + int rc = 0; + + /* cb_user_size is more than 0 so loop will always be executed */ + while (cb_parsed_length < parser->user_cb_size) { + enum packet_id pkt_id; + u16 pkt_size; + u32 
new_pkt_size = 0; + struct gaudi_packet *user_pkt, *kernel_pkt; + + user_pkt = parser->user_cb->kernel_address + cb_parsed_length; + kernel_pkt = parser->patched_cb->kernel_address + + cb_patched_cur_length; + + pkt_id = (enum packet_id) ( + (le64_to_cpu(user_pkt->header) & + PACKET_HEADER_PACKET_ID_MASK) >> + PACKET_HEADER_PACKET_ID_SHIFT); + + if (!validate_packet_id(pkt_id)) { + dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id); + rc = -EINVAL; + break; + } + + pkt_size = gaudi_packet_sizes[pkt_id]; + cb_parsed_length += pkt_size; + if (cb_parsed_length > parser->user_cb_size) { + dev_err(hdev->dev, + "packet 0x%x is out of CB boundary\n", pkt_id); + rc = -EINVAL; + break; + } + + switch (pkt_id) { + case PACKET_LIN_DMA: + rc = gaudi_patch_dma_packet(hdev, parser, + (struct packet_lin_dma *) user_pkt, + (struct packet_lin_dma *) kernel_pkt, + &new_pkt_size); + cb_patched_cur_length += new_pkt_size; + break; + + case PACKET_MSG_PROT: + dev_err(hdev->dev, + "User not allowed to use MSG_PROT\n"); + rc = -EPERM; + break; + + case PACKET_CP_DMA: + dev_err(hdev->dev, "User not allowed to use CP_DMA\n"); + rc = -EPERM; + break; + + case PACKET_STOP: + dev_err(hdev->dev, "User not allowed to use STOP\n"); + rc = -EPERM; + break; + + case PACKET_WREG_32: + case PACKET_WREG_BULK: + case PACKET_MSG_LONG: + case PACKET_MSG_SHORT: + case PACKET_REPEAT: + case PACKET_FENCE: + case PACKET_NOP: + case PACKET_ARB_POINT: + case PACKET_LOAD_AND_EXE: + memcpy(kernel_pkt, user_pkt, pkt_size); + cb_patched_cur_length += pkt_size; + break; + + default: + dev_err(hdev->dev, "Invalid packet header 0x%x\n", + pkt_id); + rc = -EINVAL; + break; + } + + if (rc) + break; + } + + return rc; +} + +static int gaudi_parse_cb_mmu(struct hl_device *hdev, + struct hl_cs_parser *parser) +{ + u64 patched_cb_handle; + u32 patched_cb_size; + struct hl_cb *user_cb; + int rc; + + /* + * The new CB should have space at the end for two MSG_PROT pkt: + * 1. A packet that will act as a completion packet + * 2. A packet that will generate MSI interrupt + */ + parser->patched_cb_size = parser->user_cb_size + + sizeof(struct packet_msg_prot) * 2; + + rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, + parser->patched_cb_size, false, false, + &patched_cb_handle); + + if (rc) { + dev_err(hdev->dev, + "Failed to allocate patched CB for DMA CS %d\n", + rc); + return rc; + } + + patched_cb_handle >>= PAGE_SHIFT; + parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, + (u32) patched_cb_handle); + /* hl_cb_get should never fail here so use kernel WARN */ + WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n", + (u32) patched_cb_handle); + if (!parser->patched_cb) { + rc = -EFAULT; + goto out; + } + + /* + * The check that parser->user_cb_size <= parser->user_cb->size was done + * in validate_queue_index(). + */ + memcpy(parser->patched_cb->kernel_address, + parser->user_cb->kernel_address, + parser->user_cb_size); + + patched_cb_size = parser->patched_cb_size; + + /* Validate patched CB instead of user CB */ + user_cb = parser->user_cb; + parser->user_cb = parser->patched_cb; + rc = gaudi_validate_cb(hdev, parser, true); + parser->user_cb = user_cb; + + if (rc) { + hl_cb_put(parser->patched_cb); + goto out; + } + + if (patched_cb_size != parser->patched_cb_size) { + dev_err(hdev->dev, "user CB size mismatch\n"); + hl_cb_put(parser->patched_cb); + rc = -EINVAL; + goto out; + } + +out: + /* + * Always call cb destroy here because we still have 1 reference + * to it by calling cb_get earlier. 
After the job will be completed, + * cb_put will release it, but here we want to remove it from the + * idr + */ + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, + patched_cb_handle << PAGE_SHIFT); + + return rc; +} + +static int gaudi_parse_cb_no_mmu(struct hl_device *hdev, + struct hl_cs_parser *parser) +{ + u64 patched_cb_handle; + int rc; + + rc = gaudi_validate_cb(hdev, parser, false); + + if (rc) + goto free_userptr; + + rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, + parser->patched_cb_size, false, false, + &patched_cb_handle); + if (rc) { + dev_err(hdev->dev, + "Failed to allocate patched CB for DMA CS %d\n", rc); + goto free_userptr; + } + + patched_cb_handle >>= PAGE_SHIFT; + parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, + (u32) patched_cb_handle); + /* hl_cb_get should never fail here so use kernel WARN */ + WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n", + (u32) patched_cb_handle); + if (!parser->patched_cb) { + rc = -EFAULT; + goto out; + } + + rc = gaudi_patch_cb(hdev, parser); + + if (rc) + hl_cb_put(parser->patched_cb); + +out: + /* + * Always call cb destroy here because we still have 1 reference + * to it by calling cb_get earlier. After the job will be completed, + * cb_put will release it, but here we want to remove it from the + * idr + */ + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, + patched_cb_handle << PAGE_SHIFT); + +free_userptr: + if (rc) + hl_userptr_delete_list(hdev, parser->job_userptr_list); + return rc; +} + +static int gaudi_parse_cb_no_ext_queue(struct hl_device *hdev, + struct hl_cs_parser *parser) +{ + struct asic_fixed_properties *asic_prop = &hdev->asic_prop; + + /* For internal queue jobs just check if CB address is valid */ + if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb, + parser->user_cb_size, + asic_prop->sram_user_base_address, + asic_prop->sram_end_address)) + return 0; + + if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb, + parser->user_cb_size, + asic_prop->dram_user_base_address, + asic_prop->dram_end_address)) + return 0; + + /* PMMU and HPMMU addresses are equal, check only one of them */ + if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb, + parser->user_cb_size, + asic_prop->pmmu.start_addr, + asic_prop->pmmu.end_addr)) + return 0; + + dev_err(hdev->dev, + "CB address 0x%px + 0x%x for internal QMAN is not valid\n", + parser->user_cb, parser->user_cb_size); + + return -EFAULT; +} + +static int gaudi_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (parser->queue_type == QUEUE_TYPE_INT) + return gaudi_parse_cb_no_ext_queue(hdev, parser); + + if (gaudi->hw_cap_initialized & HW_CAP_MMU) + return gaudi_parse_cb_mmu(hdev, parser); + else + return gaudi_parse_cb_no_mmu(hdev, parser); +} + +static void gaudi_add_end_of_cb_packets(struct hl_device *hdev, + void *kernel_address, u32 len, + u64 cq_addr, u32 cq_val, u32 msi_vec, + bool eb) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct packet_msg_prot *cq_pkt; + u32 tmp; + + cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2); + + tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT); + tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1); + + if (eb) + tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1); + + cq_pkt->ctl = cpu_to_le32(tmp); + cq_pkt->value = cpu_to_le32(cq_val); + cq_pkt->addr = cpu_to_le64(cq_addr); + + cq_pkt++; + + tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT); + tmp |= 
FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1); + cq_pkt->ctl = cpu_to_le32(tmp); + cq_pkt->value = cpu_to_le32(1); + + if (!gaudi->multi_msi_mode) + msi_vec = 0; + + cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_MSI_INTR_0 + msi_vec * 4); +} + +static void gaudi_update_eq_ci(struct hl_device *hdev, u32 val) +{ + WREG32(mmCPU_IF_EQ_RD_OFFS, val); +} + +static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr, + u32 size, u64 val) +{ + struct packet_lin_dma *lin_dma_pkt; + struct hl_cs_job *job; + u32 cb_size, ctl, err_cause; + struct hl_cb *cb; + int rc; + + cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false); + if (!cb) + return -EFAULT; + + lin_dma_pkt = cb->kernel_address; + memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt)); + cb_size = sizeof(*lin_dma_pkt); + + ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA); + ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_LIN_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1); + + lin_dma_pkt->ctl = cpu_to_le32(ctl); + lin_dma_pkt->src_addr = cpu_to_le64(val); + lin_dma_pkt->dst_addr |= cpu_to_le64(addr); + lin_dma_pkt->tsize = cpu_to_le32(size); + + job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true); + if (!job) { + dev_err(hdev->dev, "Failed to allocate a new job\n"); + rc = -ENOMEM; + goto release_cb; + } + + /* Verify DMA is OK */ + err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE); + if (err_cause && !hdev->init_done) { + dev_dbg(hdev->dev, + "Clearing DMA0 engine from errors (cause 0x%x)\n", + err_cause); + WREG32(mmDMA0_CORE_ERR_CAUSE, err_cause); + } + + job->id = 0; + job->user_cb = cb; + job->user_cb->cs_cnt++; + job->user_cb_size = cb_size; + job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; + job->patched_cb = job->user_cb; + job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot); + + hl_debugfs_add_job(hdev, job); + + rc = gaudi_send_job_on_qman0(hdev, job); + hl_debugfs_remove_job(hdev, job); + kfree(job); + cb->cs_cnt--; + + /* Verify DMA is OK */ + err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE); + if (err_cause) { + dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause); + rc = -EIO; + if (!hdev->init_done) { + dev_dbg(hdev->dev, + "Clearing DMA0 engine from errors (cause 0x%x)\n", + err_cause); + WREG32(mmDMA0_CORE_ERR_CAUSE, err_cause); + } + } + +release_cb: + hl_cb_put(cb); + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + + return rc; +} + +static void gaudi_restore_sm_registers(struct hl_device *hdev) +{ + int i; + + for (i = 0 ; i < NUM_OF_SOB_IN_BLOCK << 2 ; i += 4) { + WREG32(mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0); + WREG32(mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0); + WREG32(mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0); + } + + for (i = 0 ; i < NUM_OF_MONITORS_IN_BLOCK << 2 ; i += 4) { + WREG32(mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0); + WREG32(mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0); + WREG32(mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0); + } + + i = GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT * 4; + + for (; i < NUM_OF_SOB_IN_BLOCK << 2 ; i += 4) + WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0); + + i = GAUDI_FIRST_AVAILABLE_W_S_MONITOR * 4; + + for (; i < NUM_OF_MONITORS_IN_BLOCK << 2 ; i += 4) + WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0); +} + +static void gaudi_restore_dma_registers(struct hl_device *hdev) +{ + u32 sob_delta = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 - + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0; + int i; + + 
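+	/*
+	 * DMA channel i reports write completion to the sync object at
+	 * CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+	 * (i * sob_delta), i.e. channel 0 targets SOB_OBJ_0, channel 1
+	 * targets SOB_OBJ_1, and so on.
+	 */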
for (i = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) { + u64 sob_addr = CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + + (i * sob_delta); + u32 dma_offset = i * DMA_CORE_OFFSET; + + WREG32(mmDMA0_CORE_WR_COMP_ADDR_LO + dma_offset, + lower_32_bits(sob_addr)); + WREG32(mmDMA0_CORE_WR_COMP_ADDR_HI + dma_offset, + upper_32_bits(sob_addr)); + WREG32(mmDMA0_CORE_WR_COMP_WDATA + dma_offset, 0x80000001); + + /* For DMAs 2-7, need to restore WR_AWUSER_31_11 as it can be + * modified by the user for SRAM reduction + */ + if (i > 1) + WREG32(mmDMA0_CORE_WR_AWUSER_31_11 + dma_offset, + 0x00000001); + } +} + +static void gaudi_restore_qm_registers(struct hl_device *hdev) +{ + u32 qman_offset; + int i; + + for (i = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) { + qman_offset = i * DMA_QMAN_OFFSET; + WREG32(mmDMA0_QM_ARB_CFG_0 + qman_offset, 0); + } + + for (i = 0 ; i < MME_NUMBER_OF_MASTER_ENGINES ; i++) { + qman_offset = i * (mmMME2_QM_BASE - mmMME0_QM_BASE); + WREG32(mmMME0_QM_ARB_CFG_0 + qman_offset, 0); + } + + for (i = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) { + qman_offset = i * TPC_QMAN_OFFSET; + WREG32(mmTPC0_QM_ARB_CFG_0 + qman_offset, 0); + } +} + +static void gaudi_restore_user_registers(struct hl_device *hdev) +{ + gaudi_restore_sm_registers(hdev); + gaudi_restore_dma_registers(hdev); + gaudi_restore_qm_registers(hdev); +} + +static int gaudi_context_switch(struct hl_device *hdev, u32 asid) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 addr = prop->sram_user_base_address; + u32 size = hdev->pldm ? 0x10000 : + (prop->sram_size - SRAM_USER_BASE_OFFSET); + u64 val = 0x7777777777777777ull; + int rc; + + rc = gaudi_memset_device_memory(hdev, addr, size, val); + if (rc) { + dev_err(hdev->dev, "Failed to clear SRAM in context switch\n"); + return rc; + } + + gaudi_mmu_prepare(hdev, asid); + + gaudi_restore_user_registers(hdev); + + return 0; +} + +static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct gaudi_device *gaudi = hdev->asic_specific; + u64 addr = prop->mmu_pgt_addr; + u32 size = prop->mmu_pgt_size + MMU_CACHE_MNG_SIZE; + + if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) + return 0; + + return gaudi_memset_device_memory(hdev, addr, size, 0); +} + +static void gaudi_restore_phase_topology(struct hl_device *hdev) +{ + +} + +static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct gaudi_device *gaudi = hdev->asic_specific; + u64 hbm_bar_addr; + int rc = 0; + + if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + + dev_err_ratelimited(hdev->dev, + "Can't read register - clock gating is enabled!\n"); + rc = -EFAULT; + } else { + *val = RREG32(addr - CFG_BASE); + } + + } else if ((addr >= SRAM_BASE_ADDR) && + (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) { + *val = readl(hdev->pcie_bar[SRAM_BAR_ID] + + (addr - SRAM_BASE_ADDR)); + } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) { + u64 bar_base_addr = DRAM_PHYS_BASE + + (addr & ~(prop->dram_pci_bar_size - 0x1ull)); + + hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr); + if (hbm_bar_addr != U64_MAX) { + *val = readl(hdev->pcie_bar[HBM_BAR_ID] + + (addr - bar_base_addr)); + + hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, + hbm_bar_addr); + } + if (hbm_bar_addr == U64_MAX) + rc = -EIO; + } else if (addr >= HOST_PHYS_BASE && 
!iommu_present(&pci_bus_type)) { + *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE); + } else { + rc = -EFAULT; + } + + return rc; +} + +static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct gaudi_device *gaudi = hdev->asic_specific; + u64 hbm_bar_addr; + int rc = 0; + + if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + + dev_err_ratelimited(hdev->dev, + "Can't write register - clock gating is enabled!\n"); + rc = -EFAULT; + } else { + WREG32(addr - CFG_BASE, val); + } + + } else if ((addr >= SRAM_BASE_ADDR) && + (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) { + writel(val, hdev->pcie_bar[SRAM_BAR_ID] + + (addr - SRAM_BASE_ADDR)); + } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) { + u64 bar_base_addr = DRAM_PHYS_BASE + + (addr & ~(prop->dram_pci_bar_size - 0x1ull)); + + hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr); + if (hbm_bar_addr != U64_MAX) { + writel(val, hdev->pcie_bar[HBM_BAR_ID] + + (addr - bar_base_addr)); + + hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, + hbm_bar_addr); + } + if (hbm_bar_addr == U64_MAX) + rc = -EIO; + } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) { + *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val; + } else { + rc = -EFAULT; + } + + return rc; +} + +static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct gaudi_device *gaudi = hdev->asic_specific; + u64 hbm_bar_addr; + int rc = 0; + + if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + + dev_err_ratelimited(hdev->dev, + "Can't read register - clock gating is enabled!\n"); + rc = -EFAULT; + } else { + u32 val_l = RREG32(addr - CFG_BASE); + u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE); + + *val = (((u64) val_h) << 32) | val_l; + } + + } else if ((addr >= SRAM_BASE_ADDR) && + (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) { + *val = readq(hdev->pcie_bar[SRAM_BAR_ID] + + (addr - SRAM_BASE_ADDR)); + } else if (addr <= + DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) { + u64 bar_base_addr = DRAM_PHYS_BASE + + (addr & ~(prop->dram_pci_bar_size - 0x1ull)); + + hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr); + if (hbm_bar_addr != U64_MAX) { + *val = readq(hdev->pcie_bar[HBM_BAR_ID] + + (addr - bar_base_addr)); + + hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, + hbm_bar_addr); + } + if (hbm_bar_addr == U64_MAX) + rc = -EIO; + } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) { + *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE); + } else { + rc = -EFAULT; + } + + return rc; +} + +static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct gaudi_device *gaudi = hdev->asic_specific; + u64 hbm_bar_addr; + int rc = 0; + + if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + + dev_err_ratelimited(hdev->dev, + "Can't write register - clock gating is enabled!\n"); + rc = -EFAULT; + } else { + WREG32(addr - CFG_BASE, lower_32_bits(val)); + WREG32(addr + 
sizeof(u32) - CFG_BASE, + upper_32_bits(val)); + } + + } else if ((addr >= SRAM_BASE_ADDR) && + (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) { + writeq(val, hdev->pcie_bar[SRAM_BAR_ID] + + (addr - SRAM_BASE_ADDR)); + } else if (addr <= + DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) { + u64 bar_base_addr = DRAM_PHYS_BASE + + (addr & ~(prop->dram_pci_bar_size - 0x1ull)); + + hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr); + if (hbm_bar_addr != U64_MAX) { + writeq(val, hdev->pcie_bar[HBM_BAR_ID] + + (addr - bar_base_addr)); + + hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, + hbm_bar_addr); + } + if (hbm_bar_addr == U64_MAX) + rc = -EIO; + } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) { + *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val; + } else { + rc = -EFAULT; + } + + return rc; +} + +static u64 gaudi_read_pte(struct hl_device *hdev, u64 addr) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (hdev->hard_reset_pending) + return U64_MAX; + + return readq(hdev->pcie_bar[HBM_BAR_ID] + + (addr - gaudi->hbm_bar_cur_addr)); +} + +static void gaudi_write_pte(struct hl_device *hdev, u64 addr, u64 val) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (hdev->hard_reset_pending) + return; + + writeq(val, hdev->pcie_bar[HBM_BAR_ID] + + (addr - gaudi->hbm_bar_cur_addr)); +} + +void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid) +{ + /* mask to zero the MMBP and ASID bits */ + WREG32_AND(reg, ~0x7FF); + WREG32_OR(reg, asid); +} + +static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) + return; + + if (asid & ~DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK) { + WARN(1, "asid %u is too big\n", asid); + return; + } + + mutex_lock(&gaudi->clk_gate_mutex); + + hdev->asic_funcs->disable_clock_gating(hdev); + + gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_0, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_1, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_2, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_3, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_4, asid); + + gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_0, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_1, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_2, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_3, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_4, asid); + + gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_0, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_1, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_2, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_3, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_4, asid); + + gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_0, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_1, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_2, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_3, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_4, asid); + + gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_0, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_1, asid); + gaudi_mmu_prepare_reg(hdev, 
mmDMA4_QM_GLBL_NON_SECURE_PROPS_2, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_3, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_4, asid); + + gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_0, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_1, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_2, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_3, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_4, asid); + + gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_0, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_1, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_2, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_3, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_4, asid); + + gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_0, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_1, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_2, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_3, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_4, asid); + + gaudi_mmu_prepare_reg(hdev, mmDMA0_CORE_NON_SECURE_PROPS, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA1_CORE_NON_SECURE_PROPS, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA2_CORE_NON_SECURE_PROPS, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA3_CORE_NON_SECURE_PROPS, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA4_CORE_NON_SECURE_PROPS, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA5_CORE_NON_SECURE_PROPS, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA6_CORE_NON_SECURE_PROPS, asid); + gaudi_mmu_prepare_reg(hdev, mmDMA7_CORE_NON_SECURE_PROPS, asid); + + gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_0, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_1, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_2, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_3, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_4, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC0_CFG_ARUSER_LO, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC0_CFG_AWUSER_LO, asid); + + gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_0, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_1, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_2, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_3, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_4, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC1_CFG_ARUSER_LO, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC1_CFG_AWUSER_LO, asid); + + gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_0, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_1, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_2, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_3, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_4, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC2_CFG_ARUSER_LO, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC2_CFG_AWUSER_LO, asid); + + gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_0, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_1, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_2, asid); + gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_3, asid); + 
gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_4, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC3_CFG_ARUSER_LO, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC3_CFG_AWUSER_LO, asid);
+
+	gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_0, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_1, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_2, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_3, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_4, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC4_CFG_ARUSER_LO, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC4_CFG_AWUSER_LO, asid);
+
+	gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_0, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_1, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_2, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_3, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_4, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC5_CFG_ARUSER_LO, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC5_CFG_AWUSER_LO, asid);
+
+	gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_0, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_1, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_2, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_3, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_4, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC6_CFG_ARUSER_LO, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC6_CFG_AWUSER_LO, asid);
+
+	gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_0, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_1, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_2, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_3, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_4, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC7_CFG_ARUSER_LO, asid);
+	gaudi_mmu_prepare_reg(hdev, mmTPC7_CFG_AWUSER_LO, asid);
+
+	gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_0, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_1, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_2, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_3, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_4, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_0, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_1, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_2, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_3, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+	gaudi_mmu_prepare_reg(hdev, mmMME0_SBAB_ARUSER0, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME0_SBAB_ARUSER1, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME1_SBAB_ARUSER0, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME1_SBAB_ARUSER1, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME2_SBAB_ARUSER0, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME2_SBAB_ARUSER1, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME3_SBAB_ARUSER0, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME3_SBAB_ARUSER1, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME0_ACC_WBC, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME1_ACC_WBC, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME2_ACC_WBC, asid);
+	gaudi_mmu_prepare_reg(hdev, mmMME3_ACC_WBC, asid);
+
+	hdev->asic_funcs->set_clock_gating(hdev);
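+	/*
+	 * The non-secure ASID registers written above are only accessible
+	 * while clock gating is disabled, hence the disable/restore pair
+	 * that brackets this function, serialized by clk_gate_mutex.
+	 */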
+ + mutex_unlock(&gaudi->clk_gate_mutex); +} + +static int gaudi_send_job_on_qman0(struct hl_device *hdev, + struct hl_cs_job *job) +{ + struct packet_msg_prot *fence_pkt; + u32 *fence_ptr; + dma_addr_t fence_dma_addr; + struct hl_cb *cb; + u32 tmp, timeout, dma_offset; + int rc; + + if (hdev->pldm) + timeout = GAUDI_PLDM_QMAN0_TIMEOUT_USEC; + else + timeout = HL_DEVICE_TIMEOUT_USEC; + + if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) { + dev_err_ratelimited(hdev->dev, + "Can't send driver job on QMAN0 because the device is not idle\n"); + return -EBUSY; + } + + fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, + &fence_dma_addr); + if (!fence_ptr) { + dev_err(hdev->dev, + "Failed to allocate fence memory for QMAN0\n"); + return -ENOMEM; + } + + cb = job->patched_cb; + + fence_pkt = cb->kernel_address + + job->job_cb_size - sizeof(struct packet_msg_prot); + + tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT); + tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1); + tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1); + + fence_pkt->ctl = cpu_to_le32(tmp); + fence_pkt->value = cpu_to_le32(GAUDI_QMAN0_FENCE_VAL); + fence_pkt->addr = cpu_to_le64(fence_dma_addr); + + dma_offset = gaudi_dma_assignment[GAUDI_PCI_DMA_1] * DMA_CORE_OFFSET; + + WREG32_OR(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_VAL_SHIFT)); + + rc = hl_hw_queue_send_cb_no_cmpl(hdev, GAUDI_QUEUE_ID_DMA_0_0, + job->job_cb_size, cb->bus_address); + if (rc) { + dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc); + goto free_fence_ptr; + } + + rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, + (tmp == GAUDI_QMAN0_FENCE_VAL), 1000, + timeout, true); + + hl_hw_queue_inc_ci_kernel(hdev, GAUDI_QUEUE_ID_DMA_0_0); + + if (rc == -ETIMEDOUT) { + dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp); + goto free_fence_ptr; + } + +free_fence_ptr: + WREG32_AND(mmDMA0_CORE_PROT + dma_offset, + ~BIT(DMA0_CORE_PROT_VAL_SHIFT)); + + hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr, + fence_dma_addr); + return rc; +} + +static void gaudi_get_event_desc(u16 event_type, char *desc, size_t size) +{ + if (event_type >= GAUDI_EVENT_SIZE) + goto event_not_supported; + + if (!gaudi_irq_map_table[event_type].valid) + goto event_not_supported; + + snprintf(desc, size, gaudi_irq_map_table[event_type].name); + + return; + +event_not_supported: + snprintf(desc, size, "N/A"); +} + +static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev, + u32 x_y, bool is_write) +{ + u32 dma_id[2], dma_offset, err_cause[2], mask, i; + + mask = is_write ? 
DMA0_CORE_ERR_CAUSE_HBW_WR_ERR_MASK : + DMA0_CORE_ERR_CAUSE_HBW_RD_ERR_MASK; + + switch (x_y) { + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1: + dma_id[0] = 0; + dma_id[1] = 2; + break; + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1: + dma_id[0] = 1; + dma_id[1] = 3; + break; + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1: + dma_id[0] = 4; + dma_id[1] = 6; + break; + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1: + dma_id[0] = 5; + dma_id[1] = 7; + break; + default: + goto unknown_initiator; + } + + for (i = 0 ; i < 2 ; i++) { + dma_offset = dma_id[i] * DMA_CORE_OFFSET; + err_cause[i] = RREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset); + } + + switch (x_y) { + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1: + if ((err_cause[0] & mask) && !(err_cause[1] & mask)) + return "DMA0"; + else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) + return "DMA2"; + else + return "DMA0 or DMA2"; + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1: + if ((err_cause[0] & mask) && !(err_cause[1] & mask)) + return "DMA1"; + else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) + return "DMA3"; + else + return "DMA1 or DMA3"; + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1: + if ((err_cause[0] & mask) && !(err_cause[1] & mask)) + return "DMA4"; + else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) + return "DMA6"; + else + return "DMA4 or DMA6"; + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1: + if ((err_cause[0] & mask) && !(err_cause[1] & mask)) + return "DMA5"; + else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) + return "DMA7"; + else + return "DMA5 or DMA7"; + } + +unknown_initiator: + return "unknown initiator"; +} + +static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, + bool is_write) +{ + u32 val, x_y, axi_id; + + val = is_write ? 
RREG32(mmMMU_UP_RAZWI_WRITE_ID) : + RREG32(mmMMU_UP_RAZWI_READ_ID); + x_y = val & ((RAZWI_INITIATOR_Y_MASK << RAZWI_INITIATOR_Y_SHIFT) | + (RAZWI_INITIATOR_X_MASK << RAZWI_INITIATOR_X_SHIFT)); + axi_id = val & (RAZWI_INITIATOR_AXI_ID_MASK << + RAZWI_INITIATOR_AXI_ID_SHIFT); + + switch (x_y) { + case RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0: + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) + return "TPC0"; + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) + return "NIC0"; + break; + case RAZWI_INITIATOR_ID_X_Y_TPC1: + return "TPC1"; + case RAZWI_INITIATOR_ID_X_Y_MME0_0: + case RAZWI_INITIATOR_ID_X_Y_MME0_1: + return "MME0"; + case RAZWI_INITIATOR_ID_X_Y_MME1_0: + case RAZWI_INITIATOR_ID_X_Y_MME1_1: + return "MME1"; + case RAZWI_INITIATOR_ID_X_Y_TPC2: + return "TPC2"; + case RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC: + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) + return "TPC3"; + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PCI)) + return "PCI"; + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_CPU)) + return "CPU"; + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PSOC)) + return "PSOC"; + break; + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0: + case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1: + return gaudi_get_razwi_initiator_dma_name(hdev, x_y, is_write); + case RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2: + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) + return "TPC4"; + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) + return "NIC1"; + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT)) + return "NIC2"; + break; + case RAZWI_INITIATOR_ID_X_Y_TPC5: + return "TPC5"; + case RAZWI_INITIATOR_ID_X_Y_MME2_0: + case RAZWI_INITIATOR_ID_X_Y_MME2_1: + return "MME2"; + case RAZWI_INITIATOR_ID_X_Y_MME3_0: + case RAZWI_INITIATOR_ID_X_Y_MME3_1: + return "MME3"; + case RAZWI_INITIATOR_ID_X_Y_TPC6: + return "TPC6"; + case RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5: + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) + return "TPC7"; + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) + return "NIC4"; + if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT)) + return "NIC5"; + break; + default: + break; + } + + dev_err(hdev->dev, + "Unknown RAZWI initiator ID 0x%x [Y=%d, X=%d, AXI_ID=%d]\n", + val, + (val >> RAZWI_INITIATOR_Y_SHIFT) & RAZWI_INITIATOR_Y_MASK, + (val >> RAZWI_INITIATOR_X_SHIFT) & RAZWI_INITIATOR_X_MASK, + (val >> RAZWI_INITIATOR_AXI_ID_SHIFT) & + RAZWI_INITIATOR_AXI_ID_MASK); + + return "unknown initiator"; +} + +static void gaudi_print_razwi_info(struct hl_device *hdev) +{ + if (RREG32(mmMMU_UP_RAZWI_WRITE_VLD)) { + dev_err_ratelimited(hdev->dev, + "RAZWI event caused by illegal write of %s\n", + gaudi_get_razwi_initiator_name(hdev, true)); + WREG32(mmMMU_UP_RAZWI_WRITE_VLD, 0); + } + + if (RREG32(mmMMU_UP_RAZWI_READ_VLD)) { + dev_err_ratelimited(hdev->dev, + "RAZWI event caused by illegal read of %s\n", + gaudi_get_razwi_initiator_name(hdev, false)); + WREG32(mmMMU_UP_RAZWI_READ_VLD, 0); + } +} + +static void gaudi_print_mmu_error_info(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + u64 addr; + u32 val; + + if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) + return; + + val = RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE); + if (val & MMU_UP_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) { + addr = val & 
MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK; + addr <<= 32; + addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA); + + dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n", + addr); + + WREG32(mmMMU_UP_PAGE_ERROR_CAPTURE, 0); + } + + val = RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE); + if (val & MMU_UP_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK) { + addr = val & MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK; + addr <<= 32; + addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA); + + dev_err_ratelimited(hdev->dev, + "MMU access error on va 0x%llx\n", addr); + + WREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE, 0); + } +} + +/* + * +-------------------+------------------------------------------------------+ + * | Configuration Reg | Description | + * | Address | | + * +-------------------+------------------------------------------------------+ + * | 0xF30 - 0xF3F |ECC single error indication (1 bit per memory wrapper)| + * | |0xF30 memory wrappers 31:0 (MSB to LSB) | + * | |0xF34 memory wrappers 63:32 | + * | |0xF38 memory wrappers 95:64 | + * | |0xF3C memory wrappers 127:96 | + * +-------------------+------------------------------------------------------+ + * | 0xF40 - 0xF4F |ECC double error indication (1 bit per memory wrapper)| + * | |0xF40 memory wrappers 31:0 (MSB to LSB) | + * | |0xF44 memory wrappers 63:32 | + * | |0xF48 memory wrappers 95:64 | + * | |0xF4C memory wrappers 127:96 | + * +-------------------+------------------------------------------------------+ + */ +static int gaudi_extract_ecc_info(struct hl_device *hdev, + struct ecc_info_extract_params *params, u64 *ecc_address, + u64 *ecc_syndrom, u8 *memory_wrapper_idx) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + u32 i, num_mem_regs, reg, err_bit; + u64 err_addr, err_word = 0; + int rc = 0; + + num_mem_regs = params->num_memories / 32 + + ((params->num_memories % 32) ? 
1 : 0); + + if (params->block_address >= CFG_BASE) + params->block_address -= CFG_BASE; + + if (params->derr) + err_addr = params->block_address + GAUDI_ECC_DERR0_OFFSET; + else + err_addr = params->block_address + GAUDI_ECC_SERR0_OFFSET; + + if (params->disable_clock_gating) { + mutex_lock(&gaudi->clk_gate_mutex); + hdev->asic_funcs->disable_clock_gating(hdev); + } + + /* Set invalid wrapper index */ + *memory_wrapper_idx = 0xFF; + + /* Iterate through memory wrappers, a single bit must be set */ + for (i = 0 ; i < num_mem_regs ; i++) { + err_addr += i * 4; + err_word = RREG32(err_addr); + if (err_word) { + err_bit = __ffs(err_word); + *memory_wrapper_idx = err_bit + (32 * i); + break; + } + } + + if (*memory_wrapper_idx == 0xFF) { + dev_err(hdev->dev, "ECC error information cannot be found\n"); + rc = -EINVAL; + goto enable_clk_gate; + } + + WREG32(params->block_address + GAUDI_ECC_MEM_SEL_OFFSET, + *memory_wrapper_idx); + + *ecc_address = + RREG32(params->block_address + GAUDI_ECC_ADDRESS_OFFSET); + *ecc_syndrom = + RREG32(params->block_address + GAUDI_ECC_SYNDROME_OFFSET); + + /* Clear error indication */ + reg = RREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET); + if (params->derr) + reg |= FIELD_PREP(GAUDI_ECC_MEM_INFO_CLR_DERR_MASK, 1); + else + reg |= FIELD_PREP(GAUDI_ECC_MEM_INFO_CLR_SERR_MASK, 1); + + WREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET, reg); + +enable_clk_gate: + if (params->disable_clock_gating) { + hdev->asic_funcs->set_clock_gating(hdev); + + mutex_unlock(&gaudi->clk_gate_mutex); + } + + return rc; +} + +static void gaudi_handle_qman_err_generic(struct hl_device *hdev, + const char *qm_name, + u64 glbl_sts_addr, + u64 arb_err_addr) +{ + u32 i, j, glbl_sts_val, arb_err_val, glbl_sts_clr_val; + char reg_desc[32]; + + /* Iterate through all stream GLBL_STS1 registers + Lower CP */ + for (i = 0 ; i < QMAN_STREAMS + 1 ; i++) { + glbl_sts_clr_val = 0; + glbl_sts_val = RREG32(glbl_sts_addr + 4 * i); + + if (!glbl_sts_val) + continue; + + if (i == QMAN_STREAMS) + snprintf(reg_desc, ARRAY_SIZE(reg_desc), "LowerCP"); + else + snprintf(reg_desc, ARRAY_SIZE(reg_desc), "stream%u", i); + + for (j = 0 ; j < GAUDI_NUM_OF_QM_ERR_CAUSE ; j++) { + if (glbl_sts_val & BIT(j)) { + dev_err_ratelimited(hdev->dev, + "%s %s. err cause: %s\n", + qm_name, reg_desc, + gaudi_qman_error_cause[j]); + glbl_sts_clr_val |= BIT(j); + } + } + + /* Write 1 clear errors */ + WREG32(glbl_sts_addr + 4 * i, glbl_sts_clr_val); + } + + arb_err_val = RREG32(arb_err_addr); + + if (!arb_err_val) + return; + + for (j = 0 ; j < GAUDI_NUM_OF_QM_ARB_ERR_CAUSE ; j++) { + if (arb_err_val & BIT(j)) { + dev_err_ratelimited(hdev->dev, + "%s ARB_ERR. err cause: %s\n", + qm_name, + gaudi_qman_arb_error_cause[j]); + } + } +} + +static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type, + struct hl_eq_ecc_data *ecc_data) +{ + struct ecc_info_extract_params params; + u64 ecc_address = 0, ecc_syndrom = 0; + u8 index, memory_wrapper_idx = 0; + bool extract_info_from_fw; + int rc; + + switch (event_type) { + case GAUDI_EVENT_PCIE_CORE_SERR ... GAUDI_EVENT_PCIE_PHY_DERR: + case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_MMU_DERR: + extract_info_from_fw = true; + break; + case GAUDI_EVENT_TPC0_SERR ... 
GAUDI_EVENT_TPC7_SERR:
+		index = event_type - GAUDI_EVENT_TPC0_SERR;
+		params.block_address = mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
+		params.num_memories = 90;
+		params.derr = false;
+		params.disable_clock_gating = true;
+		extract_info_from_fw = false;
+		break;
+	case GAUDI_EVENT_TPC0_DERR ... GAUDI_EVENT_TPC7_DERR:
+		index = event_type - GAUDI_EVENT_TPC0_DERR;
+		params.block_address =
+			mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
+		params.num_memories = 90;
+		params.derr = true;
+		params.disable_clock_gating = true;
+		extract_info_from_fw = false;
+		break;
+	case GAUDI_EVENT_MME0_ACC_SERR:
+	case GAUDI_EVENT_MME1_ACC_SERR:
+	case GAUDI_EVENT_MME2_ACC_SERR:
+	case GAUDI_EVENT_MME3_ACC_SERR:
+		index = (event_type - GAUDI_EVENT_MME0_ACC_SERR) / 4;
+		params.block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
+		params.num_memories = 128;
+		params.derr = false;
+		params.disable_clock_gating = true;
+		extract_info_from_fw = false;
+		break;
+	case GAUDI_EVENT_MME0_ACC_DERR:
+	case GAUDI_EVENT_MME1_ACC_DERR:
+	case GAUDI_EVENT_MME2_ACC_DERR:
+	case GAUDI_EVENT_MME3_ACC_DERR:
+		index = (event_type - GAUDI_EVENT_MME0_ACC_DERR) / 4;
+		params.block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
+		params.num_memories = 128;
+		params.derr = true;
+		params.disable_clock_gating = true;
+		extract_info_from_fw = false;
+		break;
+	case GAUDI_EVENT_MME0_SBAB_SERR:
+	case GAUDI_EVENT_MME1_SBAB_SERR:
+	case GAUDI_EVENT_MME2_SBAB_SERR:
+	case GAUDI_EVENT_MME3_SBAB_SERR:
+		index = (event_type - GAUDI_EVENT_MME0_SBAB_SERR) / 4;
+		params.block_address =
+			mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
+		params.num_memories = 33;
+		params.derr = false;
+		params.disable_clock_gating = true;
+		extract_info_from_fw = false;
+		break;
+	case GAUDI_EVENT_MME0_SBAB_DERR:
+	case GAUDI_EVENT_MME1_SBAB_DERR:
+	case GAUDI_EVENT_MME2_SBAB_DERR:
+	case GAUDI_EVENT_MME3_SBAB_DERR:
+		index = (event_type - GAUDI_EVENT_MME0_SBAB_DERR) / 4;
+		params.block_address =
+			mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
+		params.num_memories = 33;
+		params.derr = true;
+		params.disable_clock_gating = true;
+		extract_info_from_fw = false;
+		break;
+	default:
+		return;
+	}
+
+	if (extract_info_from_fw) {
+		ecc_address = le64_to_cpu(ecc_data->ecc_address);
+		ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom);
+		memory_wrapper_idx = ecc_data->memory_wrapper_idx;
+	} else {
+		rc = gaudi_extract_ecc_info(hdev, &params, &ecc_address,
+				&ecc_syndrom, &memory_wrapper_idx);
+		if (rc)
+			return;
+	}
+
+	dev_err(hdev->dev,
+		"ECC error detected. address: %#llx. Syndrom: %#llx. block id %u\n",
+		ecc_address, ecc_syndrom, memory_wrapper_idx);
+}
+
+static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
+{
+	u64 glbl_sts_addr, arb_err_addr;
+	u8 index;
+	char desc[32];
+
+	switch (event_type) {
+	case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
+		index = event_type - GAUDI_EVENT_TPC0_QM;
+		glbl_sts_addr =
+			mmTPC0_QM_GLBL_STS1_0 + index * TPC_QMAN_OFFSET;
+		arb_err_addr =
+			mmTPC0_QM_ARB_ERR_CAUSE + index * TPC_QMAN_OFFSET;
+		snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC_QM", index);
+		break;
+	case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
+		index = event_type - GAUDI_EVENT_MME0_QM;
+		glbl_sts_addr =
+			mmMME0_QM_GLBL_STS1_0 + index * MME_QMAN_OFFSET;
+		arb_err_addr =
+			mmMME0_QM_ARB_ERR_CAUSE + index * MME_QMAN_OFFSET;
+		snprintf(desc, ARRAY_SIZE(desc), "%s%d", "MME_QM", index);
+		break;
+	case GAUDI_EVENT_DMA0_QM ...
GAUDI_EVENT_DMA7_QM: + index = event_type - GAUDI_EVENT_DMA0_QM; + glbl_sts_addr = + mmDMA0_QM_GLBL_STS1_0 + index * DMA_QMAN_OFFSET; + arb_err_addr = + mmDMA0_QM_ARB_ERR_CAUSE + index * DMA_QMAN_OFFSET; + snprintf(desc, ARRAY_SIZE(desc), "%s%d", "DMA_QM", index); + break; + default: + return; + } + + gaudi_handle_qman_err_generic(hdev, desc, glbl_sts_addr, arb_err_addr); +} + +static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type, + bool razwi) +{ + char desc[64] = ""; + + gaudi_get_event_desc(event_type, desc, sizeof(desc)); + dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n", + event_type, desc); + + if (razwi) { + gaudi_print_razwi_info(hdev); + gaudi_print_mmu_error_info(hdev); + } +} + +static int gaudi_soft_reset_late_init(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + /* Unmask all IRQs since some could have been received + * during the soft reset + */ + return hl_fw_unmask_irq_arr(hdev, gaudi->events, sizeof(gaudi->events)); +} + +static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device) +{ + int ch, err = 0; + u32 base, val, val2; + + base = GAUDI_HBM_CFG_BASE + device * GAUDI_HBM_CFG_OFFSET; + for (ch = 0 ; ch < GAUDI_HBM_CHANNELS ; ch++) { + val = RREG32_MASK(base + ch * 0x1000 + 0x06C, 0x0000FFFF); + val = (val & 0xFF) | ((val >> 8) & 0xFF); + if (val) { + err = 1; + dev_err(hdev->dev, + "HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n", + device, ch * 2, val & 0x1, (val >> 1) & 0x1, + (val >> 2) & 0x1, (val >> 3) & 0x1, + (val >> 4) & 0x1); + + val2 = RREG32(base + ch * 0x1000 + 0x060); + dev_err(hdev->dev, + "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DED_CNT=%d\n", + device, ch * 2, + RREG32(base + ch * 0x1000 + 0x064), + (val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10, + (val2 & 0xFF0000) >> 16, + (val2 & 0xFF000000) >> 24); + } + + val = RREG32_MASK(base + ch * 0x1000 + 0x07C, 0x0000FFFF); + val = (val & 0xFF) | ((val >> 8) & 0xFF); + if (val) { + err = 1; + dev_err(hdev->dev, + "HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n", + device, ch * 2 + 1, val & 0x1, (val >> 1) & 0x1, + (val >> 2) & 0x1, (val >> 3) & 0x1, + (val >> 4) & 0x1); + + val2 = RREG32(base + ch * 0x1000 + 0x070); + dev_err(hdev->dev, + "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DED_CNT=%d\n", + device, ch * 2 + 1, + RREG32(base + ch * 0x1000 + 0x074), + (val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10, + (val2 & 0xFF0000) >> 16, + (val2 & 0xFF000000) >> 24); + } + + /* Clear interrupts */ + RMWREG32(base + (ch * 0x1000) + 0x060, 0x1C8, 0x1FF); + RMWREG32(base + (ch * 0x1000) + 0x070, 0x1C8, 0x1FF); + WREG32(base + (ch * 0x1000) + 0x06C, 0x1F1F); + WREG32(base + (ch * 0x1000) + 0x07C, 0x1F1F); + RMWREG32(base + (ch * 0x1000) + 0x060, 0x0, 0xF); + RMWREG32(base + (ch * 0x1000) + 0x070, 0x0, 0xF); + } + + val = RREG32(base + 0x8F30); + val2 = RREG32(base + 0x8F34); + if (val | val2) { + err = 1; + dev_err(hdev->dev, + "HBM %d MC SRAM SERR info: Reg 0x8F30=0x%x, Reg 0x8F34=0x%x\n", + device, val, val2); + } + val = RREG32(base + 0x8F40); + val2 = RREG32(base + 0x8F44); + if (val | val2) { + err = 1; + dev_err(hdev->dev, + "HBM %d MC SRAM DERR info: Reg 0x8F40=0x%x, Reg 0x8F44=0x%x\n", + device, val, val2); + } + + return err; +} + +static int gaudi_hbm_event_to_dev(u16 hbm_event_type) +{ + switch (hbm_event_type) { + case GAUDI_EVENT_HBM0_SPI_0: + case GAUDI_EVENT_HBM0_SPI_1: + 
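+		/* Both the SPI_0 and SPI_1 flavors of an HBM event resolve
+		 * to the same device index, e.g. HBM0_SPI_0 and HBM0_SPI_1
+		 * both map to HBM device 0.
+		 */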
return 0; + case GAUDI_EVENT_HBM1_SPI_0: + case GAUDI_EVENT_HBM1_SPI_1: + return 1; + case GAUDI_EVENT_HBM2_SPI_0: + case GAUDI_EVENT_HBM2_SPI_1: + return 2; + case GAUDI_EVENT_HBM3_SPI_0: + case GAUDI_EVENT_HBM3_SPI_1: + return 3; + default: + break; + } + + /* Should never happen */ + return 0; +} + +static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id, + char *interrupt_name) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + u32 tpc_offset = tpc_id * TPC_CFG_OFFSET, tpc_interrupts_cause, i; + bool soft_reset_required = false; + + /* Accessing the TPC_INTR_CAUSE registers requires disabling the clock + * gating, and thus cannot be done in CPU-CP and should be done instead + * by the driver. + */ + + mutex_lock(&gaudi->clk_gate_mutex); + + hdev->asic_funcs->disable_clock_gating(hdev); + + tpc_interrupts_cause = RREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset) & + TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK; + + for (i = 0 ; i < GAUDI_NUM_OF_TPC_INTR_CAUSE ; i++) + if (tpc_interrupts_cause & BIT(i)) { + dev_err_ratelimited(hdev->dev, + "TPC%d_%s interrupt cause: %s\n", + tpc_id, interrupt_name, + gaudi_tpc_interrupts_cause[i]); + /* If this is QM error, we need to soft-reset */ + if (i == 15) + soft_reset_required = true; + } + + /* Clear interrupts */ + WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0); + + hdev->asic_funcs->set_clock_gating(hdev); + + mutex_unlock(&gaudi->clk_gate_mutex); + + return soft_reset_required; +} + +static int tpc_dec_event_to_tpc_id(u16 tpc_dec_event_type) +{ + return (tpc_dec_event_type - GAUDI_EVENT_TPC0_DEC) >> 1; +} + +static int tpc_krn_event_to_tpc_id(u16 tpc_dec_event_type) +{ + return (tpc_dec_event_type - GAUDI_EVENT_TPC0_KRN_ERR) / 6; +} + +static void gaudi_print_clk_change_info(struct hl_device *hdev, + u16 event_type) +{ + switch (event_type) { + case GAUDI_EVENT_FIX_POWER_ENV_S: + hdev->clk_throttling_reason |= HL_CLK_THROTTLE_POWER; + dev_info_ratelimited(hdev->dev, + "Clock throttling due to power consumption\n"); + break; + + case GAUDI_EVENT_FIX_POWER_ENV_E: + hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_POWER; + dev_info_ratelimited(hdev->dev, + "Power envelop is safe, back to optimal clock\n"); + break; + + case GAUDI_EVENT_FIX_THERMAL_ENV_S: + hdev->clk_throttling_reason |= HL_CLK_THROTTLE_THERMAL; + dev_info_ratelimited(hdev->dev, + "Clock throttling due to overheating\n"); + break; + + case GAUDI_EVENT_FIX_THERMAL_ENV_E: + hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_THERMAL; + dev_info_ratelimited(hdev->dev, + "Thermal envelop is safe, back to optimal clock\n"); + break; + + default: + dev_err(hdev->dev, "Received invalid clock change event %d\n", + event_type); + break; + } +} + +static void gaudi_handle_eqe(struct hl_device *hdev, + struct hl_eq_entry *eq_entry) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + u32 ctl = le32_to_cpu(eq_entry->hdr.ctl); + u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK) + >> EQ_CTL_EVENT_TYPE_SHIFT); + u8 cause; + bool reset_required; + + if (event_type >= GAUDI_EVENT_SIZE) { + dev_err(hdev->dev, "Event type %u exceeds maximum of %u", + event_type, GAUDI_EVENT_SIZE - 1); + return; + } + + gaudi->events_stat[event_type]++; + gaudi->events_stat_aggregate[event_type]++; + + switch (event_type) { + case GAUDI_EVENT_PCIE_CORE_DERR: + case GAUDI_EVENT_PCIE_IF_DERR: + case GAUDI_EVENT_PCIE_PHY_DERR: + case GAUDI_EVENT_TPC0_DERR ... 
GAUDI_EVENT_TPC7_DERR: + case GAUDI_EVENT_MME0_ACC_DERR: + case GAUDI_EVENT_MME0_SBAB_DERR: + case GAUDI_EVENT_MME1_ACC_DERR: + case GAUDI_EVENT_MME1_SBAB_DERR: + case GAUDI_EVENT_MME2_ACC_DERR: + case GAUDI_EVENT_MME2_SBAB_DERR: + case GAUDI_EVENT_MME3_ACC_DERR: + case GAUDI_EVENT_MME3_SBAB_DERR: + case GAUDI_EVENT_DMA0_DERR_ECC ... GAUDI_EVENT_DMA7_DERR_ECC: + fallthrough; + case GAUDI_EVENT_CPU_IF_ECC_DERR: + case GAUDI_EVENT_PSOC_MEM_DERR: + case GAUDI_EVENT_PSOC_CORESIGHT_DERR: + case GAUDI_EVENT_SRAM0_DERR ... GAUDI_EVENT_SRAM28_DERR: + case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR: + case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR: + case GAUDI_EVENT_MMU_DERR: + gaudi_print_irq_info(hdev, event_type, true); + gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); + if (hdev->hard_reset_on_fw_events) + hl_device_reset(hdev, true, false); + break; + + case GAUDI_EVENT_GIC500: + case GAUDI_EVENT_AXI_ECC: + case GAUDI_EVENT_L2_RAM_ECC: + case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17: + gaudi_print_irq_info(hdev, event_type, false); + if (hdev->hard_reset_on_fw_events) + hl_device_reset(hdev, true, false); + break; + + case GAUDI_EVENT_HBM0_SPI_0: + case GAUDI_EVENT_HBM1_SPI_0: + case GAUDI_EVENT_HBM2_SPI_0: + case GAUDI_EVENT_HBM3_SPI_0: + gaudi_print_irq_info(hdev, event_type, false); + gaudi_hbm_read_interrupts(hdev, + gaudi_hbm_event_to_dev(event_type)); + if (hdev->hard_reset_on_fw_events) + hl_device_reset(hdev, true, false); + break; + + case GAUDI_EVENT_HBM0_SPI_1: + case GAUDI_EVENT_HBM1_SPI_1: + case GAUDI_EVENT_HBM2_SPI_1: + case GAUDI_EVENT_HBM3_SPI_1: + gaudi_print_irq_info(hdev, event_type, false); + gaudi_hbm_read_interrupts(hdev, + gaudi_hbm_event_to_dev(event_type)); + break; + + case GAUDI_EVENT_TPC0_DEC: + case GAUDI_EVENT_TPC1_DEC: + case GAUDI_EVENT_TPC2_DEC: + case GAUDI_EVENT_TPC3_DEC: + case GAUDI_EVENT_TPC4_DEC: + case GAUDI_EVENT_TPC5_DEC: + case GAUDI_EVENT_TPC6_DEC: + case GAUDI_EVENT_TPC7_DEC: + gaudi_print_irq_info(hdev, event_type, true); + reset_required = gaudi_tpc_read_interrupts(hdev, + tpc_dec_event_to_tpc_id(event_type), + "AXI_SLV_DEC_Error"); + if (reset_required) { + dev_err(hdev->dev, "hard reset required due to %s\n", + gaudi_irq_map_table[event_type].name); + + if (hdev->hard_reset_on_fw_events) + hl_device_reset(hdev, true, false); + } else { + hl_fw_unmask_irq(hdev, event_type); + } + break; + + case GAUDI_EVENT_TPC0_KRN_ERR: + case GAUDI_EVENT_TPC1_KRN_ERR: + case GAUDI_EVENT_TPC2_KRN_ERR: + case GAUDI_EVENT_TPC3_KRN_ERR: + case GAUDI_EVENT_TPC4_KRN_ERR: + case GAUDI_EVENT_TPC5_KRN_ERR: + case GAUDI_EVENT_TPC6_KRN_ERR: + case GAUDI_EVENT_TPC7_KRN_ERR: + gaudi_print_irq_info(hdev, event_type, true); + reset_required = gaudi_tpc_read_interrupts(hdev, + tpc_krn_event_to_tpc_id(event_type), + "KRN_ERR"); + if (reset_required) { + dev_err(hdev->dev, "hard reset required due to %s\n", + gaudi_irq_map_table[event_type].name); + + if (hdev->hard_reset_on_fw_events) + hl_device_reset(hdev, true, false); + } else { + hl_fw_unmask_irq(hdev, event_type); + } + break; + + case GAUDI_EVENT_PCIE_CORE_SERR: + case GAUDI_EVENT_PCIE_IF_SERR: + case GAUDI_EVENT_PCIE_PHY_SERR: + case GAUDI_EVENT_TPC0_SERR ... 
GAUDI_EVENT_TPC7_SERR: + case GAUDI_EVENT_MME0_ACC_SERR: + case GAUDI_EVENT_MME0_SBAB_SERR: + case GAUDI_EVENT_MME1_ACC_SERR: + case GAUDI_EVENT_MME1_SBAB_SERR: + case GAUDI_EVENT_MME2_ACC_SERR: + case GAUDI_EVENT_MME2_SBAB_SERR: + case GAUDI_EVENT_MME3_ACC_SERR: + case GAUDI_EVENT_MME3_SBAB_SERR: + case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_DMA7_SERR_ECC: + case GAUDI_EVENT_CPU_IF_ECC_SERR: + case GAUDI_EVENT_PSOC_MEM_SERR: + case GAUDI_EVENT_PSOC_CORESIGHT_SERR: + case GAUDI_EVENT_SRAM0_SERR ... GAUDI_EVENT_SRAM28_SERR: + case GAUDI_EVENT_DMA_IF0_SERR ... GAUDI_EVENT_DMA_IF3_SERR: + case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR: + fallthrough; + case GAUDI_EVENT_MMU_SERR: + gaudi_print_irq_info(hdev, event_type, true); + gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); + hl_fw_unmask_irq(hdev, event_type); + break; + + case GAUDI_EVENT_PCIE_DEC: + case GAUDI_EVENT_MME0_WBC_RSP: + case GAUDI_EVENT_MME0_SBAB0_RSP: + case GAUDI_EVENT_MME1_WBC_RSP: + case GAUDI_EVENT_MME1_SBAB0_RSP: + case GAUDI_EVENT_MME2_WBC_RSP: + case GAUDI_EVENT_MME2_SBAB0_RSP: + case GAUDI_EVENT_MME3_WBC_RSP: + case GAUDI_EVENT_MME3_SBAB0_RSP: + case GAUDI_EVENT_CPU_AXI_SPLITTER: + case GAUDI_EVENT_PSOC_AXI_DEC: + case GAUDI_EVENT_PSOC_PRSTN_FALL: + case GAUDI_EVENT_MMU_PAGE_FAULT: + case GAUDI_EVENT_MMU_WR_PERM: + case GAUDI_EVENT_RAZWI_OR_ADC: + case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM: + case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM: + case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM: + fallthrough; + case GAUDI_EVENT_DMA0_CORE ... GAUDI_EVENT_DMA7_CORE: + gaudi_print_irq_info(hdev, event_type, true); + gaudi_handle_qman_err(hdev, event_type); + hl_fw_unmask_irq(hdev, event_type); + break; + + case GAUDI_EVENT_RAZWI_OR_ADC_SW: + gaudi_print_irq_info(hdev, event_type, true); + if (hdev->hard_reset_on_fw_events) + hl_device_reset(hdev, true, false); + break; + + case GAUDI_EVENT_TPC0_BMON_SPMU: + case GAUDI_EVENT_TPC1_BMON_SPMU: + case GAUDI_EVENT_TPC2_BMON_SPMU: + case GAUDI_EVENT_TPC3_BMON_SPMU: + case GAUDI_EVENT_TPC4_BMON_SPMU: + case GAUDI_EVENT_TPC5_BMON_SPMU: + case GAUDI_EVENT_TPC6_BMON_SPMU: + case GAUDI_EVENT_TPC7_BMON_SPMU: + case GAUDI_EVENT_DMA_BM_CH0 ... GAUDI_EVENT_DMA_BM_CH7: + gaudi_print_irq_info(hdev, event_type, false); + hl_fw_unmask_irq(hdev, event_type); + break; + + case GAUDI_EVENT_FIX_POWER_ENV_S ... 
GAUDI_EVENT_FIX_THERMAL_ENV_E:
+		gaudi_print_clk_change_info(hdev, event_type);
+		hl_fw_unmask_irq(hdev, event_type);
+		break;
+
+	case GAUDI_EVENT_PSOC_GPIO_U16_0:
+		cause = le64_to_cpu(eq_entry->data[0]) & 0xFF;
+		dev_err(hdev->dev,
+			"Received high temp H/W interrupt %d (cause %d)\n",
+			event_type, cause);
+		break;
+
+	default:
+		dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
+			event_type);
+		break;
+	}
+}
+
+static void *gaudi_get_events_stat(struct hl_device *hdev, bool aggregate,
+					u32 *size)
+{
+	struct gaudi_device *gaudi = hdev->asic_specific;
+
+	if (aggregate) {
+		*size = (u32) sizeof(gaudi->events_stat_aggregate);
+		return gaudi->events_stat_aggregate;
+	}
+
+	*size = (u32) sizeof(gaudi->events_stat);
+	return gaudi->events_stat;
+}
+
+static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
+					u32 flags)
+{
+	struct gaudi_device *gaudi = hdev->asic_specific;
+	u32 status, timeout_usec;
+	int rc;
+
+	if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
+		hdev->hard_reset_pending)
+		return 0;
+
+	if (hdev->pldm)
+		timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
+	else
+		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
+	mutex_lock(&hdev->mmu_cache_lock);
+
+	/* L0 & L1 invalidation */
+	WREG32(mmSTLB_INV_PS, 3);
+	WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++);
+	WREG32(mmSTLB_INV_PS, 2);
+
+	rc = hl_poll_timeout(
+		hdev,
+		mmSTLB_INV_PS,
+		status,
+		!status,
+		1000,
+		timeout_usec);
+
+	WREG32(mmSTLB_INV_SET, 0);
+
+	mutex_unlock(&hdev->mmu_cache_lock);
+
+	if (rc) {
+		dev_err_ratelimited(hdev->dev,
+					"MMU cache invalidation timeout\n");
+		hl_device_reset(hdev, true, false);
+	}
+
+	return rc;
+}
+
+static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
+				bool is_hard, u32 asid, u64 va, u64 size)
+{
+	struct gaudi_device *gaudi = hdev->asic_specific;
+	u32 status, timeout_usec;
+	u32 inv_data;
+	u32 pi;
+	int rc;
+
+	if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
+		hdev->hard_reset_pending)
+		return 0;
+
+	mutex_lock(&hdev->mmu_cache_lock);
+
+	if (hdev->pldm)
+		timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
+	else
+		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
+	/*
+	 * TODO: currently invalidate entire L0 & L1 as in regular hard
+	 * invalidation. Need to apply invalidation of specific cache
+	 * lines with mask of ASID & VA & size.
+	 * Note that L1 will be flushed entirely in any case.
+ */ + + /* L0 & L1 invalidation */ + inv_data = RREG32(mmSTLB_CACHE_INV); + /* PI is 8 bit */ + pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF; + WREG32(mmSTLB_CACHE_INV, + (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi); + + rc = hl_poll_timeout( + hdev, + mmSTLB_INV_CONSUMER_INDEX, + status, + status == pi, + 1000, + timeout_usec); + + mutex_unlock(&hdev->mmu_cache_lock); + + if (rc) { + dev_err_ratelimited(hdev->dev, + "MMU cache invalidation timeout\n"); + hl_device_reset(hdev, true, false); + } + + return rc; +} + +static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev, + u32 asid, u64 phys_addr) +{ + u32 status, timeout_usec; + int rc; + + if (hdev->pldm) + timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC; + else + timeout_usec = MMU_CONFIG_TIMEOUT_USEC; + + WREG32(MMU_ASID, asid); + WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT); + WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT); + WREG32(MMU_BUSY, 0x80000000); + + rc = hl_poll_timeout( + hdev, + MMU_BUSY, + status, + !(status & 0x80000000), + 1000, + timeout_usec); + + if (rc) { + dev_err(hdev->dev, + "Timeout during MMU hop0 config of asid %d\n", asid); + return rc; + } + + return 0; +} + +static int gaudi_send_heartbeat(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) + return 0; + + return hl_fw_send_heartbeat(hdev); +} + +static int gaudi_cpucp_info_get(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct asic_fixed_properties *prop = &hdev->asic_prop; + int rc; + + if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) + return 0; + + rc = hl_fw_cpucp_info_get(hdev); + if (rc) + return rc; + + if (!strlen(prop->cpucp_info.card_name)) + strncpy(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME, + CARD_NAME_MAX_LEN); + + hdev->card_type = le32_to_cpu(hdev->asic_prop.cpucp_info.card_type); + + if (hdev->card_type == cpucp_card_type_pci) + prop->max_power_default = MAX_POWER_DEFAULT_PCI; + else if (hdev->card_type == cpucp_card_type_pmc) + prop->max_power_default = MAX_POWER_DEFAULT_PMC; + + hdev->max_power = prop->max_power_default; + + return 0; +} + +static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask, + struct seq_file *s) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n"; + const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n"; + u32 qm_glbl_sts0, qm_cgm_sts, dma_core_sts0, tpc_cfg_sts, mme_arch_sts; + bool is_idle = true, is_eng_idle, is_slave; + u64 offset; + int i, dma_id; + + mutex_lock(&gaudi->clk_gate_mutex); + + hdev->asic_funcs->disable_clock_gating(hdev); + + if (s) + seq_puts(s, + "\nDMA is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_STS0\n" + "--- ------- ------------ ---------- -------------\n"); + + for (i = 0 ; i < DMA_NUMBER_OF_CHNLS ; i++) { + dma_id = gaudi_dma_assignment[i]; + offset = dma_id * DMA_QMAN_OFFSET; + + qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + offset); + qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + offset); + dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + offset); + is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) && + IS_DMA_IDLE(dma_core_sts0); + is_idle &= is_eng_idle; + + if (mask) + *mask |= ((u64) !is_eng_idle) << + (GAUDI_ENGINE_ID_DMA_0 + dma_id); + if (s) + seq_printf(s, fmt, dma_id, + is_eng_idle ? 
"Y" : "N", qm_glbl_sts0, + qm_cgm_sts, dma_core_sts0); + } + + if (s) + seq_puts(s, + "\nTPC is_idle QM_GLBL_STS0 QM_CGM_STS CFG_STATUS\n" + "--- ------- ------------ ---------- ----------\n"); + + for (i = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) { + offset = i * TPC_QMAN_OFFSET; + qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + offset); + qm_cgm_sts = RREG32(mmTPC0_QM_CGM_STS + offset); + tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + offset); + is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) && + IS_TPC_IDLE(tpc_cfg_sts); + is_idle &= is_eng_idle; + + if (mask) + *mask |= ((u64) !is_eng_idle) << + (GAUDI_ENGINE_ID_TPC_0 + i); + if (s) + seq_printf(s, fmt, i, + is_eng_idle ? "Y" : "N", + qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts); + } + + if (s) + seq_puts(s, + "\nMME is_idle QM_GLBL_STS0 QM_CGM_STS ARCH_STATUS\n" + "--- ------- ------------ ---------- -----------\n"); + + for (i = 0 ; i < MME_NUMBER_OF_ENGINES ; i++) { + offset = i * MME_QMAN_OFFSET; + mme_arch_sts = RREG32(mmMME0_CTRL_ARCH_STATUS + offset); + is_eng_idle = IS_MME_IDLE(mme_arch_sts); + + /* MME 1 & 3 are slaves, no need to check their QMANs */ + is_slave = i % 2; + if (!is_slave) { + qm_glbl_sts0 = RREG32(mmMME0_QM_GLBL_STS0 + offset); + qm_cgm_sts = RREG32(mmMME0_QM_CGM_STS + offset); + is_eng_idle &= IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts); + } + + is_idle &= is_eng_idle; + + if (mask) + *mask |= ((u64) !is_eng_idle) << + (GAUDI_ENGINE_ID_MME_0 + i); + if (s) { + if (!is_slave) + seq_printf(s, fmt, i, + is_eng_idle ? "Y" : "N", + qm_glbl_sts0, qm_cgm_sts, mme_arch_sts); + else + seq_printf(s, mme_slave_fmt, i, + is_eng_idle ? "Y" : "N", "-", + "-", mme_arch_sts); + } + } + + if (s) + seq_puts(s, "\n"); + + hdev->asic_funcs->set_clock_gating(hdev); + + mutex_unlock(&gaudi->clk_gate_mutex); + + return is_idle; +} + +static void gaudi_hw_queues_lock(struct hl_device *hdev) + __acquires(&gaudi->hw_queues_lock) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + spin_lock(&gaudi->hw_queues_lock); +} + +static void gaudi_hw_queues_unlock(struct hl_device *hdev) + __releases(&gaudi->hw_queues_lock) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + spin_unlock(&gaudi->hw_queues_lock); +} + +static u32 gaudi_get_pci_id(struct hl_device *hdev) +{ + return hdev->pdev->device; +} + +static int gaudi_get_eeprom_data(struct hl_device *hdev, void *data, + size_t max_size) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) + return 0; + + return hl_fw_get_eeprom_data(hdev, data, max_size); +} + +/* + * this function should be used only during initialization and/or after reset, + * when there are no active users. 
+ */ +static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel, + u32 tpc_id) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + u64 kernel_timeout; + u32 status, offset; + int rc; + + offset = tpc_id * (mmTPC1_CFG_STATUS - mmTPC0_CFG_STATUS); + + if (hdev->pldm) + kernel_timeout = GAUDI_PLDM_TPC_KERNEL_WAIT_USEC; + else + kernel_timeout = HL_DEVICE_TIMEOUT_USEC; + + mutex_lock(&gaudi->clk_gate_mutex); + + hdev->asic_funcs->disable_clock_gating(hdev); + + WREG32(mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW + offset, + lower_32_bits(tpc_kernel)); + WREG32(mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH + offset, + upper_32_bits(tpc_kernel)); + + WREG32(mmTPC0_CFG_ICACHE_BASE_ADDERESS_LOW + offset, + lower_32_bits(tpc_kernel)); + WREG32(mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH + offset, + upper_32_bits(tpc_kernel)); + /* set a valid LUT pointer, content is of no significance */ + WREG32(mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_LO + offset, + lower_32_bits(tpc_kernel)); + WREG32(mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_HI + offset, + upper_32_bits(tpc_kernel)); + + WREG32(mmTPC0_CFG_QM_SYNC_OBJECT_ADDR + offset, + lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0)); + + WREG32(mmTPC0_CFG_TPC_CMD + offset, + (1 << TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT | + 1 << TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT)); + /* wait a bit for the engine to start executing */ + usleep_range(1000, 1500); + + /* wait until engine has finished executing */ + rc = hl_poll_timeout( + hdev, + mmTPC0_CFG_STATUS + offset, + status, + (status & TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK) == + TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK, + 1000, + kernel_timeout); + + if (rc) { + dev_err(hdev->dev, + "Timeout while waiting for TPC%d icache prefetch\n", + tpc_id); + hdev->asic_funcs->set_clock_gating(hdev); + mutex_unlock(&gaudi->clk_gate_mutex); + return -EIO; + } + + WREG32(mmTPC0_CFG_TPC_EXECUTE + offset, + 1 << TPC0_CFG_TPC_EXECUTE_V_SHIFT); + + /* wait a bit for the engine to start executing */ + usleep_range(1000, 1500); + + /* wait until engine has finished executing */ + rc = hl_poll_timeout( + hdev, + mmTPC0_CFG_STATUS + offset, + status, + (status & TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK) == + TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK, + 1000, + kernel_timeout); + + if (rc) { + dev_err(hdev->dev, + "Timeout while waiting for TPC%d vector pipe\n", + tpc_id); + hdev->asic_funcs->set_clock_gating(hdev); + mutex_unlock(&gaudi->clk_gate_mutex); + return -EIO; + } + + rc = hl_poll_timeout( + hdev, + mmTPC0_CFG_WQ_INFLIGHT_CNTR + offset, + status, + (status == 0), + 1000, + kernel_timeout); + + hdev->asic_funcs->set_clock_gating(hdev); + mutex_unlock(&gaudi->clk_gate_mutex); + + if (rc) { + dev_err(hdev->dev, + "Timeout while waiting for TPC%d kernel to execute\n", + tpc_id); + return -EIO; + } + + return 0; +} + +static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev) +{ + return RREG32(mmHW_STATE); +} + +static int gaudi_ctx_init(struct hl_ctx *ctx) +{ + return 0; +} + +static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx) +{ + return gaudi_cq_assignment[cq_idx]; +} + +static u32 gaudi_get_signal_cb_size(struct hl_device *hdev) +{ + return sizeof(struct packet_msg_short) + + sizeof(struct packet_msg_prot) * 2; +} + +static u32 gaudi_get_wait_cb_size(struct hl_device *hdev) +{ + return sizeof(struct packet_msg_short) * 4 + + sizeof(struct packet_fence) + + sizeof(struct packet_msg_prot) * 2; +} + +static void gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id) +{ + struct 
hl_cb *cb = (struct hl_cb *) data; + struct packet_msg_short *pkt; + u32 value, ctl; + + pkt = cb->kernel_address; + memset(pkt, 0, sizeof(*pkt)); + + /* Inc by 1, Mode ADD */ + value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK, 1); + value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_MOD_MASK, 1); + + ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, sob_id * 4); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */ + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */ + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1); + + pkt->value = cpu_to_le32(value); + pkt->ctl = cpu_to_le32(ctl); +} + +static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value, + u16 addr) +{ + u32 ctl, pkt_size = sizeof(*pkt); + + memset(pkt, 0, pkt_size); + + ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */ + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 0); /* last pkt MB */ + + pkt->value = cpu_to_le32(value); + pkt->ctl = cpu_to_le32(ctl); + + return pkt_size; +} + +static u32 gaudi_add_arm_monitor_pkt(struct packet_msg_short *pkt, u16 sob_id, + u16 sob_val, u16 addr) +{ + u32 ctl, value, pkt_size = sizeof(*pkt); + u8 mask = ~(1 << (sob_id & 0x7)); + + memset(pkt, 0, pkt_size); + + value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_id / 8); + value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_MASK, sob_val); + value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MODE_MASK, + 0); /* GREATER OR EQUAL*/ + value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MASK_MASK, mask); + + ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */ + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */ + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1); + + pkt->value = cpu_to_le32(value); + pkt->ctl = cpu_to_le32(ctl); + + return pkt_size; +} + +static u32 gaudi_add_fence_pkt(struct packet_fence *pkt) +{ + u32 ctl, cfg, pkt_size = sizeof(*pkt); + + memset(pkt, 0, pkt_size); + + cfg = FIELD_PREP(GAUDI_PKT_FENCE_CFG_DEC_VAL_MASK, 1); + cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK, 1); + cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_ID_MASK, 2); + + ctl = FIELD_PREP(GAUDI_PKT_FENCE_CTL_OPCODE_MASK, PACKET_FENCE); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1); + + pkt->cfg = cpu_to_le32(cfg); + pkt->ctl = cpu_to_le32(ctl); + + return pkt_size; +} + +static void gaudi_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id, + u16 sob_val, u16 mon_id, u32 q_idx) +{ + struct hl_cb *cb = (struct hl_cb *) data; + void *buf = cb->kernel_address; + u64 monitor_base, fence_addr = 0; + u32 size = 0; + u16 msg_addr_offset; + + switch (q_idx) { + case GAUDI_QUEUE_ID_DMA_0_0: + fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_0; + break; + case GAUDI_QUEUE_ID_DMA_0_1: + fence_addr = 
mmDMA0_QM_CP_FENCE2_RDATA_1; + break; + case GAUDI_QUEUE_ID_DMA_0_2: + fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_2; + break; + case GAUDI_QUEUE_ID_DMA_0_3: + fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_3; + break; + case GAUDI_QUEUE_ID_DMA_1_0: + fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_0; + break; + case GAUDI_QUEUE_ID_DMA_1_1: + fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_1; + break; + case GAUDI_QUEUE_ID_DMA_1_2: + fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_2; + break; + case GAUDI_QUEUE_ID_DMA_1_3: + fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_3; + break; + case GAUDI_QUEUE_ID_DMA_5_0: + fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_0; + break; + case GAUDI_QUEUE_ID_DMA_5_1: + fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_1; + break; + case GAUDI_QUEUE_ID_DMA_5_2: + fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_2; + break; + case GAUDI_QUEUE_ID_DMA_5_3: + fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_3; + break; + default: + /* queue index should be valid here */ + dev_crit(hdev->dev, "wrong queue id %d for wait packet\n", + q_idx); + return; + } + + fence_addr += CFG_BASE; + + /* + * monitor_base should be the content of the base0 address registers, + * so it will be added to the msg short offsets + */ + monitor_base = mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0; + + /* First monitor config packet: low address of the sync */ + msg_addr_offset = + (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_id * 4) - + monitor_base; + + size += gaudi_add_mon_msg_short(buf + size, (u32) fence_addr, + msg_addr_offset); + + /* Second monitor config packet: high address of the sync */ + msg_addr_offset = + (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_id * 4) - + monitor_base; + + size += gaudi_add_mon_msg_short(buf + size, (u32) (fence_addr >> 32), + msg_addr_offset); + + /* + * Third monitor config packet: the payload, i.e. 
what to write when the + * sync triggers + */ + msg_addr_offset = + (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_id * 4) - + monitor_base; + + size += gaudi_add_mon_msg_short(buf + size, 1, msg_addr_offset); + + /* Fourth monitor config packet: bind the monitor to a sync object */ + msg_addr_offset = + (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) - + monitor_base; + size += gaudi_add_arm_monitor_pkt(buf + size, sob_id, sob_val, + msg_addr_offset); + + /* Fence packet */ + size += gaudi_add_fence_pkt(buf + size); +} + +static void gaudi_reset_sob(struct hl_device *hdev, void *data) +{ + struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data; + + dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx, + hw_sob->sob_id); + + WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + hw_sob->sob_id * 4, + 0); + + kref_init(&hw_sob->kref); +} + +static void gaudi_set_dma_mask_from_fw(struct hl_device *hdev) +{ + if (RREG32(mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0) == + HL_POWER9_HOST_MAGIC) { + hdev->power9_64bit_dma_enable = 1; + hdev->dma_mask = 64; + } else { + hdev->power9_64bit_dma_enable = 0; + hdev->dma_mask = 48; + } +} + +static u64 gaudi_get_device_time(struct hl_device *hdev) +{ + u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32; + + return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL); +} + +static const struct hl_asic_funcs gaudi_funcs = { + .early_init = gaudi_early_init, + .early_fini = gaudi_early_fini, + .late_init = gaudi_late_init, + .late_fini = gaudi_late_fini, + .sw_init = gaudi_sw_init, + .sw_fini = gaudi_sw_fini, + .hw_init = gaudi_hw_init, + .hw_fini = gaudi_hw_fini, + .halt_engines = gaudi_halt_engines, + .suspend = gaudi_suspend, + .resume = gaudi_resume, + .cb_mmap = gaudi_cb_mmap, + .ring_doorbell = gaudi_ring_doorbell, + .pqe_write = gaudi_pqe_write, + .asic_dma_alloc_coherent = gaudi_dma_alloc_coherent, + .asic_dma_free_coherent = gaudi_dma_free_coherent, + .get_int_queue_base = gaudi_get_int_queue_base, + .test_queues = gaudi_test_queues, + .asic_dma_pool_zalloc = gaudi_dma_pool_zalloc, + .asic_dma_pool_free = gaudi_dma_pool_free, + .cpu_accessible_dma_pool_alloc = gaudi_cpu_accessible_dma_pool_alloc, + .cpu_accessible_dma_pool_free = gaudi_cpu_accessible_dma_pool_free, + .hl_dma_unmap_sg = gaudi_dma_unmap_sg, + .cs_parser = gaudi_cs_parser, + .asic_dma_map_sg = gaudi_dma_map_sg, + .get_dma_desc_list_size = gaudi_get_dma_desc_list_size, + .add_end_of_cb_packets = gaudi_add_end_of_cb_packets, + .update_eq_ci = gaudi_update_eq_ci, + .context_switch = gaudi_context_switch, + .restore_phase_topology = gaudi_restore_phase_topology, + .debugfs_read32 = gaudi_debugfs_read32, + .debugfs_write32 = gaudi_debugfs_write32, + .debugfs_read64 = gaudi_debugfs_read64, + .debugfs_write64 = gaudi_debugfs_write64, + .add_device_attr = gaudi_add_device_attr, + .handle_eqe = gaudi_handle_eqe, + .set_pll_profile = gaudi_set_pll_profile, + .get_events_stat = gaudi_get_events_stat, + .read_pte = gaudi_read_pte, + .write_pte = gaudi_write_pte, + .mmu_invalidate_cache = gaudi_mmu_invalidate_cache, + .mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range, + .send_heartbeat = gaudi_send_heartbeat, + .set_clock_gating = gaudi_set_clock_gating, + .disable_clock_gating = gaudi_disable_clock_gating, + .debug_coresight = gaudi_debug_coresight, + .is_device_idle = gaudi_is_device_idle, + .soft_reset_late_init = gaudi_soft_reset_late_init, + .hw_queues_lock = gaudi_hw_queues_lock, + .hw_queues_unlock = gaudi_hw_queues_unlock, + .get_pci_id = 
gaudi_get_pci_id,
+	.get_eeprom_data = gaudi_get_eeprom_data,
+	.send_cpu_message = gaudi_send_cpu_message,
+	.get_hw_state = gaudi_get_hw_state,
+	.pci_bars_map = gaudi_pci_bars_map,
+	.init_iatu = gaudi_init_iatu,
+	.rreg = hl_rreg,
+	.wreg = hl_wreg,
+	.halt_coresight = gaudi_halt_coresight,
+	.ctx_init = gaudi_ctx_init,
+	.get_clk_rate = gaudi_get_clk_rate,
+	.get_queue_id_for_cq = gaudi_get_queue_id_for_cq,
+	.read_device_fw_version = gaudi_read_device_fw_version,
+	.load_firmware_to_device = gaudi_load_firmware_to_device,
+	.load_boot_fit_to_device = gaudi_load_boot_fit_to_device,
+	.get_signal_cb_size = gaudi_get_signal_cb_size,
+	.get_wait_cb_size = gaudi_get_wait_cb_size,
+	.gen_signal_cb = gaudi_gen_signal_cb,
+	.gen_wait_cb = gaudi_gen_wait_cb,
+	.reset_sob = gaudi_reset_sob,
+	.set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw,
+	.get_device_time = gaudi_get_device_time
+};
+
+/**
+ * gaudi_set_asic_funcs - set GAUDI function pointers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+void gaudi_set_asic_funcs(struct hl_device *hdev)
+{
+	hdev->asic_funcs = &gaudi_funcs;
+}
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
new file mode 100644
index 000000000..8eb598db8
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2019-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDIP_H_
+#define GAUDIP_H_
+
+#include <uapi/misc/habanalabs.h>
+#include "../common/habanalabs.h"
+#include "../include/common/hl_boot_if.h"
+#include "../include/gaudi/gaudi_packets.h"
+#include "../include/gaudi/gaudi.h"
+#include "../include/gaudi/gaudi_async_events.h"
+
+#define NUMBER_OF_EXT_HW_QUEUES		12
+#define NUMBER_OF_CMPLT_QUEUES		NUMBER_OF_EXT_HW_QUEUES
+#define NUMBER_OF_CPU_HW_QUEUES		1
+#define NUMBER_OF_INT_HW_QUEUES		100
+#define NUMBER_OF_HW_QUEUES		(NUMBER_OF_EXT_HW_QUEUES + \
+					NUMBER_OF_CPU_HW_QUEUES + \
+					NUMBER_OF_INT_HW_QUEUES)
+
+/*
+ * Number of MSI interrupt IDs:
+ * Each completion queue has 1 ID
+ * The event queue has 1 ID
+ */
+#define NUMBER_OF_INTERRUPTS		(NUMBER_OF_CMPLT_QUEUES + \
+					NUMBER_OF_CPU_HW_QUEUES)
+
+#if (NUMBER_OF_INTERRUPTS > GAUDI_MSI_ENTRIES)
+#error "Number of MSI interrupts must be smaller than or equal to GAUDI_MSI_ENTRIES"
+#endif
+
+#define CORESIGHT_TIMEOUT_USEC		100000		/* 100 ms */
+
+#define GAUDI_MAX_CLK_FREQ		2200000000ull	/* 2200 MHz */
+
+#define MAX_POWER_DEFAULT_PCI		200000		/* 200W */
+#define MAX_POWER_DEFAULT_PMC		350000		/* 350W */
+
+#define GAUDI_CPU_TIMEOUT_USEC		30000000	/* 30s */
+
+#define TPC_ENABLED_MASK		0xFF
+
+#define GAUDI_HBM_SIZE_32GB		0x800000000ull
+#define GAUDI_HBM_DEVICES		4
+#define GAUDI_HBM_CHANNELS		8
+#define GAUDI_HBM_CFG_BASE		(mmHBM0_BASE - CFG_BASE)
+#define GAUDI_HBM_CFG_OFFSET		(mmHBM1_BASE - mmHBM0_BASE)
+
+#define DMA_MAX_TRANSFER_SIZE		U32_MAX
+
+#define GAUDI_DEFAULT_CARD_NAME		"HL2000"
+
+#define GAUDI_MAX_PENDING_CS		1024
+
+#if !IS_MAX_PENDING_CS_VALID(GAUDI_MAX_PENDING_CS)
+#error "GAUDI_MAX_PENDING_CS must be a power of 2 and greater than 1"
+#endif
+
+#define PCI_DMA_NUMBER_OF_CHNLS		3
+#define HBM_DMA_NUMBER_OF_CHNLS		5
+#define DMA_NUMBER_OF_CHNLS		(PCI_DMA_NUMBER_OF_CHNLS + \
+					HBM_DMA_NUMBER_OF_CHNLS)
+
+#define MME_NUMBER_OF_SLAVE_ENGINES	2
+#define MME_NUMBER_OF_ENGINES		(MME_NUMBER_OF_MASTER_ENGINES + \
+					MME_NUMBER_OF_SLAVE_ENGINES)
+#define MME_NUMBER_OF_QMANS		(MME_NUMBER_OF_MASTER_ENGINES * \
+					QMAN_STREAMS)
+
+#define QMAN_STREAMS		4
+
+#define DMA_QMAN_OFFSET		(mmDMA1_QM_BASE - 
mmDMA0_QM_BASE) +#define TPC_QMAN_OFFSET (mmTPC1_QM_BASE - mmTPC0_QM_BASE) +#define MME_QMAN_OFFSET (mmMME1_QM_BASE - mmMME0_QM_BASE) +#define NIC_MACRO_QMAN_OFFSET (mmNIC1_QM0_BASE - mmNIC0_QM0_BASE) + +#define TPC_CFG_OFFSET (mmTPC1_CFG_BASE - mmTPC0_CFG_BASE) + +#define DMA_CORE_OFFSET (mmDMA1_CORE_BASE - mmDMA0_CORE_BASE) + +#define QMAN_LDMA_SRC_OFFSET (mmDMA0_CORE_SRC_BASE_LO - mmDMA0_CORE_CFG_0) +#define QMAN_LDMA_DST_OFFSET (mmDMA0_CORE_DST_BASE_LO - mmDMA0_CORE_CFG_0) +#define QMAN_LDMA_SIZE_OFFSET (mmDMA0_CORE_DST_TSIZE_0 - mmDMA0_CORE_CFG_0) + +#define QMAN_CPDMA_SRC_OFFSET (mmDMA0_QM_CQ_PTR_LO_4 - mmDMA0_CORE_CFG_0) +#define QMAN_CPDMA_DST_OFFSET (mmDMA0_CORE_DST_BASE_LO - mmDMA0_CORE_CFG_0) +#define QMAN_CPDMA_SIZE_OFFSET (mmDMA0_QM_CQ_TSIZE_4 - mmDMA0_CORE_CFG_0) + +#define SIF_RTR_CTRL_OFFSET (mmSIF_RTR_CTRL_1_BASE - mmSIF_RTR_CTRL_0_BASE) + +#define NIF_RTR_CTRL_OFFSET (mmNIF_RTR_CTRL_1_BASE - mmNIF_RTR_CTRL_0_BASE) + +#define MME_ACC_OFFSET (mmMME1_ACC_BASE - mmMME0_ACC_BASE) +#define SRAM_BANK_OFFSET (mmSRAM_Y0_X1_RTR_BASE - mmSRAM_Y0_X0_RTR_BASE) + +#define NUM_OF_SOB_IN_BLOCK \ + (((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 - \ + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2) + +#define NUM_OF_MONITORS_IN_BLOCK \ + (((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_511 - \ + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0) + 4) >> 2) + + +/* DRAM Memory Map */ + +#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */ +#define MMU_PAGE_TABLES_SIZE 0x0BF00000 /* 191MB */ +#define MMU_CACHE_MNG_SIZE 0x00100000 /* 1MB */ +#define RESERVED 0x04000000 /* 64MB */ + +#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE +#define MMU_PAGE_TABLES_ADDR (CPU_FW_IMAGE_ADDR + CPU_FW_IMAGE_SIZE) +#define MMU_CACHE_MNG_ADDR (MMU_PAGE_TABLES_ADDR + MMU_PAGE_TABLES_SIZE) + +#define DRAM_DRIVER_END_ADDR (MMU_CACHE_MNG_ADDR + MMU_CACHE_MNG_SIZE +\ + RESERVED) + +#define DRAM_BASE_ADDR_USER 0x20000000 + +#if (DRAM_DRIVER_END_ADDR > DRAM_BASE_ADDR_USER) +#error "Driver must reserve no more than 512MB" +#endif + +/* Internal QMANs PQ sizes */ + +#define MME_QMAN_LENGTH 1024 +#define MME_QMAN_SIZE_IN_BYTES (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE) + +#define HBM_DMA_QMAN_LENGTH 1024 +#define HBM_DMA_QMAN_SIZE_IN_BYTES \ + (HBM_DMA_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE) + +#define TPC_QMAN_LENGTH 1024 +#define TPC_QMAN_SIZE_IN_BYTES (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE) + +#define SRAM_USER_BASE_OFFSET GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START + +/* Virtual address space */ +#define VA_HOST_SPACE_START 0x1000000000000ull /* 256TB */ +#define VA_HOST_SPACE_END 0x3FF8000000000ull /* 1PB - 1TB */ +#define VA_HOST_SPACE_SIZE (VA_HOST_SPACE_END - \ + VA_HOST_SPACE_START) /* 767TB */ + +#define HW_CAP_PLL BIT(0) +#define HW_CAP_HBM BIT(1) +#define HW_CAP_MMU BIT(2) +#define HW_CAP_MME BIT(3) +#define HW_CAP_CPU BIT(4) +#define HW_CAP_PCI_DMA BIT(5) +#define HW_CAP_MSI BIT(6) +#define HW_CAP_CPU_Q BIT(7) +#define HW_CAP_HBM_DMA BIT(8) +#define HW_CAP_CLK_GATE BIT(9) +#define HW_CAP_SRAM_SCRAMBLER BIT(10) +#define HW_CAP_HBM_SCRAMBLER BIT(11) + +#define HW_CAP_TPC0 BIT(24) +#define HW_CAP_TPC1 BIT(25) +#define HW_CAP_TPC2 BIT(26) +#define HW_CAP_TPC3 BIT(27) +#define HW_CAP_TPC4 BIT(28) +#define HW_CAP_TPC5 BIT(29) +#define HW_CAP_TPC6 BIT(30) +#define HW_CAP_TPC7 BIT(31) +#define HW_CAP_TPC_MASK GENMASK(31, 24) +#define HW_CAP_TPC_SHIFT 24 + +#define GAUDI_CPU_PCI_MSB_ADDR(addr) (((addr) & GENMASK_ULL(49, 39)) >> 39) +#define GAUDI_PCI_TO_CPU_ADDR(addr) \ + do { \ + (addr) &= ~GENMASK_ULL(49, 39); \ + (addr) |= BIT_ULL(39); \ + 
} while (0)
+#define GAUDI_CPU_TO_PCI_ADDR(addr, extension)	\
+	do { \
+		(addr) &= ~GENMASK_ULL(49, 39); \
+		(addr) |= (u64) (extension) << 39; \
+	} while (0)
+
+enum gaudi_dma_channels {
+	GAUDI_PCI_DMA_1,
+	GAUDI_PCI_DMA_2,
+	GAUDI_PCI_DMA_3,
+	GAUDI_HBM_DMA_1,
+	GAUDI_HBM_DMA_2,
+	GAUDI_HBM_DMA_3,
+	GAUDI_HBM_DMA_4,
+	GAUDI_HBM_DMA_5,
+	GAUDI_DMA_MAX
+};
+
+enum gaudi_tpc_mask {
+	GAUDI_TPC_MASK_TPC0 = 0x01,
+	GAUDI_TPC_MASK_TPC1 = 0x02,
+	GAUDI_TPC_MASK_TPC2 = 0x04,
+	GAUDI_TPC_MASK_TPC3 = 0x08,
+	GAUDI_TPC_MASK_TPC4 = 0x10,
+	GAUDI_TPC_MASK_TPC5 = 0x20,
+	GAUDI_TPC_MASK_TPC6 = 0x40,
+	GAUDI_TPC_MASK_TPC7 = 0x80,
+	GAUDI_TPC_MASK_ALL = 0xFF
+};
+
+/**
+ * struct gaudi_internal_qman_info - Internal QMAN information.
+ * @pq_kernel_addr: Kernel address of the PQ memory area in the host.
+ * @pq_dma_addr: DMA address of the PQ memory area in the host.
+ * @pq_size: Size of allocated host memory for PQ.
+ */
+struct gaudi_internal_qman_info {
+	void		*pq_kernel_addr;
+	dma_addr_t	pq_dma_addr;
+	size_t		pq_size;
+};
+
+/**
+ * struct gaudi_device - ASIC-specific management structure.
+ * @cpucp_info_get: get information on device from CPU-CP
+ * @hw_queues_lock: protects the H/W queues from concurrent access.
+ * @clk_gate_mutex: protects code areas that require clock gating to be disabled
+ *                  temporarily
+ * @internal_qmans: Internal QMANs information. The array size is larger than
+ *                  the actual number of internal queues because they are not in
+ *                  consecutive order.
+ * @hbm_bar_cur_addr: current address of HBM PCI bar.
+ * @max_freq_value: current max clk frequency.
+ * @events: array that holds all event IDs
+ * @events_stat: array that holds histogram of all received events.
+ * @events_stat_aggregate: same as events_stat but doesn't get cleared on reset
+ * @hw_cap_initialized: This field contains a bit per H/W engine. When that
+ *                      engine is initialized, that bit is set by the driver to
+ *                      signal we can use this engine in later code paths.
+ *                      Each bit is cleared upon reset of its corresponding H/W
+ *                      engine.
+ * @multi_msi_mode: whether we are working in multi MSI or single MSI mode.
+ *                  Multi MSI is possible only with IOMMU enabled.
+ * @mmu_cache_inv_pi: PI for MMU cache invalidation flow. The H/W expects an
+ *                    8-bit value so use u8.
+ */ +struct gaudi_device { + int (*cpucp_info_get)(struct hl_device *hdev); + + /* TODO: remove hw_queues_lock after moving to scheduler code */ + spinlock_t hw_queues_lock; + struct mutex clk_gate_mutex; + + struct gaudi_internal_qman_info internal_qmans[GAUDI_QUEUE_ID_SIZE]; + + u64 hbm_bar_cur_addr; + u64 max_freq_value; + + u32 events[GAUDI_EVENT_SIZE]; + u32 events_stat[GAUDI_EVENT_SIZE]; + u32 events_stat_aggregate[GAUDI_EVENT_SIZE]; + u32 hw_cap_initialized; + u8 multi_msi_mode; + u8 mmu_cache_inv_pi; +}; + +void gaudi_init_security(struct hl_device *hdev); +void gaudi_add_device_attr(struct hl_device *hdev, + struct attribute_group *dev_attr_grp); +void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq); +int gaudi_debug_coresight(struct hl_device *hdev, void *data); +void gaudi_halt_coresight(struct hl_device *hdev); +int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk); +void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid); + +#endif /* GAUDIP_H_ */ diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c new file mode 100644 index 000000000..283d37b76 --- /dev/null +++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c @@ -0,0 +1,900 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "gaudiP.h" +#include "../include/gaudi/gaudi_coresight.h" +#include "../include/gaudi/asic_reg/gaudi_regs.h" +#include "../include/gaudi/gaudi_masks.h" +#include "../include/gaudi/gaudi_reg_map.h" + +#include <uapi/misc/habanalabs.h> +#include <linux/coresight.h> + +#define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET +#define SPMU_EVENT_TYPES_OFFSET 0x400 +#define SPMU_MAX_COUNTERS 6 + +static u64 debug_stm_regs[GAUDI_STM_LAST + 1] = { + [GAUDI_STM_MME0_ACC] = mmMME0_ACC_STM_BASE, + [GAUDI_STM_MME0_SBAB] = mmMME0_SBAB_STM_BASE, + [GAUDI_STM_MME0_CTRL] = mmMME0_CTRL_STM_BASE, + [GAUDI_STM_MME1_ACC] = mmMME1_ACC_STM_BASE, + [GAUDI_STM_MME1_SBAB] = mmMME1_SBAB_STM_BASE, + [GAUDI_STM_MME1_CTRL] = mmMME1_CTRL_STM_BASE, + [GAUDI_STM_MME2_ACC] = mmMME2_ACC_STM_BASE, + [GAUDI_STM_MME2_SBAB] = mmMME2_SBAB_STM_BASE, + [GAUDI_STM_MME2_CTRL] = mmMME2_CTRL_STM_BASE, + [GAUDI_STM_MME3_ACC] = mmMME3_ACC_STM_BASE, + [GAUDI_STM_MME3_SBAB] = mmMME3_SBAB_STM_BASE, + [GAUDI_STM_MME3_CTRL] = mmMME3_CTRL_STM_BASE, + [GAUDI_STM_DMA_IF_W_S] = mmDMA_IF_W_S_STM_BASE, + [GAUDI_STM_DMA_IF_E_S] = mmDMA_IF_E_S_STM_BASE, + [GAUDI_STM_DMA_IF_W_N] = mmDMA_IF_W_N_STM_BASE, + [GAUDI_STM_DMA_IF_E_N] = mmDMA_IF_E_N_STM_BASE, + [GAUDI_STM_CPU] = mmCPU_STM_BASE, + [GAUDI_STM_DMA_CH_0_CS] = mmDMA_CH_0_CS_STM_BASE, + [GAUDI_STM_DMA_CH_1_CS] = mmDMA_CH_1_CS_STM_BASE, + [GAUDI_STM_DMA_CH_2_CS] = mmDMA_CH_2_CS_STM_BASE, + [GAUDI_STM_DMA_CH_3_CS] = mmDMA_CH_3_CS_STM_BASE, + [GAUDI_STM_DMA_CH_4_CS] = mmDMA_CH_4_CS_STM_BASE, + [GAUDI_STM_DMA_CH_5_CS] = mmDMA_CH_5_CS_STM_BASE, + [GAUDI_STM_DMA_CH_6_CS] = mmDMA_CH_6_CS_STM_BASE, + [GAUDI_STM_DMA_CH_7_CS] = mmDMA_CH_7_CS_STM_BASE, + [GAUDI_STM_PCIE] = mmPCIE_STM_BASE, + [GAUDI_STM_MMU_CS] = mmMMU_CS_STM_BASE, + [GAUDI_STM_PSOC] = mmPSOC_STM_BASE, + [GAUDI_STM_NIC0_0] = mmSTM_0_NIC0_DBG_BASE, + [GAUDI_STM_NIC0_1] = mmSTM_1_NIC0_DBG_BASE, + [GAUDI_STM_NIC1_0] = mmSTM_0_NIC1_DBG_BASE, + [GAUDI_STM_NIC1_1] = mmSTM_1_NIC1_DBG_BASE, + [GAUDI_STM_NIC2_0] = mmSTM_0_NIC2_DBG_BASE, + [GAUDI_STM_NIC2_1] = mmSTM_1_NIC2_DBG_BASE, + [GAUDI_STM_NIC3_0] = mmSTM_0_NIC3_DBG_BASE, + [GAUDI_STM_NIC3_1] = mmSTM_1_NIC3_DBG_BASE, + 
[GAUDI_STM_NIC4_0] = mmSTM_0_NIC4_DBG_BASE, + [GAUDI_STM_NIC4_1] = mmSTM_1_NIC4_DBG_BASE, + [GAUDI_STM_TPC0_EML] = mmTPC0_EML_STM_BASE, + [GAUDI_STM_TPC1_EML] = mmTPC1_EML_STM_BASE, + [GAUDI_STM_TPC2_EML] = mmTPC2_EML_STM_BASE, + [GAUDI_STM_TPC3_EML] = mmTPC3_EML_STM_BASE, + [GAUDI_STM_TPC4_EML] = mmTPC4_EML_STM_BASE, + [GAUDI_STM_TPC5_EML] = mmTPC5_EML_STM_BASE, + [GAUDI_STM_TPC6_EML] = mmTPC6_EML_STM_BASE, + [GAUDI_STM_TPC7_EML] = mmTPC7_EML_STM_BASE +}; + +static u64 debug_etf_regs[GAUDI_ETF_LAST + 1] = { + [GAUDI_ETF_MME0_ACC] = mmMME0_ACC_ETF_BASE, + [GAUDI_ETF_MME0_SBAB] = mmMME0_SBAB_ETF_BASE, + [GAUDI_ETF_MME0_CTRL] = mmMME0_CTRL_ETF_BASE, + [GAUDI_ETF_MME1_ACC] = mmMME1_ACC_ETF_BASE, + [GAUDI_ETF_MME1_SBAB] = mmMME1_SBAB_ETF_BASE, + [GAUDI_ETF_MME1_CTRL] = mmMME1_CTRL_ETF_BASE, + [GAUDI_ETF_MME2_ACC] = mmMME2_MME2_ACC_ETF_BASE, + [GAUDI_ETF_MME2_SBAB] = mmMME2_SBAB_ETF_BASE, + [GAUDI_ETF_MME2_CTRL] = mmMME2_CTRL_ETF_BASE, + [GAUDI_ETF_MME3_ACC] = mmMME3_ACC_ETF_BASE, + [GAUDI_ETF_MME3_SBAB] = mmMME3_SBAB_ETF_BASE, + [GAUDI_ETF_MME3_CTRL] = mmMME3_CTRL_ETF_BASE, + [GAUDI_ETF_DMA_IF_W_S] = mmDMA_IF_W_S_ETF_BASE, + [GAUDI_ETF_DMA_IF_E_S] = mmDMA_IF_E_S_ETF_BASE, + [GAUDI_ETF_DMA_IF_W_N] = mmDMA_IF_W_N_ETF_BASE, + [GAUDI_ETF_DMA_IF_E_N] = mmDMA_IF_E_N_ETF_BASE, + [GAUDI_ETF_CPU_0] = mmCPU_ETF_0_BASE, + [GAUDI_ETF_CPU_1] = mmCPU_ETF_1_BASE, + [GAUDI_ETF_CPU_TRACE] = mmCPU_ETF_TRACE_BASE, + [GAUDI_ETF_DMA_CH_0_CS] = mmDMA_CH_0_CS_ETF_BASE, + [GAUDI_ETF_DMA_CH_1_CS] = mmDMA_CH_1_CS_ETF_BASE, + [GAUDI_ETF_DMA_CH_2_CS] = mmDMA_CH_2_CS_ETF_BASE, + [GAUDI_ETF_DMA_CH_3_CS] = mmDMA_CH_3_CS_ETF_BASE, + [GAUDI_ETF_DMA_CH_4_CS] = mmDMA_CH_4_CS_ETF_BASE, + [GAUDI_ETF_DMA_CH_5_CS] = mmDMA_CH_5_CS_ETF_BASE, + [GAUDI_ETF_DMA_CH_6_CS] = mmDMA_CH_6_CS_ETF_BASE, + [GAUDI_ETF_DMA_CH_7_CS] = mmDMA_CH_7_CS_ETF_BASE, + [GAUDI_ETF_PCIE] = mmPCIE_ETF_BASE, + [GAUDI_ETF_MMU_CS] = mmMMU_CS_ETF_BASE, + [GAUDI_ETF_PSOC] = mmPSOC_ETF_BASE, + [GAUDI_ETF_NIC0_0] = mmETF_0_NIC0_DBG_BASE, + [GAUDI_ETF_NIC0_1] = mmETF_1_NIC0_DBG_BASE, + [GAUDI_ETF_NIC1_0] = mmETF_0_NIC1_DBG_BASE, + [GAUDI_ETF_NIC1_1] = mmETF_1_NIC1_DBG_BASE, + [GAUDI_ETF_NIC2_0] = mmETF_0_NIC2_DBG_BASE, + [GAUDI_ETF_NIC2_1] = mmETF_1_NIC2_DBG_BASE, + [GAUDI_ETF_NIC3_0] = mmETF_0_NIC3_DBG_BASE, + [GAUDI_ETF_NIC3_1] = mmETF_1_NIC3_DBG_BASE, + [GAUDI_ETF_NIC4_0] = mmETF_0_NIC4_DBG_BASE, + [GAUDI_ETF_NIC4_1] = mmETF_1_NIC4_DBG_BASE, + [GAUDI_ETF_TPC0_EML] = mmTPC0_EML_ETF_BASE, + [GAUDI_ETF_TPC1_EML] = mmTPC1_EML_ETF_BASE, + [GAUDI_ETF_TPC2_EML] = mmTPC2_EML_ETF_BASE, + [GAUDI_ETF_TPC3_EML] = mmTPC3_EML_ETF_BASE, + [GAUDI_ETF_TPC4_EML] = mmTPC4_EML_ETF_BASE, + [GAUDI_ETF_TPC5_EML] = mmTPC5_EML_ETF_BASE, + [GAUDI_ETF_TPC6_EML] = mmTPC6_EML_ETF_BASE, + [GAUDI_ETF_TPC7_EML] = mmTPC7_EML_ETF_BASE +}; + +static u64 debug_funnel_regs[GAUDI_FUNNEL_LAST + 1] = { + [GAUDI_FUNNEL_MME0_ACC] = mmMME0_ACC_FUNNEL_BASE, + [GAUDI_FUNNEL_MME1_ACC] = mmMME1_ACC_FUNNEL_BASE, + [GAUDI_FUNNEL_MME2_ACC] = mmMME2_ACC_FUNNEL_BASE, + [GAUDI_FUNNEL_MME3_ACC] = mmMME3_ACC_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y0_X0] = mmSRAM_Y0_X0_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y0_X1] = mmSRAM_Y0_X1_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y0_X2] = mmSRAM_Y0_X2_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y0_X3] = mmSRAM_Y0_X3_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y0_X4] = mmSRAM_Y0_X4_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y0_X5] = mmSRAM_Y0_X5_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y0_X6] = mmSRAM_Y0_X6_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y0_X7] = mmSRAM_Y0_X7_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y1_X0] = mmSRAM_Y1_X0_FUNNEL_BASE, + 
[GAUDI_FUNNEL_SRAM_Y1_X1] = mmSRAM_Y1_X1_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y1_X2] = mmSRAM_Y1_X2_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y1_X3] = mmSRAM_Y1_X3_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y1_X4] = mmSRAM_Y1_X4_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y1_X5] = mmSRAM_Y1_X5_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y1_X6] = mmSRAM_Y1_X6_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y1_X7] = mmSRAM_Y1_X7_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y2_X0] = mmSRAM_Y2_X0_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y2_X1] = mmSRAM_Y2_X1_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y2_X2] = mmSRAM_Y2_X2_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y2_X3] = mmSRAM_Y2_X3_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y2_X4] = mmSRAM_Y2_X4_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y2_X5] = mmSRAM_Y2_X5_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y2_X6] = mmSRAM_Y2_X6_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y2_X7] = mmSRAM_Y2_X7_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y3_X0] = mmSRAM_Y3_X0_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y3_X1] = mmSRAM_Y3_X1_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y3_X2] = mmSRAM_Y3_X2_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y3_X4] = mmSRAM_Y3_X4_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y3_X3] = mmSRAM_Y3_X3_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y3_X5] = mmSRAM_Y3_X5_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y3_X6] = mmSRAM_Y3_X6_FUNNEL_BASE, + [GAUDI_FUNNEL_SRAM_Y3_X7] = mmSRAM_Y3_X7_FUNNEL_BASE, + [GAUDI_FUNNEL_SIF_0] = mmSIF_FUNNEL_0_BASE, + [GAUDI_FUNNEL_SIF_1] = mmSIF_FUNNEL_1_BASE, + [GAUDI_FUNNEL_SIF_2] = mmSIF_FUNNEL_2_BASE, + [GAUDI_FUNNEL_SIF_3] = mmSIF_FUNNEL_3_BASE, + [GAUDI_FUNNEL_SIF_4] = mmSIF_FUNNEL_4_BASE, + [GAUDI_FUNNEL_SIF_5] = mmSIF_FUNNEL_5_BASE, + [GAUDI_FUNNEL_SIF_6] = mmSIF_FUNNEL_6_BASE, + [GAUDI_FUNNEL_SIF_7] = mmSIF_FUNNEL_7_BASE, + [GAUDI_FUNNEL_NIF_0] = mmNIF_FUNNEL_0_BASE, + [GAUDI_FUNNEL_NIF_1] = mmNIF_FUNNEL_1_BASE, + [GAUDI_FUNNEL_NIF_2] = mmNIF_FUNNEL_2_BASE, + [GAUDI_FUNNEL_NIF_3] = mmNIF_FUNNEL_3_BASE, + [GAUDI_FUNNEL_NIF_4] = mmNIF_FUNNEL_4_BASE, + [GAUDI_FUNNEL_NIF_5] = mmNIF_FUNNEL_5_BASE, + [GAUDI_FUNNEL_NIF_6] = mmNIF_FUNNEL_6_BASE, + [GAUDI_FUNNEL_NIF_7] = mmNIF_FUNNEL_7_BASE, + [GAUDI_FUNNEL_DMA_IF_W_S] = mmDMA_IF_W_S_FUNNEL_BASE, + [GAUDI_FUNNEL_DMA_IF_E_S] = mmDMA_IF_E_S_FUNNEL_BASE, + [GAUDI_FUNNEL_DMA_IF_W_N] = mmDMA_IF_W_N_FUNNEL_BASE, + [GAUDI_FUNNEL_DMA_IF_E_N] = mmDMA_IF_E_N_FUNNEL_BASE, + [GAUDI_FUNNEL_CPU] = mmCPU_FUNNEL_BASE, + [GAUDI_FUNNEL_NIC_TPC_W_S] = mmNIC_TPC_FUNNEL_W_S_BASE, + [GAUDI_FUNNEL_NIC_TPC_E_S] = mmNIC_TPC_FUNNEL_E_S_BASE, + [GAUDI_FUNNEL_NIC_TPC_W_N] = mmNIC_TPC_FUNNEL_W_N_BASE, + [GAUDI_FUNNEL_NIC_TPC_E_N] = mmNIC_TPC_FUNNEL_E_N_BASE, + [GAUDI_FUNNEL_PCIE] = mmPCIE_FUNNEL_BASE, + [GAUDI_FUNNEL_PSOC] = mmPSOC_FUNNEL_BASE, + [GAUDI_FUNNEL_NIC0] = mmFUNNEL_NIC0_DBG_BASE, + [GAUDI_FUNNEL_NIC1] = mmFUNNEL_NIC1_DBG_BASE, + [GAUDI_FUNNEL_NIC2] = mmFUNNEL_NIC2_DBG_BASE, + [GAUDI_FUNNEL_NIC3] = mmFUNNEL_NIC3_DBG_BASE, + [GAUDI_FUNNEL_NIC4] = mmFUNNEL_NIC4_DBG_BASE, + [GAUDI_FUNNEL_TPC0_EML] = mmTPC0_EML_FUNNEL_BASE, + [GAUDI_FUNNEL_TPC1_EML] = mmTPC1_EML_FUNNEL_BASE, + [GAUDI_FUNNEL_TPC2_EML] = mmTPC2_EML_FUNNEL_BASE, + [GAUDI_FUNNEL_TPC3_EML] = mmTPC3_EML_FUNNEL_BASE, + [GAUDI_FUNNEL_TPC4_EML] = mmTPC4_EML_FUNNEL_BASE, + [GAUDI_FUNNEL_TPC5_EML] = mmTPC5_EML_FUNNEL_BASE, + [GAUDI_FUNNEL_TPC6_EML] = mmTPC6_EML_FUNNEL_BASE, + [GAUDI_FUNNEL_TPC7_EML] = mmTPC7_EML_FUNNEL_BASE +}; + +static u64 debug_bmon_regs[GAUDI_BMON_LAST + 1] = { + [GAUDI_BMON_MME0_ACC_0] = mmMME0_ACC_BMON0_BASE, + [GAUDI_BMON_MME0_SBAB_0] = mmMME0_SBAB_BMON0_BASE, + [GAUDI_BMON_MME0_SBAB_1] = mmMME0_SBAB_BMON1_BASE, + [GAUDI_BMON_MME0_CTRL_0] = mmMME0_CTRL_BMON0_BASE, + 
[GAUDI_BMON_MME0_CTRL_1] = mmMME0_CTRL_BMON1_BASE, + [GAUDI_BMON_MME1_ACC_0] = mmMME1_ACC_BMON0_BASE, + [GAUDI_BMON_MME1_SBAB_0] = mmMME1_SBAB_BMON0_BASE, + [GAUDI_BMON_MME1_SBAB_1] = mmMME1_SBAB_BMON1_BASE, + [GAUDI_BMON_MME1_CTRL_0] = mmMME1_CTRL_BMON0_BASE, + [GAUDI_BMON_MME1_CTRL_1] = mmMME1_CTRL_BMON1_BASE, + [GAUDI_BMON_MME2_ACC_0] = mmMME2_ACC_BMON0_BASE, + [GAUDI_BMON_MME2_SBAB_0] = mmMME2_SBAB_BMON0_BASE, + [GAUDI_BMON_MME2_SBAB_1] = mmMME2_SBAB_BMON1_BASE, + [GAUDI_BMON_MME2_CTRL_0] = mmMME2_CTRL_BMON0_BASE, + [GAUDI_BMON_MME2_CTRL_1] = mmMME2_CTRL_BMON1_BASE, + [GAUDI_BMON_MME3_ACC_0] = mmMME3_ACC_BMON0_BASE, + [GAUDI_BMON_MME3_SBAB_0] = mmMME3_SBAB_BMON0_BASE, + [GAUDI_BMON_MME3_SBAB_1] = mmMME3_SBAB_BMON1_BASE, + [GAUDI_BMON_MME3_CTRL_0] = mmMME3_CTRL_BMON0_BASE, + [GAUDI_BMON_MME3_CTRL_1] = mmMME3_CTRL_BMON1_BASE, + [GAUDI_BMON_DMA_IF_W_S_SOB_WR] = mmDMA_IF_W_S_SOB_WR_BMON_BASE, + [GAUDI_BMON_DMA_IF_W_S_0_WR] = mmDMA_IF_W_S_HBM0_WR_BMON_BASE, + [GAUDI_BMON_DMA_IF_W_S_0_RD] = mmDMA_IF_W_S_HBM0_RD_BMON_BASE, + [GAUDI_BMON_DMA_IF_W_S_1_WR] = mmDMA_IF_W_S_HBM1_WR_BMON_BASE, + [GAUDI_BMON_DMA_IF_W_S_1_RD] = mmDMA_IF_W_S_HBM1_RD_BMON_BASE, + [GAUDI_BMON_DMA_IF_E_S_SOB_WR] = mmDMA_IF_E_S_SOB_WR_BMON_BASE, + [GAUDI_BMON_DMA_IF_E_S_0_WR] = mmDMA_IF_E_S_HBM0_WR_BMON_BASE, + [GAUDI_BMON_DMA_IF_E_S_0_RD] = mmDMA_IF_E_S_HBM0_RD_BMON_BASE, + [GAUDI_BMON_DMA_IF_E_S_1_WR] = mmDMA_IF_E_S_HBM1_WR_BMON_BASE, + [GAUDI_BMON_DMA_IF_E_S_1_RD] = mmDMA_IF_E_S_HBM1_RD_BMON_BASE, + [GAUDI_BMON_DMA_IF_W_N_SOB_WR] = mmDMA_IF_W_N_SOB_WR_BMON_BASE, + [GAUDI_BMON_DMA_IF_W_N_HBM0_WR] = mmDMA_IF_W_N_HBM0_WR_BMON_BASE, + [GAUDI_BMON_DMA_IF_W_N_HBM0_RD] = mmDMA_IF_W_N_HBM0_RD_BMON_BASE, + [GAUDI_BMON_DMA_IF_W_N_HBM1_WR] = mmDMA_IF_W_N_HBM1_WR_BMON_BASE, + [GAUDI_BMON_DMA_IF_W_N_HBM1_RD] = mmDMA_IF_W_N_HBM1_RD_BMON_BASE, + [GAUDI_BMON_DMA_IF_E_N_SOB_WR] = mmDMA_IF_E_N_SOB_WR_BMON_BASE, + [GAUDI_BMON_DMA_IF_E_N_HBM0_WR] = mmDMA_IF_E_N_HBM0_WR_BMON_BASE, + [GAUDI_BMON_DMA_IF_E_N_HBM0_RD] = mmDMA_IF_E_N_HBM0_RD_BMON_BASE, + [GAUDI_BMON_DMA_IF_E_N_HBM1_WR] = mmDMA_IF_E_N_HBM1_WR_BMON_BASE, + [GAUDI_BMON_DMA_IF_E_N_HBM1_RD] = mmDMA_IF_E_N_HBM1_RD_BMON_BASE, + [GAUDI_BMON_CPU_WR] = mmCPU_WR_BMON_BASE, + [GAUDI_BMON_CPU_RD] = mmCPU_RD_BMON_BASE, + [GAUDI_BMON_DMA_CH_0_0] = mmDMA_CH_0_BMON_0_BASE, + [GAUDI_BMON_DMA_CH_0_1] = mmDMA_CH_0_BMON_1_BASE, + [GAUDI_BMON_DMA_CH_1_0] = mmDMA_CH_1_BMON_0_BASE, + [GAUDI_BMON_DMA_CH_1_1] = mmDMA_CH_1_BMON_1_BASE, + [GAUDI_BMON_DMA_CH_2_0] = mmDMA_CH_2_BMON_0_BASE, + [GAUDI_BMON_DMA_CH_2_1] = mmDMA_CH_2_BMON_1_BASE, + [GAUDI_BMON_DMA_CH_3_0] = mmDMA_CH_3_BMON_0_BASE, + [GAUDI_BMON_DMA_CH_3_1] = mmDMA_CH_3_BMON_1_BASE, + [GAUDI_BMON_DMA_CH_4_0] = mmDMA_CH_4_BMON_0_BASE, + [GAUDI_BMON_DMA_CH_4_1] = mmDMA_CH_4_BMON_1_BASE, + [GAUDI_BMON_DMA_CH_5_0] = mmDMA_CH_5_BMON_0_BASE, + [GAUDI_BMON_DMA_CH_5_1] = mmDMA_CH_5_BMON_1_BASE, + [GAUDI_BMON_DMA_CH_6_0] = mmDMA_CH_6_BMON_0_BASE, + [GAUDI_BMON_DMA_CH_6_1] = mmDMA_CH_6_BMON_1_BASE, + [GAUDI_BMON_DMA_CH_7_0] = mmDMA_CH_7_BMON_0_BASE, + [GAUDI_BMON_DMA_CH_7_1] = mmDMA_CH_7_BMON_1_BASE, + [GAUDI_BMON_PCIE_MSTR_WR] = mmPCIE_BMON_MSTR_WR_BASE, + [GAUDI_BMON_PCIE_MSTR_RD] = mmPCIE_BMON_MSTR_RD_BASE, + [GAUDI_BMON_PCIE_SLV_WR] = mmPCIE_BMON_SLV_WR_BASE, + [GAUDI_BMON_PCIE_SLV_RD] = mmPCIE_BMON_SLV_RD_BASE, + [GAUDI_BMON_MMU_0] = mmMMU_BMON_0_BASE, + [GAUDI_BMON_MMU_1] = mmMMU_BMON_1_BASE, + [GAUDI_BMON_NIC0_0] = mmBMON0_NIC0_DBG_BASE, + [GAUDI_BMON_NIC0_1] = mmBMON1_NIC0_DBG_BASE, + [GAUDI_BMON_NIC0_2] = mmBMON2_NIC0_DBG_BASE, + [GAUDI_BMON_NIC0_3] 
= mmBMON3_NIC0_DBG_BASE, + [GAUDI_BMON_NIC0_4] = mmBMON4_NIC0_DBG_BASE, + [GAUDI_BMON_NIC1_0] = mmBMON0_NIC1_DBG_BASE, + [GAUDI_BMON_NIC1_1] = mmBMON1_NIC1_DBG_BASE, + [GAUDI_BMON_NIC1_2] = mmBMON2_NIC1_DBG_BASE, + [GAUDI_BMON_NIC1_3] = mmBMON3_NIC1_DBG_BASE, + [GAUDI_BMON_NIC1_4] = mmBMON4_NIC1_DBG_BASE, + [GAUDI_BMON_NIC2_0] = mmBMON0_NIC2_DBG_BASE, + [GAUDI_BMON_NIC2_1] = mmBMON1_NIC2_DBG_BASE, + [GAUDI_BMON_NIC2_2] = mmBMON2_NIC2_DBG_BASE, + [GAUDI_BMON_NIC2_3] = mmBMON3_NIC2_DBG_BASE, + [GAUDI_BMON_NIC2_4] = mmBMON4_NIC2_DBG_BASE, + [GAUDI_BMON_NIC3_0] = mmBMON0_NIC3_DBG_BASE, + [GAUDI_BMON_NIC3_1] = mmBMON1_NIC3_DBG_BASE, + [GAUDI_BMON_NIC3_2] = mmBMON2_NIC3_DBG_BASE, + [GAUDI_BMON_NIC3_3] = mmBMON3_NIC3_DBG_BASE, + [GAUDI_BMON_NIC3_4] = mmBMON4_NIC3_DBG_BASE, + [GAUDI_BMON_NIC4_0] = mmBMON0_NIC4_DBG_BASE, + [GAUDI_BMON_NIC4_1] = mmBMON1_NIC4_DBG_BASE, + [GAUDI_BMON_NIC4_2] = mmBMON2_NIC4_DBG_BASE, + [GAUDI_BMON_NIC4_3] = mmBMON3_NIC4_DBG_BASE, + [GAUDI_BMON_NIC4_4] = mmBMON4_NIC4_DBG_BASE, + [GAUDI_BMON_TPC0_EML_0] = mmTPC0_EML_BUSMON_0_BASE, + [GAUDI_BMON_TPC0_EML_1] = mmTPC0_EML_BUSMON_1_BASE, + [GAUDI_BMON_TPC0_EML_2] = mmTPC0_EML_BUSMON_2_BASE, + [GAUDI_BMON_TPC0_EML_3] = mmTPC0_EML_BUSMON_3_BASE, + [GAUDI_BMON_TPC1_EML_0] = mmTPC1_EML_BUSMON_0_BASE, + [GAUDI_BMON_TPC1_EML_1] = mmTPC1_EML_BUSMON_1_BASE, + [GAUDI_BMON_TPC1_EML_2] = mmTPC1_EML_BUSMON_2_BASE, + [GAUDI_BMON_TPC1_EML_3] = mmTPC1_EML_BUSMON_3_BASE, + [GAUDI_BMON_TPC2_EML_0] = mmTPC2_EML_BUSMON_0_BASE, + [GAUDI_BMON_TPC2_EML_1] = mmTPC2_EML_BUSMON_1_BASE, + [GAUDI_BMON_TPC2_EML_2] = mmTPC2_EML_BUSMON_2_BASE, + [GAUDI_BMON_TPC2_EML_3] = mmTPC2_EML_BUSMON_3_BASE, + [GAUDI_BMON_TPC3_EML_0] = mmTPC3_EML_BUSMON_0_BASE, + [GAUDI_BMON_TPC3_EML_1] = mmTPC3_EML_BUSMON_1_BASE, + [GAUDI_BMON_TPC3_EML_2] = mmTPC3_EML_BUSMON_2_BASE, + [GAUDI_BMON_TPC3_EML_3] = mmTPC3_EML_BUSMON_3_BASE, + [GAUDI_BMON_TPC4_EML_0] = mmTPC4_EML_BUSMON_0_BASE, + [GAUDI_BMON_TPC4_EML_1] = mmTPC4_EML_BUSMON_1_BASE, + [GAUDI_BMON_TPC4_EML_2] = mmTPC4_EML_BUSMON_2_BASE, + [GAUDI_BMON_TPC4_EML_3] = mmTPC4_EML_BUSMON_3_BASE, + [GAUDI_BMON_TPC5_EML_0] = mmTPC5_EML_BUSMON_0_BASE, + [GAUDI_BMON_TPC5_EML_1] = mmTPC5_EML_BUSMON_1_BASE, + [GAUDI_BMON_TPC5_EML_2] = mmTPC5_EML_BUSMON_2_BASE, + [GAUDI_BMON_TPC5_EML_3] = mmTPC5_EML_BUSMON_3_BASE, + [GAUDI_BMON_TPC6_EML_0] = mmTPC6_EML_BUSMON_0_BASE, + [GAUDI_BMON_TPC6_EML_1] = mmTPC6_EML_BUSMON_1_BASE, + [GAUDI_BMON_TPC6_EML_2] = mmTPC6_EML_BUSMON_2_BASE, + [GAUDI_BMON_TPC6_EML_3] = mmTPC6_EML_BUSMON_3_BASE, + [GAUDI_BMON_TPC7_EML_0] = mmTPC7_EML_BUSMON_0_BASE, + [GAUDI_BMON_TPC7_EML_1] = mmTPC7_EML_BUSMON_1_BASE, + [GAUDI_BMON_TPC7_EML_2] = mmTPC7_EML_BUSMON_2_BASE, + [GAUDI_BMON_TPC7_EML_3] = mmTPC7_EML_BUSMON_3_BASE +}; + +static u64 debug_spmu_regs[GAUDI_SPMU_LAST + 1] = { + [GAUDI_SPMU_MME0_ACC] = mmMME0_ACC_SPMU_BASE, + [GAUDI_SPMU_MME0_SBAB] = mmMME0_SBAB_SPMU_BASE, + [GAUDI_SPMU_MME0_CTRL] = mmMME0_CTRL_SPMU_BASE, + [GAUDI_SPMU_MME1_ACC] = mmMME1_ACC_SPMU_BASE, + [GAUDI_SPMU_MME1_SBAB] = mmMME1_SBAB_SPMU_BASE, + [GAUDI_SPMU_MME1_CTRL] = mmMME1_CTRL_SPMU_BASE, + [GAUDI_SPMU_MME2_MME2_ACC] = mmMME2_ACC_SPMU_BASE, + [GAUDI_SPMU_MME2_SBAB] = mmMME2_SBAB_SPMU_BASE, + [GAUDI_SPMU_MME2_CTRL] = mmMME2_CTRL_SPMU_BASE, + [GAUDI_SPMU_MME3_ACC] = mmMME3_ACC_SPMU_BASE, + [GAUDI_SPMU_MME3_SBAB] = mmMME3_SBAB_SPMU_BASE, + [GAUDI_SPMU_MME3_CTRL] = mmMME3_CTRL_SPMU_BASE, + [GAUDI_SPMU_DMA_CH_0_CS] = mmDMA_CH_0_CS_SPMU_BASE, + [GAUDI_SPMU_DMA_CH_1_CS] = mmDMA_CH_1_CS_SPMU_BASE, + [GAUDI_SPMU_DMA_CH_2_CS] = 
mmDMA_CH_2_CS_SPMU_BASE, + [GAUDI_SPMU_DMA_CH_3_CS] = mmDMA_CH_3_CS_SPMU_BASE, + [GAUDI_SPMU_DMA_CH_4_CS] = mmDMA_CH_4_CS_SPMU_BASE, + [GAUDI_SPMU_DMA_CH_5_CS] = mmDMA_CH_5_CS_SPMU_BASE, + [GAUDI_SPMU_DMA_CH_6_CS] = mmDMA_CH_6_CS_SPMU_BASE, + [GAUDI_SPMU_DMA_CH_7_CS] = mmDMA_CH_7_CS_SPMU_BASE, + [GAUDI_SPMU_PCIE] = mmPCIE_SPMU_BASE, + [GAUDI_SPMU_MMU_CS] = mmMMU_CS_SPMU_BASE, + [GAUDI_SPMU_NIC0_0] = mmSPMU_0_NIC0_DBG_BASE, + [GAUDI_SPMU_NIC0_1] = mmSPMU_1_NIC0_DBG_BASE, + [GAUDI_SPMU_NIC1_0] = mmSPMU_0_NIC1_DBG_BASE, + [GAUDI_SPMU_NIC1_1] = mmSPMU_1_NIC1_DBG_BASE, + [GAUDI_SPMU_NIC2_0] = mmSPMU_0_NIC2_DBG_BASE, + [GAUDI_SPMU_NIC2_1] = mmSPMU_1_NIC2_DBG_BASE, + [GAUDI_SPMU_NIC3_0] = mmSPMU_0_NIC3_DBG_BASE, + [GAUDI_SPMU_NIC3_1] = mmSPMU_1_NIC3_DBG_BASE, + [GAUDI_SPMU_NIC4_0] = mmSPMU_0_NIC4_DBG_BASE, + [GAUDI_SPMU_NIC4_1] = mmSPMU_1_NIC4_DBG_BASE, + [GAUDI_SPMU_TPC0_EML] = mmTPC0_EML_SPMU_BASE, + [GAUDI_SPMU_TPC1_EML] = mmTPC1_EML_SPMU_BASE, + [GAUDI_SPMU_TPC2_EML] = mmTPC2_EML_SPMU_BASE, + [GAUDI_SPMU_TPC3_EML] = mmTPC3_EML_SPMU_BASE, + [GAUDI_SPMU_TPC4_EML] = mmTPC4_EML_SPMU_BASE, + [GAUDI_SPMU_TPC5_EML] = mmTPC5_EML_SPMU_BASE, + [GAUDI_SPMU_TPC6_EML] = mmTPC6_EML_SPMU_BASE, + [GAUDI_SPMU_TPC7_EML] = mmTPC7_EML_SPMU_BASE +}; + +static int gaudi_coresight_timeout(struct hl_device *hdev, u64 addr, + int position, bool up) +{ + int rc; + u32 val; + + rc = hl_poll_timeout( + hdev, + addr, + val, + up ? val & BIT(position) : !(val & BIT(position)), + 1000, + CORESIGHT_TIMEOUT_USEC); + + if (rc) { + dev_err(hdev->dev, + "Timeout while waiting for coresight, addr: 0x%llx, position: %d, up: %d\n", + addr, position, up); + return -EFAULT; + } + + return 0; +} + +static int gaudi_config_stm(struct hl_device *hdev, + struct hl_debug_params *params) +{ + struct hl_debug_params_stm *input; + u64 base_reg; + u32 frequency; + int rc; + + if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) { + dev_err(hdev->dev, "Invalid register index in STM\n"); + return -EINVAL; + } + + base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE; + + WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK); + + if (params->enable) { + input = params->input; + + if (!input) + return -EINVAL; + + WREG32(base_reg + 0xE80, 0x80004); + WREG32(base_reg + 0xD64, 7); + WREG32(base_reg + 0xD60, 0); + WREG32(base_reg + 0xD00, lower_32_bits(input->he_mask)); + WREG32(base_reg + 0xD60, 1); + WREG32(base_reg + 0xD00, upper_32_bits(input->he_mask)); + WREG32(base_reg + 0xE70, 0x10); + WREG32(base_reg + 0xE60, 0); + WREG32(base_reg + 0xE00, lower_32_bits(input->sp_mask)); + WREG32(base_reg + 0xEF4, input->id); + WREG32(base_reg + 0xDF4, 0x80); + frequency = hdev->asic_prop.psoc_timestamp_frequency; + if (frequency == 0) + frequency = input->frequency; + WREG32(base_reg + 0xE8C, frequency); + WREG32(base_reg + 0xE90, 0x7FF); + + /* SW-2176 - SW WA for HW bug */ + if ((CFG_BASE + base_reg) >= mmDMA_CH_0_CS_STM_BASE && + (CFG_BASE + base_reg) <= mmDMA_CH_7_CS_STM_BASE) { + + WREG32(base_reg + 0xE68, 0xffff8005); + WREG32(base_reg + 0xE6C, 0x0); + } + + WREG32(base_reg + 0xE80, 0x27 | (input->id << 16)); + } else { + WREG32(base_reg + 0xE80, 4); + WREG32(base_reg + 0xD64, 0); + WREG32(base_reg + 0xD60, 1); + WREG32(base_reg + 0xD00, 0); + WREG32(base_reg + 0xD20, 0); + WREG32(base_reg + 0xD60, 0); + WREG32(base_reg + 0xE20, 0); + WREG32(base_reg + 0xE00, 0); + WREG32(base_reg + 0xDF4, 0x80); + WREG32(base_reg + 0xE70, 0); + WREG32(base_reg + 0xE60, 0); + WREG32(base_reg + 0xE64, 0); + WREG32(base_reg + 0xE8C, 0); + + rc = gaudi_coresight_timeout(hdev, 
base_reg + 0xE80, 23, false); + if (rc) { + dev_err(hdev->dev, + "Failed to disable STM on timeout, error %d\n", + rc); + return rc; + } + + WREG32(base_reg + 0xE80, 4); + } + + return 0; +} + +static int gaudi_config_etf(struct hl_device *hdev, + struct hl_debug_params *params) +{ + struct hl_debug_params_etf *input; + u64 base_reg; + u32 val; + int rc; + + if (params->reg_idx >= ARRAY_SIZE(debug_etf_regs)) { + dev_err(hdev->dev, "Invalid register index in ETF\n"); + return -EINVAL; + } + + base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE; + + WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK); + + val = RREG32(base_reg + 0x304); + val |= 0x1000; + WREG32(base_reg + 0x304, val); + val |= 0x40; + WREG32(base_reg + 0x304, val); + + rc = gaudi_coresight_timeout(hdev, base_reg + 0x304, 6, false); + if (rc) { + dev_err(hdev->dev, + "Failed to %s ETF on timeout, error %d\n", + params->enable ? "enable" : "disable", rc); + return rc; + } + + rc = gaudi_coresight_timeout(hdev, base_reg + 0xC, 2, true); + if (rc) { + dev_err(hdev->dev, + "Failed to %s ETF on timeout, error %d\n", + params->enable ? "enable" : "disable", rc); + return rc; + } + + WREG32(base_reg + 0x20, 0); + + if (params->enable) { + input = params->input; + + if (!input) + return -EINVAL; + + WREG32(base_reg + 0x34, 0x3FFC); + WREG32(base_reg + 0x28, input->sink_mode); + WREG32(base_reg + 0x304, 0x4001); + WREG32(base_reg + 0x308, 0xA); + WREG32(base_reg + 0x20, 1); + } else { + WREG32(base_reg + 0x34, 0); + WREG32(base_reg + 0x28, 0); + WREG32(base_reg + 0x304, 0); + } + + return 0; +} + +static bool gaudi_etr_validate_address(struct hl_device *hdev, u64 addr, + u64 size, bool *is_host) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct gaudi_device *gaudi = hdev->asic_specific; + + /* maximum address length is 50 bits */ + if (addr >> 50) { + dev_err(hdev->dev, + "ETR buffer address shouldn't exceed 50 bits\n"); + return false; + } + + if (addr > (addr + size)) { + dev_err(hdev->dev, + "ETR buffer size %llu overflow\n", size); + return false; + } + + /* PMMU and HPMMU addresses are equal, check only one of them */ + if ((gaudi->hw_cap_initialized & HW_CAP_MMU) && + hl_mem_area_inside_range(addr, size, + prop->pmmu.start_addr, + prop->pmmu.end_addr)) { + *is_host = true; + return true; + } + + if (hl_mem_area_inside_range(addr, size, + prop->dram_user_base_address, + prop->dram_end_address)) + return true; + + if (hl_mem_area_inside_range(addr, size, + prop->sram_user_base_address, + prop->sram_end_address)) + return true; + + if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) + dev_err(hdev->dev, "ETR buffer should be in SRAM/DRAM\n"); + + return false; +} + +static int gaudi_config_etr(struct hl_device *hdev, + struct hl_debug_params *params) +{ + struct hl_debug_params_etr *input; + u64 msb; + u32 val; + int rc; + + WREG32(mmPSOC_ETR_LAR, CORESIGHT_UNLOCK); + + val = RREG32(mmPSOC_ETR_FFCR); + val |= 0x1000; + WREG32(mmPSOC_ETR_FFCR, val); + val |= 0x40; + WREG32(mmPSOC_ETR_FFCR, val); + + rc = gaudi_coresight_timeout(hdev, mmPSOC_ETR_FFCR, 6, false); + if (rc) { + dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n", + params->enable ? "enable" : "disable", rc); + return rc; + } + + rc = gaudi_coresight_timeout(hdev, mmPSOC_ETR_STS, 2, true); + if (rc) { + dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n", + params->enable ? 
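+			/*
+			 * Editor's annotation (assumption, not in the
+			 * original source): the two PSOC_ETR_FFCR writes
+			 * above set what the ARM TMC documentation calls
+			 * StopOnFl (0x1000) and FlushMan (0x40); the two
+			 * polls then wait for the manual flush to finish
+			 * (FFCR bit 6 clears) and for the unit to report
+			 * ready (STS bit 2 sets) before the ETR is
+			 * reprogrammed.
+			 */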
"enable" : "disable", rc); + return rc; + } + + WREG32(mmPSOC_ETR_CTL, 0); + + if (params->enable) { + bool is_host = false; + + input = params->input; + + if (!input) + return -EINVAL; + + if (input->buffer_size == 0) { + dev_err(hdev->dev, + "ETR buffer size should be bigger than 0\n"); + return -EINVAL; + } + + if (!gaudi_etr_validate_address(hdev, + input->buffer_address, input->buffer_size, + &is_host)) { + dev_err(hdev->dev, "ETR buffer address is invalid\n"); + return -EINVAL; + } + + gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, + hdev->compute_ctx->asid); + gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, + hdev->compute_ctx->asid); + + msb = upper_32_bits(input->buffer_address) >> 8; + msb &= PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK; + WREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR, msb); + + WREG32(mmPSOC_ETR_BUFWM, 0x3FFC); + WREG32(mmPSOC_ETR_RSZ, input->buffer_size); + WREG32(mmPSOC_ETR_MODE, input->sink_mode); + /* Workaround for H3 #HW-2075 bug: use small data chunks */ + WREG32(mmPSOC_ETR_AXICTL, (is_host ? 0 : 0x700) | + PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT); + WREG32(mmPSOC_ETR_DBALO, + lower_32_bits(input->buffer_address)); + WREG32(mmPSOC_ETR_DBAHI, + upper_32_bits(input->buffer_address)); + WREG32(mmPSOC_ETR_FFCR, 3); + WREG32(mmPSOC_ETR_PSCR, 0xA); + WREG32(mmPSOC_ETR_CTL, 1); + } else { + WREG32(mmPSOC_ETR_BUFWM, 0); + WREG32(mmPSOC_ETR_RSZ, 0x400); + WREG32(mmPSOC_ETR_DBALO, 0); + WREG32(mmPSOC_ETR_DBAHI, 0); + WREG32(mmPSOC_ETR_PSCR, 0); + WREG32(mmPSOC_ETR_MODE, 0); + WREG32(mmPSOC_ETR_FFCR, 0); + + if (params->output_size >= sizeof(u64)) { + u32 rwp, rwphi; + + /* + * The trace buffer address is 50 bits wide. The end of + * the buffer is set in the RWP register (lower 32 + * bits), and in the RWPHI register (upper 8 bits). + * The 10 msb of the 50-bit address are stored in a + * global configuration register. + */ + rwp = RREG32(mmPSOC_ETR_RWP); + rwphi = RREG32(mmPSOC_ETR_RWPHI) & 0xff; + msb = RREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR) & + PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK; + *(u64 *) params->output = ((u64) msb << 40) | + ((u64) rwphi << 32) | rwp; + } + } + + return 0; +} + +static int gaudi_config_funnel(struct hl_device *hdev, + struct hl_debug_params *params) +{ + u64 base_reg; + + if (params->reg_idx >= ARRAY_SIZE(debug_funnel_regs)) { + dev_err(hdev->dev, "Invalid register index in FUNNEL\n"); + return -EINVAL; + } + + base_reg = debug_funnel_regs[params->reg_idx] - CFG_BASE; + + WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK); + + WREG32(base_reg, params->enable ? 
0x33F : 0); + + return 0; +} + +static int gaudi_config_bmon(struct hl_device *hdev, + struct hl_debug_params *params) +{ + struct hl_debug_params_bmon *input; + u64 base_reg; + + if (params->reg_idx >= ARRAY_SIZE(debug_bmon_regs)) { + dev_err(hdev->dev, "Invalid register index in BMON\n"); + return -EINVAL; + } + + base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE; + + WREG32(base_reg + 0x104, 1); + + if (params->enable) { + input = params->input; + + if (!input) + return -EINVAL; + + WREG32(base_reg + 0x200, lower_32_bits(input->start_addr0)); + WREG32(base_reg + 0x204, upper_32_bits(input->start_addr0)); + WREG32(base_reg + 0x208, lower_32_bits(input->addr_mask0)); + WREG32(base_reg + 0x20C, upper_32_bits(input->addr_mask0)); + WREG32(base_reg + 0x240, lower_32_bits(input->start_addr1)); + WREG32(base_reg + 0x244, upper_32_bits(input->start_addr1)); + WREG32(base_reg + 0x248, lower_32_bits(input->addr_mask1)); + WREG32(base_reg + 0x24C, upper_32_bits(input->addr_mask1)); + WREG32(base_reg + 0x224, 0); + WREG32(base_reg + 0x234, 0); + WREG32(base_reg + 0x30C, input->bw_win); + WREG32(base_reg + 0x308, input->win_capture); + WREG32(base_reg + 0x700, 0xA000B00 | (input->id << 12)); + WREG32(base_reg + 0x708, 0xA000A00 | (input->id << 12)); + WREG32(base_reg + 0x70C, 0xA000C00 | (input->id << 12)); + WREG32(base_reg + 0x100, 0x11); + WREG32(base_reg + 0x304, 0x1); + } else { + WREG32(base_reg + 0x200, 0); + WREG32(base_reg + 0x204, 0); + WREG32(base_reg + 0x208, 0xFFFFFFFF); + WREG32(base_reg + 0x20C, 0xFFFFFFFF); + WREG32(base_reg + 0x240, 0); + WREG32(base_reg + 0x244, 0); + WREG32(base_reg + 0x248, 0xFFFFFFFF); + WREG32(base_reg + 0x24C, 0xFFFFFFFF); + WREG32(base_reg + 0x224, 0xFFFFFFFF); + WREG32(base_reg + 0x234, 0x1070F); + WREG32(base_reg + 0x30C, 0); + WREG32(base_reg + 0x308, 0xFFFF); + WREG32(base_reg + 0x700, 0xA000B00); + WREG32(base_reg + 0x708, 0xA000A00); + WREG32(base_reg + 0x70C, 0xA000C00); + WREG32(base_reg + 0x100, 1); + WREG32(base_reg + 0x304, 0); + WREG32(base_reg + 0x104, 0); + } + + return 0; +} + +static int gaudi_config_spmu(struct hl_device *hdev, + struct hl_debug_params *params) +{ + u64 base_reg; + struct hl_debug_params_spmu *input = params->input; + u64 *output; + u32 output_arr_len; + u32 events_num; + u32 overflow_idx; + u32 cycle_cnt_idx; + int i; + + if (params->reg_idx >= ARRAY_SIZE(debug_spmu_regs)) { + dev_err(hdev->dev, "Invalid register index in SPMU\n"); + return -EINVAL; + } + + base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE; + + if (params->enable) { + input = params->input; + + if (!input) + return -EINVAL; + + if (input->event_types_num < 3) { + dev_err(hdev->dev, + "not enough event types values for SPMU enable\n"); + return -EINVAL; + } + + if (input->event_types_num > SPMU_MAX_COUNTERS) { + dev_err(hdev->dev, + "too many event types values for SPMU enable\n"); + return -EINVAL; + } + + WREG32(base_reg + 0xE04, 0x41013046); + WREG32(base_reg + 0xE04, 0x41013040); + + for (i = 0 ; i < input->event_types_num ; i++) + WREG32(base_reg + SPMU_EVENT_TYPES_OFFSET + i * 4, + input->event_types[i]); + + WREG32(base_reg + 0xE04, 0x41013041); + WREG32(base_reg + 0xC00, 0x8000003F); + } else { + output = params->output; + output_arr_len = params->output_size / 8; + events_num = output_arr_len - 2; + overflow_idx = output_arr_len - 2; + cycle_cnt_idx = output_arr_len - 1; + + if (!output) + return -EINVAL; + + if (output_arr_len < 3) { + dev_err(hdev->dev, + "not enough values for SPMU disable\n"); + return -EINVAL; + } + + if (events_num > 
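+		/*
+		 * Editor's annotation (derived from the reads below, not an
+		 * original comment): the caller's output array holds one u64
+		 * per event counter, then the overflow status word read from
+		 * 0xCC0, then the 64-bit cycle count assembled from the
+		 * reads at 0xFC (high) and 0xF8 (low).
+		 */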
SPMU_MAX_COUNTERS) { + dev_err(hdev->dev, + "too many events values for SPMU disable\n"); + return -EINVAL; + } + + WREG32(base_reg + 0xE04, 0x41013040); + + for (i = 0 ; i < events_num ; i++) + output[i] = RREG32(base_reg + i * 8); + + output[overflow_idx] = RREG32(base_reg + 0xCC0); + + output[cycle_cnt_idx] = RREG32(base_reg + 0xFC); + output[cycle_cnt_idx] <<= 32; + output[cycle_cnt_idx] |= RREG32(base_reg + 0xF8); + + WREG32(base_reg + 0xCC0, 0); + } + + return 0; +} + +int gaudi_debug_coresight(struct hl_device *hdev, void *data) +{ + struct hl_debug_params *params = data; + int rc = 0; + + switch (params->op) { + case HL_DEBUG_OP_STM: + rc = gaudi_config_stm(hdev, params); + break; + case HL_DEBUG_OP_ETF: + rc = gaudi_config_etf(hdev, params); + break; + case HL_DEBUG_OP_ETR: + rc = gaudi_config_etr(hdev, params); + break; + case HL_DEBUG_OP_FUNNEL: + rc = gaudi_config_funnel(hdev, params); + break; + case HL_DEBUG_OP_BMON: + rc = gaudi_config_bmon(hdev, params); + break; + case HL_DEBUG_OP_SPMU: + rc = gaudi_config_spmu(hdev, params); + break; + case HL_DEBUG_OP_TIMESTAMP: + /* Do nothing as this opcode is deprecated */ + break; + + default: + dev_err(hdev->dev, "Unknown coresight id %d\n", params->op); + return -EINVAL; + } + + /* Perform read from the device to flush all configuration */ + RREG32(mmHW_STATE); + + return rc; +} + +void gaudi_halt_coresight(struct hl_device *hdev) +{ + struct hl_debug_params params = {}; + int i, rc; + + for (i = GAUDI_ETF_FIRST ; i <= GAUDI_ETF_LAST ; i++) { + params.reg_idx = i; + rc = gaudi_config_etf(hdev, &params); + if (rc) + dev_err(hdev->dev, "halt ETF failed, %d/%d\n", rc, i); + } + + rc = gaudi_config_etr(hdev, &params); + if (rc) + dev_err(hdev->dev, "halt ETR failed, %d\n", rc); +} diff --git a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c new file mode 100644 index 000000000..1076b4932 --- /dev/null +++ b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */ + +#include "gaudiP.h" +#include "../include/gaudi/gaudi_fw_if.h" + +void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (freq == PLL_LAST) + hl_set_frequency(hdev, MME_PLL, gaudi->max_freq_value); +} + +int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk) +{ + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, MME_PLL, false); + + if (value < 0) { + dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n", + value); + return value; + } + + *max_clk = (value / 1000 / 1000); + + value = hl_get_frequency(hdev, MME_PLL, true); + + if (value < 0) { + dev_err(hdev->dev, + "Failed to retrieve device current clock %ld\n", + value); + return value; + } + + *cur_clk = (value / 1000 / 1000); + + return 0; +} + +static ssize_t clk_max_freq_mhz_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + struct gaudi_device *gaudi = hdev->asic_specific; + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, MME_PLL, false); + + gaudi->max_freq_value = value; + + return sprintf(buf, "%lu\n", (value / 1000 / 1000)); +} + +static ssize_t clk_max_freq_mhz_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + struct gaudi_device *gaudi = hdev->asic_specific; + int rc; + u64 value; + + if (hl_device_disabled_or_in_reset(hdev)) { + count = -ENODEV; + goto fail; + } + + rc = kstrtoull(buf, 0, &value); + if (rc) { + count = -EINVAL; + goto fail; + } + + gaudi->max_freq_value = value * 1000 * 1000; + + hl_set_frequency(hdev, MME_PLL, gaudi->max_freq_value); + +fail: + return count; +} + +static ssize_t clk_cur_freq_mhz_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, MME_PLL, true); + + return sprintf(buf, "%lu\n", (value / 1000 / 1000)); +} + +static DEVICE_ATTR_RW(clk_max_freq_mhz); +static DEVICE_ATTR_RO(clk_cur_freq_mhz); + +static struct attribute *gaudi_dev_attrs[] = { + &dev_attr_clk_max_freq_mhz.attr, + &dev_attr_clk_cur_freq_mhz.attr, + NULL, +}; + +void gaudi_add_device_attr(struct hl_device *hdev, + struct attribute_group *dev_attr_grp) +{ + dev_attr_grp->attrs = gaudi_dev_attrs; +} diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c new file mode 100644 index 000000000..9343a81d3 --- /dev/null +++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c @@ -0,0 +1,9091 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */ + +#include "gaudiP.h" +#include "../include/gaudi/asic_reg/gaudi_regs.h" + +#define GAUDI_NUMBER_OF_LBW_RR_REGS 28 +#define GAUDI_NUMBER_OF_HBW_RR_REGS 24 +#define GAUDI_NUMBER_OF_LBW_RANGES 10 + +static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = { + mmDMA_IF_W_S_SOB_HIT_WPROT, + mmDMA_IF_W_S_DMA0_HIT_WPROT, + mmDMA_IF_W_S_DMA1_HIT_WPROT, + mmDMA_IF_E_S_SOB_HIT_WPROT, + mmDMA_IF_E_S_DMA0_HIT_WPROT, + mmDMA_IF_E_S_DMA1_HIT_WPROT, + mmDMA_IF_W_N_SOB_HIT_WPROT, + mmDMA_IF_W_N_DMA0_HIT_WPROT, + mmDMA_IF_W_N_DMA1_HIT_WPROT, + mmDMA_IF_E_N_SOB_HIT_WPROT, + mmDMA_IF_E_N_DMA0_HIT_WPROT, + mmDMA_IF_E_N_DMA1_HIT_WPROT, + mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AW, + mmSIF_RTR_1_LBW_RANGE_PROT_HIT_AW, + mmSIF_RTR_2_LBW_RANGE_PROT_HIT_AW, + mmSIF_RTR_3_LBW_RANGE_PROT_HIT_AW, + mmSIF_RTR_4_LBW_RANGE_PROT_HIT_AW, + mmSIF_RTR_5_LBW_RANGE_PROT_HIT_AW, + mmSIF_RTR_6_LBW_RANGE_PROT_HIT_AW, + mmSIF_RTR_7_LBW_RANGE_PROT_HIT_AW, + mmNIF_RTR_0_LBW_RANGE_PROT_HIT_AW, + mmNIF_RTR_1_LBW_RANGE_PROT_HIT_AW, + mmNIF_RTR_2_LBW_RANGE_PROT_HIT_AW, + mmNIF_RTR_3_LBW_RANGE_PROT_HIT_AW, + mmNIF_RTR_4_LBW_RANGE_PROT_HIT_AW, + mmNIF_RTR_5_LBW_RANGE_PROT_HIT_AW, + mmNIF_RTR_6_LBW_RANGE_PROT_HIT_AW, + mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AW, +}; + +static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = { + mmDMA_IF_W_S_SOB_HIT_RPROT, + mmDMA_IF_W_S_DMA0_HIT_RPROT, + mmDMA_IF_W_S_DMA1_HIT_RPROT, + mmDMA_IF_E_S_SOB_HIT_RPROT, + mmDMA_IF_E_S_DMA0_HIT_RPROT, + mmDMA_IF_E_S_DMA1_HIT_RPROT, + mmDMA_IF_W_N_SOB_HIT_RPROT, + mmDMA_IF_W_N_DMA0_HIT_RPROT, + mmDMA_IF_W_N_DMA1_HIT_RPROT, + mmDMA_IF_E_N_SOB_HIT_RPROT, + mmDMA_IF_E_N_DMA0_HIT_RPROT, + mmDMA_IF_E_N_DMA1_HIT_RPROT, + mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AR, + mmSIF_RTR_1_LBW_RANGE_PROT_HIT_AR, + mmSIF_RTR_2_LBW_RANGE_PROT_HIT_AR, + mmSIF_RTR_3_LBW_RANGE_PROT_HIT_AR, + mmSIF_RTR_4_LBW_RANGE_PROT_HIT_AR, + mmSIF_RTR_5_LBW_RANGE_PROT_HIT_AR, + mmSIF_RTR_6_LBW_RANGE_PROT_HIT_AR, + mmSIF_RTR_7_LBW_RANGE_PROT_HIT_AR, + mmNIF_RTR_0_LBW_RANGE_PROT_HIT_AR, + mmNIF_RTR_1_LBW_RANGE_PROT_HIT_AR, + mmNIF_RTR_2_LBW_RANGE_PROT_HIT_AR, + mmNIF_RTR_3_LBW_RANGE_PROT_HIT_AR, + mmNIF_RTR_4_LBW_RANGE_PROT_HIT_AR, + mmNIF_RTR_5_LBW_RANGE_PROT_HIT_AR, + mmNIF_RTR_6_LBW_RANGE_PROT_HIT_AR, + mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AR, +}; + +static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = { + mmDMA_IF_W_S_SOB_MIN_WPROT_0, + mmDMA_IF_W_S_DMA0_MIN_WPROT_0, + mmDMA_IF_W_S_DMA1_MIN_WPROT_0, + mmDMA_IF_E_S_SOB_MIN_WPROT_0, + mmDMA_IF_E_S_DMA0_MIN_WPROT_0, + mmDMA_IF_E_S_DMA1_MIN_WPROT_0, + mmDMA_IF_W_N_SOB_MIN_WPROT_0, + mmDMA_IF_W_N_DMA0_MIN_WPROT_0, + mmDMA_IF_W_N_DMA1_MIN_WPROT_0, + mmDMA_IF_E_N_SOB_MIN_WPROT_0, + mmDMA_IF_E_N_DMA0_MIN_WPROT_0, + mmDMA_IF_E_N_DMA1_MIN_WPROT_0, + mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0, + mmSIF_RTR_1_LBW_RANGE_PROT_MIN_AW_0, + mmSIF_RTR_2_LBW_RANGE_PROT_MIN_AW_0, + mmSIF_RTR_3_LBW_RANGE_PROT_MIN_AW_0, + mmSIF_RTR_4_LBW_RANGE_PROT_MIN_AW_0, + mmSIF_RTR_5_LBW_RANGE_PROT_MIN_AW_0, + mmSIF_RTR_6_LBW_RANGE_PROT_MIN_AW_0, + mmSIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0, + mmNIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0, + mmNIF_RTR_1_LBW_RANGE_PROT_MIN_AW_0, + mmNIF_RTR_2_LBW_RANGE_PROT_MIN_AW_0, + mmNIF_RTR_3_LBW_RANGE_PROT_MIN_AW_0, + mmNIF_RTR_4_LBW_RANGE_PROT_MIN_AW_0, + mmNIF_RTR_5_LBW_RANGE_PROT_MIN_AW_0, + mmNIF_RTR_6_LBW_RANGE_PROT_MIN_AW_0, + mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0, +}; + +static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = { + mmDMA_IF_W_S_SOB_MAX_WPROT_0, + mmDMA_IF_W_S_DMA0_MAX_WPROT_0, + mmDMA_IF_W_S_DMA1_MAX_WPROT_0, + 
mmDMA_IF_E_S_SOB_MAX_WPROT_0, + mmDMA_IF_E_S_DMA0_MAX_WPROT_0, + mmDMA_IF_E_S_DMA1_MAX_WPROT_0, + mmDMA_IF_W_N_SOB_MAX_WPROT_0, + mmDMA_IF_W_N_DMA0_MAX_WPROT_0, + mmDMA_IF_W_N_DMA1_MAX_WPROT_0, + mmDMA_IF_E_N_SOB_MAX_WPROT_0, + mmDMA_IF_E_N_DMA0_MAX_WPROT_0, + mmDMA_IF_E_N_DMA1_MAX_WPROT_0, + mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0, + mmSIF_RTR_1_LBW_RANGE_PROT_MAX_AW_0, + mmSIF_RTR_2_LBW_RANGE_PROT_MAX_AW_0, + mmSIF_RTR_3_LBW_RANGE_PROT_MAX_AW_0, + mmSIF_RTR_4_LBW_RANGE_PROT_MAX_AW_0, + mmSIF_RTR_5_LBW_RANGE_PROT_MAX_AW_0, + mmSIF_RTR_6_LBW_RANGE_PROT_MAX_AW_0, + mmSIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0, + mmNIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0, + mmNIF_RTR_1_LBW_RANGE_PROT_MAX_AW_0, + mmNIF_RTR_2_LBW_RANGE_PROT_MAX_AW_0, + mmNIF_RTR_3_LBW_RANGE_PROT_MAX_AW_0, + mmNIF_RTR_4_LBW_RANGE_PROT_MAX_AW_0, + mmNIF_RTR_5_LBW_RANGE_PROT_MAX_AW_0, + mmNIF_RTR_6_LBW_RANGE_PROT_MAX_AW_0, + mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0, +}; + +static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = { + mmDMA_IF_W_S_SOB_MIN_RPROT_0, + mmDMA_IF_W_S_DMA0_MIN_RPROT_0, + mmDMA_IF_W_S_DMA1_MIN_RPROT_0, + mmDMA_IF_E_S_SOB_MIN_RPROT_0, + mmDMA_IF_E_S_DMA0_MIN_RPROT_0, + mmDMA_IF_E_S_DMA1_MIN_RPROT_0, + mmDMA_IF_W_N_SOB_MIN_RPROT_0, + mmDMA_IF_W_N_DMA0_MIN_RPROT_0, + mmDMA_IF_W_N_DMA1_MIN_RPROT_0, + mmDMA_IF_E_N_SOB_MIN_RPROT_0, + mmDMA_IF_E_N_DMA0_MIN_RPROT_0, + mmDMA_IF_E_N_DMA1_MIN_RPROT_0, + mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0, + mmSIF_RTR_1_LBW_RANGE_PROT_MIN_AR_0, + mmSIF_RTR_2_LBW_RANGE_PROT_MIN_AR_0, + mmSIF_RTR_3_LBW_RANGE_PROT_MIN_AR_0, + mmSIF_RTR_4_LBW_RANGE_PROT_MIN_AR_0, + mmSIF_RTR_5_LBW_RANGE_PROT_MIN_AR_0, + mmSIF_RTR_6_LBW_RANGE_PROT_MIN_AR_0, + mmSIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0, + mmNIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0, + mmNIF_RTR_1_LBW_RANGE_PROT_MIN_AR_0, + mmNIF_RTR_2_LBW_RANGE_PROT_MIN_AR_0, + mmNIF_RTR_3_LBW_RANGE_PROT_MIN_AR_0, + mmNIF_RTR_4_LBW_RANGE_PROT_MIN_AR_0, + mmNIF_RTR_5_LBW_RANGE_PROT_MIN_AR_0, + mmNIF_RTR_6_LBW_RANGE_PROT_MIN_AR_0, + mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0, +}; + +static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = { + mmDMA_IF_W_S_SOB_MAX_RPROT_0, + mmDMA_IF_W_S_DMA0_MAX_RPROT_0, + mmDMA_IF_W_S_DMA1_MAX_RPROT_0, + mmDMA_IF_E_S_SOB_MAX_RPROT_0, + mmDMA_IF_E_S_DMA0_MAX_RPROT_0, + mmDMA_IF_E_S_DMA1_MAX_RPROT_0, + mmDMA_IF_W_N_SOB_MAX_RPROT_0, + mmDMA_IF_W_N_DMA0_MAX_RPROT_0, + mmDMA_IF_W_N_DMA1_MAX_RPROT_0, + mmDMA_IF_E_N_SOB_MAX_RPROT_0, + mmDMA_IF_E_N_DMA0_MAX_RPROT_0, + mmDMA_IF_E_N_DMA1_MAX_RPROT_0, + mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0, + mmSIF_RTR_1_LBW_RANGE_PROT_MAX_AR_0, + mmSIF_RTR_2_LBW_RANGE_PROT_MAX_AR_0, + mmSIF_RTR_3_LBW_RANGE_PROT_MAX_AR_0, + mmSIF_RTR_4_LBW_RANGE_PROT_MAX_AR_0, + mmSIF_RTR_5_LBW_RANGE_PROT_MAX_AR_0, + mmSIF_RTR_6_LBW_RANGE_PROT_MAX_AR_0, + mmSIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0, + mmNIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0, + mmNIF_RTR_1_LBW_RANGE_PROT_MAX_AR_0, + mmNIF_RTR_2_LBW_RANGE_PROT_MAX_AR_0, + mmNIF_RTR_3_LBW_RANGE_PROT_MAX_AR_0, + mmNIF_RTR_4_LBW_RANGE_PROT_MAX_AR_0, + mmNIF_RTR_5_LBW_RANGE_PROT_MAX_AR_0, + mmNIF_RTR_6_LBW_RANGE_PROT_MAX_AR_0, + mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0, +}; + +static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { + mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AW, + mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AW, + mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AW, + mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_HIT_AW, + mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_HIT_AW, + mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AW, + mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_HIT_AW, + mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_HIT_AW, + mmSIF_RTR_CTRL_0_RANGE_SEC_HIT_AW, + 
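+	/*
+	 * Editor's annotation: each HBW range-register table lists the eight
+	 * DMA_IF down-channel entries first, then the eight SIF and eight
+	 * NIF router controllers, for GAUDI_NUMBER_OF_HBW_RR_REGS (24)
+	 * entries in total; the LBW tables follow the same pattern with 12
+	 * DMA_IF entries, for 28.
+	 */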
mmSIF_RTR_CTRL_1_RANGE_SEC_HIT_AW, + mmSIF_RTR_CTRL_2_RANGE_SEC_HIT_AW, + mmSIF_RTR_CTRL_3_RANGE_SEC_HIT_AW, + mmSIF_RTR_CTRL_4_RANGE_SEC_HIT_AW, + mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AW, + mmSIF_RTR_CTRL_6_RANGE_SEC_HIT_AW, + mmSIF_RTR_CTRL_7_RANGE_SEC_HIT_AW, + mmNIF_RTR_CTRL_0_RANGE_SEC_HIT_AW, + mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AW, + mmNIF_RTR_CTRL_2_RANGE_SEC_HIT_AW, + mmNIF_RTR_CTRL_3_RANGE_SEC_HIT_AW, + mmNIF_RTR_CTRL_4_RANGE_SEC_HIT_AW, + mmNIF_RTR_CTRL_5_RANGE_SEC_HIT_AW, + mmNIF_RTR_CTRL_6_RANGE_SEC_HIT_AW, + mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AW +}; + +static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { + mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AR, + mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AR, + mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AR, + mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_HIT_AR, + mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_HIT_AR, + mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AR, + mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_HIT_AR, + mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_HIT_AR, + mmSIF_RTR_CTRL_0_RANGE_SEC_HIT_AR, + mmSIF_RTR_CTRL_1_RANGE_SEC_HIT_AR, + mmSIF_RTR_CTRL_2_RANGE_SEC_HIT_AR, + mmSIF_RTR_CTRL_3_RANGE_SEC_HIT_AR, + mmSIF_RTR_CTRL_4_RANGE_SEC_HIT_AR, + mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AR, + mmSIF_RTR_CTRL_6_RANGE_SEC_HIT_AR, + mmSIF_RTR_CTRL_7_RANGE_SEC_HIT_AR, + mmNIF_RTR_CTRL_0_RANGE_SEC_HIT_AR, + mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AR, + mmNIF_RTR_CTRL_2_RANGE_SEC_HIT_AR, + mmNIF_RTR_CTRL_3_RANGE_SEC_HIT_AR, + mmNIF_RTR_CTRL_4_RANGE_SEC_HIT_AR, + mmNIF_RTR_CTRL_5_RANGE_SEC_HIT_AR, + mmNIF_RTR_CTRL_6_RANGE_SEC_HIT_AR, + mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AR +}; + +static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { + mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0, + mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0, + mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0, + mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0, + mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0, + mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0, + mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0, + mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0, + mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0, + mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_0, + mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0, + mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_0, + mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0, + mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0, + mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_0, + mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0, + mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0, + mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_0, + mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0, + mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_0, + mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0, + mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0, + mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_0, + mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0 +}; + +static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { + mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0, + mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0, + mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0, + mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0, + mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0, + mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0, + mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0, + mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0, + mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0, + mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_0, + mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0, + mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_0, + mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_0, + mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0, + mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_0, + 
mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0, + mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0, + mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_0, + mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0, + mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_0, + mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_0, + mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0, + mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_0, + mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0 +}; + +static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { + mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0, + mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0, + mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0, + mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0, + mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0, + mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0, + mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0, + mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0, + mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0, + mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_0, + mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0, + mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_0, + mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_0, + mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0, + mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_0, + mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0, + mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0, + mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_0, + mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0, + mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_0, + mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_0, + mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0, + mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_0, + mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0 +}; + +static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { + mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0, + mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0, + mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0, + mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0, + mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0, + mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0, + mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0, + mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0, + mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0, + mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_0, + mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0, + mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_0, + mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_0, + mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0, + mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_0, + mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0, + mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0, + mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_0, + mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0, + mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_0, + mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_0, + mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0, + mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_0, + mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0 +}; + +static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { + mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0, + mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0, + mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0, + mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0, + mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0, + mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0, + mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0, + mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0, + mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_0, + mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_0, + mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_0, + mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_0, + mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_0, + mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0, + 
mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_0, + mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0, + mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_0, + mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_0, + mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_0, + mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_0, + mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_0, + mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0, + mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_0, + mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0 +}; + +static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { + mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0, + mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0, + mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0, + mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0, + mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0, + mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0, + mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0, + mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0, + mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_0, + mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_0, + mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_0, + mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_0, + mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_0, + mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0, + mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_0, + mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0, + mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_0, + mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_0, + mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_0, + mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_0, + mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_0, + mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0, + mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_0, + mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0 +}; + +static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { + mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0, + mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0, + mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0, + mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0, + mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0, + mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0, + mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0, + mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0, + mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_0, + mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_0, + mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_0, + mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_0, + mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_0, + mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0, + mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_0, + mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0, + mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_0, + mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_0, + mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_0, + mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_0, + mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_0, + mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0, + mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_0, + mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0 +}; + +static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { + mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0, + mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0, + mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0, + mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0, + mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0, + mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0, + mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0, + mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0, + mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_0, + mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_0, + mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_0, + mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_0, + mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_0, + 
mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0, + mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_0, + mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_0, + mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_0, + mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_0, + mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_0, + mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_0, + mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_0, + mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0, + mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_0, + mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_0 +}; + +/** + * gaudi_set_block_as_protected - set the given block as protected + * + * @hdev: pointer to hl_device structure + * @base: block base address + */ +static void gaudi_pb_set_block(struct hl_device *hdev, u64 base) +{ + u32 pb_addr = base - CFG_BASE + PROT_BITS_OFFS; + + while (pb_addr & 0xFFF) { + WREG32(pb_addr, 0); + pb_addr += 4; + } +} + +static void gaudi_init_mme_protection_bits(struct hl_device *hdev) +{ + u32 pb_addr, mask; + u8 word_offset; + + gaudi_pb_set_block(hdev, mmMME0_ACC_BASE); + gaudi_pb_set_block(hdev, mmMME0_SBAB_BASE); + gaudi_pb_set_block(hdev, mmMME0_PRTN_BASE); + gaudi_pb_set_block(hdev, mmMME1_ACC_BASE); + gaudi_pb_set_block(hdev, mmMME1_SBAB_BASE); + gaudi_pb_set_block(hdev, mmMME1_PRTN_BASE); + gaudi_pb_set_block(hdev, mmMME2_ACC_BASE); + gaudi_pb_set_block(hdev, mmMME2_SBAB_BASE); + gaudi_pb_set_block(hdev, mmMME2_PRTN_BASE); + gaudi_pb_set_block(hdev, mmMME3_ACC_BASE); + gaudi_pb_set_block(hdev, mmMME3_SBAB_BASE); + gaudi_pb_set_block(hdev, mmMME3_PRTN_BASE); + + WREG32(mmMME0_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmMME1_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmMME2_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmMME3_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + WREG32(mmMME0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmMME2_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmMME0_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME0_CTRL_RESET & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_QM_STALL & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_INTR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_INTR_MASK & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_LOG_SHADOW & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_PCU_RL_DESC0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_PCU_RL_TH & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_PCU_RL_MIN & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 
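+	/*
+	 * Editor's annotation (inferred from the code, not an original
+	 * comment): each 4KB config block keeps one protection bit per
+	 * 32-bit register in its last 0x80 bytes (PROT_BITS_OFFS).
+	 * word_offset selects the protection word, ((reg & PROT_BITS_OFFS)
+	 * >> 7) << 2, and each register maps to bit ((reg & 0x7F) >> 2)
+	 * within it. gaudi_pb_set_block() zeroes the whole area (0 =
+	 * protected), so the WREG32(..., ~mask) calls re-open every register
+	 * in a word except the privileged ones collected in mask.
+	 */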
2); + mask |= 1U << ((mmMME0_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2); + mask |= 1U << ((mmMME0_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmMME0_CTRL_SHADOW_0_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME0_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME0_QM_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << 
((mmMME0_QM_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME0_QM_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME0_QM_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + 
word_offset, ~mask); + + pb_addr = (mmMME0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME0_QM_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_4 & 
0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME0_QM_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME0_QM_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << 
((mmMME0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME0_QM_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 
7) << 2; + mask = 1U << ((mmMME0_QM_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmMME0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << 
((mmMME0_QM_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmMME0_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmMME0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME1_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME1_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME1_CTRL_RESET & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_QM_STALL & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_INTR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_INTR_MASK & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_LOG_SHADOW & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_PCU_RL_DESC0 & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_PCU_RL_TH & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_PCU_RL_MIN & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2); + mask |= 1U << ((mmMME1_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME1_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME1_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmMME1_CTRL_SHADOW_0_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + /* MME 1 is slave, hence its whole QM block is protected (with RR) */ + + pb_addr = (mmMME2_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME2_CTRL_RESET & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_QM_STALL & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_INTR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_INTR_MASK & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_LOG_SHADOW & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_PCU_RL_DESC0 & 0x7F) >> 
2); + mask |= 1U << ((mmMME2_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_PCU_RL_TH & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_PCU_RL_MIN & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2); + mask |= 1U << ((mmMME2_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmMME2_CTRL_SHADOW_0_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME2_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); 
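+
+	/*
+	 * Editor's note on the idiom repeated throughout this function,
+	 * derived from the expressions above: every secured block keeps an
+	 * array of 32-bit protection words at PROT_BITS_OFFS from its
+	 * 4KB-aligned base, one bit per 4-byte register, so a single word
+	 * covers 128 bytes (32 registers).  For a register address reg:
+	 *
+	 *	pb_addr     = (reg & ~0xFFF) + PROT_BITS_OFFS;     4KB page
+	 *	word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2;  bits [11:7]
+	 *	bit         = (reg & 0x7F) >> 2;                   bits [6:2]
+	 *
+	 * The registers OR-ed into 'mask' are exactly the ones whose
+	 * protection bit the WREG32(..., ~mask) store clears; every other
+	 * bit in that word is set.
+	 */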
+ + pb_addr = (mmMME2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME2_QM_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME2_QM_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME2_QM_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U 
<< ((mmMME2_QM_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME2_QM_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) 
>> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME2_QM_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << 
((mmMME2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME2_QM_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME2_QM_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << 
((mmMME2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME2_QM_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmMME2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << 
((mmMME2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmMME2_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmMME2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME3_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME3_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmMME3_CTRL_RESET & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_QM_STALL & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_INTR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_INTR_MASK & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_LOG_SHADOW & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_PCU_RL_DESC0 & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_PCU_RL_TH & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_PCU_RL_MIN & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2); + mask |= 1U << ((mmMME3_CTRL_PCU_RL_SAT_SEC & 
0x7F) >> 2);
+	mask |= 1U << ((mmMME3_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
+	mask |= 1U << ((mmMME3_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmMME3_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmMME3_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmMME3_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	/* MME 3 is slave, hence its whole QM block is protected (with RR) */
+}
+
+static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+{
+	u32 pb_addr, mask;
+	u8 word_offset;
+
+	gaudi_pb_set_block(hdev, mmDMA_IF_E_S_BASE);
+	gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH0_BASE);
+	gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH1_BASE);
+	gaudi_pb_set_block(hdev, mmDMA_E_PLL_BASE);
+	gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_BASE);
+
+	gaudi_pb_set_block(hdev, mmDMA_IF_W_N_BASE);
+	gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH0_BASE);
+	gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH1_BASE);
+	gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_BASE);
+
+	gaudi_pb_set_block(hdev, mmDMA_IF_E_N_BASE);
+	gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH0_BASE);
+	gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH1_BASE);
+	gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_BASE);
+
+	WREG32(mmDMA0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmDMA1_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmDMA2_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmDMA3_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmDMA4_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmDMA5_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmDMA6_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmDMA7_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+	WREG32(mmDMA0_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmDMA1_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmDMA2_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmDMA3_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmDMA4_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmDMA5_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmDMA6_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmDMA7_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+	pb_addr = (mmDMA0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA0_QM_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_QM_GLBL_STS1_2 &
0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA0_QM_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA0_QM_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + 
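+
+	/*
+	 * Editor's note, a hedged reading rather than an upstream comment:
+	 * assuming PROT_BITS_OFFS is 0xF80, word offset 0x7C is the last
+	 * protection word of a block and covers addresses 0xF80-0xFFF, i.e.
+	 * the protection words themselves, so the WREG32(..._BASE - CFG_BASE
+	 * + PROT_BITS_OFFS + 0x7C, 0) stores above zero the word guarding
+	 * the protection area of each DMA QMAN/CORE block.  The DMA QMANs
+	 * share one register layout, which is why the per-word masks below
+	 * repeat for DMA1 onward with only the mm* register base changing.
+	 */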
pb_addr = (mmDMA0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA0_QM_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA0_QM_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << 
((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = + ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA0_QM_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << 
((mmDMA0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA0_QM_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA0_QM_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + 
mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = + ((mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA0_QM_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + 
word_offset = ((mmDMA0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmDMA0_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA1_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 
1U << ((mmDMA1_QM_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA1_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA1_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA1_QM_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA1_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA1_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA1_QM_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_0 & 0x7F) >> 2); + + 
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA1_QM_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA1_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset =
+		((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA1_QM_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA1_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA1_QM_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA1_QM_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset =
+		((mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA1_QM_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA1_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA1_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_QM_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_QM_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
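+	/*
+	 * Only queue 0's CQ pointer/size registers share the 128-byte group
+	 * with the PQ/CQ status registers above; queues 1-3 spill into the
+	 * next group, which is presumably why they are unsecured together
+	 * with CQ_CTL_0 in the block below.
+	 */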
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_QM_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset =
+		((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_QM_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_QM_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_QM_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset =
+		((mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_QM_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_QM_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_QM_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_QM_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset =
+		((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_QM_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_QM_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_QM_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset =
+		((mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_QM_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA3_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA4_QM_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA4_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA4_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA4_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA4_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA4_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA4_QM_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA4_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA4_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA4_QM_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA4_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA4_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA4_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset =
+		((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA4_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA4_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA4_QM_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U
<< ((mmDMA4_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA4_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA4_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA4_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA4_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA4_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA4_QM_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA4_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA4_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA4_QM_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + 
mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = + ((mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA4_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA4_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA4_QM_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA4_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + 
word_offset = ((mmDMA4_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA4_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA4_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA4_QM_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmDMA4_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA4_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA4_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA4_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA5_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 
1U << ((mmDMA5_QM_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA5_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA5_QM_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA5_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA5_QM_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_0 & 0x7F) >> 2); + + 
WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA5_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA5_QM_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA5_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA5_QM_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + 
mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = + ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA5_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA5_QM_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U 
<< ((mmDMA5_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA5_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA5_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA5_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA5_QM_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA5_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA5_QM_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + 
mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = + ((mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA5_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA5_QM_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + 
word_offset = ((mmDMA5_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA5_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA5_QM_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmDMA5_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA5_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA5_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA5_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA6_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 
1U << ((mmDMA6_QM_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA6_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA6_QM_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA6_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA6_QM_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_0 & 0x7F) >> 2); + + 
WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA6_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA6_QM_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA6_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA6_QM_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + 
mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = + ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA6_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA6_QM_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U 
<< ((mmDMA6_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA6_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA6_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA6_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA6_QM_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA6_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA6_QM_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + 
mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = + ((mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7) + << 2; + + mask = 1U << ((mmDMA6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA6_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA6_QM_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + 
word_offset = ((mmDMA6_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA6_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA6_QM_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmDMA6_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA6_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA6_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmDMA6_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmDMA7_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmDMA7_QM_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 
+	mask |= 1U << ((mmDMA7_QM_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
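+	/*
+	 * Assumed layout behind this recurring idiom (an editorial gloss,
+	 * not part of the upstream patch): each 4KB register block keeps a
+	 * shadow array of protection bits at PROT_BITS_OFFS, one bit per
+	 * 32-bit register. pb_addr points at that array, word_offset picks
+	 * the protection word covering the register's 0x80-byte stripe,
+	 * and the bit index within that word is (reg & 0x7F) >> 2. Writing
+	 * ~mask presumably leaves only the listed registers open to
+	 * non-secured access while every other register stays protected.
+	 */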
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA0_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA0_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA0_CORE_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA0_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA0_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA0_CORE_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_SECURE_PROPS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA0_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA0_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA0_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_RD_ARCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_WR_MAX_AWID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_WR_AWCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_WR_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA0_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA0_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA0_CORE_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_STS1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA0_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA0_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA0_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_DBG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA0_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA1_CORE_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA1_CORE_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_SECURE_PROPS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA1_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_RD_ARCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_WR_MAX_AWID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_WR_AWCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_WR_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA1_CORE_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_STS1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA1_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA1_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA1_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_DBG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA1_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_CORE_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_CORE_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_SECURE_PROPS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_RD_ARCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_WR_MAX_AWID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_WR_AWCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_CORE_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_STS1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA2_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA2_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA2_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_DBG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA2_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_CORE_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_CORE_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_SECURE_PROPS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_RD_ARCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_WR_MAX_AWID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_WR_AWCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_CORE_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_STS1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA3_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA3_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA3_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_DBG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA3_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA4_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA4_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA4_CORE_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA4_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA4_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA4_CORE_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_SECURE_PROPS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA4_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA4_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA4_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_RD_ARCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_WR_MAX_AWID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_WR_AWCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA4_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA4_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA4_CORE_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_STS1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA4_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA4_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA4_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_DBG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA4_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA5_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA5_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA5_CORE_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA5_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA5_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA5_CORE_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_SECURE_PROPS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA5_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA5_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA5_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_RD_ARCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_WR_MAX_AWID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_WR_AWCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA5_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA5_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA5_CORE_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_STS1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA5_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA5_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA5_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_DBG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA5_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA6_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA6_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA6_CORE_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA6_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA6_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA6_CORE_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_SECURE_PROPS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA6_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA6_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA6_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_RD_ARCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_WR_MAX_AWID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_WR_AWCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA6_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA6_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA6_CORE_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_STS1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA6_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA6_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA6_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_DBG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA6_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_CORE_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_CORE_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_SECURE_PROPS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_RD_ARCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_WR_MAX_AWID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_WR_AWCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_CORE_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_STS1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmDMA7_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmDMA7_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmDMA7_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_DBG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+	mask |= 1U << ((mmDMA7_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+}
+
+static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+{
+	u32 pb_addr, mask;
+	u8 word_offset;
+
+	gaudi_pb_set_block(hdev, mmTPC0_E2E_CRED_BASE);
+	gaudi_pb_set_block(hdev, mmTPC1_E2E_CRED_BASE);
+	gaudi_pb_set_block(hdev, mmTPC2_E2E_CRED_BASE);
+	gaudi_pb_set_block(hdev, mmTPC3_E2E_CRED_BASE);
+	gaudi_pb_set_block(hdev, mmTPC4_E2E_CRED_BASE);
+	gaudi_pb_set_block(hdev, mmTPC5_E2E_CRED_BASE);
+	gaudi_pb_set_block(hdev, mmTPC6_E2E_CRED_BASE);
+	gaudi_pb_set_block(hdev, mmTPC7_E2E_CRED_BASE);
+
+	WREG32(mmTPC0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmTPC0_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+	pb_addr = (mmTPC0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC0_QM_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC0_QM_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC0_QM_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + PROT_BITS_OFFS;
+
+	word_offset = ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7) << 2;
+
+	mask = 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC0_QM_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC0_QM_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC0_QM_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + + word_offset = ((mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmTPC0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC0_QM_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << 
((mmTPC0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC0_CFG_ROUND_CSR & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC0_CFG_PROT & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_VFLAGS & 
0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_RD_RATE_LIMIT & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_WR_RATE_LIMIT & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_TPC_INTR_MASK & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_WQ_CREDITS & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_ARUSER_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_ARUSER_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_AWUSER_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_AWUSER_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_OPCODE_EXEC & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC0_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_DBGMEM_ADD & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_DBGMEM_DATA_WR & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_DBGMEM_DATA_RD & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_DBGMEM_CTRL & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_DBGMEM_RC & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + WREG32(mmTPC1_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmTPC1_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmTPC1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC1_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << 
((mmTPC1_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC1_QM_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC1_QM_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_STS1_1 & 0x7F) >> 
2); + mask |= 1U << ((mmTPC1_QM_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC1_QM_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC1_QM_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_1 & 
0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC1_QM_CP_STS_0 & 0x7F) >> 2); + mask 
|= 1U << ((mmTPC1_QM_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC1_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC1_QM_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC1_QM_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << 
((mmTPC1_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + + word_offset = ((mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmTPC1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC1_QM_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + 
mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC1_QM_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC1_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC1_CFG_ROUND_CSR & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC1_CFG_PROT & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_RD_RATE_LIMIT & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_WR_RATE_LIMIT & 0x7F) >> 2); + mask |= 1U 
<< ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_TPC_INTR_MASK & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_WQ_CREDITS & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_ARUSER_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_ARUSER_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_AWUSER_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_AWUSER_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_OPCODE_EXEC & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC1_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_DBGMEM_ADD & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_DBGMEM_DATA_WR & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_DBGMEM_DATA_RD & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_DBGMEM_CTRL & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_DBGMEM_RC & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + WREG32(mmTPC2_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmTPC2_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmTPC2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC2_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_STS1_4 & 0x7F) >> 2); 
+ mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC2_QM_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC2_QM_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; 
+ mask = 1U << ((mmTPC2_QM_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC2_QM_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << 
((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC2_QM_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << 
((mmTPC2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC2_QM_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC2_QM_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + 
mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmTPC2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC2_QM_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 
2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC2_CFG_ROUND_CSR & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC2_CFG_PROT & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_RD_RATE_LIMIT & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_WR_RATE_LIMIT & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_TPC_INTR_MASK & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_WQ_CREDITS & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_ARUSER_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_ARUSER_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_AWUSER_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC2_CFG_AWUSER_HI & 0x7F) >> 2); + mask |= 
1U << ((mmTPC2_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC2_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC2_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmTPC2_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_DBGMEM_ADD & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_DBGMEM_RC & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	WREG32(mmTPC3_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmTPC3_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+	pb_addr = (mmTPC3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC3_QM_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC3_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC3_QM_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC3_QM_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC3_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC3_QM_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC3_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC3_QM_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC3_QM_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmTPC3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC3_QM_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC3_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmTPC3_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC3_CFG_ROUND_CSR & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC3_CFG_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_VFLAGS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_SFLAGS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_WQ_CREDITS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_ARUSER_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_ARUSER_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_AWUSER_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_AWUSER_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC3_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC3_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmTPC3_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_DBGMEM_ADD & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_DBGMEM_RC & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	WREG32(mmTPC4_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmTPC4_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+	pb_addr = (mmTPC4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC4_QM_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC4_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC4_QM_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC4_QM_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC4_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC4_QM_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC4_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC4_QM_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC4_QM_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmTPC4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC4_QM_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC4_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmTPC4_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC4_CFG_ROUND_CSR & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC4_CFG_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_VFLAGS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_SFLAGS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_WQ_CREDITS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_ARUSER_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_ARUSER_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_AWUSER_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_AWUSER_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC4_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC4_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmTPC4_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_DBGMEM_ADD & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_DBGMEM_RC & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	WREG32(mmTPC5_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmTPC5_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+	pb_addr = (mmTPC5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC5_QM_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC5_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC5_QM_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC5_QM_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC5_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC5_QM_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC5_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC5_QM_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC5_QM_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmTPC5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC5_QM_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC5_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC5_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmTPC5_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmTPC5_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U <<
((mmTPC5_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_QM_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_QM_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_QM_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_QM_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_QM_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC5_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC5_CFG_ROUND_CSR & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC5_CFG_PROT & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_RD_RATE_LIMIT & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_WR_RATE_LIMIT & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_TPC_INTR_MASK & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_WQ_CREDITS & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_ARUSER_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_ARUSER_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_AWUSER_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_AWUSER_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_OPCODE_EXEC & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC5_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_DBGMEM_ADD & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_DBGMEM_DATA_WR & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_DBGMEM_DATA_RD & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_DBGMEM_CTRL & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_DBGMEM_RC & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_5 & 0x7F) 
>> 2); + mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + WREG32(mmTPC6_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmTPC6_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmTPC6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC6_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC6_QM_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 
1U << ((mmTPC6_QM_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC6_QM_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC6_QM_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC6_QM_CQ_CTL_STS_0 & 0x7F) 
>> 2); + mask |= 1U << ((mmTPC6_QM_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 
2); + mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC6_QM_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC6_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC6_QM_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_2 & 0x7F) >> 
2); + mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC6_QM_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + + word_offset = ((mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmTPC6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC6_QM_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << 
((mmTPC6_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC6_QM_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_ERR_ADDR_LO & 0x7F) 
>> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7) + << 2; + + mask = 1U << ((mmTPC6_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC6_CFG_ROUND_CSR & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC6_CFG_PROT & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_RD_RATE_LIMIT & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_WR_RATE_LIMIT & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_TPC_INTR_MASK & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_WQ_CREDITS & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_ARUSER_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_ARUSER_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_AWUSER_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_AWUSER_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_OPCODE_EXEC & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC6_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_DBGMEM_ADD & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_DBGMEM_DATA_WR & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_DBGMEM_DATA_RD & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_DBGMEM_CTRL & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_DBGMEM_RC & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + WREG32(mmTPC7_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmTPC7_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmTPC7_QM_GLBL_CFG0 & 
~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC7_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC7_QM_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << 
((mmTPC7_QM_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC7_QM_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC7_QM_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC7_QM_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2); 
+ mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << 
((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + + word_offset = ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) + >> 7) << 2; + + mask = 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC7_QM_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC7_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC7_QM_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC7_QM_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << 
((mmTPC7_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmTPC7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC7_QM_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << 
((mmTPC7_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC7_QM_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC7_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + 
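/* + * A worked example of the decode used by every block above (a sketch, + * assuming PROT_BITS_OFFS is 0xF80, i.e. the protection bits occupy + * the last 128 bytes of each 4K block): a register at block offset + * 0x234 yields word_offset = ((0x234 & 0xF80) >> 7) << 2 = 16 bytes + * into the PB area and bit index (0x234 & 0x7F) >> 2 = 13, so clearing + * bit 13 of the 32-bit PB word at pb_addr + 16 secures that register. + */ + + 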
pb_addr = (mmTPC7_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC7_CFG_ROUND_CSR & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmTPC7_CFG_PROT & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_RD_RATE_LIMIT & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_WR_RATE_LIMIT & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_TPC_INTR_MASK & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_WQ_CREDITS & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_ARUSER_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_ARUSER_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_AWUSER_LO & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_AWUSER_HI & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_OPCODE_EXEC & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1U << ((mmTPC7_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_DBGMEM_ADD & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_DBGMEM_DATA_WR & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_DBGMEM_DATA_RD & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_DBGMEM_CTRL & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_DBGMEM_RC & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); +} + +/** + * gaudi_init_protection_bits - Initialize protection bits of specific registers + * + * @hdev: pointer to hl_device structure + * + * All protection bits are 1 by default, which means the registers are not + * protected. Each bit that belongs to a protected register needs to be + * cleared (set to 0). + * + */ +static void gaudi_init_protection_bits(struct hl_device *hdev) +{ + /* + * In each 4K block of registers, the last 128 bytes are protection + * bits - total of 1024 bits, one for each register. Each bit is related + * to a specific register, by the order of the registers. 
+ * So in order to calculate the bit that is related to a given register, + * we need to calculate its word offset and then the exact bit inside + * the word (which is 4 bytes). + * + * Register address: + * + * 31 12 11 7 6 2 1 0 + * ----------------------------------------------------------------- + * | Don't | word | bit location | 0 | + * | care | offset | inside word | | + * ----------------------------------------------------------------- + * + * Bits 7-11 represents the word offset inside the 128 bytes. + * Bits 2-6 represents the bit location inside the word. + * + * When a bit is cleared, it means the register it represents can only + * be accessed by a secured entity. When the bit is set, any entity can + * access the register. + * + * The last 4 bytes in the block of the PBs control the security of + * the PBs themselves, so they always need to be configured to be + * secured + */ + + gaudi_pb_set_block(hdev, mmIF_E_PLL_BASE); + gaudi_pb_set_block(hdev, mmMESH_W_PLL_BASE); + gaudi_pb_set_block(hdev, mmSRAM_W_PLL_BASE); + gaudi_pb_set_block(hdev, mmMESH_E_PLL_BASE); + gaudi_pb_set_block(hdev, mmSRAM_E_PLL_BASE); + + gaudi_init_dma_protection_bits(hdev); + + gaudi_init_mme_protection_bits(hdev); + + gaudi_init_tpc_protection_bits(hdev); +} + +static void gaudi_init_range_registers_lbw(struct hl_device *hdev) +{ + u32 lbw_rng_start[GAUDI_NUMBER_OF_LBW_RANGES]; + u32 lbw_rng_end[GAUDI_NUMBER_OF_LBW_RANGES]; + int i, j; + + lbw_rng_start[0] = (0xFC0E8000 & 0x3FFFFFF) - 1; /* 0x000E7FFF */ + lbw_rng_end[0] = (0xFC11FFFF & 0x3FFFFFF) + 1; /* 0x00120000 */ + + lbw_rng_start[1] = (0xFC1E8000 & 0x3FFFFFF) - 1; /* 0x001E7FFF */ + lbw_rng_end[1] = (0xFC48FFFF & 0x3FFFFFF) + 1; /* 0x00490000 */ + + lbw_rng_start[2] = (0xFC600000 & 0x3FFFFFF) - 1; /* 0x005FFFFF */ + lbw_rng_end[2] = (0xFCC48FFF & 0x3FFFFFF) + 1; /* 0x00C49000 */ + + lbw_rng_start[3] = (0xFCC4A000 & 0x3FFFFFF) - 1; /* 0x00C49FFF */ + lbw_rng_end[3] = (0xFCCDFFFF & 0x3FFFFFF) + 1; /* 0x00CE0000 */ + + lbw_rng_start[4] = (0xFCCE4000 & 0x3FFFFFF) - 1; /* 0x00CE3FFF */ + lbw_rng_end[4] = (0xFCD1FFFF & 0x3FFFFFF) + 1; /* 0x00D20000 */ + + lbw_rng_start[5] = (0xFCD24000 & 0x3FFFFFF) - 1; /* 0x00D23FFF */ + lbw_rng_end[5] = (0xFCD5FFFF & 0x3FFFFFF) + 1; /* 0x00D60000 */ + + lbw_rng_start[6] = (0xFCD64000 & 0x3FFFFFF) - 1; /* 0x00D63FFF */ + lbw_rng_end[6] = (0xFCD9FFFF & 0x3FFFFFF) + 1; /* 0x00DA0000 */ + + lbw_rng_start[7] = (0xFCDA4000 & 0x3FFFFFF) - 1; /* 0x00DA3FFF */ + lbw_rng_end[7] = (0xFCDDFFFF & 0x3FFFFFF) + 1; /* 0x00DE0000 */ + + lbw_rng_start[8] = (0xFCDE4000 & 0x3FFFFFF) - 1; /* 0x00DE3FFF */ + lbw_rng_end[8] = (0xFCE05FFF & 0x3FFFFFF) + 1; /* 0x00E06000 */ + + lbw_rng_start[9] = (0xFCFC9000 & 0x3FFFFFF) - 1; /* 0x00FC8FFF */ + lbw_rng_end[9] = (0xFFFFFFFE & 0x3FFFFFF) + 1; /* 0x03FFFFFF */ + + for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++) { + WREG32(gaudi_rr_lbw_hit_aw_regs[i], + (1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1); + WREG32(gaudi_rr_lbw_hit_ar_regs[i], + (1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1); + } + + for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++) + for (j = 0 ; j < GAUDI_NUMBER_OF_LBW_RANGES ; j++) { + WREG32(gaudi_rr_lbw_min_aw_regs[i] + (j << 2), + lbw_rng_start[j]); + + WREG32(gaudi_rr_lbw_min_ar_regs[i] + (j << 2), + lbw_rng_start[j]); + + WREG32(gaudi_rr_lbw_max_aw_regs[i] + (j << 2), + lbw_rng_end[j]); + + WREG32(gaudi_rr_lbw_max_ar_regs[i] + (j << 2), + lbw_rng_end[j]); + } +} + +static void gaudi_init_range_registers_hbw(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = 
hdev->asic_specific; + + u32 dram_addr_lo = lower_32_bits(DRAM_PHYS_BASE); + u32 dram_addr_hi = upper_32_bits(DRAM_PHYS_BASE); + + u32 sram_addr_lo = lower_32_bits(SRAM_BASE_ADDR); + u32 sram_addr_hi = upper_32_bits(SRAM_BASE_ADDR); + + u32 scratch_addr_lo = lower_32_bits(PSOC_SCRATCHPAD_ADDR); + u32 scratch_addr_hi = upper_32_bits(PSOC_SCRATCHPAD_ADDR); + + u32 pcie_fw_addr_lo = lower_32_bits(PCIE_FW_SRAM_ADDR); + u32 pcie_fw_addr_hi = upper_32_bits(PCIE_FW_SRAM_ADDR); + + u32 spi_addr_lo = lower_32_bits(SPI_FLASH_BASE_ADDR); + u32 spi_addr_hi = upper_32_bits(SPI_FLASH_BASE_ADDR); + + int i; + + /* Configure HBW RR: + * 1st range is the DRAM (first 512MB) + * 2nd range is the 1st 128 bytes in SRAM (for tensor DMA). This area + * is defined as read-only for user + * 3rd range is the PSOC scratch-pad + * 4th range is the PCIe F/W SRAM area + * 5th range is the SPI FLASH area + * 6th range is the host + */ + + for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) { + WREG32(gaudi_rr_hbw_hit_aw_regs[i], 0x1F); + WREG32(gaudi_rr_hbw_hit_ar_regs[i], 0x1D); + } + + for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) { + WREG32(gaudi_rr_hbw_base_low_aw_regs[i], dram_addr_lo); + WREG32(gaudi_rr_hbw_base_low_ar_regs[i], dram_addr_lo); + + WREG32(gaudi_rr_hbw_base_high_aw_regs[i], dram_addr_hi); + WREG32(gaudi_rr_hbw_base_high_ar_regs[i], dram_addr_hi); + + WREG32(gaudi_rr_hbw_mask_low_aw_regs[i], 0xE0000000); + WREG32(gaudi_rr_hbw_mask_low_ar_regs[i], 0xE0000000); + + WREG32(gaudi_rr_hbw_mask_high_aw_regs[i], 0x3FFFF); + WREG32(gaudi_rr_hbw_mask_high_ar_regs[i], 0x3FFFF); + + WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 4, sram_addr_lo); + WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 4, sram_addr_hi); + WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 4, 0xFFFFFF80); + WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 4, 0x3FFFF); + + WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 8, scratch_addr_lo); + WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 8, scratch_addr_lo); + + WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 8, scratch_addr_hi); + WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 8, scratch_addr_hi); + + WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 8, 0xFFFF0000); + WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 8, 0xFFFF0000); + + WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 8, 0x3FFFF); + WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 8, 0x3FFFF); + + WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 12, pcie_fw_addr_lo); + WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 12, pcie_fw_addr_lo); + + WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 12, pcie_fw_addr_hi); + WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 12, pcie_fw_addr_hi); + + WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 12, 0xFFFF8000); + WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 12, 0xFFFF8000); + + WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 12, 0x3FFFF); + WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 12, 0x3FFFF); + + WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 16, spi_addr_lo); + WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 16, spi_addr_lo); + + WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 16, spi_addr_hi); + WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 16, spi_addr_hi); + + WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 16, 0xFE000000); + WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 16, 0xFE000000); + + WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 16, 0x3FFFF); + WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 16, 0x3FFFF); + + if (gaudi->hw_cap_initialized & HW_CAP_MMU) + continue; + + /* Protect HOST */ + WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 20, 0); + WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 20, 0); + + 
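+	/*
+	 * Note that when HW_CAP_MMU is already set, the `continue` above
+	 * skips this host entry entirely; host memory is then expected to be
+	 * isolated by the MMU rather than by this range register.
+	 */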
WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 20, 0); + WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 20, 0); + + WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 20, 0); + WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 20, 0); + + WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 20, 0xFFF80); + WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 20, 0xFFF80); + } +} + +/** + * gaudi_init_security - Initialize security model + * + * @hdev: pointer to hl_device structure + * + * Initialize the security model of the device + * That includes range registers and protection bit per register + * + */ +void gaudi_init_security(struct hl_device *hdev) +{ + /* Due to H/W errata GAUDI0500, need to override default security + * property configuration of MME SBAB and ACC to be non-privileged and + * non-secured + */ + WREG32(mmMME0_SBAB_PROT, 0x2); + WREG32(mmMME0_ACC_PROT, 0x2); + WREG32(mmMME1_SBAB_PROT, 0x2); + WREG32(mmMME1_ACC_PROT, 0x2); + WREG32(mmMME2_SBAB_PROT, 0x2); + WREG32(mmMME2_ACC_PROT, 0x2); + WREG32(mmMME3_SBAB_PROT, 0x2); + WREG32(mmMME3_ACC_PROT, 0x2); + + /* On RAZWI, 0 will be returned from RR and 0xBABA0BAD from PB */ + WREG32(0xC01B28, 0x1); + + gaudi_init_range_registers_lbw(hdev); + + gaudi_init_range_registers_hbw(hdev); + + gaudi_init_protection_bits(hdev); +} diff --git a/drivers/misc/habanalabs/goya/Makefile b/drivers/misc/habanalabs/goya/Makefile new file mode 100644 index 000000000..b3f3b7b96 --- /dev/null +++ b/drivers/misc/habanalabs/goya/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +HL_GOYA_FILES := goya/goya.o goya/goya_security.o goya/goya_hwmgr.o \ + goya/goya_coresight.o diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c new file mode 100644 index 000000000..c8023b442 --- /dev/null +++ b/drivers/misc/habanalabs/goya/goya.c @@ -0,0 +1,5422 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "goyaP.h" +#include "../include/hw_ip/mmu/mmu_general.h" +#include "../include/hw_ip/mmu/mmu_v1_0.h" +#include "../include/goya/asic_reg/goya_masks.h" +#include "../include/goya/goya_reg_map.h" + +#include <linux/pci.h> +#include <linux/genalloc.h> +#include <linux/hwmon.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/iommu.h> +#include <linux/seq_file.h> + +/* + * GOYA security scheme: + * + * 1. Host is protected by: + * - Range registers (When MMU is enabled, DMA RR does NOT protect host) + * - MMU + * + * 2. DRAM is protected by: + * - Range registers (protect the first 512MB) + * - MMU (isolation between users) + * + * 3. Configuration is protected by: + * - Range registers + * - Protection bits + * + * When MMU is disabled: + * + * QMAN DMA: PQ, CQ, CP, DMA are secured. + * PQ, CB and the data are on the host. + * + * QMAN TPC/MME: + * PQ, CQ and CP are not secured. + * PQ, CB and the data are on the SRAM/DRAM. + * + * Since QMAN DMA is secured, the driver is parsing the DMA CB: + * - checks DMA pointer + * - WREG, MSG_PROT are not allowed. + * - MSG_LONG/SHORT are allowed. + * + * A read/write transaction by the QMAN to a protected area will succeed if + * and only if the QMAN's CP is secured and MSG_PROT is used + * + * + * When MMU is enabled: + * + * QMAN DMA: PQ, CQ and CP are secured. + * MMU is set to bypass on the Secure props register of the QMAN. + * The reasons we don't enable MMU for PQ, CQ and CP are: + * - PQ entry is in kernel address space and the driver doesn't map it. 
+ * - CP writes to MSIX register and to kernel address space (completion + * queue). + * + * DMA is not secured but because CP is secured, the driver still needs to parse + * the CB, but doesn't need to check the DMA addresses. + * + * For QMAN DMA 0, DMA is also secured because only the driver uses this DMA and + * the driver doesn't map memory in MMU. + * + * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode) + * + * DMA RR does NOT protect host because DMA is not secured + * + */ + +#define GOYA_BOOT_FIT_FILE "habanalabs/goya/goya-boot-fit.itb" +#define GOYA_LINUX_FW_FILE "habanalabs/goya/goya-fit.itb" + +#define GOYA_MMU_REGS_NUM 63 + +#define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */ + +#define GOYA_RESET_TIMEOUT_MSEC 500 /* 500ms */ +#define GOYA_PLDM_RESET_TIMEOUT_MSEC 20000 /* 20s */ +#define GOYA_RESET_WAIT_MSEC 1 /* 1ms */ +#define GOYA_CPU_RESET_WAIT_MSEC 100 /* 100ms */ +#define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */ +#define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */ +#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100) +#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) +#define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */ +#define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */ + +#define GOYA_QMAN0_FENCE_VAL 0xD169B243 + +#define GOYA_MAX_STRING_LEN 20 + +#define GOYA_CB_POOL_CB_CNT 512 +#define GOYA_CB_POOL_CB_SIZE 0x20000 /* 128KB */ + +#define IS_QM_IDLE(engine, qm_glbl_sts0) \ + (((qm_glbl_sts0) & engine##_QM_IDLE_MASK) == engine##_QM_IDLE_MASK) +#define IS_DMA_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(DMA, qm_glbl_sts0) +#define IS_TPC_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(TPC, qm_glbl_sts0) +#define IS_MME_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(MME, qm_glbl_sts0) + +#define IS_CMDQ_IDLE(engine, cmdq_glbl_sts0) \ + (((cmdq_glbl_sts0) & engine##_CMDQ_IDLE_MASK) == \ + engine##_CMDQ_IDLE_MASK) +#define IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) \ + IS_CMDQ_IDLE(TPC, cmdq_glbl_sts0) +#define IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) \ + IS_CMDQ_IDLE(MME, cmdq_glbl_sts0) + +#define IS_DMA_IDLE(dma_core_sts0) \ + !((dma_core_sts0) & DMA_CH_0_STS0_DMA_BUSY_MASK) + +#define IS_TPC_IDLE(tpc_cfg_sts) \ + (((tpc_cfg_sts) & TPC_CFG_IDLE_MASK) == TPC_CFG_IDLE_MASK) + +#define IS_MME_IDLE(mme_arch_sts) \ + (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK) + + +static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = { + "goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3", + "goya cq 4", "goya cpu eq" +}; + +static u16 goya_packet_sizes[MAX_PACKET_ID] = { + [PACKET_WREG_32] = sizeof(struct packet_wreg32), + [PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk), + [PACKET_MSG_LONG] = sizeof(struct packet_msg_long), + [PACKET_MSG_SHORT] = sizeof(struct packet_msg_short), + [PACKET_CP_DMA] = sizeof(struct packet_cp_dma), + [PACKET_MSG_PROT] = sizeof(struct packet_msg_prot), + [PACKET_FENCE] = sizeof(struct packet_fence), + [PACKET_LIN_DMA] = sizeof(struct packet_lin_dma), + [PACKET_NOP] = sizeof(struct packet_nop), + [PACKET_STOP] = sizeof(struct packet_stop) +}; + +static inline bool validate_packet_id(enum packet_id id) +{ + switch (id) { + case PACKET_WREG_32: + case PACKET_WREG_BULK: + case PACKET_MSG_LONG: + case PACKET_MSG_SHORT: + case PACKET_CP_DMA: + case PACKET_MSG_PROT: + case PACKET_FENCE: + case PACKET_LIN_DMA: + case PACKET_NOP: + case PACKET_STOP: + return true; + default: + return false; + } +} + +static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = { + mmDMA_QM_0_GLBL_NON_SECURE_PROPS, + mmDMA_QM_1_GLBL_NON_SECURE_PROPS, + 
mmDMA_QM_2_GLBL_NON_SECURE_PROPS, + mmDMA_QM_3_GLBL_NON_SECURE_PROPS, + mmDMA_QM_4_GLBL_NON_SECURE_PROPS, + mmTPC0_QM_GLBL_SECURE_PROPS, + mmTPC0_QM_GLBL_NON_SECURE_PROPS, + mmTPC0_CMDQ_GLBL_SECURE_PROPS, + mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC0_CFG_ARUSER, + mmTPC0_CFG_AWUSER, + mmTPC1_QM_GLBL_SECURE_PROPS, + mmTPC1_QM_GLBL_NON_SECURE_PROPS, + mmTPC1_CMDQ_GLBL_SECURE_PROPS, + mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC1_CFG_ARUSER, + mmTPC1_CFG_AWUSER, + mmTPC2_QM_GLBL_SECURE_PROPS, + mmTPC2_QM_GLBL_NON_SECURE_PROPS, + mmTPC2_CMDQ_GLBL_SECURE_PROPS, + mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC2_CFG_ARUSER, + mmTPC2_CFG_AWUSER, + mmTPC3_QM_GLBL_SECURE_PROPS, + mmTPC3_QM_GLBL_NON_SECURE_PROPS, + mmTPC3_CMDQ_GLBL_SECURE_PROPS, + mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC3_CFG_ARUSER, + mmTPC3_CFG_AWUSER, + mmTPC4_QM_GLBL_SECURE_PROPS, + mmTPC4_QM_GLBL_NON_SECURE_PROPS, + mmTPC4_CMDQ_GLBL_SECURE_PROPS, + mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC4_CFG_ARUSER, + mmTPC4_CFG_AWUSER, + mmTPC5_QM_GLBL_SECURE_PROPS, + mmTPC5_QM_GLBL_NON_SECURE_PROPS, + mmTPC5_CMDQ_GLBL_SECURE_PROPS, + mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC5_CFG_ARUSER, + mmTPC5_CFG_AWUSER, + mmTPC6_QM_GLBL_SECURE_PROPS, + mmTPC6_QM_GLBL_NON_SECURE_PROPS, + mmTPC6_CMDQ_GLBL_SECURE_PROPS, + mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC6_CFG_ARUSER, + mmTPC6_CFG_AWUSER, + mmTPC7_QM_GLBL_SECURE_PROPS, + mmTPC7_QM_GLBL_NON_SECURE_PROPS, + mmTPC7_CMDQ_GLBL_SECURE_PROPS, + mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC7_CFG_ARUSER, + mmTPC7_CFG_AWUSER, + mmMME_QM_GLBL_SECURE_PROPS, + mmMME_QM_GLBL_NON_SECURE_PROPS, + mmMME_CMDQ_GLBL_SECURE_PROPS, + mmMME_CMDQ_GLBL_NON_SECURE_PROPS, + mmMME_SBA_CONTROL_DATA, + mmMME_SBB_CONTROL_DATA, + mmMME_SBC_CONTROL_DATA, + mmMME_WBC_CONTROL_DATA, + mmPCIE_WRAP_PSOC_ARUSER, + mmPCIE_WRAP_PSOC_AWUSER +}; + +static u32 goya_all_events[] = { + GOYA_ASYNC_EVENT_ID_PCIE_IF, + GOYA_ASYNC_EVENT_ID_TPC0_ECC, + GOYA_ASYNC_EVENT_ID_TPC1_ECC, + GOYA_ASYNC_EVENT_ID_TPC2_ECC, + GOYA_ASYNC_EVENT_ID_TPC3_ECC, + GOYA_ASYNC_EVENT_ID_TPC4_ECC, + GOYA_ASYNC_EVENT_ID_TPC5_ECC, + GOYA_ASYNC_EVENT_ID_TPC6_ECC, + GOYA_ASYNC_EVENT_ID_TPC7_ECC, + GOYA_ASYNC_EVENT_ID_MME_ECC, + GOYA_ASYNC_EVENT_ID_MME_ECC_EXT, + GOYA_ASYNC_EVENT_ID_MMU_ECC, + GOYA_ASYNC_EVENT_ID_DMA_MACRO, + GOYA_ASYNC_EVENT_ID_DMA_ECC, + GOYA_ASYNC_EVENT_ID_CPU_IF_ECC, + GOYA_ASYNC_EVENT_ID_PSOC_MEM, + GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT, + GOYA_ASYNC_EVENT_ID_SRAM0, + GOYA_ASYNC_EVENT_ID_SRAM1, + GOYA_ASYNC_EVENT_ID_SRAM2, + GOYA_ASYNC_EVENT_ID_SRAM3, + GOYA_ASYNC_EVENT_ID_SRAM4, + GOYA_ASYNC_EVENT_ID_SRAM5, + GOYA_ASYNC_EVENT_ID_SRAM6, + GOYA_ASYNC_EVENT_ID_SRAM7, + GOYA_ASYNC_EVENT_ID_SRAM8, + GOYA_ASYNC_EVENT_ID_SRAM9, + GOYA_ASYNC_EVENT_ID_SRAM10, + GOYA_ASYNC_EVENT_ID_SRAM11, + GOYA_ASYNC_EVENT_ID_SRAM12, + GOYA_ASYNC_EVENT_ID_SRAM13, + GOYA_ASYNC_EVENT_ID_SRAM14, + GOYA_ASYNC_EVENT_ID_SRAM15, + GOYA_ASYNC_EVENT_ID_SRAM16, + GOYA_ASYNC_EVENT_ID_SRAM17, + GOYA_ASYNC_EVENT_ID_SRAM18, + GOYA_ASYNC_EVENT_ID_SRAM19, + GOYA_ASYNC_EVENT_ID_SRAM20, + GOYA_ASYNC_EVENT_ID_SRAM21, + GOYA_ASYNC_EVENT_ID_SRAM22, + GOYA_ASYNC_EVENT_ID_SRAM23, + GOYA_ASYNC_EVENT_ID_SRAM24, + GOYA_ASYNC_EVENT_ID_SRAM25, + GOYA_ASYNC_EVENT_ID_SRAM26, + GOYA_ASYNC_EVENT_ID_SRAM27, + GOYA_ASYNC_EVENT_ID_SRAM28, + GOYA_ASYNC_EVENT_ID_SRAM29, + GOYA_ASYNC_EVENT_ID_GIC500, + GOYA_ASYNC_EVENT_ID_PLL0, + GOYA_ASYNC_EVENT_ID_PLL1, + GOYA_ASYNC_EVENT_ID_PLL3, + GOYA_ASYNC_EVENT_ID_PLL4, + GOYA_ASYNC_EVENT_ID_PLL5, + GOYA_ASYNC_EVENT_ID_PLL6, + GOYA_ASYNC_EVENT_ID_AXI_ECC, + 
GOYA_ASYNC_EVENT_ID_L2_RAM_ECC, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT, + GOYA_ASYNC_EVENT_ID_PCIE_DEC, + GOYA_ASYNC_EVENT_ID_TPC0_DEC, + GOYA_ASYNC_EVENT_ID_TPC1_DEC, + GOYA_ASYNC_EVENT_ID_TPC2_DEC, + GOYA_ASYNC_EVENT_ID_TPC3_DEC, + GOYA_ASYNC_EVENT_ID_TPC4_DEC, + GOYA_ASYNC_EVENT_ID_TPC5_DEC, + GOYA_ASYNC_EVENT_ID_TPC6_DEC, + GOYA_ASYNC_EVENT_ID_TPC7_DEC, + GOYA_ASYNC_EVENT_ID_MME_WACS, + GOYA_ASYNC_EVENT_ID_MME_WACSD, + GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER, + GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC, + GOYA_ASYNC_EVENT_ID_PSOC, + GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC0_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC1_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC2_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC3_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC4_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC5_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC6_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC7_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC0_QM, + GOYA_ASYNC_EVENT_ID_TPC1_QM, + GOYA_ASYNC_EVENT_ID_TPC2_QM, + GOYA_ASYNC_EVENT_ID_TPC3_QM, + GOYA_ASYNC_EVENT_ID_TPC4_QM, + GOYA_ASYNC_EVENT_ID_TPC5_QM, + GOYA_ASYNC_EVENT_ID_TPC6_QM, + GOYA_ASYNC_EVENT_ID_TPC7_QM, + GOYA_ASYNC_EVENT_ID_MME_QM, + GOYA_ASYNC_EVENT_ID_MME_CMDQ, + GOYA_ASYNC_EVENT_ID_DMA0_QM, + GOYA_ASYNC_EVENT_ID_DMA1_QM, + GOYA_ASYNC_EVENT_ID_DMA2_QM, + GOYA_ASYNC_EVENT_ID_DMA3_QM, + GOYA_ASYNC_EVENT_ID_DMA4_QM, + GOYA_ASYNC_EVENT_ID_DMA0_CH, + GOYA_ASYNC_EVENT_ID_DMA1_CH, + GOYA_ASYNC_EVENT_ID_DMA2_CH, + GOYA_ASYNC_EVENT_ID_DMA3_CH, + GOYA_ASYNC_EVENT_ID_DMA4_CH, + GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH0, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH1, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH2, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH3, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH4, + GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S, + GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E, + GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S, + GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E +}; + +static int goya_mmu_clear_pgt_range(struct hl_device *hdev); +static int goya_mmu_set_dram_default_page(struct hl_device *hdev); +static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev); +static void goya_mmu_prepare(struct hl_device *hdev, u32 asid); + +int goya_get_fixed_properties(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + int i; + + prop->max_queues = GOYA_QUEUE_ID_SIZE; + prop->hw_queues_props = kcalloc(prop->max_queues, + sizeof(struct hw_queue_properties), + GFP_KERNEL); + + if (!prop->hw_queues_props) + return -ENOMEM; + + for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) { + prop->hw_queues_props[i].type = QUEUE_TYPE_EXT; + prop->hw_queues_props[i].driver_only = 0; + prop->hw_queues_props[i].requires_kernel_cb = 1; + } + + for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) { + prop->hw_queues_props[i].type = QUEUE_TYPE_CPU; + prop->hw_queues_props[i].driver_only = 1; + prop->hw_queues_props[i].requires_kernel_cb = 0; + } + + for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES + + NUMBER_OF_INT_HW_QUEUES; i++) { + prop->hw_queues_props[i].type = 
+						QUEUE_TYPE_INT;
+		prop->hw_queues_props[i].driver_only = 0;
+		prop->hw_queues_props[i].requires_kernel_cb = 0;
+	}
+
+	prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
+
+	prop->dram_base_address = DRAM_PHYS_BASE;
+	prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
+	prop->dram_end_address = prop->dram_base_address + prop->dram_size;
+	prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
+
+	prop->sram_base_address = SRAM_BASE_ADDR;
+	prop->sram_size = SRAM_SIZE;
+	prop->sram_end_address = prop->sram_base_address + prop->sram_size;
+	prop->sram_user_base_address = prop->sram_base_address +
+						SRAM_USER_BASE_OFFSET;
+
+	prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
+	prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
+	if (hdev->pldm)
+		prop->mmu_pgt_size = 0x800000; /* 8MB */
+	else
+		prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
+	prop->mmu_pte_size = HL_PTE_SIZE;
+	prop->mmu_hop_table_size = HOP_TABLE_SIZE;
+	prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
+	prop->dram_page_size = PAGE_SIZE_2MB;
+
+	prop->dmmu.hop0_shift = HOP0_SHIFT;
+	prop->dmmu.hop1_shift = HOP1_SHIFT;
+	prop->dmmu.hop2_shift = HOP2_SHIFT;
+	prop->dmmu.hop3_shift = HOP3_SHIFT;
+	prop->dmmu.hop4_shift = HOP4_SHIFT;
+	prop->dmmu.hop0_mask = HOP0_MASK;
+	prop->dmmu.hop1_mask = HOP1_MASK;
+	prop->dmmu.hop2_mask = HOP2_MASK;
+	prop->dmmu.hop3_mask = HOP3_MASK;
+	prop->dmmu.hop4_mask = HOP4_MASK;
+	prop->dmmu.start_addr = VA_DDR_SPACE_START;
+	prop->dmmu.end_addr = VA_DDR_SPACE_END;
+	prop->dmmu.page_size = PAGE_SIZE_2MB;
+	prop->dmmu.num_hops = MMU_ARCH_5_HOPS;
+
+	/* shifts and masks are the same in PMMU and DMMU */
+	memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
+	prop->pmmu.start_addr = VA_HOST_SPACE_START;
+	prop->pmmu.end_addr = VA_HOST_SPACE_END;
+	prop->pmmu.page_size = PAGE_SIZE_4KB;
+	prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
+
+	/* PMMU and HPMMU are the same except for the page size */
+	memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
+	prop->pmmu_huge.page_size = PAGE_SIZE_2MB;
+
+	prop->dram_size_for_default_page_mapping = VA_DDR_SPACE_END;
+	prop->cfg_size = CFG_SIZE;
+	prop->max_asid = MAX_ASID;
+	prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
+	prop->high_pll = PLL_HIGH_DEFAULT;
+	prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
+	prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
+	prop->max_power_default = MAX_POWER_DEFAULT;
+	prop->tpc_enabled_mask = TPC_ENABLED_MASK;
+	prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
+	prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
+
+	strncpy(prop->cpucp_info.card_name, GOYA_DEFAULT_CARD_NAME,
+		CARD_NAME_MAX_LEN);
+
+	prop->max_pending_cs = GOYA_MAX_PENDING_CS;
+
+	return 0;
+}
+
+/*
+ * goya_pci_bars_map - Map PCI BARs of the Goya device
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Request PCI regions and map them to kernel virtual addresses.
+ * Returns 0 on success + * + */ +static int goya_pci_bars_map(struct hl_device *hdev) +{ + static const char * const name[] = {"SRAM_CFG", "MSIX", "DDR"}; + bool is_wc[3] = {false, false, true}; + int rc; + + rc = hl_pci_bars_map(hdev, name, is_wc); + if (rc) + return rc; + + hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] + + (CFG_BASE - SRAM_BASE_ADDR); + + return 0; +} + +static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr) +{ + struct goya_device *goya = hdev->asic_specific; + struct hl_inbound_pci_region pci_region; + u64 old_addr = addr; + int rc; + + if ((goya) && (goya->ddr_bar_cur_addr == addr)) + return old_addr; + + /* Inbound Region 1 - Bar 4 - Point to DDR */ + pci_region.mode = PCI_BAR_MATCH_MODE; + pci_region.bar = DDR_BAR_ID; + pci_region.addr = addr; + rc = hl_pci_set_inbound_region(hdev, 1, &pci_region); + if (rc) + return U64_MAX; + + if (goya) { + old_addr = goya->ddr_bar_cur_addr; + goya->ddr_bar_cur_addr = addr; + } + + return old_addr; +} + +/* + * goya_init_iatu - Initialize the iATU unit inside the PCI controller + * + * @hdev: pointer to hl_device structure + * + * This is needed in case the firmware doesn't initialize the iATU + * + */ +static int goya_init_iatu(struct hl_device *hdev) +{ + struct hl_inbound_pci_region inbound_region; + struct hl_outbound_pci_region outbound_region; + int rc; + + /* Inbound Region 0 - Bar 0 - Point to SRAM and CFG */ + inbound_region.mode = PCI_BAR_MATCH_MODE; + inbound_region.bar = SRAM_CFG_BAR_ID; + inbound_region.addr = SRAM_BASE_ADDR; + rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region); + if (rc) + goto done; + + /* Inbound Region 1 - Bar 4 - Point to DDR */ + inbound_region.mode = PCI_BAR_MATCH_MODE; + inbound_region.bar = DDR_BAR_ID; + inbound_region.addr = DRAM_PHYS_BASE; + rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region); + if (rc) + goto done; + + hdev->asic_funcs->set_dma_mask_from_fw(hdev); + + /* Outbound Region 0 - Point to Host */ + outbound_region.addr = HOST_PHYS_BASE; + outbound_region.size = HOST_PHYS_SIZE; + rc = hl_pci_set_outbound_region(hdev, &outbound_region); + +done: + return rc; +} + +/* + * goya_early_init - GOYA early initialization code + * + * @hdev: pointer to hl_device structure + * + * Verify PCI bars + * Set DMA masks + * PCI controller initialization + * Map PCI bars + * + */ +static int goya_early_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct pci_dev *pdev = hdev->pdev; + u32 val; + int rc; + + rc = goya_get_fixed_properties(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to get fixed properties\n"); + return rc; + } + + /* Check BAR sizes */ + if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) { + dev_err(hdev->dev, + "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n", + SRAM_CFG_BAR_ID, + (unsigned long long) pci_resource_len(pdev, + SRAM_CFG_BAR_ID), + CFG_BAR_SIZE); + rc = -ENODEV; + goto free_queue_props; + } + + if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) { + dev_err(hdev->dev, + "Not " HL_NAME "? 
BAR %d size %llu, expecting %llu\n", + MSIX_BAR_ID, + (unsigned long long) pci_resource_len(pdev, + MSIX_BAR_ID), + MSIX_BAR_SIZE); + rc = -ENODEV; + goto free_queue_props; + } + + prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID); + + rc = hl_pci_init(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, + mmCPU_BOOT_ERR0, GOYA_BOOT_FIT_REQ_TIMEOUT_USEC); + if (rc) + goto free_queue_props; + + /* Goya Firmware does not support security */ + prop->fw_security_disabled = true; + dev_info(hdev->dev, "firmware-level security is disabled\n"); + + if (!hdev->pldm) { + val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS); + if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK) + dev_warn(hdev->dev, + "PCI strap is not configured correctly, PCI bus errors may occur\n"); + } + + return 0; + +free_queue_props: + kfree(hdev->asic_prop.hw_queues_props); + return rc; +} + +/* + * goya_early_fini - GOYA early finalization code + * + * @hdev: pointer to hl_device structure + * + * Unmap PCI bars + * + */ +static int goya_early_fini(struct hl_device *hdev) +{ + kfree(hdev->asic_prop.hw_queues_props); + hl_pci_fini(hdev); + + return 0; +} + +static void goya_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid) +{ + /* mask to zero the MMBP and ASID bits */ + WREG32_AND(reg, ~0x7FF); + WREG32_OR(reg, asid); +} + +static void goya_qman0_set_security(struct hl_device *hdev, bool secure) +{ + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return; + + if (secure) + WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED); + else + WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED); + + RREG32(mmDMA_QM_0_GLBL_PROT); +} + +/* + * goya_fetch_psoc_frequency - Fetch PSOC frequency values + * + * @hdev: pointer to hl_device structure + * + */ +static void goya_fetch_psoc_frequency(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u32 trace_freq = 0; + u32 pll_clk = 0; + u32 div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1); + u32 div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1); + u32 nr = RREG32(mmPSOC_PCI_PLL_NR); + u32 nf = RREG32(mmPSOC_PCI_PLL_NF); + u32 od = RREG32(mmPSOC_PCI_PLL_OD); + + if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) { + if (div_sel == DIV_SEL_REF_CLK) + trace_freq = PLL_REF_CLK; + else + trace_freq = PLL_REF_CLK / (div_fctr + 1); + } else if (div_sel == DIV_SEL_PLL_CLK || + div_sel == DIV_SEL_DIVIDED_PLL) { + pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1)); + if (div_sel == DIV_SEL_PLL_CLK) + trace_freq = pll_clk; + else + trace_freq = pll_clk / (div_fctr + 1); + } else { + dev_warn(hdev->dev, + "Received invalid div select value: %d", div_sel); + } + + prop->psoc_timestamp_frequency = trace_freq; + prop->psoc_pci_pll_nr = nr; + prop->psoc_pci_pll_nf = nf; + prop->psoc_pci_pll_od = od; + prop->psoc_pci_pll_div_factor = div_fctr; +} + +int goya_late_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + int rc; + + goya_fetch_psoc_frequency(hdev); + + rc = goya_mmu_clear_pgt_range(hdev); + if (rc) { + dev_err(hdev->dev, + "Failed to clear MMU page tables range %d\n", rc); + return rc; + } + + rc = goya_mmu_set_dram_default_page(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to set DRAM default page %d\n", rc); + return rc; + } + + rc = goya_mmu_add_mappings_for_device_cpu(hdev); + if (rc) + return rc; + + rc = goya_init_cpu_queues(hdev); + if (rc) + return rc; + + rc = goya_test_cpu_queue(hdev); + if (rc) + return rc; + + rc = 
goya_cpucp_info_get(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to get cpucp info %d\n", rc); + return rc; + } + + /* Now that we have the DRAM size in ASIC prop, we need to check + * its size and configure the DMA_IF DDR wrap protection (which is in + * the MMU block) accordingly. The value is the log2 of the DRAM size + */ + WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size)); + + rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS); + if (rc) { + dev_err(hdev->dev, + "Failed to enable PCI access from CPU %d\n", rc); + return rc; + } + + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, + GOYA_ASYNC_EVENT_ID_INTS_REGISTER); + + return 0; +} + +/* + * goya_late_fini - GOYA late tear-down code + * + * @hdev: pointer to hl_device structure + * + * Free sensors allocated structures + */ +void goya_late_fini(struct hl_device *hdev) +{ + const struct hwmon_channel_info **channel_info_arr; + int i = 0; + + if (!hdev->hl_chip_info->info) + return; + + channel_info_arr = hdev->hl_chip_info->info; + + while (channel_info_arr[i]) { + kfree(channel_info_arr[i]->config); + kfree(channel_info_arr[i]); + i++; + } + + kfree(channel_info_arr); + + hdev->hl_chip_info->info = NULL; +} + +/* + * goya_sw_init - Goya software initialization code + * + * @hdev: pointer to hl_device structure + * + */ +static int goya_sw_init(struct hl_device *hdev) +{ + struct goya_device *goya; + int rc; + + /* Allocate device structure */ + goya = kzalloc(sizeof(*goya), GFP_KERNEL); + if (!goya) + return -ENOMEM; + + /* according to goya_init_iatu */ + goya->ddr_bar_cur_addr = DRAM_PHYS_BASE; + + goya->mme_clk = GOYA_PLL_FREQ_LOW; + goya->tpc_clk = GOYA_PLL_FREQ_LOW; + goya->ic_clk = GOYA_PLL_FREQ_LOW; + + hdev->asic_specific = goya; + + /* Create DMA pool for small allocations */ + hdev->dma_pool = dma_pool_create(dev_name(hdev->dev), + &hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0); + if (!hdev->dma_pool) { + dev_err(hdev->dev, "failed to create DMA pool\n"); + rc = -ENOMEM; + goto free_goya_device; + } + + hdev->cpu_accessible_dma_mem = + hdev->asic_funcs->asic_dma_alloc_coherent(hdev, + HL_CPU_ACCESSIBLE_MEM_SIZE, + &hdev->cpu_accessible_dma_address, + GFP_KERNEL | __GFP_ZERO); + + if (!hdev->cpu_accessible_dma_mem) { + rc = -ENOMEM; + goto free_dma_pool; + } + + dev_dbg(hdev->dev, "cpu accessible memory at bus address %pad\n", + &hdev->cpu_accessible_dma_address); + + hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1); + if (!hdev->cpu_accessible_dma_pool) { + dev_err(hdev->dev, + "Failed to create CPU accessible DMA pool\n"); + rc = -ENOMEM; + goto free_cpu_dma_mem; + } + + rc = gen_pool_add(hdev->cpu_accessible_dma_pool, + (uintptr_t) hdev->cpu_accessible_dma_mem, + HL_CPU_ACCESSIBLE_MEM_SIZE, -1); + if (rc) { + dev_err(hdev->dev, + "Failed to add memory to CPU accessible DMA pool\n"); + rc = -EFAULT; + goto free_cpu_accessible_dma_pool; + } + + spin_lock_init(&goya->hw_queues_lock); + hdev->supports_coresight = true; + hdev->supports_soft_reset = true; + + return 0; + +free_cpu_accessible_dma_pool: + gen_pool_destroy(hdev->cpu_accessible_dma_pool); +free_cpu_dma_mem: + hdev->asic_funcs->asic_dma_free_coherent(hdev, + HL_CPU_ACCESSIBLE_MEM_SIZE, + hdev->cpu_accessible_dma_mem, + hdev->cpu_accessible_dma_address); +free_dma_pool: + dma_pool_destroy(hdev->dma_pool); +free_goya_device: + kfree(goya); + + return rc; +} + +/* + * goya_sw_fini - Goya software tear-down code + * + * @hdev: pointer to hl_device structure + * + */ +static int goya_sw_fini(struct hl_device *hdev) +{ + struct 
goya_device *goya = hdev->asic_specific; + + gen_pool_destroy(hdev->cpu_accessible_dma_pool); + + hdev->asic_funcs->asic_dma_free_coherent(hdev, + HL_CPU_ACCESSIBLE_MEM_SIZE, + hdev->cpu_accessible_dma_mem, + hdev->cpu_accessible_dma_address); + + dma_pool_destroy(hdev->dma_pool); + + kfree(goya); + + return 0; +} + +static void goya_init_dma_qman(struct hl_device *hdev, int dma_id, + dma_addr_t bus_address) +{ + struct goya_device *goya = hdev->asic_specific; + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI); + u32 dma_err_cfg = QMAN_DMA_ERR_MSG_EN; + + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address)); + WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address)); + + WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH)); + WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0); + WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0); + + WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo); + WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi); + WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo); + WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi); + WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo); + WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi); + WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off, + GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id); + + /* PQ has buffer of 2 cache lines, while CQ has 8 lines */ + WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002); + WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008); + + if (goya->hw_cap_initialized & HW_CAP_MMU) + WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED); + else + WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED); + + if (hdev->stop_on_err) + dma_err_cfg |= 1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT; + + WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, dma_err_cfg); + WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE); +} + +static void goya_init_dma_ch(struct hl_device *hdev, int dma_id) +{ + u32 gic_base_lo, gic_base_hi; + u64 sob_addr; + u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo); + WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi); + WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off, + GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id); + + if (dma_id) + sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 + + (dma_id - 1) * 4; + else + sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007; + + WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr)); + WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001); +} + +/* + * goya_init_dma_qmans - Initialize QMAN DMA registers + * + * @hdev: pointer to hl_device structure + * + * Initialize the H/W registers of the QMAN DMA channels + * + */ +void goya_init_dma_qmans(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; 
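+	/*
+	 * Each external (DMA) queue below is paired one-to-one with a
+	 * completion queue and an MSI-X vector: q->cq_id == q->msi_vec ==
+	 * the queue index.
+	 */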
+ struct hl_hw_queue *q; + int i; + + if (goya->hw_cap_initialized & HW_CAP_DMA) + return; + + q = &hdev->kernel_queues[0]; + + for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) { + q->cq_id = q->msi_vec = i; + goya_init_dma_qman(hdev, i, q->bus_address); + goya_init_dma_ch(hdev, i); + } + + goya->hw_cap_initialized |= HW_CAP_DMA; +} + +/* + * goya_disable_external_queues - Disable external queues + * + * @hdev: pointer to hl_device structure + * + */ +static void goya_disable_external_queues(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_DMA)) + return; + + WREG32(mmDMA_QM_0_GLBL_CFG0, 0); + WREG32(mmDMA_QM_1_GLBL_CFG0, 0); + WREG32(mmDMA_QM_2_GLBL_CFG0, 0); + WREG32(mmDMA_QM_3_GLBL_CFG0, 0); + WREG32(mmDMA_QM_4_GLBL_CFG0, 0); +} + +static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg, + u32 cp_sts_reg, u32 glbl_sts0_reg) +{ + int rc; + u32 status; + + /* use the values of TPC0 as they are all the same*/ + + WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT); + + status = RREG32(cp_sts_reg); + if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) { + rc = hl_poll_timeout( + hdev, + cp_sts_reg, + status, + !(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK), + 1000, + QMAN_FENCE_TIMEOUT_USEC); + + /* if QMAN is stuck in fence no need to check for stop */ + if (rc) + return 0; + } + + rc = hl_poll_timeout( + hdev, + glbl_sts0_reg, + status, + (status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK), + 1000, + QMAN_STOP_TIMEOUT_USEC); + + if (rc) { + dev_err(hdev->dev, + "Timeout while waiting for QMAN to stop\n"); + return -EINVAL; + } + + return 0; +} + +/* + * goya_stop_external_queues - Stop external queues + * + * @hdev: pointer to hl_device structure + * + * Returns 0 on success + * + */ +static int goya_stop_external_queues(struct hl_device *hdev) +{ + int rc, retval = 0; + + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_DMA)) + return retval; + + rc = goya_stop_queue(hdev, + mmDMA_QM_0_GLBL_CFG1, + mmDMA_QM_0_CP_STS, + mmDMA_QM_0_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 0\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmDMA_QM_1_GLBL_CFG1, + mmDMA_QM_1_CP_STS, + mmDMA_QM_1_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 1\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmDMA_QM_2_GLBL_CFG1, + mmDMA_QM_2_CP_STS, + mmDMA_QM_2_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 2\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmDMA_QM_3_GLBL_CFG1, + mmDMA_QM_3_CP_STS, + mmDMA_QM_3_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 3\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmDMA_QM_4_GLBL_CFG1, + mmDMA_QM_4_CP_STS, + mmDMA_QM_4_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 4\n"); + retval = -EIO; + } + + return retval; +} + +/* + * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU + * + * @hdev: pointer to hl_device structure + * + * Returns 0 on success + * + */ +int goya_init_cpu_queues(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + struct hl_eq *eq; + u32 status; + struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ]; + int err; + + if (!hdev->cpu_queues_enable) + return 0; + + if (goya->hw_cap_initialized & HW_CAP_CPU_Q) + return 0; + + eq = &hdev->event_queue; + + WREG32(mmCPU_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address)); + 
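+	/*
+	 * This sequence is a handshake with the device CPU: the driver
+	 * publishes the PQ/EQ/CQ base addresses and lengths, marks the PQ as
+	 * PQ_INIT_STATUS_READY_FOR_CP, kicks the CPU through the GIC, and
+	 * then polls until the firmware reports
+	 * PQ_INIT_STATUS_READY_FOR_HOST.
+	 */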
WREG32(mmCPU_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address)); + + WREG32(mmCPU_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address)); + WREG32(mmCPU_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address)); + + WREG32(mmCPU_CQ_BASE_ADDR_LOW, + lower_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR)); + WREG32(mmCPU_CQ_BASE_ADDR_HIGH, + upper_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR)); + + WREG32(mmCPU_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES); + WREG32(mmCPU_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES); + WREG32(mmCPU_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE); + + /* Used for EQ CI */ + WREG32(mmCPU_EQ_CI, 0); + + WREG32(mmCPU_IF_PF_PQ_PI, 0); + + WREG32(mmCPU_PQ_INIT_STATUS, PQ_INIT_STATUS_READY_FOR_CP); + + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, + GOYA_ASYNC_EVENT_ID_PI_UPDATE); + + err = hl_poll_timeout( + hdev, + mmCPU_PQ_INIT_STATUS, + status, + (status == PQ_INIT_STATUS_READY_FOR_HOST), + 1000, + GOYA_CPU_TIMEOUT_USEC); + + if (err) { + dev_err(hdev->dev, + "Failed to setup communication with device CPU\n"); + return -EIO; + } + + goya->hw_cap_initialized |= HW_CAP_CPU_Q; + return 0; +} + +static void goya_set_pll_refclk(struct hl_device *hdev) +{ + WREG32(mmCPU_PLL_DIV_SEL_0, 0x0); + WREG32(mmCPU_PLL_DIV_SEL_1, 0x0); + WREG32(mmCPU_PLL_DIV_SEL_2, 0x0); + WREG32(mmCPU_PLL_DIV_SEL_3, 0x0); + + WREG32(mmIC_PLL_DIV_SEL_0, 0x0); + WREG32(mmIC_PLL_DIV_SEL_1, 0x0); + WREG32(mmIC_PLL_DIV_SEL_2, 0x0); + WREG32(mmIC_PLL_DIV_SEL_3, 0x0); + + WREG32(mmMC_PLL_DIV_SEL_0, 0x0); + WREG32(mmMC_PLL_DIV_SEL_1, 0x0); + WREG32(mmMC_PLL_DIV_SEL_2, 0x0); + WREG32(mmMC_PLL_DIV_SEL_3, 0x0); + + WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0); + WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0); + WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0); + WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0); + + WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0); + WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0); + WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0); + WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0); + + WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0); + WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0); + WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0); + WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0); + + WREG32(mmTPC_PLL_DIV_SEL_0, 0x0); + WREG32(mmTPC_PLL_DIV_SEL_1, 0x0); + WREG32(mmTPC_PLL_DIV_SEL_2, 0x0); + WREG32(mmTPC_PLL_DIV_SEL_3, 0x0); +} + +static void goya_disable_clk_rlx(struct hl_device *hdev) +{ + WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010); + WREG32(mmIC_PLL_CLK_RLX_0, 0x100010); +} + +static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id) +{ + u64 tpc_eml_address; + u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset; + int err, slm_index; + + tpc_offset = tpc_id * 0x40000; + tpc_eml_offset = tpc_id * 0x200000; + tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE); + tpc_slm_offset = tpc_eml_address + 0x100000; + + /* + * Workaround for Bug H2 #2443 : + * "TPC SB is not initialized on chip reset" + */ + + val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset); + if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK) + dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n", + tpc_id); + + WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000); + + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + 
tpc_offset, 0x70FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF); + + WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset, + 1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT); + + err = hl_poll_timeout( + hdev, + mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset, + val, + (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK), + 1000, + HL_DEVICE_TIMEOUT_USEC); + + if (err) + dev_err(hdev->dev, + "Timeout while waiting for TPC%d MBIST DONE\n", tpc_id); + + WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset, + 1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT); + + msleep(GOYA_RESET_WAIT_MSEC); + + WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset, + ~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT)); + + msleep(GOYA_RESET_WAIT_MSEC); + + for (slm_index = 0 ; slm_index < 256 ; slm_index++) + WREG32(tpc_slm_offset + (slm_index << 2), 0); + + val = RREG32(tpc_slm_offset); +} + +static void goya_tpc_mbist_workaround(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + int i; + + if (hdev->pldm) + return; + + if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST) + return; + + /* Workaround for H2 #2443 */ + + for (i = 0 ; i < TPC_MAX_NUM ; i++) + _goya_tpc_mbist_workaround(hdev, i); + + goya->hw_cap_initialized |= HW_CAP_TPC_MBIST; +} + +/* + * goya_init_golden_registers - Initialize golden registers + * + * @hdev: pointer to hl_device structure + * + * Initialize the H/W registers of the device + * + */ +static void goya_init_golden_registers(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + u32 polynom[10], tpc_intr_mask, offset; + int i; + + if (goya->hw_cap_initialized & HW_CAP_GOLDEN) + return; + + polynom[0] = 0x00020080; + polynom[1] = 0x00401000; + polynom[2] = 0x00200800; + polynom[3] = 0x00002000; + polynom[4] = 0x00080200; + polynom[5] = 0x00040100; + polynom[6] = 0x00100400; + polynom[7] = 0x00004000; + polynom[8] = 0x00010000; + polynom[9] = 0x00008000; + + /* Mask all arithmetic interrupts from TPC */ + tpc_intr_mask = 0x7FFF; + + for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) { + WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302); + WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302); + WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302); + WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302); + WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302); + + WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204); + WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204); + WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204); + WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204); + WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204); + + + WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206); + WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206); + WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206); + WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207); + WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207); + + WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207); + WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207); + WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206); + WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206); + WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206); + + WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101); + WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102); + WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103); + WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 
0x104); + WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105); + + WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105); + WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104); + WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103); + WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102); + WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101); + } + + WREG32(mmMME_STORE_MAX_CREDIT, 0x21); + WREG32(mmMME_AGU, 0x0f0f0f10); + WREG32(mmMME_SEI_MASK, ~0x0); + + WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101); + WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101); + WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101); + WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101); + WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101); + WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701); + WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401); + WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401); + WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301); + WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101); + WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101); + WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105); + WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501); + WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501); + WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301); + WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401); + WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101); + WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101); + WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202); + WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101); + WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201); + WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701); + WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101); + WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101); + WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101); + WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101); + WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701); + WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201); + WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101); + WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102); + WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701); + WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701); + WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707); + WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201); + WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201); + WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201); + WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102); + WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102); + WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102); + WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102); + WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102); + WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107); + WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106); + WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102); + WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102); + WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102); + WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102); + WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102); + WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702); + WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702); + WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602); + WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402); + WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202); + WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102); + WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401); + WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401); + WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401); + WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401); + WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401); + WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401); + WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101); + 
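+	/*
+	 * These HBW arbitration registers appear to pack four 8-bit weights
+	 * each (e.g. 0x01050101). In the SRAM router writes above, the
+	 * east/west write-response weights mirror each other across columns
+	 * X0..X4 (0x101..0x105 vs. 0x105..0x101), presumably weighting by a
+	 * router's distance from the respective edge.
+	 */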
WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101); + WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101); + WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101); + WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101); + WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107); + WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107); + WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101); + WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101); + WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101); + WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101); + WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101); + WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501); + WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501); + WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301); + WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401); + WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101); + WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101); + WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101); + WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101); + WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101); + WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101); + WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101); + WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101); + + WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101); + WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101); + WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101); + WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102); + WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101); + WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202); + WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201); + WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201); + WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202); + WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101); + WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101); + WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101); + + WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101); + WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101); + WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201); + WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102); + WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101); + WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202); + WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201); + WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201); + WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202); + WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101); + WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101); + WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101); + + WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101); + WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101); + WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301); + WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102); + WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101); + WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301); + WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201); + WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201); + WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402); + WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101); + WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101); + WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401); + + WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101); + WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101); + WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401); + WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102); + WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101); + WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702); + WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201); + WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201); + WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602); + WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101); + WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101); + WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 
0x01040301); + + WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101); + WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101); + WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501); + WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102); + WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101); + WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602); + WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201); + WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201); + WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702); + WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101); + WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101); + WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501); + + WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101); + WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101); + WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601); + WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101); + WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101); + WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702); + WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101); + WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101); + WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702); + WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101); + WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101); + WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501); + + for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) { + WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + + WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + + WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + } + + for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) { + WREG32(mmMME1_RTR_SCRAMB_EN + offset, + 1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT); + WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset, + 1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT); + } + + for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) { + /* + * Workaround for Bug H2 #2441 : + * "ST.NOP set trace event illegal opcode" + */ + WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask); + + WREG32(mmTPC0_NRTR_SCRAMB_EN + offset, + 1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT); + WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset, + 1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT); + + WREG32_FIELD(TPC0_CFG_MSS_CONFIG, offset, + ICACHE_FETCH_LINE_NUM, 2); + } + + WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT); + WREG32(mmDMA_NRTR_NON_LIN_SCRAMB, + 1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT); + + WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT); + WREG32(mmPCI_NRTR_NON_LIN_SCRAMB, + 1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT); + + /* + * Workaround for H2 #HW-23 bug + * Set DMA max outstanding read requests to 240 on DMA CH 1. + * This limitation is still large enough to not affect Gen4 bandwidth. 
+ * We need to only limit that DMA channel because the user can only read + * from Host using DMA CH 1 + */ + WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0); + + WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020); + + goya->hw_cap_initialized |= HW_CAP_GOLDEN; +} + +static void goya_init_mme_qman(struct hl_device *hdev) +{ + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + u64 qman_base_addr; + + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + qman_base_addr = hdev->asic_prop.sram_base_address + + MME_QMAN_BASE_OFFSET; + + WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr)); + WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr)); + WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH)); + WREG32(mmMME_QM_PQ_PI, 0); + WREG32(mmMME_QM_PQ_CI, 0); + WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0); + WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4); + WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8); + WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC); + + WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo); + WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi); + WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo); + WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi); + + /* QMAN CQ has 8 cache lines */ + WREG32(mmMME_QM_CQ_CFG1, 0x00080008); + + WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo); + WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi); + + WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM); + + WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN); + + WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT); + + WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE); +} + +static void goya_init_mme_cmdq(struct hl_device *hdev) +{ + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo); + WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi); + WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo); + WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi); + + /* CMDQ CQ has 20 cache lines */ + WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014); + + WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo); + WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi); + + WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ); + + WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN); + + WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT); + + WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE); +} + +void goya_init_mme_qmans(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + u32 so_base_lo, so_base_hi; + + if (goya->hw_cap_initialized & HW_CAP_MME) + return; + + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + 
mmSYNC_MNGR_SOB_OBJ_0); + + WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo); + WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi); + + goya_init_mme_qman(hdev); + goya_init_mme_cmdq(hdev); + + goya->hw_cap_initialized |= HW_CAP_MME; +} + +static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id) +{ + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + u64 qman_base_addr; + u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI); + + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + qman_base_addr = hdev->asic_prop.sram_base_address + base_off; + + WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr)); + WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr)); + WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH)); + WREG32(mmTPC0_QM_PQ_PI + reg_off, 0); + WREG32(mmTPC0_QM_PQ_CI + reg_off, 0); + WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0); + WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4); + WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8); + WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC); + + WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi); + WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi); + + WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008); + + WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo); + WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi); + + WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off, + GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id); + + WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN); + + WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT); + + WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE); +} + +static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id) +{ + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1); + + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo); + WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi); + WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo); + WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi); + + WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014); + + WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo); + WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi); + + WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off, + GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id); + + WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN); + + 
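+	/*
+	 * As in the QMAN init above, the protection and enable registers are
+	 * written last, only after the CQ configuration, error reporting and
+	 * message base addresses are in place.
+	 */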
WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT); + + WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE); +} + +void goya_init_tpc_qmans(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + u32 so_base_lo, so_base_hi; + u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW - + mmTPC0_CFG_SM_BASE_ADDRESS_LOW; + int i; + + if (goya->hw_cap_initialized & HW_CAP_TPC) + return; + + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + for (i = 0 ; i < TPC_MAX_NUM ; i++) { + WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off, + so_base_lo); + WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off, + so_base_hi); + } + + goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0); + goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1); + goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2); + goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3); + goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4); + goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5); + goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6); + goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7); + + for (i = 0 ; i < TPC_MAX_NUM ; i++) + goya_init_tpc_cmdq(hdev, i); + + goya->hw_cap_initialized |= HW_CAP_TPC; +} + +/* + * goya_disable_internal_queues - Disable internal queues + * + * @hdev: pointer to hl_device structure + * + */ +static void goya_disable_internal_queues(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_MME)) + goto disable_tpc; + + WREG32(mmMME_QM_GLBL_CFG0, 0); + WREG32(mmMME_CMDQ_GLBL_CFG0, 0); + +disable_tpc: + if (!(goya->hw_cap_initialized & HW_CAP_TPC)) + return; + + WREG32(mmTPC0_QM_GLBL_CFG0, 0); + WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC1_QM_GLBL_CFG0, 0); + WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC2_QM_GLBL_CFG0, 0); + WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC3_QM_GLBL_CFG0, 0); + WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC4_QM_GLBL_CFG0, 0); + WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC5_QM_GLBL_CFG0, 0); + WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC6_QM_GLBL_CFG0, 0); + WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC7_QM_GLBL_CFG0, 0); + WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0); +} + +/* + * goya_stop_internal_queues - Stop internal queues + * + * @hdev: pointer to hl_device structure + * + * Returns 0 on success + * + */ +static int goya_stop_internal_queues(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + int rc, retval = 0; + + if (!(goya->hw_cap_initialized & HW_CAP_MME)) + goto stop_tpc; + + /* + * Each queue (QMAN) is a separate H/W logic. 
That means that each
+	 * QMAN can be stopped independently, and failure to stop one does not
+	 * mean we should not try to stop the other QMANs
+	 */
+
+	rc = goya_stop_queue(hdev,
+			mmMME_QM_GLBL_CFG1,
+			mmMME_QM_CP_STS,
+			mmMME_QM_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop MME QMAN\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmMME_CMDQ_GLBL_CFG1,
+			mmMME_CMDQ_CP_STS,
+			mmMME_CMDQ_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop MME CMDQ\n");
+		retval = -EIO;
+	}
+
+stop_tpc:
+	if (!(goya->hw_cap_initialized & HW_CAP_TPC))
+		return retval;
+
+	rc = goya_stop_queue(hdev,
+			mmTPC0_QM_GLBL_CFG1,
+			mmTPC0_QM_CP_STS,
+			mmTPC0_QM_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC0_CMDQ_GLBL_CFG1,
+			mmTPC0_CMDQ_CP_STS,
+			mmTPC0_CMDQ_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC1_QM_GLBL_CFG1,
+			mmTPC1_QM_CP_STS,
+			mmTPC1_QM_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC1_CMDQ_GLBL_CFG1,
+			mmTPC1_CMDQ_CP_STS,
+			mmTPC1_CMDQ_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC2_QM_GLBL_CFG1,
+			mmTPC2_QM_CP_STS,
+			mmTPC2_QM_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC2_CMDQ_GLBL_CFG1,
+			mmTPC2_CMDQ_CP_STS,
+			mmTPC2_CMDQ_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC3_QM_GLBL_CFG1,
+			mmTPC3_QM_CP_STS,
+			mmTPC3_QM_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC3_CMDQ_GLBL_CFG1,
+			mmTPC3_CMDQ_CP_STS,
+			mmTPC3_CMDQ_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC4_QM_GLBL_CFG1,
+			mmTPC4_QM_CP_STS,
+			mmTPC4_QM_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC4_CMDQ_GLBL_CFG1,
+			mmTPC4_CMDQ_CP_STS,
+			mmTPC4_CMDQ_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC5_QM_GLBL_CFG1,
+			mmTPC5_QM_CP_STS,
+			mmTPC5_QM_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC5_CMDQ_GLBL_CFG1,
+			mmTPC5_CMDQ_CP_STS,
+			mmTPC5_CMDQ_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC6_QM_GLBL_CFG1,
+			mmTPC6_QM_CP_STS,
+			mmTPC6_QM_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC6_CMDQ_GLBL_CFG1,
+			mmTPC6_CMDQ_CP_STS,
+			mmTPC6_CMDQ_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC7_QM_GLBL_CFG1,
+			mmTPC7_QM_CP_STS,
+			mmTPC7_QM_GLBL_STS0);
+
+	if (rc) {
+		dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
+		retval = -EIO;
+	}
+
+	rc = goya_stop_queue(hdev,
+			mmTPC7_CMDQ_GLBL_CFG1,
+			mmTPC7_CMDQ_CP_STS,
+			mmTPC7_CMDQ_GLBL_STS0);
+
+	if (rc) {
dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n"); + retval = -EIO; + } + + return retval; +} + +static void goya_dma_stall(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_DMA)) + return; + + WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT); + WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT); + WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT); + WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT); + WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT); +} + +static void goya_tpc_stall(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_TPC)) + return; + + WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT); +} + +static void goya_mme_stall(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_MME)) + return; + + WREG32(mmMME_STALL, 0xFFFFFFFF); +} + +static int goya_enable_msix(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + int cq_cnt = hdev->asic_prop.completion_queues_count; + int rc, i, irq_cnt_init, irq; + + if (goya->hw_cap_initialized & HW_CAP_MSIX) + return 0; + + rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES, + GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX); + if (rc < 0) { + dev_err(hdev->dev, + "MSI-X: Failed to enable support -- %d/%d\n", + GOYA_MSIX_ENTRIES, rc); + return rc; + } + + for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) { + irq = pci_irq_vector(hdev->pdev, i); + rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i], + &hdev->completion_queue[i]); + if (rc) { + dev_err(hdev->dev, "Failed to request IRQ %d", irq); + goto free_irqs; + } + } + + irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX); + + rc = request_irq(irq, hl_irq_handler_eq, 0, + goya_irq_name[GOYA_EVENT_QUEUE_MSIX_IDX], + &hdev->event_queue); + if (rc) { + dev_err(hdev->dev, "Failed to request IRQ %d", irq); + goto free_irqs; + } + + goya->hw_cap_initialized |= HW_CAP_MSIX; + return 0; + +free_irqs: + for (i = 0 ; i < irq_cnt_init ; i++) + free_irq(pci_irq_vector(hdev->pdev, i), + &hdev->completion_queue[i]); + + pci_free_irq_vectors(hdev->pdev); + return rc; +} + +static void goya_sync_irqs(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + int i; + + if (!(goya->hw_cap_initialized & HW_CAP_MSIX)) + return; + + /* Wait for all pending IRQs to be finished */ + for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) + synchronize_irq(pci_irq_vector(hdev->pdev, i)); + + synchronize_irq(pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX)); +} + +static void goya_disable_msix(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + int i, irq; + + if (!(goya->hw_cap_initialized & HW_CAP_MSIX)) + return; + + goya_sync_irqs(hdev); + + irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX); + free_irq(irq, &hdev->event_queue); + + for 
(i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) { + irq = pci_irq_vector(hdev->pdev, i); + free_irq(irq, &hdev->completion_queue[i]); + } + + pci_free_irq_vectors(hdev->pdev); + + goya->hw_cap_initialized &= ~HW_CAP_MSIX; +} + +static void goya_enable_timestamp(struct hl_device *hdev) +{ + /* Disable the timestamp counter */ + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0); + + /* Zero the lower/upper parts of the 64-bit counter */ + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0); + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0); + + /* Enable the counter */ + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1); +} + +static void goya_disable_timestamp(struct hl_device *hdev) +{ + /* Disable the timestamp counter */ + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0); +} + +static void goya_halt_engines(struct hl_device *hdev, bool hard_reset) +{ + u32 wait_timeout_ms; + + dev_info(hdev->dev, + "Halting compute engines and disabling interrupts\n"); + + if (hdev->pldm) + wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC; + else + wait_timeout_ms = GOYA_RESET_WAIT_MSEC; + + goya_stop_external_queues(hdev); + goya_stop_internal_queues(hdev); + + msleep(wait_timeout_ms); + + goya_dma_stall(hdev); + goya_tpc_stall(hdev); + goya_mme_stall(hdev); + + msleep(wait_timeout_ms); + + goya_disable_external_queues(hdev); + goya_disable_internal_queues(hdev); + + goya_disable_timestamp(hdev); + + if (hard_reset) { + goya_disable_msix(hdev); + goya_mmu_remove_device_cpu_mappings(hdev); + } else { + goya_sync_irqs(hdev); + } +} + +/* + * goya_load_firmware_to_device() - Load LINUX FW code to device. + * @hdev: Pointer to hl_device structure. + * + * Copy LINUX fw code from firmware file to HBM BAR. + * + * Return: 0 on success, non-zero for failure. + */ +static int goya_load_firmware_to_device(struct hl_device *hdev) +{ + void __iomem *dst; + + dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET; + + return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst); +} + +/* + * goya_load_boot_fit_to_device() - Load boot fit to device. + * @hdev: Pointer to hl_device structure. + * + * Copy boot fit file to SRAM BAR. + * + * Return: 0 on success, non-zero for failure. + */ +static int goya_load_boot_fit_to_device(struct hl_device *hdev) +{ + void __iomem *dst; + + dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + BOOT_FIT_SRAM_OFFSET; + + return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst); +} + +/* + * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx. + * The version string should be located by that offset. 
+ */ +static void goya_read_device_fw_version(struct hl_device *hdev, + enum hl_fw_component fwc) +{ + const char *name; + u32 ver_off; + char *dest; + + switch (fwc) { + case FW_COMP_UBOOT: + ver_off = RREG32(mmUBOOT_VER_OFFSET); + dest = hdev->asic_prop.uboot_ver; + name = "U-Boot"; + break; + case FW_COMP_PREBOOT: + ver_off = RREG32(mmPREBOOT_VER_OFFSET); + dest = hdev->asic_prop.preboot_ver; + name = "Preboot"; + break; + default: + dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc); + return; + } + + ver_off &= ~((u32)SRAM_BASE_ADDR); + + if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) { + memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off, + VERSION_MAX_LEN); + } else { + dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n", + name, ver_off); + strcpy(dest, "unavailable"); + } +} + +static int goya_init_cpu(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + int rc; + + if (!hdev->cpu_enable) + return 0; + + if (goya->hw_cap_initialized & HW_CAP_CPU) + return 0; + + /* + * Before pushing u-boot/linux to device, need to set the ddr bar to + * base address of dram + */ + if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) { + dev_err(hdev->dev, + "failed to map DDR bar to DRAM base address\n"); + return -EIO; + } + + rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, + mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, + mmCPU_CMD_STATUS_TO_HOST, mmCPU_BOOT_ERR0, + false, GOYA_CPU_TIMEOUT_USEC, + GOYA_BOOT_FIT_REQ_TIMEOUT_USEC); + + if (rc) + return rc; + + goya->hw_cap_initialized |= HW_CAP_CPU; + + return 0; +} + +static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid, + u64 phys_addr) +{ + u32 status, timeout_usec; + int rc; + + if (hdev->pldm) + timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC; + else + timeout_usec = MMU_CONFIG_TIMEOUT_USEC; + + WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT); + WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT); + WREG32(MMU_ASID_BUSY, 0x80000000 | asid); + + rc = hl_poll_timeout( + hdev, + MMU_ASID_BUSY, + status, + !(status & 0x80000000), + 1000, + timeout_usec); + + if (rc) { + dev_err(hdev->dev, + "Timeout during MMU hop0 config of asid %d\n", asid); + return rc; + } + + return 0; +} + +int goya_mmu_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct goya_device *goya = hdev->asic_specific; + u64 hop0_addr; + int rc, i; + + if (!hdev->mmu_enable) + return 0; + + if (goya->hw_cap_initialized & HW_CAP_MMU) + return 0; + + hdev->dram_supports_virtual_memory = true; + hdev->dram_default_page_mapping = true; + + for (i = 0 ; i < prop->max_asid ; i++) { + hop0_addr = prop->mmu_pgt_addr + + (i * prop->mmu_hop_table_size); + + rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr); + if (rc) { + dev_err(hdev->dev, + "failed to set hop0 addr for asid %d\n", i); + goto err; + } + } + + goya->hw_cap_initialized |= HW_CAP_MMU; + + /* init MMU cache manage page */ + WREG32(mmSTLB_CACHE_INV_BASE_39_8, + lower_32_bits(MMU_CACHE_MNG_ADDR >> 8)); + WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40); + + /* Remove follower feature due to performance bug */ + WREG32_AND(mmSTLB_STLB_FEATURE_EN, + (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK)); + + hdev->asic_funcs->mmu_invalidate_cache(hdev, true, + VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK); + + WREG32(mmMMU_MMU_ENABLE, 1); + WREG32(mmMMU_SPI_MASK, 0xF); + + return 0; + +err: + return rc; +} + +/* + * goya_hw_init - Goya hardware initialization code + * + * @hdev: pointer to 
hl_device structure
+ *
+ * Returns 0 on success
+ *
+ */
+static int goya_hw_init(struct hl_device *hdev)
+{
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	int rc;
+
+	dev_info(hdev->dev, "Starting initialization of H/W\n");
+
+	/* Perform read from the device to make sure device is up */
+	RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+	/*
+	 * Let's mark in the H/W that we have reached this point. We check
+	 * this value in the reset_before_init function to understand whether
+	 * we need to reset the chip before doing H/W init. This register is
+	 * cleared by the H/W upon H/W reset
+	 */
+	WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
+
+	rc = goya_init_cpu(hdev);
+	if (rc) {
+		dev_err(hdev->dev, "failed to initialize CPU\n");
+		return rc;
+	}
+
+	goya_tpc_mbist_workaround(hdev);
+
+	goya_init_golden_registers(hdev);
+
+	/*
+	 * After CPU initialization is finished, change DDR bar mapping inside
+	 * iATU to point to the start address of the MMU page tables
+	 */
+	if (goya_set_ddr_bar_base(hdev, (MMU_PAGE_TABLES_ADDR &
+			~(prop->dram_pci_bar_size - 0x1ull))) == U64_MAX) {
+		dev_err(hdev->dev,
+			"failed to map DDR bar to MMU page tables\n");
+		return -EIO;
+	}
+
+	rc = goya_mmu_init(hdev);
+	if (rc)
+		return rc;
+
+	goya_init_security(hdev);
+
+	goya_init_dma_qmans(hdev);
+
+	goya_init_mme_qmans(hdev);
+
+	goya_init_tpc_qmans(hdev);
+
+	goya_enable_timestamp(hdev);
+
+	/* MSI-X must be enabled before CPU queues are initialized */
+	rc = goya_enable_msix(hdev);
+	if (rc)
+		goto disable_queues;
+
+	/* Perform read from the device to flush all MSI-X configuration */
+	RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+	return 0;
+
+disable_queues:
+	goya_disable_internal_queues(hdev);
+	goya_disable_external_queues(hdev);
+
+	return rc;
+}
+
+/*
+ * goya_hw_fini - Goya hardware tear-down code
+ *
+ * @hdev: pointer to hl_device structure
+ * @hard_reset: should we do hard reset to all engines or just reset the
+ *              compute/dma engines
+ */
+static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
+{
+	struct goya_device *goya = hdev->asic_specific;
+	u32 reset_timeout_ms, cpu_timeout_ms, status;
+
+	if (hdev->pldm) {
+		reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
+		cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
+	} else {
+		reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
+		cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
+	}
+
+	if (hard_reset) {
+		/* We don't know what the state of the CPU is, so make sure it
+		 * is stopped by any means necessary
+		 */
+		WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
+		WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+			GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
+
+		msleep(cpu_timeout_ms);
+
+		goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
+		goya_disable_clk_rlx(hdev);
+		goya_set_pll_refclk(hdev);
+
+		WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
+		dev_info(hdev->dev,
+			"Issued HARD reset command, going to wait %dms\n",
+			reset_timeout_ms);
+	} else {
+		WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
+		dev_info(hdev->dev,
+			"Issued SOFT reset command, going to wait %dms\n",
+			reset_timeout_ms);
+	}
+
+	/*
+	 * After hard reset, we can't poll the BTM_FSM register because the PSOC
+	 * itself is in reset.
In either reset we need to wait until the reset + * is deasserted + */ + msleep(reset_timeout_ms); + + status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM); + if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK) + dev_err(hdev->dev, + "Timeout while waiting for device to reset 0x%x\n", + status); + + if (!hard_reset) { + goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME | + HW_CAP_GOLDEN | HW_CAP_TPC); + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, + GOYA_ASYNC_EVENT_ID_SOFT_RESET); + return; + } + + /* Chicken bit to re-initiate boot sequencer flow */ + WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, + 1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT); + /* Move boot manager FSM to pre boot sequencer init state */ + WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM, + 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT); + + goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | + HW_CAP_DDR_0 | HW_CAP_DDR_1 | + HW_CAP_DMA | HW_CAP_MME | + HW_CAP_MMU | HW_CAP_TPC_MBIST | + HW_CAP_GOLDEN | HW_CAP_TPC); + memset(goya->events_stat, 0, sizeof(goya->events_stat)); +} + +int goya_suspend(struct hl_device *hdev) +{ + int rc; + + rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS); + if (rc) + dev_err(hdev->dev, "Failed to disable PCI access from CPU\n"); + + return rc; +} + +int goya_resume(struct hl_device *hdev) +{ + return goya_init_iatu(hdev); +} + +static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size) +{ + int rc; + + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | + VM_DONTCOPY | VM_NORESERVE; + + rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, + (dma_addr - HOST_PHYS_BASE), size); + if (rc) + dev_err(hdev->dev, "dma_mmap_coherent error %d", rc); + + return rc; +} + +void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi) +{ + u32 db_reg_offset, db_value; + + switch (hw_queue_id) { + case GOYA_QUEUE_ID_DMA_0: + db_reg_offset = mmDMA_QM_0_PQ_PI; + break; + + case GOYA_QUEUE_ID_DMA_1: + db_reg_offset = mmDMA_QM_1_PQ_PI; + break; + + case GOYA_QUEUE_ID_DMA_2: + db_reg_offset = mmDMA_QM_2_PQ_PI; + break; + + case GOYA_QUEUE_ID_DMA_3: + db_reg_offset = mmDMA_QM_3_PQ_PI; + break; + + case GOYA_QUEUE_ID_DMA_4: + db_reg_offset = mmDMA_QM_4_PQ_PI; + break; + + case GOYA_QUEUE_ID_CPU_PQ: + db_reg_offset = mmCPU_IF_PF_PQ_PI; + break; + + case GOYA_QUEUE_ID_MME: + db_reg_offset = mmMME_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC0: + db_reg_offset = mmTPC0_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC1: + db_reg_offset = mmTPC1_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC2: + db_reg_offset = mmTPC2_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC3: + db_reg_offset = mmTPC3_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC4: + db_reg_offset = mmTPC4_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC5: + db_reg_offset = mmTPC5_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC6: + db_reg_offset = mmTPC6_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC7: + db_reg_offset = mmTPC7_QM_PQ_PI; + break; + + default: + /* Should never get here */ + dev_err(hdev->dev, "H/W queue %d is invalid. 
Can't set pi\n", + hw_queue_id); + return; + } + + db_value = pi; + + /* ring the doorbell */ + WREG32(db_reg_offset, db_value); + + if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ) + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, + GOYA_ASYNC_EVENT_ID_PI_UPDATE); +} + +void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd) +{ + /* The QMANs are on the SRAM so need to copy to IO space */ + memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd)); +} + +static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags) +{ + void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size, + dma_handle, flags); + + /* Shift to the device's base physical address of host memory */ + if (kernel_addr) + *dma_handle += HOST_PHYS_BASE; + + return kernel_addr; +} + +static void goya_dma_free_coherent(struct hl_device *hdev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + /* Cancel the device's base physical address of host memory */ + dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE; + + dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle); +} + +void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id, + dma_addr_t *dma_handle, u16 *queue_len) +{ + void *base; + u32 offset; + + *dma_handle = hdev->asic_prop.sram_base_address; + + base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID]; + + switch (queue_id) { + case GOYA_QUEUE_ID_MME: + offset = MME_QMAN_BASE_OFFSET; + *queue_len = MME_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC0: + offset = TPC0_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC1: + offset = TPC1_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC2: + offset = TPC2_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC3: + offset = TPC3_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC4: + offset = TPC4_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC5: + offset = TPC5_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC6: + offset = TPC6_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC7: + offset = TPC7_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + default: + dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id); + return NULL; + } + + base += offset; + *dma_handle += offset; + + return base; +} + +static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job) +{ + struct packet_msg_prot *fence_pkt; + u32 *fence_ptr; + dma_addr_t fence_dma_addr; + struct hl_cb *cb; + u32 tmp, timeout; + int rc; + + if (hdev->pldm) + timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC; + else + timeout = HL_DEVICE_TIMEOUT_USEC; + + if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) { + dev_err_ratelimited(hdev->dev, + "Can't send driver job on QMAN0 because the device is not idle\n"); + return -EBUSY; + } + + fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, + &fence_dma_addr); + if (!fence_ptr) { + dev_err(hdev->dev, + "Failed to allocate fence memory for QMAN0\n"); + return -ENOMEM; + } + + goya_qman0_set_security(hdev, true); + + cb = job->patched_cb; + + fence_pkt = cb->kernel_address + + job->job_cb_size - sizeof(struct packet_msg_prot); + + tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) | + (1 << GOYA_PKT_CTL_EB_SHIFT) | + (1 << GOYA_PKT_CTL_MB_SHIFT); + fence_pkt->ctl = cpu_to_le32(tmp); + fence_pkt->value = 
cpu_to_le32(GOYA_QMAN0_FENCE_VAL); + fence_pkt->addr = cpu_to_le64(fence_dma_addr); + + rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0, + job->job_cb_size, cb->bus_address); + if (rc) { + dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc); + goto free_fence_ptr; + } + + rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, + (tmp == GOYA_QMAN0_FENCE_VAL), 1000, + timeout, true); + + hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0); + + if (rc == -ETIMEDOUT) { + dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp); + goto free_fence_ptr; + } + +free_fence_ptr: + hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr, + fence_dma_addr); + + goya_qman0_set_security(hdev, false); + + return rc; +} + +int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, + u32 timeout, long *result) +{ + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) { + if (result) + *result = 0; + return 0; + } + + if (!timeout) + timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC; + + return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len, + timeout, result); +} + +int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id) +{ + struct packet_msg_prot *fence_pkt; + dma_addr_t pkt_dma_addr; + u32 fence_val, tmp; + dma_addr_t fence_dma_addr; + u32 *fence_ptr; + int rc; + + fence_val = GOYA_QMAN0_FENCE_VAL; + + fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, + &fence_dma_addr); + if (!fence_ptr) { + dev_err(hdev->dev, + "Failed to allocate memory for H/W queue %d testing\n", + hw_queue_id); + return -ENOMEM; + } + + *fence_ptr = 0; + + fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, + sizeof(struct packet_msg_prot), + GFP_KERNEL, &pkt_dma_addr); + if (!fence_pkt) { + dev_err(hdev->dev, + "Failed to allocate packet for H/W queue %d testing\n", + hw_queue_id); + rc = -ENOMEM; + goto free_fence_ptr; + } + + tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) | + (1 << GOYA_PKT_CTL_EB_SHIFT) | + (1 << GOYA_PKT_CTL_MB_SHIFT); + fence_pkt->ctl = cpu_to_le32(tmp); + fence_pkt->value = cpu_to_le32(fence_val); + fence_pkt->addr = cpu_to_le64(fence_dma_addr); + + rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, + sizeof(struct packet_msg_prot), + pkt_dma_addr); + if (rc) { + dev_err(hdev->dev, + "Failed to send fence packet to H/W queue %d\n", + hw_queue_id); + goto free_pkt; + } + + rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val), + 1000, GOYA_TEST_QUEUE_WAIT_USEC, true); + + hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id); + + if (rc == -ETIMEDOUT) { + dev_err(hdev->dev, + "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n", + hw_queue_id, (unsigned long long) fence_dma_addr, tmp); + rc = -EIO; + } + +free_pkt: + hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt, + pkt_dma_addr); +free_fence_ptr: + hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr, + fence_dma_addr); + return rc; +} + +int goya_test_cpu_queue(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + + /* + * check capability here as send_cpu_message() won't update the result + * value if no capability + */ + if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) + return 0; + + return hl_fw_test_cpu_queue(hdev); +} + +int goya_test_queues(struct hl_device *hdev) +{ + int i, rc, ret_val = 0; + + for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) { + rc = goya_test_queue(hdev, i); + if (rc) + ret_val = -EINVAL; + } + + return ret_val; +} + +static void *goya_dma_pool_zalloc(struct 
hl_device *hdev, size_t size, + gfp_t mem_flags, dma_addr_t *dma_handle) +{ + void *kernel_addr; + + if (size > GOYA_DMA_POOL_BLK_SIZE) + return NULL; + + kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle); + + /* Shift to the device's base physical address of host memory */ + if (kernel_addr) + *dma_handle += HOST_PHYS_BASE; + + return kernel_addr; +} + +static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr, + dma_addr_t dma_addr) +{ + /* Cancel the device's base physical address of host memory */ + dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE; + + dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr); +} + +void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, + dma_addr_t *dma_handle) +{ + void *vaddr; + + vaddr = hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle); + *dma_handle = (*dma_handle) - hdev->cpu_accessible_dma_address + + VA_CPU_ACCESSIBLE_MEM_ADDR; + + return vaddr; +} + +void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, + void *vaddr) +{ + hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr); +} + +static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir)) + return -ENOMEM; + + /* Shift to the device's base physical address of host memory */ + for_each_sg(sgl, sg, nents, i) + sg->dma_address += HOST_PHYS_BASE; + + return 0; +} + +static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + /* Cancel the device's base physical address of host memory */ + for_each_sg(sgl, sg, nents, i) + sg->dma_address -= HOST_PHYS_BASE; + + dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir); +} + +u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt) +{ + struct scatterlist *sg, *sg_next_iter; + u32 count, dma_desc_cnt; + u64 len, len_next; + dma_addr_t addr, addr_next; + + dma_desc_cnt = 0; + + for_each_sg(sgt->sgl, sg, sgt->nents, count) { + + len = sg_dma_len(sg); + addr = sg_dma_address(sg); + + if (len == 0) + break; + + while ((count + 1) < sgt->nents) { + sg_next_iter = sg_next(sg); + len_next = sg_dma_len(sg_next_iter); + addr_next = sg_dma_address(sg_next_iter); + + if (len_next == 0) + break; + + if ((addr + len == addr_next) && + (len + len_next <= DMA_MAX_TRANSFER_SIZE)) { + len += len_next; + count++; + sg = sg_next_iter; + } else { + break; + } + } + + dma_desc_cnt++; + } + + return dma_desc_cnt * sizeof(struct packet_lin_dma); +} + +static int goya_pin_memory_before_cs(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt, + u64 addr, enum dma_data_direction dir) +{ + struct hl_userptr *userptr; + int rc; + + if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize), + parser->job_userptr_list, &userptr)) + goto already_pinned; + + userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC); + if (!userptr) + return -ENOMEM; + + rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize), + userptr); + if (rc) + goto free_userptr; + + list_add_tail(&userptr->job_node, parser->job_userptr_list); + + rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl, + userptr->sgt->nents, dir); + if (rc) { + dev_err(hdev->dev, "failed to map sgt with DMA region\n"); + goto unpin_memory; + } + + userptr->dma_mapped = true; + userptr->dir = dir; + +already_pinned: + 
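+	/*
+	 * The host memory is already pinned and DMA-mapped at this point,
+	 * either by the code above or by a previous packet in the same job,
+	 * so all that remains is to account for the DMA descriptor list size
+	 * in the patched CB.
+	 */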
parser->patched_cb_size +=
+			goya_get_dma_desc_list_size(hdev, userptr->sgt);
+
+	return 0;
+
+unpin_memory:
+	list_del(&userptr->job_node);
+	hl_unpin_host_memory(hdev, userptr);
+free_userptr:
+	kfree(userptr);
+	return rc;
+}
+
+static int goya_validate_dma_pkt_host(struct hl_device *hdev,
+				struct hl_cs_parser *parser,
+				struct packet_lin_dma *user_dma_pkt)
+{
+	u64 device_memory_addr, addr;
+	enum dma_data_direction dir;
+	enum goya_dma_direction user_dir;
+	bool sram_addr = true;
+	bool skip_host_mem_pin = false;
+	bool user_memset;
+	u32 ctl;
+	int rc = 0;
+
+	ctl = le32_to_cpu(user_dma_pkt->ctl);
+
+	user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
+			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+
+	user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
+			GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
+
+	switch (user_dir) {
+	case DMA_HOST_TO_DRAM:
+		dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
+		dir = DMA_TO_DEVICE;
+		sram_addr = false;
+		addr = le64_to_cpu(user_dma_pkt->src_addr);
+		device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+		if (user_memset)
+			skip_host_mem_pin = true;
+		break;
+
+	case DMA_DRAM_TO_HOST:
+		dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
+		dir = DMA_FROM_DEVICE;
+		sram_addr = false;
+		addr = le64_to_cpu(user_dma_pkt->dst_addr);
+		device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+		break;
+
+	case DMA_HOST_TO_SRAM:
+		dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
+		dir = DMA_TO_DEVICE;
+		addr = le64_to_cpu(user_dma_pkt->src_addr);
+		device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+		if (user_memset)
+			skip_host_mem_pin = true;
+		break;
+
+	case DMA_SRAM_TO_HOST:
+		dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
+		dir = DMA_FROM_DEVICE;
+		addr = le64_to_cpu(user_dma_pkt->dst_addr);
+		device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+		break;
+	default:
+		dev_err(hdev->dev, "DMA direction is undefined\n");
+		return -EFAULT;
+	}
+
+	if (sram_addr) {
+		if (!hl_mem_area_inside_range(device_memory_addr,
+				le32_to_cpu(user_dma_pkt->tsize),
+				hdev->asic_prop.sram_user_base_address,
+				hdev->asic_prop.sram_end_address)) {
+
+			dev_err(hdev->dev,
+				"SRAM address 0x%llx + 0x%x is invalid\n",
+				device_memory_addr,
+				user_dma_pkt->tsize);
+			return -EFAULT;
+		}
+	} else {
+		if (!hl_mem_area_inside_range(device_memory_addr,
+				le32_to_cpu(user_dma_pkt->tsize),
+				hdev->asic_prop.dram_user_base_address,
+				hdev->asic_prop.dram_end_address)) {
+
+			dev_err(hdev->dev,
+				"DRAM address 0x%llx + 0x%x is invalid\n",
+				device_memory_addr,
+				user_dma_pkt->tsize);
+			return -EFAULT;
+		}
+	}
+
+	if (skip_host_mem_pin)
+		parser->patched_cb_size += sizeof(*user_dma_pkt);
+	else {
+		if ((dir == DMA_TO_DEVICE) &&
+				(parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
+			dev_err(hdev->dev,
+				"Can't DMA from host on queue other than 1\n");
+			return -EFAULT;
+		}
+
+		rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
+						addr, dir);
+	}
+
+	return rc;
+}
+
+static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
+				struct hl_cs_parser *parser,
+				struct packet_lin_dma *user_dma_pkt)
+{
+	u64 sram_memory_addr, dram_memory_addr;
+	enum goya_dma_direction user_dir;
+	u32 ctl;
+
+	ctl = le32_to_cpu(user_dma_pkt->ctl);
+	user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
+			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+
+	if (user_dir == DMA_DRAM_TO_SRAM) {
+		dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
+		dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+		sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+	} else {
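+		/*
+		 * The caller routes only DMA_DRAM_TO_SRAM and DMA_SRAM_TO_DRAM
+		 * packets to this function, so this branch handles the
+		 * SRAM --> DRAM direction.
+		 */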
dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n"); + sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr); + dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr); + } + + if (!hl_mem_area_inside_range(sram_memory_addr, + le32_to_cpu(user_dma_pkt->tsize), + hdev->asic_prop.sram_user_base_address, + hdev->asic_prop.sram_end_address)) { + dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n", + sram_memory_addr, user_dma_pkt->tsize); + return -EFAULT; + } + + if (!hl_mem_area_inside_range(dram_memory_addr, + le32_to_cpu(user_dma_pkt->tsize), + hdev->asic_prop.dram_user_base_address, + hdev->asic_prop.dram_end_address)) { + dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n", + dram_memory_addr, user_dma_pkt->tsize); + return -EFAULT; + } + + parser->patched_cb_size += sizeof(*user_dma_pkt); + + return 0; +} + +static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt) +{ + enum goya_dma_direction user_dir; + u32 ctl; + int rc; + + dev_dbg(hdev->dev, "DMA packet details:\n"); + dev_dbg(hdev->dev, "source == 0x%llx\n", + le64_to_cpu(user_dma_pkt->src_addr)); + dev_dbg(hdev->dev, "destination == 0x%llx\n", + le64_to_cpu(user_dma_pkt->dst_addr)); + dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize)); + + ctl = le32_to_cpu(user_dma_pkt->ctl); + user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >> + GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT; + + /* + * Special handling for DMA with size 0. The H/W has a bug where + * this can cause the QMAN DMA to get stuck, so block it here. + */ + if (user_dma_pkt->tsize == 0) { + dev_err(hdev->dev, + "Got DMA with size 0, might reset the device\n"); + return -EINVAL; + } + + if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM)) + rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt); + else + rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt); + + return rc; +} + +static int goya_validate_dma_pkt_mmu(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt) +{ + dev_dbg(hdev->dev, "DMA packet details:\n"); + dev_dbg(hdev->dev, "source == 0x%llx\n", + le64_to_cpu(user_dma_pkt->src_addr)); + dev_dbg(hdev->dev, "destination == 0x%llx\n", + le64_to_cpu(user_dma_pkt->dst_addr)); + dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize)); + + /* + * WA for HW-23. + * We can't allow user to read from Host using QMANs other than 1. + * PMMU and HPMMU addresses are equal, check only one of them. 
+ */
+	if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
+		hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
+				le32_to_cpu(user_dma_pkt->tsize),
+				hdev->asic_prop.pmmu.start_addr,
+				hdev->asic_prop.pmmu.end_addr)) {
+		dev_err(hdev->dev,
+			"Can't DMA from host on queue other than 1\n");
+		return -EFAULT;
+	}
+
+	if (user_dma_pkt->tsize == 0) {
+		dev_err(hdev->dev,
+			"Got DMA with size 0, might reset the device\n");
+		return -EINVAL;
+	}
+
+	parser->patched_cb_size += sizeof(*user_dma_pkt);
+
+	return 0;
+}
+
+static int goya_validate_wreg32(struct hl_device *hdev,
+				struct hl_cs_parser *parser,
+				struct packet_wreg32 *wreg_pkt)
+{
+	struct goya_device *goya = hdev->asic_specific;
+	u32 sob_start_addr, sob_end_addr;
+	u16 reg_offset;
+
+	reg_offset = le32_to_cpu(wreg_pkt->ctl) &
+			GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
+
+	dev_dbg(hdev->dev, "WREG32 packet details:\n");
+	dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
+	dev_dbg(hdev->dev, "value == 0x%x\n",
+		le32_to_cpu(wreg_pkt->value));
+
+	if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
+		dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
+			reg_offset);
+		return -EPERM;
+	}
+
+	/*
+	 * With MMU, DMA channels are not secured, so it doesn't matter where
+	 * the WR COMP will be written to because it will go out with
+	 * non-secured property
+	 */
+	if (goya->hw_cap_initialized & HW_CAP_MMU)
+		return 0;
+
+	sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+	sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
+
+	if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
+			(le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {
+
+		dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
+			wreg_pkt->value);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+static int goya_validate_cb(struct hl_device *hdev,
+			struct hl_cs_parser *parser, bool is_mmu)
+{
+	u32 cb_parsed_length = 0;
+	int rc = 0;
+
+	parser->patched_cb_size = 0;
+
+	/* cb_user_size is more than 0 so loop will always be executed */
+	while (cb_parsed_length < parser->user_cb_size) {
+		enum packet_id pkt_id;
+		u16 pkt_size;
+		struct goya_packet *user_pkt;
+
+		user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
+
+		pkt_id = (enum packet_id) (
+				(le64_to_cpu(user_pkt->header) &
+				PACKET_HEADER_PACKET_ID_MASK) >>
+					PACKET_HEADER_PACKET_ID_SHIFT);
+
+		if (!validate_packet_id(pkt_id)) {
+			dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
+			rc = -EINVAL;
+			break;
+		}
+
+		pkt_size = goya_packet_sizes[pkt_id];
+		cb_parsed_length += pkt_size;
+		if (cb_parsed_length > parser->user_cb_size) {
+			dev_err(hdev->dev,
+				"packet 0x%x is out of CB boundary\n", pkt_id);
+			rc = -EINVAL;
+			break;
+		}
+
+		switch (pkt_id) {
+		case PACKET_WREG_32:
+			/*
+			 * Although it is validated after copy in patch_cb(),
+			 * need to validate here as well because patch_cb() is
+			 * not called in MMU path while this function is called
+			 */
+			rc = goya_validate_wreg32(hdev,
+				parser, (struct packet_wreg32 *) user_pkt);
+			parser->patched_cb_size += pkt_size;
+			break;
+
+		case PACKET_WREG_BULK:
+			dev_err(hdev->dev,
+				"User not allowed to use WREG_BULK\n");
+			rc = -EPERM;
+			break;
+
+		case PACKET_MSG_PROT:
+			dev_err(hdev->dev,
+				"User not allowed to use MSG_PROT\n");
+			rc = -EPERM;
+			break;
+
+		case PACKET_CP_DMA:
+			dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
+			rc = -EPERM;
+			break;
+
+		case PACKET_STOP:
+			dev_err(hdev->dev, "User not allowed to use STOP\n");
+			rc = -EPERM;
+			break;
+
+		case PACKET_LIN_DMA:
+			if (is_mmu)
+				rc =
goya_validate_dma_pkt_mmu(hdev, parser, + (struct packet_lin_dma *) user_pkt); + else + rc = goya_validate_dma_pkt_no_mmu(hdev, parser, + (struct packet_lin_dma *) user_pkt); + break; + + case PACKET_MSG_LONG: + case PACKET_MSG_SHORT: + case PACKET_FENCE: + case PACKET_NOP: + parser->patched_cb_size += pkt_size; + break; + + default: + dev_err(hdev->dev, "Invalid packet header 0x%x\n", + pkt_id); + rc = -EINVAL; + break; + } + + if (rc) + break; + } + + /* + * The new CB should have space at the end for two MSG_PROT packets: + * 1. A packet that will act as a completion packet + * 2. A packet that will generate MSI-X interrupt + */ + parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2; + + return rc; +} + +static int goya_patch_dma_packet(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt, + struct packet_lin_dma *new_dma_pkt, + u32 *new_dma_pkt_size) +{ + struct hl_userptr *userptr; + struct scatterlist *sg, *sg_next_iter; + u32 count, dma_desc_cnt; + u64 len, len_next; + dma_addr_t dma_addr, dma_addr_next; + enum goya_dma_direction user_dir; + u64 device_memory_addr, addr; + enum dma_data_direction dir; + struct sg_table *sgt; + bool skip_host_mem_pin = false; + bool user_memset; + u32 user_rdcomp_mask, user_wrcomp_mask, ctl; + + ctl = le32_to_cpu(user_dma_pkt->ctl); + + user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >> + GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT; + + user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >> + GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT; + + if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) || + (user_dma_pkt->tsize == 0)) { + memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt)); + *new_dma_pkt_size = sizeof(*new_dma_pkt); + return 0; + } + + if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) { + addr = le64_to_cpu(user_dma_pkt->src_addr); + device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr); + dir = DMA_TO_DEVICE; + if (user_memset) + skip_host_mem_pin = true; + } else { + addr = le64_to_cpu(user_dma_pkt->dst_addr); + device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr); + dir = DMA_FROM_DEVICE; + } + + if ((!skip_host_mem_pin) && + (hl_userptr_is_pinned(hdev, addr, + le32_to_cpu(user_dma_pkt->tsize), + parser->job_userptr_list, &userptr) == false)) { + dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n", + addr, user_dma_pkt->tsize); + return -EFAULT; + } + + if ((user_memset) && (dir == DMA_TO_DEVICE)) { + memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt)); + *new_dma_pkt_size = sizeof(*user_dma_pkt); + return 0; + } + + user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK; + + user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK; + + sgt = userptr->sgt; + dma_desc_cnt = 0; + + for_each_sg(sgt->sgl, sg, sgt->nents, count) { + len = sg_dma_len(sg); + dma_addr = sg_dma_address(sg); + + if (len == 0) + break; + + while ((count + 1) < sgt->nents) { + sg_next_iter = sg_next(sg); + len_next = sg_dma_len(sg_next_iter); + dma_addr_next = sg_dma_address(sg_next_iter); + + if (len_next == 0) + break; + + if ((dma_addr + len == dma_addr_next) && + (len + len_next <= DMA_MAX_TRANSFER_SIZE)) { + len += len_next; + count++; + sg = sg_next_iter; + } else { + break; + } + } + + ctl = le32_to_cpu(user_dma_pkt->ctl); + if (likely(dma_desc_cnt)) + ctl &= ~GOYA_PKT_CTL_EB_MASK; + ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK | + GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK); + new_dma_pkt->ctl = cpu_to_le32(ctl); + new_dma_pkt->tsize = cpu_to_le32((u32) len); + + if (dir == 
DMA_TO_DEVICE) { + new_dma_pkt->src_addr = cpu_to_le64(dma_addr); + new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr); + } else { + new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr); + new_dma_pkt->dst_addr = cpu_to_le64(dma_addr); + } + + if (!user_memset) + device_memory_addr += len; + dma_desc_cnt++; + new_dma_pkt++; + } + + if (!dma_desc_cnt) { + dev_err(hdev->dev, + "Error of 0 SG entries when patching DMA packet\n"); + return -EFAULT; + } + + /* Fix the last dma packet - rdcomp/wrcomp must be as user set them */ + new_dma_pkt--; + new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask); + + *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma); + + return 0; +} + +static int goya_patch_cb(struct hl_device *hdev, + struct hl_cs_parser *parser) +{ + u32 cb_parsed_length = 0; + u32 cb_patched_cur_length = 0; + int rc = 0; + + /* cb_user_size is more than 0 so loop will always be executed */ + while (cb_parsed_length < parser->user_cb_size) { + enum packet_id pkt_id; + u16 pkt_size; + u32 new_pkt_size = 0; + struct goya_packet *user_pkt, *kernel_pkt; + + user_pkt = parser->user_cb->kernel_address + cb_parsed_length; + kernel_pkt = parser->patched_cb->kernel_address + + cb_patched_cur_length; + + pkt_id = (enum packet_id) ( + (le64_to_cpu(user_pkt->header) & + PACKET_HEADER_PACKET_ID_MASK) >> + PACKET_HEADER_PACKET_ID_SHIFT); + + if (!validate_packet_id(pkt_id)) { + dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id); + rc = -EINVAL; + break; + } + + pkt_size = goya_packet_sizes[pkt_id]; + cb_parsed_length += pkt_size; + if (cb_parsed_length > parser->user_cb_size) { + dev_err(hdev->dev, + "packet 0x%x is out of CB boundary\n", pkt_id); + rc = -EINVAL; + break; + } + + switch (pkt_id) { + case PACKET_LIN_DMA: + rc = goya_patch_dma_packet(hdev, parser, + (struct packet_lin_dma *) user_pkt, + (struct packet_lin_dma *) kernel_pkt, + &new_pkt_size); + cb_patched_cur_length += new_pkt_size; + break; + + case PACKET_WREG_32: + memcpy(kernel_pkt, user_pkt, pkt_size); + cb_patched_cur_length += pkt_size; + rc = goya_validate_wreg32(hdev, parser, + (struct packet_wreg32 *) kernel_pkt); + break; + + case PACKET_WREG_BULK: + dev_err(hdev->dev, + "User not allowed to use WREG_BULK\n"); + rc = -EPERM; + break; + + case PACKET_MSG_PROT: + dev_err(hdev->dev, + "User not allowed to use MSG_PROT\n"); + rc = -EPERM; + break; + + case PACKET_CP_DMA: + dev_err(hdev->dev, "User not allowed to use CP_DMA\n"); + rc = -EPERM; + break; + + case PACKET_STOP: + dev_err(hdev->dev, "User not allowed to use STOP\n"); + rc = -EPERM; + break; + + case PACKET_MSG_LONG: + case PACKET_MSG_SHORT: + case PACKET_FENCE: + case PACKET_NOP: + memcpy(kernel_pkt, user_pkt, pkt_size); + cb_patched_cur_length += pkt_size; + break; + + default: + dev_err(hdev->dev, "Invalid packet header 0x%x\n", + pkt_id); + rc = -EINVAL; + break; + } + + if (rc) + break; + } + + return rc; +} + +static int goya_parse_cb_mmu(struct hl_device *hdev, + struct hl_cs_parser *parser) +{ + u64 patched_cb_handle; + u32 patched_cb_size; + struct hl_cb *user_cb; + int rc; + + /* + * The new CB should have space at the end for two MSG_PROT pkt: + * 1. A packet that will act as a completion packet + * 2. 
A packet that will generate MSI-X interrupt + */ + parser->patched_cb_size = parser->user_cb_size + + sizeof(struct packet_msg_prot) * 2; + + rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, + parser->patched_cb_size, false, false, + &patched_cb_handle); + + if (rc) { + dev_err(hdev->dev, + "Failed to allocate patched CB for DMA CS %d\n", + rc); + return rc; + } + + patched_cb_handle >>= PAGE_SHIFT; + parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, + (u32) patched_cb_handle); + /* hl_cb_get should never fail here so use kernel WARN */ + WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n", + (u32) patched_cb_handle); + if (!parser->patched_cb) { + rc = -EFAULT; + goto out; + } + + /* + * The check that parser->user_cb_size <= parser->user_cb->size was done + * in validate_queue_index(). + */ + memcpy(parser->patched_cb->kernel_address, + parser->user_cb->kernel_address, + parser->user_cb_size); + + patched_cb_size = parser->patched_cb_size; + + /* validate patched CB instead of user CB */ + user_cb = parser->user_cb; + parser->user_cb = parser->patched_cb; + rc = goya_validate_cb(hdev, parser, true); + parser->user_cb = user_cb; + + if (rc) { + hl_cb_put(parser->patched_cb); + goto out; + } + + if (patched_cb_size != parser->patched_cb_size) { + dev_err(hdev->dev, "user CB size mismatch\n"); + hl_cb_put(parser->patched_cb); + rc = -EINVAL; + goto out; + } + +out: + /* + * Always call cb destroy here because we still have 1 reference + * to it by calling cb_get earlier. After the job will be completed, + * cb_put will release it, but here we want to remove it from the + * idr + */ + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, + patched_cb_handle << PAGE_SHIFT); + + return rc; +} + +static int goya_parse_cb_no_mmu(struct hl_device *hdev, + struct hl_cs_parser *parser) +{ + u64 patched_cb_handle; + int rc; + + rc = goya_validate_cb(hdev, parser, false); + + if (rc) + goto free_userptr; + + rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, + parser->patched_cb_size, false, false, + &patched_cb_handle); + if (rc) { + dev_err(hdev->dev, + "Failed to allocate patched CB for DMA CS %d\n", rc); + goto free_userptr; + } + + patched_cb_handle >>= PAGE_SHIFT; + parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, + (u32) patched_cb_handle); + /* hl_cb_get should never fail here so use kernel WARN */ + WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n", + (u32) patched_cb_handle); + if (!parser->patched_cb) { + rc = -EFAULT; + goto out; + } + + rc = goya_patch_cb(hdev, parser); + + if (rc) + hl_cb_put(parser->patched_cb); + +out: + /* + * Always call cb destroy here because we still have 1 reference + * to it by calling cb_get earlier. 
After the job is completed, + * cb_put will release it, but here we want to remove it from the + * idr. + */ + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, + patched_cb_handle << PAGE_SHIFT); + +free_userptr: + if (rc) + hl_userptr_delete_list(hdev, parser->job_userptr_list); + return rc; +} + +static int goya_parse_cb_no_ext_queue(struct hl_device *hdev, + struct hl_cs_parser *parser) +{ + struct asic_fixed_properties *asic_prop = &hdev->asic_prop; + struct goya_device *goya = hdev->asic_specific; + + if (goya->hw_cap_initialized & HW_CAP_MMU) + return 0; + + /* For internal queue jobs, just check if CB address is valid */ + if (hl_mem_area_inside_range( + (u64) (uintptr_t) parser->user_cb, + parser->user_cb_size, + asic_prop->sram_user_base_address, + asic_prop->sram_end_address)) + return 0; + + if (hl_mem_area_inside_range( + (u64) (uintptr_t) parser->user_cb, + parser->user_cb_size, + asic_prop->dram_user_base_address, + asic_prop->dram_end_address)) + return 0; + + dev_err(hdev->dev, + "Internal CB address 0x%px + 0x%x is not in SRAM nor in DRAM\n", + parser->user_cb, parser->user_cb_size); + + return -EFAULT; +} + +int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser) +{ + struct goya_device *goya = hdev->asic_specific; + + if (parser->queue_type == QUEUE_TYPE_INT) + return goya_parse_cb_no_ext_queue(hdev, parser); + + if (goya->hw_cap_initialized & HW_CAP_MMU) + return goya_parse_cb_mmu(hdev, parser); + else + return goya_parse_cb_no_mmu(hdev, parser); +} + +void goya_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address, + u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec, + bool eb) +{ + struct packet_msg_prot *cq_pkt; + u32 tmp; + + cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2); + + tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) | + (1 << GOYA_PKT_CTL_EB_SHIFT) | + (1 << GOYA_PKT_CTL_MB_SHIFT); + cq_pkt->ctl = cpu_to_le32(tmp); + cq_pkt->value = cpu_to_le32(cq_val); + cq_pkt->addr = cpu_to_le64(cq_addr); + + cq_pkt++; + + tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) | + (1 << GOYA_PKT_CTL_MB_SHIFT); + cq_pkt->ctl = cpu_to_le32(tmp); + cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF); + cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF); +} + +void goya_update_eq_ci(struct hl_device *hdev, u32 val) +{ + WREG32(mmCPU_EQ_CI, val); +} + +void goya_restore_phase_topology(struct hl_device *hdev) +{ + +} + +static void goya_clear_sm_regs(struct hl_device *hdev) +{ + int i, num_of_sob_in_longs, num_of_mon_in_longs; + + num_of_sob_in_longs = + ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4); + + num_of_mon_in_longs = + ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4); + + for (i = 0 ; i < num_of_sob_in_longs ; i += 4) + WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0); + + for (i = 0 ; i < num_of_mon_in_longs ; i += 4) + WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0); + + /* Flush all WREG to prevent race */ + i = RREG32(mmSYNC_MNGR_SOB_OBJ_0); +} + +/* + * goya_debugfs_read32 - read a 32-bit value from a given device or a host mapped + * address. + * + * @hdev: pointer to hl_device structure + * @addr: device or host mapped address + * @val: returned value + * + * In case of a DDR address that is not mapped into the default aperture that + * the DDR bar exposes, the function will configure the iATU so that the DDR + * bar will be positioned at a base address that allows reading from the + * required address. 
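The previous DDR bar base is restored before the function returns, so the aperture is left as it was found. 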
Configuring the iATU during normal operation can + * lead to undefined behavior and should therefore be done with extreme care. + * + */ +static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 ddr_bar_addr; + int rc = 0; + + if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { + *val = RREG32(addr - CFG_BASE); + + } else if ((addr >= SRAM_BASE_ADDR) && + (addr < SRAM_BASE_ADDR + SRAM_SIZE)) { + + *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] + + (addr - SRAM_BASE_ADDR)); + + } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) { + + u64 bar_base_addr = DRAM_PHYS_BASE + + (addr & ~(prop->dram_pci_bar_size - 0x1ull)); + + ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr); + if (ddr_bar_addr != U64_MAX) { + *val = readl(hdev->pcie_bar[DDR_BAR_ID] + + (addr - bar_base_addr)); + + ddr_bar_addr = goya_set_ddr_bar_base(hdev, + ddr_bar_addr); + } + if (ddr_bar_addr == U64_MAX) + rc = -EIO; + + } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) { + *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE); + + } else { + rc = -EFAULT; + } + + return rc; +} + +/* + * goya_debugfs_write32 - write a 32-bit value to a given device or a host mapped + * address. + * + * @hdev: pointer to hl_device structure + * @addr: device or host mapped address + * @val: value to write + * + * In case of a DDR address that is not mapped into the default aperture that + * the DDR bar exposes, the function will configure the iATU so that the DDR + * bar will be positioned at a base address that allows writing to the + * required address. Configuring the iATU during normal operation can + * lead to undefined behavior and should therefore be done with extreme care. + * + */ +static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 ddr_bar_addr; + int rc = 0; + + if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { + WREG32(addr - CFG_BASE, val); + + } else if ((addr >= SRAM_BASE_ADDR) && + (addr < SRAM_BASE_ADDR + SRAM_SIZE)) { + + writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] + + (addr - SRAM_BASE_ADDR)); + + } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) { + + u64 bar_base_addr = DRAM_PHYS_BASE + + (addr & ~(prop->dram_pci_bar_size - 0x1ull)); + + ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr); + if (ddr_bar_addr != U64_MAX) { + writel(val, hdev->pcie_bar[DDR_BAR_ID] + + (addr - bar_base_addr)); + + ddr_bar_addr = goya_set_ddr_bar_base(hdev, + ddr_bar_addr); + } + if (ddr_bar_addr == U64_MAX) + rc = -EIO; + + } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) { + *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val; + + } else { + rc = -EFAULT; + } + + return rc; +} + +static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 ddr_bar_addr; + int rc = 0; + + if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { + u32 val_l = RREG32(addr - CFG_BASE); + u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE); + + *val = (((u64) val_h) << 32) | val_l; + + } else if ((addr >= SRAM_BASE_ADDR) && + (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) { + + *val = readq(hdev->pcie_bar[SRAM_CFG_BAR_ID] + + (addr - SRAM_BASE_ADDR)); + + } else if (addr <= + DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) { + + u64 bar_base_addr = DRAM_PHYS_BASE + + (addr & ~(prop->dram_pci_bar_size - 
0x1ull)); + + ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr); + if (ddr_bar_addr != U64_MAX) { + *val = readq(hdev->pcie_bar[DDR_BAR_ID] + + (addr - bar_base_addr)); + + ddr_bar_addr = goya_set_ddr_bar_base(hdev, + ddr_bar_addr); + } + if (ddr_bar_addr == U64_MAX) + rc = -EIO; + + } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) { + *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE); + + } else { + rc = -EFAULT; + } + + return rc; +} + +static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 ddr_bar_addr; + int rc = 0; + + if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { + WREG32(addr - CFG_BASE, lower_32_bits(val)); + WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val)); + + } else if ((addr >= SRAM_BASE_ADDR) && + (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) { + + writeq(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] + + (addr - SRAM_BASE_ADDR)); + + } else if (addr <= + DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) { + + u64 bar_base_addr = DRAM_PHYS_BASE + + (addr & ~(prop->dram_pci_bar_size - 0x1ull)); + + ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr); + if (ddr_bar_addr != U64_MAX) { + writeq(val, hdev->pcie_bar[DDR_BAR_ID] + + (addr - bar_base_addr)); + + ddr_bar_addr = goya_set_ddr_bar_base(hdev, + ddr_bar_addr); + } + if (ddr_bar_addr == U64_MAX) + rc = -EIO; + + } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) { + *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val; + + } else { + rc = -EFAULT; + } + + return rc; +} + +static u64 goya_read_pte(struct hl_device *hdev, u64 addr) +{ + struct goya_device *goya = hdev->asic_specific; + + if (hdev->hard_reset_pending) + return U64_MAX; + + return readq(hdev->pcie_bar[DDR_BAR_ID] + + (addr - goya->ddr_bar_cur_addr)); +} + +static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val) +{ + struct goya_device *goya = hdev->asic_specific; + + if (hdev->hard_reset_pending) + return; + + writeq(val, hdev->pcie_bar[DDR_BAR_ID] + + (addr - goya->ddr_bar_cur_addr)); +} + +static const char *_goya_get_event_desc(u16 event_type) +{ + switch (event_type) { + case GOYA_ASYNC_EVENT_ID_PCIE_IF: + return "PCIe_if"; + case GOYA_ASYNC_EVENT_ID_TPC0_ECC: + case GOYA_ASYNC_EVENT_ID_TPC1_ECC: + case GOYA_ASYNC_EVENT_ID_TPC2_ECC: + case GOYA_ASYNC_EVENT_ID_TPC3_ECC: + case GOYA_ASYNC_EVENT_ID_TPC4_ECC: + case GOYA_ASYNC_EVENT_ID_TPC5_ECC: + case GOYA_ASYNC_EVENT_ID_TPC6_ECC: + case GOYA_ASYNC_EVENT_ID_TPC7_ECC: + return "TPC%d_ecc"; + case GOYA_ASYNC_EVENT_ID_MME_ECC: + return "MME_ecc"; + case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT: + return "MME_ecc_ext"; + case GOYA_ASYNC_EVENT_ID_MMU_ECC: + return "MMU_ecc"; + case GOYA_ASYNC_EVENT_ID_DMA_MACRO: + return "DMA_macro"; + case GOYA_ASYNC_EVENT_ID_DMA_ECC: + return "DMA_ecc"; + case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC: + return "CPU_if_ecc"; + case GOYA_ASYNC_EVENT_ID_PSOC_MEM: + return "PSOC_mem"; + case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT: + return "PSOC_coresight"; + case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29: + return "SRAM%d"; + case GOYA_ASYNC_EVENT_ID_GIC500: + return "GIC500"; + case GOYA_ASYNC_EVENT_ID_PLL0 ... 
GOYA_ASYNC_EVENT_ID_PLL6: + return "PLL%d"; + case GOYA_ASYNC_EVENT_ID_AXI_ECC: + return "AXI_ecc"; + case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC: + return "L2_ram_ecc"; + case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET: + return "PSOC_gpio_05_sw_reset"; + case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT: + return "PSOC_gpio_10_vrhot_icrit"; + case GOYA_ASYNC_EVENT_ID_PCIE_DEC: + return "PCIe_dec"; + case GOYA_ASYNC_EVENT_ID_TPC0_DEC: + case GOYA_ASYNC_EVENT_ID_TPC1_DEC: + case GOYA_ASYNC_EVENT_ID_TPC2_DEC: + case GOYA_ASYNC_EVENT_ID_TPC3_DEC: + case GOYA_ASYNC_EVENT_ID_TPC4_DEC: + case GOYA_ASYNC_EVENT_ID_TPC5_DEC: + case GOYA_ASYNC_EVENT_ID_TPC6_DEC: + case GOYA_ASYNC_EVENT_ID_TPC7_DEC: + return "TPC%d_dec"; + case GOYA_ASYNC_EVENT_ID_MME_WACS: + return "MME_wacs"; + case GOYA_ASYNC_EVENT_ID_MME_WACSD: + return "MME_wacsd"; + case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER: + return "CPU_axi_splitter"; + case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC: + return "PSOC_axi_dec"; + case GOYA_ASYNC_EVENT_ID_PSOC: + return "PSOC"; + case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR: + return "TPC%d_krn_err"; + case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ: + return "TPC%d_cq"; + case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM: + return "TPC%d_qm"; + case GOYA_ASYNC_EVENT_ID_MME_QM: + return "MME_qm"; + case GOYA_ASYNC_EVENT_ID_MME_CMDQ: + return "MME_cq"; + case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM: + return "DMA%d_qm"; + case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH: + return "DMA%d_ch"; + case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU: + return "TPC%d_bmon_spmu"; + case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4: + return "DMA_bm_ch%d"; + case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S: + return "POWER_ENV_S"; + case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E: + return "POWER_ENV_E"; + case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S: + return "THERMAL_ENV_S"; + case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E: + return "THERMAL_ENV_E"; + default: + return "N/A"; + } +} + +static void goya_get_event_desc(u16 event_type, char *desc, size_t size) +{ + u8 index; + + switch (event_type) { + case GOYA_ASYNC_EVENT_ID_TPC0_ECC: + case GOYA_ASYNC_EVENT_ID_TPC1_ECC: + case GOYA_ASYNC_EVENT_ID_TPC2_ECC: + case GOYA_ASYNC_EVENT_ID_TPC3_ECC: + case GOYA_ASYNC_EVENT_ID_TPC4_ECC: + case GOYA_ASYNC_EVENT_ID_TPC5_ECC: + case GOYA_ASYNC_EVENT_ID_TPC6_ECC: + case GOYA_ASYNC_EVENT_ID_TPC7_ECC: + index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_ECC) / 3; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29: + index = event_type - GOYA_ASYNC_EVENT_ID_SRAM0; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_PLL0 ... 
GOYA_ASYNC_EVENT_ID_PLL6: + index = event_type - GOYA_ASYNC_EVENT_ID_PLL0; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_TPC0_DEC: + case GOYA_ASYNC_EVENT_ID_TPC1_DEC: + case GOYA_ASYNC_EVENT_ID_TPC2_DEC: + case GOYA_ASYNC_EVENT_ID_TPC3_DEC: + case GOYA_ASYNC_EVENT_ID_TPC4_DEC: + case GOYA_ASYNC_EVENT_ID_TPC5_DEC: + case GOYA_ASYNC_EVENT_ID_TPC6_DEC: + case GOYA_ASYNC_EVENT_ID_TPC7_DEC: + index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR: + index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ: + index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM: + index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM: + index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH: + index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU: + index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU) / 10; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... 
GOYA_ASYNC_EVENT_ID_DMA_BM_CH4: + index = event_type - GOYA_ASYNC_EVENT_ID_DMA_BM_CH0; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + default: + snprintf(desc, size, _goya_get_event_desc(event_type)); + break; + } +} + +static void goya_print_razwi_info(struct hl_device *hdev) +{ + if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) { + dev_err_ratelimited(hdev->dev, "Illegal write to LBW\n"); + WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0); + } + + if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) { + dev_err_ratelimited(hdev->dev, "Illegal read from LBW\n"); + WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0); + } + + if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) { + dev_err_ratelimited(hdev->dev, "Illegal write to HBW\n"); + WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0); + } + + if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) { + dev_err_ratelimited(hdev->dev, "Illegal read from HBW\n"); + WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0); + } +} + +static void goya_print_mmu_error_info(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + u64 addr; + u32 val; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return; + + val = RREG32(mmMMU_PAGE_ERROR_CAPTURE); + if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) { + addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK; + addr <<= 32; + addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA); + + dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n", + addr); + + WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0); + } +} + +static void goya_print_irq_info(struct hl_device *hdev, u16 event_type, + bool razwi) +{ + char desc[20] = ""; + + goya_get_event_desc(event_type, desc, sizeof(desc)); + dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n", + event_type, desc); + + if (razwi) { + goya_print_razwi_info(hdev); + goya_print_mmu_error_info(hdev); + } +} + +static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr, + size_t irq_arr_size) +{ + struct cpucp_unmask_irq_arr_packet *pkt; + size_t total_pkt_size; + long result; + int rc; + int irq_num_entries, irq_arr_index; + __le32 *goya_irq_arr; + + total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) + + irq_arr_size; + + /* data should be aligned to 8 bytes in order for CPU-CP to copy it */ + total_pkt_size = (total_pkt_size + 0x7) & ~0x7; + + /* total_pkt_size is cast to u16 later on */ + if (total_pkt_size > USHRT_MAX) { + dev_err(hdev->dev, "too many elements in IRQ array\n"); + return -EINVAL; + } + + pkt = kzalloc(total_pkt_size, GFP_KERNEL); + if (!pkt) + return -ENOMEM; + + irq_num_entries = irq_arr_size / sizeof(irq_arr[0]); + pkt->length = cpu_to_le32(irq_num_entries); + + /* We must perform any necessary endianness conversion on the irq + * array being passed to the Goya hardware + */ + for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs; + irq_arr_index < irq_num_entries ; irq_arr_index++) + goya_irq_arr[irq_arr_index] = + cpu_to_le32(irq_arr[irq_arr_index]); + + pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY << + CPUCP_PKT_CTL_OPCODE_SHIFT); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt, + total_pkt_size, 0, &result); + + if (rc) + dev_err(hdev->dev, "failed to unmask IRQ array\n"); + + kfree(pkt); + + return rc; +} + +static int goya_soft_reset_late_init(struct hl_device *hdev) +{ + /* + * Unmask all IRQs since some could have been received + * during the soft reset + */ + return goya_unmask_irq_arr(hdev, goya_all_events, + sizeof(goya_all_events)); +} + +static int goya_unmask_irq(struct hl_device *hdev, u16 
event_type) +{ + struct cpucp_packet pkt; + long result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.value = cpu_to_le64(event_type); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, &result); + + if (rc) + dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d\n", event_type); + + return rc; +} + +static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type) +{ + switch (event_type) { + case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S: + hdev->clk_throttling_reason |= HL_CLK_THROTTLE_POWER; + dev_info_ratelimited(hdev->dev, + "Clock throttling due to power consumption\n"); + break; + case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E: + hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_POWER; + dev_info_ratelimited(hdev->dev, + "Power envelope is safe, back to optimal clock\n"); + break; + case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S: + hdev->clk_throttling_reason |= HL_CLK_THROTTLE_THERMAL; + dev_info_ratelimited(hdev->dev, + "Clock throttling due to overheating\n"); + break; + case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E: + hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_THERMAL; + dev_info_ratelimited(hdev->dev, + "Thermal envelope is safe, back to optimal clock\n"); + break; + + default: + dev_err(hdev->dev, "Received invalid clock change event %d\n", + event_type); + break; + } +} + +void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) +{ + u32 ctl = le32_to_cpu(eq_entry->hdr.ctl); + u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK) + >> EQ_CTL_EVENT_TYPE_SHIFT); + struct goya_device *goya = hdev->asic_specific; + + if (event_type >= GOYA_ASYNC_EVENT_ID_SIZE) { + dev_err(hdev->dev, "Event type %u exceeds maximum of %u\n", + event_type, GOYA_ASYNC_EVENT_ID_SIZE - 1); + return; + } + + goya->events_stat[event_type]++; + goya->events_stat_aggregate[event_type]++; + + switch (event_type) { + case GOYA_ASYNC_EVENT_ID_PCIE_IF: + case GOYA_ASYNC_EVENT_ID_TPC0_ECC: + case GOYA_ASYNC_EVENT_ID_TPC1_ECC: + case GOYA_ASYNC_EVENT_ID_TPC2_ECC: + case GOYA_ASYNC_EVENT_ID_TPC3_ECC: + case GOYA_ASYNC_EVENT_ID_TPC4_ECC: + case GOYA_ASYNC_EVENT_ID_TPC5_ECC: + case GOYA_ASYNC_EVENT_ID_TPC6_ECC: + case GOYA_ASYNC_EVENT_ID_TPC7_ECC: + case GOYA_ASYNC_EVENT_ID_MME_ECC: + case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT: + case GOYA_ASYNC_EVENT_ID_MMU_ECC: + case GOYA_ASYNC_EVENT_ID_DMA_MACRO: + case GOYA_ASYNC_EVENT_ID_DMA_ECC: + case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC: + case GOYA_ASYNC_EVENT_ID_PSOC_MEM: + case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT: + case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29: + case GOYA_ASYNC_EVENT_ID_GIC500: + case GOYA_ASYNC_EVENT_ID_PLL0 ... 
GOYA_ASYNC_EVENT_ID_PLL6: + case GOYA_ASYNC_EVENT_ID_AXI_ECC: + case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC: + case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET: + goya_print_irq_info(hdev, event_type, false); + if (hdev->hard_reset_on_fw_events) + hl_device_reset(hdev, true, false); + break; + + case GOYA_ASYNC_EVENT_ID_PCIE_DEC: + case GOYA_ASYNC_EVENT_ID_TPC0_DEC: + case GOYA_ASYNC_EVENT_ID_TPC1_DEC: + case GOYA_ASYNC_EVENT_ID_TPC2_DEC: + case GOYA_ASYNC_EVENT_ID_TPC3_DEC: + case GOYA_ASYNC_EVENT_ID_TPC4_DEC: + case GOYA_ASYNC_EVENT_ID_TPC5_DEC: + case GOYA_ASYNC_EVENT_ID_TPC6_DEC: + case GOYA_ASYNC_EVENT_ID_TPC7_DEC: + case GOYA_ASYNC_EVENT_ID_MME_WACS: + case GOYA_ASYNC_EVENT_ID_MME_WACSD: + case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER: + case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC: + case GOYA_ASYNC_EVENT_ID_PSOC: + case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM: + case GOYA_ASYNC_EVENT_ID_MME_QM: + case GOYA_ASYNC_EVENT_ID_MME_CMDQ: + case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM: + case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH: + goya_print_irq_info(hdev, event_type, true); + goya_unmask_irq(hdev, event_type); + break; + + case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT: + case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... 
GOYA_ASYNC_EVENT_ID_DMA_BM_CH4: + goya_print_irq_info(hdev, event_type, false); + goya_unmask_irq(hdev, event_type); + break; + + case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S: + case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E: + case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S: + case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E: + goya_print_clk_change_info(hdev, event_type); + goya_unmask_irq(hdev, event_type); + break; + + default: + dev_err(hdev->dev, "Received invalid H/W interrupt %d\n", + event_type); + break; + } +} + +void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size) +{ + struct goya_device *goya = hdev->asic_specific; + + if (aggregate) { + *size = (u32) sizeof(goya->events_stat_aggregate); + return goya->events_stat_aggregate; + } + + *size = (u32) sizeof(goya->events_stat); + return goya->events_stat; +} + +static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, + u64 val, bool is_dram) +{ + struct packet_lin_dma *lin_dma_pkt; + struct hl_cs_job *job; + u32 cb_size, ctl; + struct hl_cb *cb; + int rc, lin_dma_pkts_cnt; + + lin_dma_pkts_cnt = DIV_ROUND_UP_ULL(size, SZ_2G); + cb_size = lin_dma_pkts_cnt * sizeof(struct packet_lin_dma) + + sizeof(struct packet_msg_prot); + cb = hl_cb_kernel_create(hdev, cb_size, false); + if (!cb) + return -ENOMEM; + + lin_dma_pkt = cb->kernel_address; + + do { + memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt)); + + ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) | + (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) | + (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) | + (1 << GOYA_PKT_CTL_RB_SHIFT) | + (1 << GOYA_PKT_CTL_MB_SHIFT)); + ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) << + GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT; + lin_dma_pkt->ctl = cpu_to_le32(ctl); + + lin_dma_pkt->src_addr = cpu_to_le64(val); + lin_dma_pkt->dst_addr = cpu_to_le64(addr); + if (lin_dma_pkts_cnt > 1) + lin_dma_pkt->tsize = cpu_to_le32(SZ_2G); + else + lin_dma_pkt->tsize = cpu_to_le32(size); + + size -= SZ_2G; + addr += SZ_2G; + lin_dma_pkt++; + } while (--lin_dma_pkts_cnt); + + job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true); + if (!job) { + dev_err(hdev->dev, "Failed to allocate a new job\n"); + rc = -ENOMEM; + goto release_cb; + } + + job->id = 0; + job->user_cb = cb; + job->user_cb->cs_cnt++; + job->user_cb_size = cb_size; + job->hw_queue_id = GOYA_QUEUE_ID_DMA_0; + job->patched_cb = job->user_cb; + job->job_cb_size = job->user_cb_size; + + hl_debugfs_add_job(hdev, job); + + rc = goya_send_job_on_qman0(hdev, job); + + hl_debugfs_remove_job(hdev, job); + kfree(job); + cb->cs_cnt--; + +release_cb: + hl_cb_put(cb); + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + + return rc; +} + +int goya_context_switch(struct hl_device *hdev, u32 asid) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 addr = prop->sram_base_address, sob_addr; + u32 size = hdev->pldm ? 
0x10000 : prop->sram_size; + u64 val = 0x7777777777777777ull; + int rc, dma_id; + u32 channel_off = mmDMA_CH_1_WR_COMP_ADDR_LO - + mmDMA_CH_0_WR_COMP_ADDR_LO; + + rc = goya_memset_device_memory(hdev, addr, size, val, false); + if (rc) { + dev_err(hdev->dev, "Failed to clear SRAM in context switch\n"); + return rc; + } + + /* we need to reset registers that the user is allowed to change */ + sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007; + WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO, lower_32_bits(sob_addr)); + + for (dma_id = 1 ; dma_id < NUMBER_OF_EXT_HW_QUEUES ; dma_id++) { + sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 + + (dma_id - 1) * 4; + WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + channel_off * dma_id, + lower_32_bits(sob_addr)); + } + + WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020); + + goya_mmu_prepare(hdev, asid); + + goya_clear_sm_regs(hdev); + + return 0; +} + +static int goya_mmu_clear_pgt_range(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct goya_device *goya = hdev->asic_specific; + u64 addr = prop->mmu_pgt_addr; + u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE + + MMU_CACHE_MNG_SIZE; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return 0; + + return goya_memset_device_memory(hdev, addr, size, 0, true); +} + +static int goya_mmu_set_dram_default_page(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + u64 addr = hdev->asic_prop.mmu_dram_default_page_addr; + u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE; + u64 val = 0x9999999999999999ull; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return 0; + + return goya_memset_device_memory(hdev, addr, size, val, true); +} + +static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct goya_device *goya = hdev->asic_specific; + s64 off, cpu_off; + int rc; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return 0; + + for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) { + rc = hl_mmu_map(hdev->kernel_ctx, prop->dram_base_address + off, + prop->dram_base_address + off, PAGE_SIZE_2MB, + (off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE); + if (rc) { + dev_err(hdev->dev, "Map failed for address 0x%llx\n", + prop->dram_base_address + off); + goto unmap; + } + } + + if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) { + rc = hl_mmu_map(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR, + hdev->cpu_accessible_dma_address, PAGE_SIZE_2MB, true); + + if (rc) { + dev_err(hdev->dev, + "Map failed for CPU accessible memory\n"); + off -= PAGE_SIZE_2MB; + goto unmap; + } + } else { + for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) { + rc = hl_mmu_map(hdev->kernel_ctx, + VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off, + hdev->cpu_accessible_dma_address + cpu_off, + PAGE_SIZE_4KB, true); + if (rc) { + dev_err(hdev->dev, + "Map failed for CPU accessible memory\n"); + cpu_off -= PAGE_SIZE_4KB; + goto unmap_cpu; + } + } + } + + goya_mmu_prepare_reg(hdev, mmCPU_IF_ARUSER_OVR, HL_KERNEL_ASID_ID); + goya_mmu_prepare_reg(hdev, mmCPU_IF_AWUSER_OVR, HL_KERNEL_ASID_ID); + WREG32(mmCPU_IF_ARUSER_OVR_EN, 0x7FF); + WREG32(mmCPU_IF_AWUSER_OVR_EN, 0x7FF); + + /* Make sure configuration is flushed to device */ + RREG32(mmCPU_IF_AWUSER_OVR_EN); + + goya->device_cpu_mmu_mappings_done = true; + + return 0; + +unmap_cpu: + for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB) + if (hl_mmu_unmap(hdev->kernel_ctx, + VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off, + PAGE_SIZE_4KB, true)) + dev_warn_ratelimited(hdev->dev, + 
"failed to unmap address 0x%llx\n", + VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off); +unmap: + for (; off >= 0 ; off -= PAGE_SIZE_2MB) + if (hl_mmu_unmap(hdev->kernel_ctx, + prop->dram_base_address + off, PAGE_SIZE_2MB, + true)) + dev_warn_ratelimited(hdev->dev, + "failed to unmap address 0x%llx\n", + prop->dram_base_address + off); + + return rc; +} + +void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct goya_device *goya = hdev->asic_specific; + u32 off, cpu_off; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return; + + if (!goya->device_cpu_mmu_mappings_done) + return; + + WREG32(mmCPU_IF_ARUSER_OVR_EN, 0); + WREG32(mmCPU_IF_AWUSER_OVR_EN, 0); + + if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) { + if (hl_mmu_unmap(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR, + PAGE_SIZE_2MB, true)) + dev_warn(hdev->dev, + "Failed to unmap CPU accessible memory\n"); + } else { + for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) + if (hl_mmu_unmap(hdev->kernel_ctx, + VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off, + PAGE_SIZE_4KB, + (cpu_off + PAGE_SIZE_4KB) >= SZ_2M)) + dev_warn_ratelimited(hdev->dev, + "failed to unmap address 0x%llx\n", + VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off); + } + + for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) + if (hl_mmu_unmap(hdev->kernel_ctx, + prop->dram_base_address + off, PAGE_SIZE_2MB, + (off + PAGE_SIZE_2MB) >= CPU_FW_IMAGE_SIZE)) + dev_warn_ratelimited(hdev->dev, + "Failed to unmap address 0x%llx\n", + prop->dram_base_address + off); + + goya->device_cpu_mmu_mappings_done = false; +} + +static void goya_mmu_prepare(struct hl_device *hdev, u32 asid) +{ + struct goya_device *goya = hdev->asic_specific; + int i; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return; + + if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) { + WARN(1, "asid %u is too big\n", asid); + return; + } + + /* zero the MMBP and ASID bits and then set the ASID */ + for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++) + goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid); +} + +static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, + u32 flags) +{ + struct goya_device *goya = hdev->asic_specific; + u32 status, timeout_usec; + int rc; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU) || + hdev->hard_reset_pending) + return 0; + + /* no need in L1 only invalidation in Goya */ + if (!is_hard) + return 0; + + if (hdev->pldm) + timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC; + else + timeout_usec = MMU_CONFIG_TIMEOUT_USEC; + + mutex_lock(&hdev->mmu_cache_lock); + + /* L0 & L1 invalidation */ + WREG32(mmSTLB_INV_ALL_START, 1); + + rc = hl_poll_timeout( + hdev, + mmSTLB_INV_ALL_START, + status, + !status, + 1000, + timeout_usec); + + mutex_unlock(&hdev->mmu_cache_lock); + + if (rc) { + dev_err_ratelimited(hdev->dev, + "MMU cache invalidation timeout\n"); + hl_device_reset(hdev, true, false); + } + + return rc; +} + +static int goya_mmu_invalidate_cache_range(struct hl_device *hdev, + bool is_hard, u32 asid, u64 va, u64 size) +{ + struct goya_device *goya = hdev->asic_specific; + u32 status, timeout_usec, inv_data, pi; + int rc; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU) || + hdev->hard_reset_pending) + return 0; + + /* no need in L1 only invalidation in Goya */ + if (!is_hard) + return 0; + + if (hdev->pldm) + timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC; + else + timeout_usec = MMU_CONFIG_TIMEOUT_USEC; + + mutex_lock(&hdev->mmu_cache_lock); + + /* + * TODO: currently invalidate entire L0 & 
L1 as in regular hard + * invalidation. Need to apply invalidation of specific cache lines with + * mask of ASID & VA & size. + * Note that L1 will be flushed entirely in any case. + */ + + /* L0 & L1 invalidation */ + inv_data = RREG32(mmSTLB_CACHE_INV); + /* PI is 8 bit */ + pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF; + WREG32(mmSTLB_CACHE_INV, + (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi); + + rc = hl_poll_timeout( + hdev, + mmSTLB_INV_CONSUMER_INDEX, + status, + status == pi, + 1000, + timeout_usec); + + mutex_unlock(&hdev->mmu_cache_lock); + + if (rc) { + dev_err_ratelimited(hdev->dev, + "MMU cache invalidation timeout\n"); + hl_device_reset(hdev, true, false); + } + + return rc; +} + +int goya_send_heartbeat(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) + return 0; + + return hl_fw_send_heartbeat(hdev); +} + +int goya_cpucp_info_get(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 dram_size; + int rc; + + if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) + return 0; + + rc = hl_fw_cpucp_info_get(hdev); + if (rc) + return rc; + + dram_size = le64_to_cpu(prop->cpucp_info.dram_size); + if (dram_size) { + if ((!is_power_of_2(dram_size)) || + (dram_size < DRAM_PHYS_DEFAULT_SIZE)) { + dev_err(hdev->dev, + "F/W reported invalid DRAM size %llu. Trying to use default size\n", + dram_size); + dram_size = DRAM_PHYS_DEFAULT_SIZE; + } + + prop->dram_size = dram_size; + prop->dram_end_address = prop->dram_base_address + dram_size; + } + + if (!strlen(prop->cpucp_info.card_name)) + strncpy(prop->cpucp_info.card_name, GOYA_DEFAULT_CARD_NAME, + CARD_NAME_MAX_LEN); + + return 0; +} + +static void goya_set_clock_gating(struct hl_device *hdev) +{ + /* clock gating not supported in Goya */ +} + +static void goya_disable_clock_gating(struct hl_device *hdev) +{ + /* clock gating not supported in Goya */ +} + +static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask, + struct seq_file *s) +{ + const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n"; + const char *dma_fmt = "%-5d%-9s%#-14x%#x\n"; + u32 qm_glbl_sts0, cmdq_glbl_sts0, dma_core_sts0, tpc_cfg_sts, + mme_arch_sts; + bool is_idle = true, is_eng_idle; + u64 offset; + int i; + + if (s) + seq_puts(s, "\nDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0\n" + "--- ------- ------------ -------------\n"); + + offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0; + + for (i = 0 ; i < DMA_MAX_NUM ; i++) { + qm_glbl_sts0 = RREG32(mmDMA_QM_0_GLBL_STS0 + i * offset); + dma_core_sts0 = RREG32(mmDMA_CH_0_STS0 + i * offset); + is_eng_idle = IS_DMA_QM_IDLE(qm_glbl_sts0) && + IS_DMA_IDLE(dma_core_sts0); + is_idle &= is_eng_idle; + + if (mask) + *mask |= ((u64) !is_eng_idle) << + (GOYA_ENGINE_ID_DMA_0 + i); + if (s) + seq_printf(s, dma_fmt, i, is_eng_idle ? 
"Y" : "N", + qm_glbl_sts0, dma_core_sts0); + } + + if (s) + seq_puts(s, + "\nTPC is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 CFG_STATUS\n" + "--- ------- ------------ -------------- ----------\n"); + + offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0; + + for (i = 0 ; i < TPC_MAX_NUM ; i++) { + qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + i * offset); + cmdq_glbl_sts0 = RREG32(mmTPC0_CMDQ_GLBL_STS0 + i * offset); + tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + i * offset); + is_eng_idle = IS_TPC_QM_IDLE(qm_glbl_sts0) && + IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) && + IS_TPC_IDLE(tpc_cfg_sts); + is_idle &= is_eng_idle; + + if (mask) + *mask |= ((u64) !is_eng_idle) << + (GOYA_ENGINE_ID_TPC_0 + i); + if (s) + seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N", + qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts); + } + + if (s) + seq_puts(s, + "\nMME is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 ARCH_STATUS\n" + "--- ------- ------------ -------------- -----------\n"); + + qm_glbl_sts0 = RREG32(mmMME_QM_GLBL_STS0); + cmdq_glbl_sts0 = RREG32(mmMME_CMDQ_GLBL_STS0); + mme_arch_sts = RREG32(mmMME_ARCH_STATUS); + is_eng_idle = IS_MME_QM_IDLE(qm_glbl_sts0) && + IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) && + IS_MME_IDLE(mme_arch_sts); + is_idle &= is_eng_idle; + + if (mask) + *mask |= ((u64) !is_eng_idle) << GOYA_ENGINE_ID_MME_0; + if (s) { + seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0, + cmdq_glbl_sts0, mme_arch_sts); + seq_puts(s, "\n"); + } + + return is_idle; +} + +static void goya_hw_queues_lock(struct hl_device *hdev) + __acquires(&goya->hw_queues_lock) +{ + struct goya_device *goya = hdev->asic_specific; + + spin_lock(&goya->hw_queues_lock); +} + +static void goya_hw_queues_unlock(struct hl_device *hdev) + __releases(&goya->hw_queues_lock) +{ + struct goya_device *goya = hdev->asic_specific; + + spin_unlock(&goya->hw_queues_lock); +} + +static u32 goya_get_pci_id(struct hl_device *hdev) +{ + return hdev->pdev->device; +} + +static int goya_get_eeprom_data(struct hl_device *hdev, void *data, + size_t max_size) +{ + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) + return 0; + + return hl_fw_get_eeprom_data(hdev, data, max_size); +} + +static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev) +{ + return RREG32(mmHW_STATE); +} + +static int goya_ctx_init(struct hl_ctx *ctx) +{ + return 0; +} + +u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx) +{ + return cq_idx; +} + +static u32 goya_get_signal_cb_size(struct hl_device *hdev) +{ + return 0; +} + +static u32 goya_get_wait_cb_size(struct hl_device *hdev) +{ + return 0; +} + +static void goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id) +{ + +} + +static void goya_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id, + u16 sob_val, u16 mon_id, u32 q_idx) +{ + +} + +static void goya_reset_sob(struct hl_device *hdev, void *data) +{ + +} + +static void goya_set_dma_mask_from_fw(struct hl_device *hdev) +{ + if (RREG32(mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0) == + HL_POWER9_HOST_MAGIC) { + dev_dbg(hdev->dev, "Working in 64-bit DMA mode\n"); + hdev->power9_64bit_dma_enable = 1; + hdev->dma_mask = 64; + } else { + dev_dbg(hdev->dev, "Working in 48-bit DMA mode\n"); + hdev->power9_64bit_dma_enable = 0; + hdev->dma_mask = 48; + } +} + +u64 goya_get_device_time(struct hl_device *hdev) +{ + u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32; + + return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL); +} + +static const struct hl_asic_funcs goya_funcs = { + .early_init = 
goya_early_init, + .early_fini = goya_early_fini, + .late_init = goya_late_init, + .late_fini = goya_late_fini, + .sw_init = goya_sw_init, + .sw_fini = goya_sw_fini, + .hw_init = goya_hw_init, + .hw_fini = goya_hw_fini, + .halt_engines = goya_halt_engines, + .suspend = goya_suspend, + .resume = goya_resume, + .cb_mmap = goya_cb_mmap, + .ring_doorbell = goya_ring_doorbell, + .pqe_write = goya_pqe_write, + .asic_dma_alloc_coherent = goya_dma_alloc_coherent, + .asic_dma_free_coherent = goya_dma_free_coherent, + .get_int_queue_base = goya_get_int_queue_base, + .test_queues = goya_test_queues, + .asic_dma_pool_zalloc = goya_dma_pool_zalloc, + .asic_dma_pool_free = goya_dma_pool_free, + .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc, + .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free, + .hl_dma_unmap_sg = goya_dma_unmap_sg, + .cs_parser = goya_cs_parser, + .asic_dma_map_sg = goya_dma_map_sg, + .get_dma_desc_list_size = goya_get_dma_desc_list_size, + .add_end_of_cb_packets = goya_add_end_of_cb_packets, + .update_eq_ci = goya_update_eq_ci, + .context_switch = goya_context_switch, + .restore_phase_topology = goya_restore_phase_topology, + .debugfs_read32 = goya_debugfs_read32, + .debugfs_write32 = goya_debugfs_write32, + .debugfs_read64 = goya_debugfs_read64, + .debugfs_write64 = goya_debugfs_write64, + .add_device_attr = goya_add_device_attr, + .handle_eqe = goya_handle_eqe, + .set_pll_profile = goya_set_pll_profile, + .get_events_stat = goya_get_events_stat, + .read_pte = goya_read_pte, + .write_pte = goya_write_pte, + .mmu_invalidate_cache = goya_mmu_invalidate_cache, + .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range, + .send_heartbeat = goya_send_heartbeat, + .set_clock_gating = goya_set_clock_gating, + .disable_clock_gating = goya_disable_clock_gating, + .debug_coresight = goya_debug_coresight, + .is_device_idle = goya_is_device_idle, + .soft_reset_late_init = goya_soft_reset_late_init, + .hw_queues_lock = goya_hw_queues_lock, + .hw_queues_unlock = goya_hw_queues_unlock, + .get_pci_id = goya_get_pci_id, + .get_eeprom_data = goya_get_eeprom_data, + .send_cpu_message = goya_send_cpu_message, + .get_hw_state = goya_get_hw_state, + .pci_bars_map = goya_pci_bars_map, + .init_iatu = goya_init_iatu, + .rreg = hl_rreg, + .wreg = hl_wreg, + .halt_coresight = goya_halt_coresight, + .ctx_init = goya_ctx_init, + .get_clk_rate = goya_get_clk_rate, + .get_queue_id_for_cq = goya_get_queue_id_for_cq, + .read_device_fw_version = goya_read_device_fw_version, + .load_firmware_to_device = goya_load_firmware_to_device, + .load_boot_fit_to_device = goya_load_boot_fit_to_device, + .get_signal_cb_size = goya_get_signal_cb_size, + .get_wait_cb_size = goya_get_wait_cb_size, + .gen_signal_cb = goya_gen_signal_cb, + .gen_wait_cb = goya_gen_wait_cb, + .reset_sob = goya_reset_sob, + .set_dma_mask_from_fw = goya_set_dma_mask_from_fw, + .get_device_time = goya_get_device_time +}; + +/* + * goya_set_asic_funcs - set Goya function pointers + * + * @*hdev: pointer to hl_device structure + * + */ +void goya_set_asic_funcs(struct hl_device *hdev) +{ + hdev->asic_funcs = &goya_funcs; +} diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h new file mode 100644 index 000000000..def86c75e --- /dev/null +++ b/drivers/misc/habanalabs/goya/goyaP.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef GOYAP_H_ +#define GOYAP_H_ + +#include <uapi/misc/habanalabs.h> +#include "../common/habanalabs.h" +#include "../include/common/hl_boot_if.h" +#include "../include/goya/goya_packets.h" +#include "../include/goya/goya.h" +#include "../include/goya/goya_async_events.h" +#include "../include/goya/goya_fw_if.h" + +#define NUMBER_OF_CMPLT_QUEUES 5 +#define NUMBER_OF_EXT_HW_QUEUES 5 +#define NUMBER_OF_CPU_HW_QUEUES 1 +#define NUMBER_OF_INT_HW_QUEUES 9 +#define NUMBER_OF_HW_QUEUES (NUMBER_OF_EXT_HW_QUEUES + \ + NUMBER_OF_CPU_HW_QUEUES + \ + NUMBER_OF_INT_HW_QUEUES) + +/* + * Number of MSI-X interrupt IDs: + * Each completion queue has 1 ID + * The event queue has 1 ID + */ +#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + 1) + +#if (NUMBER_OF_INTERRUPTS > GOYA_MSIX_ENTRIES) +#error "Number of MSIX interrupts must be less than or equal to GOYA_MSIX_ENTRIES" +#endif + +#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */ + +#define QMAN_STOP_TIMEOUT_USEC 100000 /* 100 ms */ + +#define CORESIGHT_TIMEOUT_USEC 100000 /* 100 ms */ + +#define GOYA_CPU_TIMEOUT_USEC 15000000 /* 15s */ + +#define TPC_ENABLED_MASK 0xFF + +#define PLL_HIGH_DEFAULT 1575000000 /* 1.575 GHz */ + +#define MAX_POWER_DEFAULT 200000 /* 200W */ + +#define DRAM_PHYS_DEFAULT_SIZE 0x100000000ull /* 4GB */ + +#define GOYA_DEFAULT_CARD_NAME "HL1000" + +#define GOYA_MAX_PENDING_CS 64 + +#if !IS_MAX_PENDING_CS_VALID(GOYA_MAX_PENDING_CS) +#error "GOYA_MAX_PENDING_CS must be a power of 2 and greater than 1" +#endif + +/* DRAM Memory Map */ + +#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */ +#define MMU_PAGE_TABLES_SIZE 0x0FC00000 /* 252MB */ +#define MMU_DRAM_DEFAULT_PAGE_SIZE 0x00200000 /* 2MB */ +#define MMU_CACHE_MNG_SIZE 0x00001000 /* 4KB */ + +#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE +#define MMU_PAGE_TABLES_ADDR (CPU_FW_IMAGE_ADDR + CPU_FW_IMAGE_SIZE) +#define MMU_DRAM_DEFAULT_PAGE_ADDR (MMU_PAGE_TABLES_ADDR + \ + MMU_PAGE_TABLES_SIZE) +#define MMU_CACHE_MNG_ADDR (MMU_DRAM_DEFAULT_PAGE_ADDR + \ + MMU_DRAM_DEFAULT_PAGE_SIZE) +#define DRAM_DRIVER_END_ADDR (MMU_CACHE_MNG_ADDR + \ + MMU_CACHE_MNG_SIZE) + +#define DRAM_BASE_ADDR_USER 0x20000000 + +#if (DRAM_DRIVER_END_ADDR > DRAM_BASE_ADDR_USER) +#error "Driver must reserve no more than 512MB" +#endif + +/* + * SRAM Memory Map for Driver + * + * Driver occupies DRIVER_SRAM_SIZE bytes from the start of SRAM. 
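This region is reserved for the driver; user space SRAM begins at SRAM_USER_BASE_OFFSET. 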
It is used for + * MME/TPC QMANs + * + */ + +#define MME_QMAN_BASE_OFFSET 0x000000 /* Must be 0 */ +#define MME_QMAN_LENGTH 64 +#define TPC_QMAN_LENGTH 64 + +#define TPC0_QMAN_BASE_OFFSET (MME_QMAN_BASE_OFFSET + \ + (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC1_QMAN_BASE_OFFSET (TPC0_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC2_QMAN_BASE_OFFSET (TPC1_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC3_QMAN_BASE_OFFSET (TPC2_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC4_QMAN_BASE_OFFSET (TPC3_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC5_QMAN_BASE_OFFSET (TPC4_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC6_QMAN_BASE_OFFSET (TPC5_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC7_QMAN_BASE_OFFSET (TPC6_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) + +#define SRAM_DRIVER_RES_OFFSET (TPC7_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) + +#if (SRAM_DRIVER_RES_OFFSET >= GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START) +#error "MME/TPC QMANs SRAM space exceeds limit" +#endif + +#define SRAM_USER_BASE_OFFSET GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START + +/* Virtual address space */ +#define VA_HOST_SPACE_START 0x1000000000000ull /* 256TB */ +#define VA_HOST_SPACE_END 0x3FF8000000000ull /* 1PB - 1TB */ +#define VA_HOST_SPACE_SIZE (VA_HOST_SPACE_END - \ + VA_HOST_SPACE_START) /* 767TB */ + +#define VA_DDR_SPACE_START 0x800000000ull /* 32GB */ +#define VA_DDR_SPACE_END 0x2000000000ull /* 128GB */ +#define VA_DDR_SPACE_SIZE (VA_DDR_SPACE_END - \ + VA_DDR_SPACE_START) /* 128GB */ + +#if (HL_CPU_ACCESSIBLE_MEM_SIZE != SZ_2M) +#error "HL_CPU_ACCESSIBLE_MEM_SIZE must be exactly 2MB to enable MMU mapping" +#endif + +#define VA_CPU_ACCESSIBLE_MEM_ADDR 0x8000000000ull + +#define DMA_MAX_TRANSFER_SIZE U32_MAX + +#define HW_CAP_PLL 0x00000001 +#define HW_CAP_DDR_0 0x00000002 +#define HW_CAP_DDR_1 0x00000004 +#define HW_CAP_MME 0x00000008 +#define HW_CAP_CPU 0x00000010 +#define HW_CAP_DMA 0x00000020 +#define HW_CAP_MSIX 0x00000040 +#define HW_CAP_CPU_Q 0x00000080 +#define HW_CAP_MMU 0x00000100 +#define HW_CAP_TPC_MBIST 0x00000200 +#define HW_CAP_GOLDEN 0x00000400 +#define HW_CAP_TPC 0x00000800 + +struct goya_device { + /* TODO: remove hw_queues_lock after moving to scheduler code */ + spinlock_t hw_queues_lock; + + u64 mme_clk; + u64 tpc_clk; + u64 ic_clk; + + u64 ddr_bar_cur_addr; + u32 events_stat[GOYA_ASYNC_EVENT_ID_SIZE]; + u32 events_stat_aggregate[GOYA_ASYNC_EVENT_ID_SIZE]; + u32 hw_cap_initialized; + u8 device_cpu_mmu_mappings_done; +}; + +int goya_get_fixed_properties(struct hl_device *hdev); +int goya_mmu_init(struct hl_device *hdev); +void goya_init_dma_qmans(struct hl_device *hdev); +void goya_init_mme_qmans(struct hl_device *hdev); +void goya_init_tpc_qmans(struct hl_device *hdev); +int goya_init_cpu_queues(struct hl_device *hdev); +void goya_init_security(struct hl_device *hdev); +int goya_late_init(struct hl_device *hdev); +void goya_late_fini(struct hl_device *hdev); + +void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi); +void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd); +void goya_update_eq_ci(struct hl_device *hdev, u32 val); +void goya_restore_phase_topology(struct hl_device *hdev); +int goya_context_switch(struct hl_device *hdev, u32 asid); + +int goya_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, + u8 i2c_addr, u8 i2c_reg, u32 *val); +int 
goya_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, + u8 i2c_addr, u8 i2c_reg, u32 val); +void goya_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state); + +int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id); +int goya_test_queues(struct hl_device *hdev); +int goya_test_cpu_queue(struct hl_device *hdev); +int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, + u32 timeout, long *result); + +long goya_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr); +long goya_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr); +long goya_get_current(struct hl_device *hdev, int sensor_index, u32 attr); +long goya_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr); +long goya_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr); +void goya_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, + long value); +u64 goya_get_max_power(struct hl_device *hdev); +void goya_set_max_power(struct hl_device *hdev, u64 value); + +void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq); +void goya_add_device_attr(struct hl_device *hdev, + struct attribute_group *dev_attr_grp); +int goya_cpucp_info_get(struct hl_device *hdev); +int goya_debug_coresight(struct hl_device *hdev, void *data); +void goya_halt_coresight(struct hl_device *hdev); + +int goya_suspend(struct hl_device *hdev); +int goya_resume(struct hl_device *hdev); + +void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry); +void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size); + +void goya_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address, + u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec, + bool eb); +int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser); +void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id, + dma_addr_t *dma_handle, u16 *queue_len); +u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt); +int goya_send_heartbeat(struct hl_device *hdev); +void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, + dma_addr_t *dma_handle); +void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, + void *vaddr); +void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev); + +int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk); +u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx); +u64 goya_get_device_time(struct hl_device *hdev); + +#endif /* GOYAP_H_ */ diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c new file mode 100644 index 000000000..4027a6a33 --- /dev/null +++ b/drivers/misc/habanalabs/goya/goya_coresight.c @@ -0,0 +1,704 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */ + +#include "goyaP.h" +#include "../include/goya/goya_coresight.h" +#include "../include/goya/asic_reg/goya_regs.h" +#include "../include/goya/asic_reg/goya_masks.h" + +#include <uapi/misc/habanalabs.h> + +#include <linux/coresight.h> + +#define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 100) + +#define SPMU_SECTION_SIZE DMA_CH_0_CS_SPMU_MAX_OFFSET +#define SPMU_EVENT_TYPES_OFFSET 0x400 +#define SPMU_MAX_COUNTERS 6 + +static u64 debug_stm_regs[GOYA_STM_LAST + 1] = { + [GOYA_STM_CPU] = mmCPU_STM_BASE, + [GOYA_STM_DMA_CH_0_CS] = mmDMA_CH_0_CS_STM_BASE, + [GOYA_STM_DMA_CH_1_CS] = mmDMA_CH_1_CS_STM_BASE, + [GOYA_STM_DMA_CH_2_CS] = mmDMA_CH_2_CS_STM_BASE, + [GOYA_STM_DMA_CH_3_CS] = mmDMA_CH_3_CS_STM_BASE, + [GOYA_STM_DMA_CH_4_CS] = mmDMA_CH_4_CS_STM_BASE, + [GOYA_STM_DMA_MACRO_CS] = mmDMA_MACRO_CS_STM_BASE, + [GOYA_STM_MME1_SBA] = mmMME1_SBA_STM_BASE, + [GOYA_STM_MME3_SBB] = mmMME3_SBB_STM_BASE, + [GOYA_STM_MME4_WACS2] = mmMME4_WACS2_STM_BASE, + [GOYA_STM_MME4_WACS] = mmMME4_WACS_STM_BASE, + [GOYA_STM_MMU_CS] = mmMMU_CS_STM_BASE, + [GOYA_STM_PCIE] = mmPCIE_STM_BASE, + [GOYA_STM_PSOC] = mmPSOC_STM_BASE, + [GOYA_STM_TPC0_EML] = mmTPC0_EML_STM_BASE, + [GOYA_STM_TPC1_EML] = mmTPC1_EML_STM_BASE, + [GOYA_STM_TPC2_EML] = mmTPC2_EML_STM_BASE, + [GOYA_STM_TPC3_EML] = mmTPC3_EML_STM_BASE, + [GOYA_STM_TPC4_EML] = mmTPC4_EML_STM_BASE, + [GOYA_STM_TPC5_EML] = mmTPC5_EML_STM_BASE, + [GOYA_STM_TPC6_EML] = mmTPC6_EML_STM_BASE, + [GOYA_STM_TPC7_EML] = mmTPC7_EML_STM_BASE +}; + +static u64 debug_etf_regs[GOYA_ETF_LAST + 1] = { + [GOYA_ETF_CPU_0] = mmCPU_ETF_0_BASE, + [GOYA_ETF_CPU_1] = mmCPU_ETF_1_BASE, + [GOYA_ETF_CPU_TRACE] = mmCPU_ETF_TRACE_BASE, + [GOYA_ETF_DMA_CH_0_CS] = mmDMA_CH_0_CS_ETF_BASE, + [GOYA_ETF_DMA_CH_1_CS] = mmDMA_CH_1_CS_ETF_BASE, + [GOYA_ETF_DMA_CH_2_CS] = mmDMA_CH_2_CS_ETF_BASE, + [GOYA_ETF_DMA_CH_3_CS] = mmDMA_CH_3_CS_ETF_BASE, + [GOYA_ETF_DMA_CH_4_CS] = mmDMA_CH_4_CS_ETF_BASE, + [GOYA_ETF_DMA_MACRO_CS] = mmDMA_MACRO_CS_ETF_BASE, + [GOYA_ETF_MME1_SBA] = mmMME1_SBA_ETF_BASE, + [GOYA_ETF_MME3_SBB] = mmMME3_SBB_ETF_BASE, + [GOYA_ETF_MME4_WACS2] = mmMME4_WACS2_ETF_BASE, + [GOYA_ETF_MME4_WACS] = mmMME4_WACS_ETF_BASE, + [GOYA_ETF_MMU_CS] = mmMMU_CS_ETF_BASE, + [GOYA_ETF_PCIE] = mmPCIE_ETF_BASE, + [GOYA_ETF_PSOC] = mmPSOC_ETF_BASE, + [GOYA_ETF_TPC0_EML] = mmTPC0_EML_ETF_BASE, + [GOYA_ETF_TPC1_EML] = mmTPC1_EML_ETF_BASE, + [GOYA_ETF_TPC2_EML] = mmTPC2_EML_ETF_BASE, + [GOYA_ETF_TPC3_EML] = mmTPC3_EML_ETF_BASE, + [GOYA_ETF_TPC4_EML] = mmTPC4_EML_ETF_BASE, + [GOYA_ETF_TPC5_EML] = mmTPC5_EML_ETF_BASE, + [GOYA_ETF_TPC6_EML] = mmTPC6_EML_ETF_BASE, + [GOYA_ETF_TPC7_EML] = mmTPC7_EML_ETF_BASE +}; + +static u64 debug_funnel_regs[GOYA_FUNNEL_LAST + 1] = { + [GOYA_FUNNEL_CPU] = mmCPU_FUNNEL_BASE, + [GOYA_FUNNEL_DMA_CH_6_1] = mmDMA_CH_FUNNEL_6_1_BASE, + [GOYA_FUNNEL_DMA_MACRO_3_1] = mmDMA_MACRO_FUNNEL_3_1_BASE, + [GOYA_FUNNEL_MME0_RTR] = mmMME0_RTR_FUNNEL_BASE, + [GOYA_FUNNEL_MME1_RTR] = mmMME1_RTR_FUNNEL_BASE, + [GOYA_FUNNEL_MME2_RTR] = mmMME2_RTR_FUNNEL_BASE, + [GOYA_FUNNEL_MME3_RTR] = mmMME3_RTR_FUNNEL_BASE, + [GOYA_FUNNEL_MME4_RTR] = mmMME4_RTR_FUNNEL_BASE, + [GOYA_FUNNEL_MME5_RTR] = mmMME5_RTR_FUNNEL_BASE, + [GOYA_FUNNEL_PCIE] = mmPCIE_FUNNEL_BASE, + [GOYA_FUNNEL_PSOC] = mmPSOC_FUNNEL_BASE, + [GOYA_FUNNEL_TPC0_EML] = mmTPC0_EML_FUNNEL_BASE, + [GOYA_FUNNEL_TPC1_EML] = mmTPC1_EML_FUNNEL_BASE, + [GOYA_FUNNEL_TPC1_RTR] = mmTPC1_RTR_FUNNEL_BASE, + [GOYA_FUNNEL_TPC2_EML] = mmTPC2_EML_FUNNEL_BASE, + [GOYA_FUNNEL_TPC2_RTR] = mmTPC2_RTR_FUNNEL_BASE, + [GOYA_FUNNEL_TPC3_EML] = 
mmTPC3_EML_FUNNEL_BASE, + [GOYA_FUNNEL_TPC3_RTR] = mmTPC3_RTR_FUNNEL_BASE, + [GOYA_FUNNEL_TPC4_EML] = mmTPC4_EML_FUNNEL_BASE, + [GOYA_FUNNEL_TPC4_RTR] = mmTPC4_RTR_FUNNEL_BASE, + [GOYA_FUNNEL_TPC5_EML] = mmTPC5_EML_FUNNEL_BASE, + [GOYA_FUNNEL_TPC5_RTR] = mmTPC5_RTR_FUNNEL_BASE, + [GOYA_FUNNEL_TPC6_EML] = mmTPC6_EML_FUNNEL_BASE, + [GOYA_FUNNEL_TPC6_RTR] = mmTPC6_RTR_FUNNEL_BASE, + [GOYA_FUNNEL_TPC7_EML] = mmTPC7_EML_FUNNEL_BASE +}; + +static u64 debug_bmon_regs[GOYA_BMON_LAST + 1] = { + [GOYA_BMON_CPU_RD] = mmCPU_RD_BMON_BASE, + [GOYA_BMON_CPU_WR] = mmCPU_WR_BMON_BASE, + [GOYA_BMON_DMA_CH_0_0] = mmDMA_CH_0_BMON_0_BASE, + [GOYA_BMON_DMA_CH_0_1] = mmDMA_CH_0_BMON_1_BASE, + [GOYA_BMON_DMA_CH_1_0] = mmDMA_CH_1_BMON_0_BASE, + [GOYA_BMON_DMA_CH_1_1] = mmDMA_CH_1_BMON_1_BASE, + [GOYA_BMON_DMA_CH_2_0] = mmDMA_CH_2_BMON_0_BASE, + [GOYA_BMON_DMA_CH_2_1] = mmDMA_CH_2_BMON_1_BASE, + [GOYA_BMON_DMA_CH_3_0] = mmDMA_CH_3_BMON_0_BASE, + [GOYA_BMON_DMA_CH_3_1] = mmDMA_CH_3_BMON_1_BASE, + [GOYA_BMON_DMA_CH_4_0] = mmDMA_CH_4_BMON_0_BASE, + [GOYA_BMON_DMA_CH_4_1] = mmDMA_CH_4_BMON_1_BASE, + [GOYA_BMON_DMA_MACRO_0] = mmDMA_MACRO_BMON_0_BASE, + [GOYA_BMON_DMA_MACRO_1] = mmDMA_MACRO_BMON_1_BASE, + [GOYA_BMON_DMA_MACRO_2] = mmDMA_MACRO_BMON_2_BASE, + [GOYA_BMON_DMA_MACRO_3] = mmDMA_MACRO_BMON_3_BASE, + [GOYA_BMON_DMA_MACRO_4] = mmDMA_MACRO_BMON_4_BASE, + [GOYA_BMON_DMA_MACRO_5] = mmDMA_MACRO_BMON_5_BASE, + [GOYA_BMON_DMA_MACRO_6] = mmDMA_MACRO_BMON_6_BASE, + [GOYA_BMON_DMA_MACRO_7] = mmDMA_MACRO_BMON_7_BASE, + [GOYA_BMON_MME1_SBA_0] = mmMME1_SBA_BMON0_BASE, + [GOYA_BMON_MME1_SBA_1] = mmMME1_SBA_BMON1_BASE, + [GOYA_BMON_MME3_SBB_0] = mmMME3_SBB_BMON0_BASE, + [GOYA_BMON_MME3_SBB_1] = mmMME3_SBB_BMON1_BASE, + [GOYA_BMON_MME4_WACS2_0] = mmMME4_WACS2_BMON0_BASE, + [GOYA_BMON_MME4_WACS2_1] = mmMME4_WACS2_BMON1_BASE, + [GOYA_BMON_MME4_WACS2_2] = mmMME4_WACS2_BMON2_BASE, + [GOYA_BMON_MME4_WACS_0] = mmMME4_WACS_BMON0_BASE, + [GOYA_BMON_MME4_WACS_1] = mmMME4_WACS_BMON1_BASE, + [GOYA_BMON_MME4_WACS_2] = mmMME4_WACS_BMON2_BASE, + [GOYA_BMON_MME4_WACS_3] = mmMME4_WACS_BMON3_BASE, + [GOYA_BMON_MME4_WACS_4] = mmMME4_WACS_BMON4_BASE, + [GOYA_BMON_MME4_WACS_5] = mmMME4_WACS_BMON5_BASE, + [GOYA_BMON_MME4_WACS_6] = mmMME4_WACS_BMON6_BASE, + [GOYA_BMON_MMU_0] = mmMMU_BMON_0_BASE, + [GOYA_BMON_MMU_1] = mmMMU_BMON_1_BASE, + [GOYA_BMON_PCIE_MSTR_RD] = mmPCIE_BMON_MSTR_RD_BASE, + [GOYA_BMON_PCIE_MSTR_WR] = mmPCIE_BMON_MSTR_WR_BASE, + [GOYA_BMON_PCIE_SLV_RD] = mmPCIE_BMON_SLV_RD_BASE, + [GOYA_BMON_PCIE_SLV_WR] = mmPCIE_BMON_SLV_WR_BASE, + [GOYA_BMON_TPC0_EML_0] = mmTPC0_EML_BUSMON_0_BASE, + [GOYA_BMON_TPC0_EML_1] = mmTPC0_EML_BUSMON_1_BASE, + [GOYA_BMON_TPC0_EML_2] = mmTPC0_EML_BUSMON_2_BASE, + [GOYA_BMON_TPC0_EML_3] = mmTPC0_EML_BUSMON_3_BASE, + [GOYA_BMON_TPC1_EML_0] = mmTPC1_EML_BUSMON_0_BASE, + [GOYA_BMON_TPC1_EML_1] = mmTPC1_EML_BUSMON_1_BASE, + [GOYA_BMON_TPC1_EML_2] = mmTPC1_EML_BUSMON_2_BASE, + [GOYA_BMON_TPC1_EML_3] = mmTPC1_EML_BUSMON_3_BASE, + [GOYA_BMON_TPC2_EML_0] = mmTPC2_EML_BUSMON_0_BASE, + [GOYA_BMON_TPC2_EML_1] = mmTPC2_EML_BUSMON_1_BASE, + [GOYA_BMON_TPC2_EML_2] = mmTPC2_EML_BUSMON_2_BASE, + [GOYA_BMON_TPC2_EML_3] = mmTPC2_EML_BUSMON_3_BASE, + [GOYA_BMON_TPC3_EML_0] = mmTPC3_EML_BUSMON_0_BASE, + [GOYA_BMON_TPC3_EML_1] = mmTPC3_EML_BUSMON_1_BASE, + [GOYA_BMON_TPC3_EML_2] = mmTPC3_EML_BUSMON_2_BASE, + [GOYA_BMON_TPC3_EML_3] = mmTPC3_EML_BUSMON_3_BASE, + [GOYA_BMON_TPC4_EML_0] = mmTPC4_EML_BUSMON_0_BASE, + [GOYA_BMON_TPC4_EML_1] = mmTPC4_EML_BUSMON_1_BASE, + [GOYA_BMON_TPC4_EML_2] = mmTPC4_EML_BUSMON_2_BASE, + 
[GOYA_BMON_TPC4_EML_3] = mmTPC4_EML_BUSMON_3_BASE, + [GOYA_BMON_TPC5_EML_0] = mmTPC5_EML_BUSMON_0_BASE, + [GOYA_BMON_TPC5_EML_1] = mmTPC5_EML_BUSMON_1_BASE, + [GOYA_BMON_TPC5_EML_2] = mmTPC5_EML_BUSMON_2_BASE, + [GOYA_BMON_TPC5_EML_3] = mmTPC5_EML_BUSMON_3_BASE, + [GOYA_BMON_TPC6_EML_0] = mmTPC6_EML_BUSMON_0_BASE, + [GOYA_BMON_TPC6_EML_1] = mmTPC6_EML_BUSMON_1_BASE, + [GOYA_BMON_TPC6_EML_2] = mmTPC6_EML_BUSMON_2_BASE, + [GOYA_BMON_TPC6_EML_3] = mmTPC6_EML_BUSMON_3_BASE, + [GOYA_BMON_TPC7_EML_0] = mmTPC7_EML_BUSMON_0_BASE, + [GOYA_BMON_TPC7_EML_1] = mmTPC7_EML_BUSMON_1_BASE, + [GOYA_BMON_TPC7_EML_2] = mmTPC7_EML_BUSMON_2_BASE, + [GOYA_BMON_TPC7_EML_3] = mmTPC7_EML_BUSMON_3_BASE +}; + +static u64 debug_spmu_regs[GOYA_SPMU_LAST + 1] = { + [GOYA_SPMU_DMA_CH_0_CS] = mmDMA_CH_0_CS_SPMU_BASE, + [GOYA_SPMU_DMA_CH_1_CS] = mmDMA_CH_1_CS_SPMU_BASE, + [GOYA_SPMU_DMA_CH_2_CS] = mmDMA_CH_2_CS_SPMU_BASE, + [GOYA_SPMU_DMA_CH_3_CS] = mmDMA_CH_3_CS_SPMU_BASE, + [GOYA_SPMU_DMA_CH_4_CS] = mmDMA_CH_4_CS_SPMU_BASE, + [GOYA_SPMU_DMA_MACRO_CS] = mmDMA_MACRO_CS_SPMU_BASE, + [GOYA_SPMU_MME1_SBA] = mmMME1_SBA_SPMU_BASE, + [GOYA_SPMU_MME3_SBB] = mmMME3_SBB_SPMU_BASE, + [GOYA_SPMU_MME4_WACS2] = mmMME4_WACS2_SPMU_BASE, + [GOYA_SPMU_MME4_WACS] = mmMME4_WACS_SPMU_BASE, + [GOYA_SPMU_MMU_CS] = mmMMU_CS_SPMU_BASE, + [GOYA_SPMU_PCIE] = mmPCIE_SPMU_BASE, + [GOYA_SPMU_TPC0_EML] = mmTPC0_EML_SPMU_BASE, + [GOYA_SPMU_TPC1_EML] = mmTPC1_EML_SPMU_BASE, + [GOYA_SPMU_TPC2_EML] = mmTPC2_EML_SPMU_BASE, + [GOYA_SPMU_TPC3_EML] = mmTPC3_EML_SPMU_BASE, + [GOYA_SPMU_TPC4_EML] = mmTPC4_EML_SPMU_BASE, + [GOYA_SPMU_TPC5_EML] = mmTPC5_EML_SPMU_BASE, + [GOYA_SPMU_TPC6_EML] = mmTPC6_EML_SPMU_BASE, + [GOYA_SPMU_TPC7_EML] = mmTPC7_EML_SPMU_BASE +}; + +static int goya_coresight_timeout(struct hl_device *hdev, u64 addr, + int position, bool up) +{ + int rc; + u32 val, timeout_usec; + + if (hdev->pldm) + timeout_usec = GOYA_PLDM_CORESIGHT_TIMEOUT_USEC; + else + timeout_usec = CORESIGHT_TIMEOUT_USEC; + + rc = hl_poll_timeout( + hdev, + addr, + val, + up ? 
val & BIT(position) : !(val & BIT(position)), + 1000, + timeout_usec); + + if (rc) { + dev_err(hdev->dev, + "Timeout while waiting for coresight, addr: 0x%llx, position: %d, up: %d\n", + addr, position, up); + return -EFAULT; + } + + return 0; +} + +static int goya_config_stm(struct hl_device *hdev, + struct hl_debug_params *params) +{ + struct hl_debug_params_stm *input; + u64 base_reg; + u32 frequency; + int rc; + + if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) { + dev_err(hdev->dev, "Invalid register index in STM\n"); + return -EINVAL; + } + + base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE; + + WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK); + + if (params->enable) { + input = params->input; + + if (!input) + return -EINVAL; + + WREG32(base_reg + 0xE80, 0x80004); + WREG32(base_reg + 0xD64, 7); + WREG32(base_reg + 0xD60, 0); + WREG32(base_reg + 0xD00, lower_32_bits(input->he_mask)); + WREG32(base_reg + 0xD20, lower_32_bits(input->sp_mask)); + WREG32(base_reg + 0xD60, 1); + WREG32(base_reg + 0xD00, upper_32_bits(input->he_mask)); + WREG32(base_reg + 0xD20, upper_32_bits(input->sp_mask)); + WREG32(base_reg + 0xE70, 0x10); + WREG32(base_reg + 0xE60, 0); + WREG32(base_reg + 0xE64, 0x420000); + WREG32(base_reg + 0xE00, 0xFFFFFFFF); + WREG32(base_reg + 0xE20, 0xFFFFFFFF); + WREG32(base_reg + 0xEF4, input->id); + WREG32(base_reg + 0xDF4, 0x80); + frequency = hdev->asic_prop.psoc_timestamp_frequency; + if (frequency == 0) + frequency = input->frequency; + WREG32(base_reg + 0xE8C, frequency); + WREG32(base_reg + 0xE90, 0x7FF); + WREG32(base_reg + 0xE80, 0x27 | (input->id << 16)); + } else { + WREG32(base_reg + 0xE80, 4); + WREG32(base_reg + 0xD64, 0); + WREG32(base_reg + 0xD60, 1); + WREG32(base_reg + 0xD00, 0); + WREG32(base_reg + 0xD20, 0); + WREG32(base_reg + 0xD60, 0); + WREG32(base_reg + 0xE20, 0); + WREG32(base_reg + 0xE00, 0); + WREG32(base_reg + 0xDF4, 0x80); + WREG32(base_reg + 0xE70, 0); + WREG32(base_reg + 0xE60, 0); + WREG32(base_reg + 0xE64, 0); + WREG32(base_reg + 0xE8C, 0); + + rc = goya_coresight_timeout(hdev, base_reg + 0xE80, 23, false); + if (rc) { + dev_err(hdev->dev, + "Failed to disable STM on timeout, error %d\n", + rc); + return rc; + } + + WREG32(base_reg + 0xE80, 4); + } + + return 0; +} + +static int goya_config_etf(struct hl_device *hdev, + struct hl_debug_params *params) +{ + struct hl_debug_params_etf *input; + u64 base_reg; + u32 val; + int rc; + + if (params->reg_idx >= ARRAY_SIZE(debug_etf_regs)) { + dev_err(hdev->dev, "Invalid register index in ETF\n"); + return -EINVAL; + } + + base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE; + + WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK); + + val = RREG32(base_reg + 0x304); + val |= 0x1000; + WREG32(base_reg + 0x304, val); + val |= 0x40; + WREG32(base_reg + 0x304, val); + + rc = goya_coresight_timeout(hdev, base_reg + 0x304, 6, false); + if (rc) { + dev_err(hdev->dev, + "Failed to %s ETF on timeout, error %d\n", + params->enable ? "enable" : "disable", rc); + return rc; + } + + rc = goya_coresight_timeout(hdev, base_reg + 0xC, 2, true); + if (rc) { + dev_err(hdev->dev, + "Failed to %s ETF on timeout, error %d\n", + params->enable ? 
"enable" : "disable", rc); + return rc; + } + + WREG32(base_reg + 0x20, 0); + + if (params->enable) { + input = params->input; + + if (!input) + return -EINVAL; + + WREG32(base_reg + 0x34, 0x3FFC); + WREG32(base_reg + 0x28, input->sink_mode); + WREG32(base_reg + 0x304, 0x4001); + WREG32(base_reg + 0x308, 0xA); + WREG32(base_reg + 0x20, 1); + } else { + WREG32(base_reg + 0x34, 0); + WREG32(base_reg + 0x28, 0); + WREG32(base_reg + 0x304, 0); + } + + return 0; +} + +static int goya_etr_validate_address(struct hl_device *hdev, u64 addr, + u64 size) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 range_start, range_end; + + if (addr > (addr + size)) { + dev_err(hdev->dev, + "ETR buffer size %llu overflow\n", size); + return false; + } + + if (hdev->mmu_enable) { + range_start = prop->dmmu.start_addr; + range_end = prop->dmmu.end_addr; + } else { + range_start = prop->dram_user_base_address; + range_end = prop->dram_end_address; + } + + return hl_mem_area_inside_range(addr, size, range_start, range_end); +} + +static int goya_config_etr(struct hl_device *hdev, + struct hl_debug_params *params) +{ + struct hl_debug_params_etr *input; + u32 val; + int rc; + + WREG32(mmPSOC_ETR_LAR, CORESIGHT_UNLOCK); + + val = RREG32(mmPSOC_ETR_FFCR); + val |= 0x1000; + WREG32(mmPSOC_ETR_FFCR, val); + val |= 0x40; + WREG32(mmPSOC_ETR_FFCR, val); + + rc = goya_coresight_timeout(hdev, mmPSOC_ETR_FFCR, 6, false); + if (rc) { + dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n", + params->enable ? "enable" : "disable", rc); + return rc; + } + + rc = goya_coresight_timeout(hdev, mmPSOC_ETR_STS, 2, true); + if (rc) { + dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n", + params->enable ? "enable" : "disable", rc); + return rc; + } + + WREG32(mmPSOC_ETR_CTL, 0); + + if (params->enable) { + input = params->input; + + if (!input) + return -EINVAL; + + if (input->buffer_size == 0) { + dev_err(hdev->dev, + "ETR buffer size should be bigger than 0\n"); + return -EINVAL; + } + + if (!goya_etr_validate_address(hdev, + input->buffer_address, input->buffer_size)) { + dev_err(hdev->dev, "buffer address is not valid\n"); + return -EINVAL; + } + + WREG32(mmPSOC_ETR_BUFWM, 0x3FFC); + WREG32(mmPSOC_ETR_RSZ, input->buffer_size); + WREG32(mmPSOC_ETR_MODE, input->sink_mode); + WREG32(mmPSOC_ETR_AXICTL, + 0x700 | PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT); + WREG32(mmPSOC_ETR_DBALO, + lower_32_bits(input->buffer_address)); + WREG32(mmPSOC_ETR_DBAHI, + upper_32_bits(input->buffer_address)); + WREG32(mmPSOC_ETR_FFCR, 3); + WREG32(mmPSOC_ETR_PSCR, 0xA); + WREG32(mmPSOC_ETR_CTL, 1); + } else { + WREG32(mmPSOC_ETR_BUFWM, 0); + WREG32(mmPSOC_ETR_RSZ, 0x400); + WREG32(mmPSOC_ETR_DBALO, 0); + WREG32(mmPSOC_ETR_DBAHI, 0); + WREG32(mmPSOC_ETR_PSCR, 0); + WREG32(mmPSOC_ETR_MODE, 0); + WREG32(mmPSOC_ETR_FFCR, 0); + + if (params->output_size >= sizeof(u64)) { + u32 rwp, rwphi; + + /* + * The trace buffer address is 40 bits wide. The end of + * the buffer is set in the RWP register (lower 32 + * bits), and in the RWPHI register (upper 8 bits). 
+ */ + rwp = RREG32(mmPSOC_ETR_RWP); + rwphi = RREG32(mmPSOC_ETR_RWPHI) & 0xff; + *(u64 *) params->output = ((u64) rwphi << 32) | rwp; + } + } + + return 0; +} + +static int goya_config_funnel(struct hl_device *hdev, + struct hl_debug_params *params) +{ + u64 base_reg; + + if (params->reg_idx >= ARRAY_SIZE(debug_funnel_regs)) { + dev_err(hdev->dev, "Invalid register index in FUNNEL\n"); + return -EINVAL; + } + + base_reg = debug_funnel_regs[params->reg_idx] - CFG_BASE; + + WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK); + + WREG32(base_reg, params->enable ? 0x33F : 0); + + return 0; +} + +static int goya_config_bmon(struct hl_device *hdev, + struct hl_debug_params *params) +{ + struct hl_debug_params_bmon *input; + u64 base_reg; + u32 pcie_base = 0; + + if (params->reg_idx >= ARRAY_SIZE(debug_bmon_regs)) { + dev_err(hdev->dev, "Invalid register index in BMON\n"); + return -EINVAL; + } + + base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE; + + WREG32(base_reg + 0x104, 1); + + if (params->enable) { + input = params->input; + + if (!input) + return -EINVAL; + + WREG32(base_reg + 0x200, lower_32_bits(input->start_addr0)); + WREG32(base_reg + 0x204, upper_32_bits(input->start_addr0)); + WREG32(base_reg + 0x208, lower_32_bits(input->addr_mask0)); + WREG32(base_reg + 0x20C, upper_32_bits(input->addr_mask0)); + WREG32(base_reg + 0x240, lower_32_bits(input->start_addr1)); + WREG32(base_reg + 0x244, upper_32_bits(input->start_addr1)); + WREG32(base_reg + 0x248, lower_32_bits(input->addr_mask1)); + WREG32(base_reg + 0x24C, upper_32_bits(input->addr_mask1)); + WREG32(base_reg + 0x224, 0); + WREG32(base_reg + 0x234, 0); + WREG32(base_reg + 0x30C, input->bw_win); + WREG32(base_reg + 0x308, input->win_capture); + + /* PCIE IF BMON bug WA */ + if (params->reg_idx != GOYA_BMON_PCIE_MSTR_RD && + params->reg_idx != GOYA_BMON_PCIE_MSTR_WR && + params->reg_idx != GOYA_BMON_PCIE_SLV_RD && + params->reg_idx != GOYA_BMON_PCIE_SLV_WR) + pcie_base = 0xA000000; + + WREG32(base_reg + 0x700, pcie_base | 0xB00 | (input->id << 12)); + WREG32(base_reg + 0x708, pcie_base | 0xA00 | (input->id << 12)); + WREG32(base_reg + 0x70C, pcie_base | 0xC00 | (input->id << 12)); + + WREG32(base_reg + 0x100, 0x11); + WREG32(base_reg + 0x304, 0x1); + } else { + WREG32(base_reg + 0x200, 0); + WREG32(base_reg + 0x204, 0); + WREG32(base_reg + 0x208, 0xFFFFFFFF); + WREG32(base_reg + 0x20C, 0xFFFFFFFF); + WREG32(base_reg + 0x240, 0); + WREG32(base_reg + 0x244, 0); + WREG32(base_reg + 0x248, 0xFFFFFFFF); + WREG32(base_reg + 0x24C, 0xFFFFFFFF); + WREG32(base_reg + 0x224, 0xFFFFFFFF); + WREG32(base_reg + 0x234, 0x1070F); + WREG32(base_reg + 0x30C, 0); + WREG32(base_reg + 0x308, 0xFFFF); + WREG32(base_reg + 0x700, 0xA000B00); + WREG32(base_reg + 0x708, 0xA000A00); + WREG32(base_reg + 0x70C, 0xA000C00); + WREG32(base_reg + 0x100, 1); + WREG32(base_reg + 0x304, 0); + WREG32(base_reg + 0x104, 0); + } + + return 0; +} + +static int goya_config_spmu(struct hl_device *hdev, + struct hl_debug_params *params) +{ + u64 base_reg; + struct hl_debug_params_spmu *input = params->input; + u64 *output; + u32 output_arr_len; + u32 events_num; + u32 overflow_idx; + u32 cycle_cnt_idx; + int i; + + if (params->reg_idx >= ARRAY_SIZE(debug_spmu_regs)) { + dev_err(hdev->dev, "Invalid register index in SPMU\n"); + return -EINVAL; + } + + base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE; + + if (params->enable) { + input = params->input; + + if (!input) + return -EINVAL; + + if (input->event_types_num < 3) { + dev_err(hdev->dev, + "not enough event types values 
for SPMU enable\n");
+			return -EINVAL;
+		}
+
+		if (input->event_types_num > SPMU_MAX_COUNTERS) {
+			dev_err(hdev->dev,
+				"too many event types values for SPMU enable\n");
+			return -EINVAL;
+		}
+
+		WREG32(base_reg + 0xE04, 0x41013046);
+		WREG32(base_reg + 0xE04, 0x41013040);
+
+		for (i = 0 ; i < input->event_types_num ; i++)
+			WREG32(base_reg + SPMU_EVENT_TYPES_OFFSET + i * 4,
+				input->event_types[i]);
+
+		WREG32(base_reg + 0xE04, 0x41013041);
+		WREG32(base_reg + 0xC00, 0x8000003F);
+	} else {
+		output = params->output;
+		output_arr_len = params->output_size / 8;
+		events_num = output_arr_len - 2;
+		overflow_idx = output_arr_len - 2;
+		cycle_cnt_idx = output_arr_len - 1;
+
+		if (!output)
+			return -EINVAL;
+
+		if (output_arr_len < 3) {
+			dev_err(hdev->dev,
+				"not enough values for SPMU disable\n");
+			return -EINVAL;
+		}
+
+		if (events_num > SPMU_MAX_COUNTERS) {
+			dev_err(hdev->dev,
+				"too many events values for SPMU disable\n");
+			return -EINVAL;
+		}
+
+		WREG32(base_reg + 0xE04, 0x41013040);
+
+		for (i = 0 ; i < events_num ; i++)
+			output[i] = RREG32(base_reg + i * 8);
+
+		output[overflow_idx] = RREG32(base_reg + 0xCC0);
+
+		output[cycle_cnt_idx] = RREG32(base_reg + 0xFC);
+		output[cycle_cnt_idx] <<= 32;
+		output[cycle_cnt_idx] |= RREG32(base_reg + 0xF8);
+
+		WREG32(base_reg + 0xCC0, 0);
+	}
+
+	return 0;
+}
+
+int goya_debug_coresight(struct hl_device *hdev, void *data)
+{
+	struct hl_debug_params *params = data;
+	int rc = 0;
+
+	switch (params->op) {
+	case HL_DEBUG_OP_STM:
+		rc = goya_config_stm(hdev, params);
+		break;
+	case HL_DEBUG_OP_ETF:
+		rc = goya_config_etf(hdev, params);
+		break;
+	case HL_DEBUG_OP_ETR:
+		rc = goya_config_etr(hdev, params);
+		break;
+	case HL_DEBUG_OP_FUNNEL:
+		rc = goya_config_funnel(hdev, params);
+		break;
+	case HL_DEBUG_OP_BMON:
+		rc = goya_config_bmon(hdev, params);
+		break;
+	case HL_DEBUG_OP_SPMU:
+		rc = goya_config_spmu(hdev, params);
+		break;
+	case HL_DEBUG_OP_TIMESTAMP:
+		/* Do nothing as this opcode is deprecated */
+		break;
+
+	default:
+		dev_err(hdev->dev, "Unknown coresight id %d\n", params->op);
+		return -EINVAL;
+	}
+
+	/* Perform read from the device to flush all configuration */
+	RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+	return rc;
+}
+
+void goya_halt_coresight(struct hl_device *hdev)
+{
+	struct hl_debug_params params = {};
+	int i, rc;
+
+	for (i = GOYA_ETF_FIRST ; i <= GOYA_ETF_LAST ; i++) {
+		params.reg_idx = i;
+		rc = goya_config_etf(hdev, &params);
+		if (rc)
+			dev_err(hdev->dev, "halt ETF failed, %d/%d\n", rc, i);
+	}
+
+	rc = goya_config_etr(hdev, &params);
+	if (rc)
+		dev_err(hdev->dev, "halt ETR failed, %d\n", rc);
+}
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
new file mode 100644
index 000000000..cdd4903e4
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */ + +#include "goyaP.h" + +void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq) +{ + struct goya_device *goya = hdev->asic_specific; + + switch (freq) { + case PLL_HIGH: + hl_set_frequency(hdev, MME_PLL, hdev->high_pll); + hl_set_frequency(hdev, TPC_PLL, hdev->high_pll); + hl_set_frequency(hdev, IC_PLL, hdev->high_pll); + break; + case PLL_LOW: + hl_set_frequency(hdev, MME_PLL, GOYA_PLL_FREQ_LOW); + hl_set_frequency(hdev, TPC_PLL, GOYA_PLL_FREQ_LOW); + hl_set_frequency(hdev, IC_PLL, GOYA_PLL_FREQ_LOW); + break; + case PLL_LAST: + hl_set_frequency(hdev, MME_PLL, goya->mme_clk); + hl_set_frequency(hdev, TPC_PLL, goya->tpc_clk); + hl_set_frequency(hdev, IC_PLL, goya->ic_clk); + break; + default: + dev_err(hdev->dev, "unknown frequency setting\n"); + } +} + +int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk) +{ + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, MME_PLL, false); + + if (value < 0) { + dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n", + value); + return value; + } + + *max_clk = (value / 1000 / 1000); + + value = hl_get_frequency(hdev, MME_PLL, true); + + if (value < 0) { + dev_err(hdev->dev, + "Failed to retrieve device current clock %ld\n", + value); + return value; + } + + *cur_clk = (value / 1000 / 1000); + + return 0; +} + +static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, MME_PLL, false); + + if (value < 0) + return value; + + return sprintf(buf, "%lu\n", value); +} + +static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + struct goya_device *goya = hdev->asic_specific; + int rc; + long value; + + if (hl_device_disabled_or_in_reset(hdev)) { + count = -ENODEV; + goto fail; + } + + if (hdev->pm_mng_profile == PM_AUTO) { + count = -EPERM; + goto fail; + } + + rc = kstrtoul(buf, 0, &value); + + if (rc) { + count = -EINVAL; + goto fail; + } + + hl_set_frequency(hdev, MME_PLL, value); + goya->mme_clk = value; + +fail: + return count; +} + +static ssize_t tpc_clk_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, TPC_PLL, false); + + if (value < 0) + return value; + + return sprintf(buf, "%lu\n", value); +} + +static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + struct goya_device *goya = hdev->asic_specific; + int rc; + long value; + + if (hl_device_disabled_or_in_reset(hdev)) { + count = -ENODEV; + goto fail; + } + + if (hdev->pm_mng_profile == PM_AUTO) { + count = -EPERM; + goto fail; + } + + rc = kstrtoul(buf, 0, &value); + + if (rc) { + count = -EINVAL; + goto fail; + } + + hl_set_frequency(hdev, TPC_PLL, value); + goya->tpc_clk = value; + +fail: + return count; +} + +static ssize_t ic_clk_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, IC_PLL, false); + + if 
(value < 0) + return value; + + return sprintf(buf, "%lu\n", value); +} + +static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + struct goya_device *goya = hdev->asic_specific; + int rc; + long value; + + if (hl_device_disabled_or_in_reset(hdev)) { + count = -ENODEV; + goto fail; + } + + if (hdev->pm_mng_profile == PM_AUTO) { + count = -EPERM; + goto fail; + } + + rc = kstrtoul(buf, 0, &value); + + if (rc) { + count = -EINVAL; + goto fail; + } + + hl_set_frequency(hdev, IC_PLL, value); + goya->ic_clk = value; + +fail: + return count; +} + +static ssize_t mme_clk_curr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, MME_PLL, true); + + if (value < 0) + return value; + + return sprintf(buf, "%lu\n", value); +} + +static ssize_t tpc_clk_curr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, TPC_PLL, true); + + if (value < 0) + return value; + + return sprintf(buf, "%lu\n", value); +} + +static ssize_t ic_clk_curr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, IC_PLL, true); + + if (value < 0) + return value; + + return sprintf(buf, "%lu\n", value); +} + +static ssize_t pm_mng_profile_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + return sprintf(buf, "%s\n", + (hdev->pm_mng_profile == PM_AUTO) ? "auto" : + (hdev->pm_mng_profile == PM_MANUAL) ? "manual" : + "unknown"); +} + +static ssize_t pm_mng_profile_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + if (hl_device_disabled_or_in_reset(hdev)) { + count = -ENODEV; + goto out; + } + + mutex_lock(&hdev->fpriv_list_lock); + + if (hdev->compute_ctx) { + dev_err(hdev->dev, + "Can't change PM profile while compute context is opened on the device\n"); + count = -EPERM; + goto unlock_mutex; + } + + if (strncmp("auto", buf, strlen("auto")) == 0) { + /* Make sure we are in LOW PLL when changing modes */ + if (hdev->pm_mng_profile == PM_MANUAL) { + hdev->curr_pll_profile = PLL_HIGH; + hdev->pm_mng_profile = PM_AUTO; + hl_device_set_frequency(hdev, PLL_LOW); + } + } else if (strncmp("manual", buf, strlen("manual")) == 0) { + if (hdev->pm_mng_profile == PM_AUTO) { + /* Must release the lock because the work thread also + * takes this lock. 
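+			 * (The "work thread" here appears to be the
+			 * hdev->work_freq delayed work that is flushed below.)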
But before we release it, set + * the mode to manual so nothing will change if a user + * suddenly opens the device + */ + hdev->pm_mng_profile = PM_MANUAL; + + mutex_unlock(&hdev->fpriv_list_lock); + + /* Flush the current work so we can return to the user + * knowing that he is the only one changing frequencies + */ + flush_delayed_work(&hdev->work_freq); + + return count; + } + } else { + dev_err(hdev->dev, "value should be auto or manual\n"); + count = -EINVAL; + } + +unlock_mutex: + mutex_unlock(&hdev->fpriv_list_lock); +out: + return count; +} + +static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + return sprintf(buf, "%u\n", hdev->high_pll); +} + +static ssize_t high_pll_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) { + count = -ENODEV; + goto out; + } + + rc = kstrtoul(buf, 0, &value); + + if (rc) { + count = -EINVAL; + goto out; + } + + hdev->high_pll = value; + +out: + return count; +} + +static DEVICE_ATTR_RW(high_pll); +static DEVICE_ATTR_RW(ic_clk); +static DEVICE_ATTR_RO(ic_clk_curr); +static DEVICE_ATTR_RW(mme_clk); +static DEVICE_ATTR_RO(mme_clk_curr); +static DEVICE_ATTR_RW(pm_mng_profile); +static DEVICE_ATTR_RW(tpc_clk); +static DEVICE_ATTR_RO(tpc_clk_curr); + +static struct attribute *goya_dev_attrs[] = { + &dev_attr_high_pll.attr, + &dev_attr_ic_clk.attr, + &dev_attr_ic_clk_curr.attr, + &dev_attr_mme_clk.attr, + &dev_attr_mme_clk_curr.attr, + &dev_attr_pm_mng_profile.attr, + &dev_attr_tpc_clk.attr, + &dev_attr_tpc_clk_curr.attr, + NULL, +}; + +void goya_add_device_attr(struct hl_device *hdev, + struct attribute_group *dev_attr_grp) +{ + dev_attr_grp->attrs = goya_dev_attrs; +} diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c new file mode 100644 index 000000000..14701836f --- /dev/null +++ b/drivers/misc/habanalabs/goya/goya_security.c @@ -0,0 +1,3122 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */
+
+#include "goyaP.h"
+#include "../include/goya/asic_reg/goya_regs.h"
+
+/*
+ * goya_pb_set_block - set the given block as protected
+ *
+ * @hdev: pointer to hl_device structure
+ * @base: block base address
+ *
+ */
+static void goya_pb_set_block(struct hl_device *hdev, u64 base)
+{
+	u32 pb_addr = base - CFG_BASE + PROT_BITS_OFFS;
+
+	while (pb_addr & 0xFFF) {
+		WREG32(pb_addr, 0);
+		pb_addr += 4;
+	}
+}
+
+static void goya_init_mme_protection_bits(struct hl_device *hdev)
+{
+	u32 pb_addr, mask;
+	u8 word_offset;
+
+	/* TODO: change to real reg name when Soc Online is updated */
+	u64 mmMME_SBB_POWER_ECO1 = 0xDFF60,
+		mmMME_SBB_POWER_ECO2 = 0xDFF64;
+
+	goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_0_BASE);
+	goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_1_BASE);
+	goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_2_BASE);
+	goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_3_BASE);
+
+	goya_pb_set_block(hdev, mmSBA_ECC_MEM_BASE);
+	goya_pb_set_block(hdev, mmSBB_ECC_MEM_BASE);
+
+	goya_pb_set_block(hdev, mmMME1_RTR_BASE);
+	goya_pb_set_block(hdev, mmMME1_RD_REGULATOR_BASE);
+	goya_pb_set_block(hdev, mmMME1_WR_REGULATOR_BASE);
+	goya_pb_set_block(hdev, mmMME2_RTR_BASE);
+	goya_pb_set_block(hdev, mmMME2_RD_REGULATOR_BASE);
+	goya_pb_set_block(hdev, mmMME2_WR_REGULATOR_BASE);
+	goya_pb_set_block(hdev, mmMME3_RTR_BASE);
+	goya_pb_set_block(hdev, mmMME3_RD_REGULATOR_BASE);
+	goya_pb_set_block(hdev, mmMME3_WR_REGULATOR_BASE);
+
+	goya_pb_set_block(hdev, mmMME4_RTR_BASE);
+	goya_pb_set_block(hdev, mmMME4_RD_REGULATOR_BASE);
+	goya_pb_set_block(hdev, mmMME4_WR_REGULATOR_BASE);
+
+	goya_pb_set_block(hdev, mmMME5_RTR_BASE);
+	goya_pb_set_block(hdev, mmMME5_RD_REGULATOR_BASE);
+	goya_pb_set_block(hdev, mmMME5_WR_REGULATOR_BASE);
+
+	goya_pb_set_block(hdev, mmMME6_RTR_BASE);
+	goya_pb_set_block(hdev, mmMME6_RD_REGULATOR_BASE);
+	goya_pb_set_block(hdev, mmMME6_WR_REGULATOR_BASE);
+
+	pb_addr = (mmMME_DUMMY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmMME_DUMMY & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1 << ((mmMME_DUMMY & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_RESET & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_STALL & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_DBGMEM_ADD & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_DBGMEM_DATA_WR & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_DBGMEM_DATA_RD & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_DBGMEM_CTRL & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_DBGMEM_RC & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_LOG_SHADOW & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmMME_STORE_MAX_CREDIT & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmMME_STORE_MAX_CREDIT & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1 << ((mmMME_STORE_MAX_CREDIT & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_AGU & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_SBA & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_SBB & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_SBC & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_WBC & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_SBA_CONTROL_DATA & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_SBB_CONTROL_DATA & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_SBC_CONTROL_DATA & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_WBC_CONTROL_DATA & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_TE & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_TE2DEC & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_REI_STATUS & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_REI_MASK & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_SEI_STATUS & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_SEI_MASK & 0x7F) >> 2);
+	mask |= 1 << ((mmMME_SPI_STATUS & 0x7F) >> 2);
+	mask
|= 1 << ((mmMME_SPI_MASK & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 
2); + mask |= 1 << ((mmMME_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_QM_CP_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_QM_CP_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_QM_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_CURRENT_INST_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_BUF_RDATA & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_CMDQ_CQ_IFIFO_CNT & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << 
((mmMME_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmMME_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_SBB_POWER_ECO1 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_SBB_POWER_ECO1 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_SBB_POWER_ECO1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_SBB_POWER_ECO2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); +} + +static void goya_init_dma_protection_bits(struct hl_device *hdev) +{ + u32 pb_addr, mask; + u8 word_offset; + + goya_pb_set_block(hdev, mmDMA_NRTR_BASE); + goya_pb_set_block(hdev, mmDMA_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmDMA_WR_REGULATOR_BASE); + + pb_addr = (mmDMA_QM_0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_0_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_0_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_0_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_0_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << 
((mmDMA_QM_0_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_0_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_0_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_0_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmDMA_CH_0_BASE); + + pb_addr = (mmDMA_QM_1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_1_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_1_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_1_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_1_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_PUSH1 & 
0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_1_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_1_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_1_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmDMA_CH_1_BASE); + + pb_addr = (mmDMA_QM_2_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_2_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_2_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << 
((mmDMA_QM_2_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_2_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_2_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_2_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_2_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_2_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_2_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmDMA_CH_2_BASE); + + pb_addr = (mmDMA_QM_3_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_3_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_3_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_CFG & 0x7F) 
>> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_3_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_3_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_3_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_3_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_3_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_3_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << 
((mmDMA_QM_3_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmDMA_CH_3_BASE); + + pb_addr = (mmDMA_QM_4_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_4_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_4_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_4_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_4_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_4_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_4_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_4_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_4_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask 
|= 1 << ((mmDMA_QM_4_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmDMA_CH_4_BASE); +} + +static void goya_init_tpc_protection_bits(struct hl_device *hdev) +{ + u32 pb_addr, mask; + u8 word_offset; + + goya_pb_set_block(hdev, mmTPC0_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC0_WR_REGULATOR_BASE); + + pb_addr = (mmTPC0_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC0_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_TPC_INTR_MASK & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CFG_FUNC_MBIST_CNTRL & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << 
((mmTPC0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << 
((mmTPC0_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC0_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + 
+ WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmTPC1_RTR_BASE); + goya_pb_set_block(hdev, mmTPC1_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC1_WR_REGULATOR_BASE); + + pb_addr = (mmTPC1_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC1_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_TPC_INTR_MASK & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CFG_FUNC_MBIST_CNTRL & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC1_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << 
((mmTPC1_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << 
((mmTPC1_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC1_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmTPC2_RTR_BASE); + goya_pb_set_block(hdev, mmTPC2_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC2_WR_REGULATOR_BASE); + + pb_addr = (mmTPC2_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC2_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << 
((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_TPC_INTR_MASK & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CFG_FUNC_MBIST_CNTRL & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC2_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + 
mask |= 1 << ((mmTPC2_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << 
((mmTPC2_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC2_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmTPC3_RTR_BASE); + goya_pb_set_block(hdev, mmTPC3_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC3_WR_REGULATOR_BASE); + + pb_addr = (mmTPC3_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC3_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH + & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_TPC_INTR_MASK & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = 
(mmTPC3_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CFG_FUNC_MBIST_CNTRL + & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_RST_TOKEN 
& 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << 
((mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC3_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmTPC4_RTR_BASE); + goya_pb_set_block(hdev, mmTPC4_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC4_WR_REGULATOR_BASE); + + pb_addr = (mmTPC4_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC4_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_TPC_INTR_MASK & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CFG_FUNC_MBIST_CNTRL & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << 
((mmTPC4_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 
2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + 
+ WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC4_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmTPC5_RTR_BASE); + goya_pb_set_block(hdev, mmTPC5_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC5_WR_REGULATOR_BASE); + + pb_addr = (mmTPC5_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC5_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_TPC_INTR_MASK & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CFG_FUNC_MBIST_CNTRL & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask 
|= 1 << ((mmTPC5_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << 
((mmTPC5_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC5_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmTPC6_RTR_BASE); + goya_pb_set_block(hdev, mmTPC6_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC6_WR_REGULATOR_BASE); + + pb_addr = (mmTPC6_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = 
((mmTPC6_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC6_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_TPC_INTR_MASK & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CFG_FUNC_MBIST_CNTRL & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << 
((mmTPC6_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << 
((mmTPC6_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC6_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmTPC7_NRTR_BASE); + goya_pb_set_block(hdev, mmTPC7_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC7_WR_REGULATOR_BASE); + + pb_addr = (mmTPC7_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC7_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 
1 << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_TPC_INTR_MASK & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CFG_FUNC_MBIST_CNTRL & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_TSIZE & 0x7F) >> 
2); + mask |= 1 << ((mmTPC7_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = 
(mmTPC7_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC7_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1 << ((mmTPC7_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_STS & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmTPC7_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmTPC7_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1 << ((mmTPC7_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+}
+
+/*
+ * goya_init_protection_bits - Initialize protection bits for specific registers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * All protection bits are 1 by default, which means the registers are not
+ * protected. Each bit that belongs to a protected register needs to be set
+ * to 0.
+ *
+ */
+static void goya_init_protection_bits(struct hl_device *hdev)
+{
+	/*
+	 * In each 4K block of registers, the last 128 bytes are protection
+	 * bits - a total of 1024 bits, one for each register. Each bit
+	 * corresponds to a specific register, in the order of the registers.
+	 * So in order to calculate the bit that corresponds to a given
+	 * register, we need to calculate its word offset and then the exact
+	 * bit inside the word (a word is 4 bytes).
+	 *
+	 * Register address:
+	 *
+	 * 31                 12 11        7 6             2 1       0
+	 * -----------------------------------------------------------------
+	 * |       Don't        |    word    | bit location  |    0    |
+	 * |       care         |   offset   | inside word   |         |
+	 * -----------------------------------------------------------------
+	 *
+	 * Bits 7-11 represent the word offset inside the 128 bytes.
+	 * Bits 2-6 represent the bit location inside the word.
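+	 *
+	 * As an illustrative example (an editorial sketch, not part of the
+	 * driver itself): with PROT_BITS_OFFS = 0xF80 (the offset of the
+	 * last 128 bytes in the 4K block), a hypothetical register whose
+	 * address ends in 0x2A4 inside its block gives
+	 *
+	 *	word_offset = ((0x2A4 & 0xF80) >> 7) << 2 = 5 << 2 = 20
+	 *	bit         = (0x2A4 & 0x7F) >> 2 = 9
+	 *
+	 * so clearing bit 9 of the 32-bit word at (block base + 0xF80 + 20)
+	 * is what protects that register.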
+ */ + u32 pb_addr, mask; + u8 word_offset; + + goya_pb_set_block(hdev, mmPCI_NRTR_BASE); + goya_pb_set_block(hdev, mmPCI_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmPCI_WR_REGULATOR_BASE); + + goya_pb_set_block(hdev, mmSRAM_Y0_X0_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y0_X0_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y0_X1_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y0_X1_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y0_X2_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y0_X2_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y0_X3_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y0_X3_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y0_X4_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y0_X4_RTR_BASE); + + goya_pb_set_block(hdev, mmSRAM_Y1_X0_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y1_X0_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y1_X1_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y1_X1_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y1_X2_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y1_X2_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y1_X3_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y1_X3_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y1_X4_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y1_X4_RTR_BASE); + + goya_pb_set_block(hdev, mmSRAM_Y2_X0_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y2_X0_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y2_X1_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y2_X1_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y2_X2_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y2_X2_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y2_X3_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y2_X3_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y2_X4_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y2_X4_RTR_BASE); + + goya_pb_set_block(hdev, mmSRAM_Y3_X0_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y3_X0_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y3_X1_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y3_X1_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y3_X2_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y3_X2_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y3_X3_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y3_X3_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y3_X4_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y3_X4_RTR_BASE); + + goya_pb_set_block(hdev, mmSRAM_Y4_X0_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y4_X0_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y4_X1_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y4_X1_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y4_X2_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y4_X2_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y4_X3_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y4_X3_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y4_X4_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y4_X4_RTR_BASE); + + goya_pb_set_block(hdev, mmSRAM_Y5_X0_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y5_X0_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y5_X1_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y5_X1_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y5_X2_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y5_X2_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y5_X3_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y5_X3_RTR_BASE); + goya_pb_set_block(hdev, mmSRAM_Y5_X4_BANK_BASE); + goya_pb_set_block(hdev, mmSRAM_Y5_X4_RTR_BASE); + + goya_pb_set_block(hdev, mmPCIE_WRAP_BASE); + goya_pb_set_block(hdev, mmPCIE_CORE_BASE); + goya_pb_set_block(hdev, mmPCIE_DB_CFG_BASE); + goya_pb_set_block(hdev, mmPCIE_DB_CMD_BASE); + goya_pb_set_block(hdev, mmPCIE_AUX_BASE); + goya_pb_set_block(hdev, mmPCIE_DB_RSV_BASE); + goya_pb_set_block(hdev, 
mmPCIE_PHY_BASE); + goya_pb_set_block(hdev, mmTPC0_NRTR_BASE); + goya_pb_set_block(hdev, mmTPC_PLL_BASE); + + pb_addr = (mmTPC_PLL_CLK_RLX_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC_PLL_CLK_RLX_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC_PLL_CLK_RLX_0 & 0x7C) >> 2); + + WREG32(pb_addr + word_offset, mask); + + goya_init_mme_protection_bits(hdev); + + goya_init_dma_protection_bits(hdev); + + goya_init_tpc_protection_bits(hdev); +} + +/* + * goya_init_security - Initialize security model + * + * @hdev: pointer to hl_device structure + * + * Initialize the security model of the device + * That includes range registers and protection bit per register + * + */ +void goya_init_security(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + + u32 dram_addr_lo = lower_32_bits(DRAM_PHYS_BASE); + u32 dram_addr_hi = upper_32_bits(DRAM_PHYS_BASE); + + u32 lbw_rng0_base = 0xFC440000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng0_mask = 0xFFFF0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng1_base = 0xFC480000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng1_mask = 0xFFF80000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng2_base = 0xFC600000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng2_mask = 0xFFE00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng3_base = 0xFC800000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng3_mask = 0xFFF00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng4_base = 0xFCC02000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng4_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng5_base = 0xFCC40000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng5_mask = 0xFFFF8000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng6_base = 0xFCC48000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng6_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng7_base = 0xFCC4A000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng7_mask = 0xFFFFE000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng8_base = 0xFCC4C000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng8_mask = 0xFFFFC000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng9_base = 0xFCC50000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng9_mask = 0xFFFF0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng10_base = 0xFCC60000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng10_mask = 0xFFFE0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng11_base = 0xFCE02000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng11_mask = 0xFFFFE000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng12_base = 0xFE484000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng12_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng13_base = 0xFEC43000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng13_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + WREG32(mmDMA_MACRO_LBW_RANGE_HIT_BLOCK, 0xFFFF); + WREG32(mmDMA_MACRO_HBW_RANGE_HIT_BLOCK, 0xFF); + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) { + WREG32(mmDMA_MACRO_HBW_RANGE_HIT_BLOCK, 0xFE); + + /* Protect HOST */ + WREG32(mmDMA_MACRO_HBW_RANGE_BASE_31_0_0, 0); + WREG32(mmDMA_MACRO_HBW_RANGE_BASE_49_32_0, 0); + WREG32(mmDMA_MACRO_HBW_RANGE_MASK_31_0_0, 0); + WREG32(mmDMA_MACRO_HBW_RANGE_MASK_49_32_0, 0xFFF80); + } + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmDMA_MACRO_HBW_RANGE_BASE_31_0_1, dram_addr_lo); + WREG32(mmDMA_MACRO_HBW_RANGE_BASE_49_32_1, dram_addr_hi); + WREG32(mmDMA_MACRO_HBW_RANGE_MASK_31_0_1, 
0xE0000000); + WREG32(mmDMA_MACRO_HBW_RANGE_MASK_49_32_1, 0x3FFFF); + + /* Protect registers */ + + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmMME1_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmMME2_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmMME3_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmMME4_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmMME5_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmMME6_RTR_LBW_RANGE_HIT, 0xFFFF); + + WREG32(mmMME1_RTR_HBW_RANGE_HIT, 0xFE); + WREG32(mmMME2_RTR_HBW_RANGE_HIT, 0xFE); + WREG32(mmMME3_RTR_HBW_RANGE_HIT, 0xFE); + WREG32(mmMME4_RTR_HBW_RANGE_HIT, 0xFE); + WREG32(mmMME5_RTR_HBW_RANGE_HIT, 0xFE); + WREG32(mmMME6_RTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmMME1_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmMME1_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmMME1_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmMME1_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + WREG32(mmMME2_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmMME2_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmMME2_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmMME2_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + WREG32(mmMME3_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmMME3_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmMME3_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmMME3_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + WREG32(mmMME4_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmMME4_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmMME4_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmMME4_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + WREG32(mmMME5_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmMME5_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmMME5_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmMME5_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + WREG32(mmMME6_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmMME6_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmMME6_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmMME6_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmMME1_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmMME1_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + 
WREG32(mmMME1_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmMME1_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmMME2_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmMME2_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmMME2_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmMME2_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmMME3_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmMME3_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmMME3_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmMME3_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmMME4_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmMME4_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmMME4_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmMME4_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmMME5_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmMME5_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmMME5_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmMME5_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmMME6_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmMME6_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmMME6_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmMME6_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmMME1_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmMME2_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + 
WREG32(mmMME2_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmMME3_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmMME4_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + 
WREG32(mmMME4_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmMME5_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmMME6_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC0_NRTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC0_NRTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_L_0, 0); + 
WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC1_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC1_RTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC1_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC1_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC1_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC1_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC1_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC1_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC1_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC1_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + 
WREG32(mmTPC1_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC2_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC2_RTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC2_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC2_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC2_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC2_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC2_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC2_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC2_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC2_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC3_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC3_RTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC3_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC3_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC3_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC3_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC3_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC3_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC3_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC3_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); 
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC4_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC4_RTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC4_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC4_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC4_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC4_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC4_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC4_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC4_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC4_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC5_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC5_RTR_HBW_RANGE_HIT, 
0xFE); + + /* Protect HOST */ + WREG32(mmTPC5_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC5_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC5_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC5_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC5_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC5_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC5_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC5_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC6_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC6_RTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC6_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC6_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC6_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC6_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC6_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC6_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC6_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC6_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_7, 
lbw_rng7_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC7_NRTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC7_NRTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + goya_init_protection_bits(hdev); +} diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h new file mode 100644 index 000000000..2a5c9cb3d --- /dev/null +++ b/drivers/misc/habanalabs/include/common/cpucp_if.h @@ -0,0 +1,443 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2020 HabanaLabs, Ltd. + * All Rights Reserved. 
+ *
+ */
+
+#ifndef CPUCP_IF_H
+#define CPUCP_IF_H
+
+#include <linux/types.h>
+
+/*
+ * EVENT QUEUE
+ */
+
+struct hl_eq_header {
+	__le32 reserved;
+	__le32 ctl;
+};
+
+struct hl_eq_ecc_data {
+	__le64 ecc_address;
+	__le64 ecc_syndrom;
+	__u8 memory_wrapper_idx;
+	__u8 pad[7];
+};
+
+struct hl_eq_entry {
+	struct hl_eq_header hdr;
+	union {
+		struct hl_eq_ecc_data ecc_data;
+		__le64 data[7];
+	};
+};
+
+#define HL_EQ_ENTRY_SIZE	sizeof(struct hl_eq_entry)
+
+#define EQ_CTL_READY_SHIFT	31
+#define EQ_CTL_READY_MASK	0x80000000
+
+#define EQ_CTL_EVENT_TYPE_SHIFT	16
+#define EQ_CTL_EVENT_TYPE_MASK	0x03FF0000
+
+enum pq_init_status {
+	PQ_INIT_STATUS_NA = 0,
+	PQ_INIT_STATUS_READY_FOR_CP,
+	PQ_INIT_STATUS_READY_FOR_HOST,
+	PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI
+};
+
+/*
+ * CpuCP Primary Queue Packets
+ *
+ * During normal operation, the host's kernel driver needs to send various
+ * messages to CpuCP, usually either to SET some value into a H/W periphery or
+ * to GET the current value of some H/W periphery. For example, SET the
+ * frequency of MME/TPC and GET the value of the thermal sensor.
+ *
+ * These messages can be initiated either by the User application or by the
+ * host's driver itself, e.g. power management code. In either case, the
+ * communication from the host's driver to CpuCP will *always* be in
+ * synchronous mode, meaning that the host will send a single message and poll
+ * until the message is acknowledged and the results are ready (if results are
+ * needed).
+ *
+ * This means that only a single message can be sent at a time and the host's
+ * driver must wait for its result before sending the next message. Having said
+ * that, because these are control messages which are sent at a relatively low
+ * frequency, this limitation seems acceptable. It's important to note that
+ * in case of multiple devices, messages to different devices *can* be sent
+ * at the same time.
+ *
+ * The message, inputs/outputs (if relevant) and fence object will be located
+ * on the device DDR at an address that will be determined by the host's
+ * driver. During the device initialization phase, the host will pass that
+ * address to CpuCP. Most of the message types will contain inputs/outputs
+ * inside the message itself. The common part of each message will contain the
+ * opcode of the message (its type) and a field representing a fence object.
+ *
+ * When the host's driver wishes to send a message to CpuCP, it will write the
+ * message contents to the device DDR, clear the fence object and then write to
+ * the PSOC_ARC1_AUX_SW_INTR, to issue interrupt 121 to the ARC Management CPU.
+ *
+ * Upon receiving the interrupt (#121), CpuCP will read the message from the
+ * DDR. In case the message is a SET operation, CpuCP will first perform the
+ * operation and then write to the fence object on the device DDR. In case the
+ * message is a GET operation, CpuCP will first fill the results section on the
+ * device DDR and then write to the fence object. If an error occurred, CpuCP
+ * will fill the rc field with the right error code.
+ *
+ * In the meantime, the host's driver will poll on the fence object. Once the
+ * host sees that the fence object is signaled, it will read the results from
+ * the device DDR (if relevant) and resume the code execution in the host's
+ * driver.
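+ *
+ * As a rough, editorial sketch of the host-side flow described above (the
+ * helper names here are hypothetical shorthand, not the driver's actual
+ * functions):
+ *
+ *	copy_packet_to_device_ddr(pkt);       // message contents
+ *	clear_fence(pkt);                     // fence object = 0
+ *	WREG32(mmPSOC_ARC1_AUX_SW_INTR, ...); // trigger interrupt #121
+ *	while (!fence_signaled(pkt))          // synchronous: host polls
+ *		poll_and_wait();
+ *	read_results_from_device_ddr(pkt);    // for GET operations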
+ * + * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8 + * so the value being put by the host's driver matches the value read by CpuCP + * + * Non-QMAN packets should be limited to values 1 through (2^8 - 1) + * + * Detailed description: + * + * CPUCP_PACKET_DISABLE_PCI_ACCESS - + * After receiving this packet the embedded CPU must NOT issue PCI + * transactions (read/write) towards the Host CPU. This also includes + * sending MSI-X interrupts. + * This packet is usually sent before the device is moved to D3Hot state. + * + * CPUCP_PACKET_ENABLE_PCI_ACCESS - + * After receiving this packet the embedded CPU is allowed to issue PCI + * transactions towards the Host CPU, including sending MSI-X interrupts. + * This packet is usually sent after the device is moved to D0 state. + * + * CPUCP_PACKET_TEMPERATURE_GET - + * Fetch the current temperature / Max / Max Hyst / Critical / + * Critical Hyst of a specified thermal sensor. The packet's + * arguments specify the desired sensor and the field to get. + * + * CPUCP_PACKET_VOLTAGE_GET - + * Fetch the voltage / Max / Min of a specified sensor. The packet's + * arguments specify the sensor and type. + * + * CPUCP_PACKET_CURRENT_GET - + * Fetch the current / Max / Min of a specified sensor. The packet's + * arguments specify the sensor and type. + * + * CPUCP_PACKET_FAN_SPEED_GET - + * Fetch the speed / Max / Min of a specified fan. The packet's + * arguments specify the sensor and type. + * + * CPUCP_PACKET_PWM_GET - + * Fetch the pwm value / mode of a specified pwm. The packet's + * arguments specify the sensor and type. + * + * CPUCP_PACKET_PWM_SET - + * Set the pwm value / mode of a specified pwm. The packet's + * arguments specify the sensor, type and value. + * + * CPUCP_PACKET_FREQUENCY_SET - + * Set the frequency of a specified PLL. The packet's arguments specify + * the PLL and the desired frequency. The actual frequency in the device + * might differ from the requested frequency. + * + * CPUCP_PACKET_FREQUENCY_GET - + * Fetch the frequency of a specified PLL. The packet's arguments specify + * the PLL. + * + * CPUCP_PACKET_LED_SET - + * Set the state of a specified led. The packet's arguments + * specify the led and the desired state. + * + * CPUCP_PACKET_I2C_WR - + * Write a 32-bit value to an I2C device. The packet's arguments specify the + * I2C bus, address and value. + * + * CPUCP_PACKET_I2C_RD - + * Read a 32-bit value from an I2C device. The packet's arguments specify the + * I2C bus and address. + * + * CPUCP_PACKET_INFO_GET - + * Fetch information from the device as specified in the packet's + * structure. The host's driver passes the max size it allows the CpuCP to + * write to the structure, to prevent data corruption in case of + * mismatched driver/FW versions. + * + * CPUCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed + * + * CPUCP_PACKET_UNMASK_RAZWI_IRQ - + * Unmask the given IRQ. The IRQ number is specified in the value field. + * The packet is sent after receiving an interrupt and printing its + * relevant information. + * + * CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY - + * Unmask the given IRQs. The IRQ numbers are specified in an array right + * after the cpucp_packet structure, where its first element is the array + * length. The packet is sent after a soft reset was done in order to + * handle any interrupts that were sent during the reset process. + * + * CPUCP_PACKET_TEST - + * Test packet for CpuCP connectivity. The CPU will put the fence value + * in the result field.
+ * + * CPUCP_PACKET_FREQUENCY_CURR_GET - + * Fetch the current frequency of a specified PLL. The packet's arguments + * specify the PLL. + * + * CPUCP_PACKET_MAX_POWER_GET - + * Fetch the maximal power of the device. + * + * CPUCP_PACKET_MAX_POWER_SET - + * Set the maximal power of the device. The packet's arguments specify + * the power. + * + * CPUCP_PACKET_EEPROM_DATA_GET - + * Get EEPROM data from the CpuCP kernel. The buffer is specified in the + * addr field. The CPU will put the returned data size in the result + * field. In addition, the host's driver passes the max size it allows the + * CpuCP to write to the structure, to prevent data corruption in case of + * mismatched driver/FW versions. + * + * CPUCP_PACKET_TEMPERATURE_SET - + * Set the value of the offset property of a specified thermal sensor. + * The packet's arguments specify the desired sensor and the field to + * set. + * + * CPUCP_PACKET_VOLTAGE_SET - + * Trigger the reset_history property of a specified voltage sensor. + * The packet's arguments specify the desired sensor and the field to + * set. + * + * CPUCP_PACKET_CURRENT_SET - + * Trigger the reset_history property of a specified current sensor. + * The packet's arguments specify the desired sensor and the field to + * set. + * + * CPUCP_PACKET_PLL_REG_GET - + * Fetch a PLL register from the required PLL IP. + * The packet's arguments specify the PLL IP and the register to get. + * Each register is a 32-bit value, which is returned in the result field. + * + */ + +enum cpucp_packet_id { + CPUCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */ + CPUCP_PACKET_ENABLE_PCI_ACCESS, /* internal */ + CPUCP_PACKET_TEMPERATURE_GET, /* sysfs */ + CPUCP_PACKET_VOLTAGE_GET, /* sysfs */ + CPUCP_PACKET_CURRENT_GET, /* sysfs */ + CPUCP_PACKET_FAN_SPEED_GET, /* sysfs */ + CPUCP_PACKET_PWM_GET, /* sysfs */ + CPUCP_PACKET_PWM_SET, /* sysfs */ + CPUCP_PACKET_FREQUENCY_SET, /* sysfs */ + CPUCP_PACKET_FREQUENCY_GET, /* sysfs */ + CPUCP_PACKET_LED_SET, /* debugfs */ + CPUCP_PACKET_I2C_WR, /* debugfs */ + CPUCP_PACKET_I2C_RD, /* debugfs */ + CPUCP_PACKET_INFO_GET, /* IOCTL */ + CPUCP_PACKET_FLASH_PROGRAM_REMOVED, + CPUCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */ + CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */ + CPUCP_PACKET_TEST, /* internal */ + CPUCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */ + CPUCP_PACKET_MAX_POWER_GET, /* sysfs */ + CPUCP_PACKET_MAX_POWER_SET, /* sysfs */ + CPUCP_PACKET_EEPROM_DATA_GET, /* sysfs */ + CPUCP_RESERVED, + CPUCP_PACKET_TEMPERATURE_SET, /* sysfs */ + CPUCP_PACKET_VOLTAGE_SET, /* sysfs */ + CPUCP_PACKET_CURRENT_SET, /* sysfs */ + CPUCP_PACKET_PCIE_THROUGHPUT_GET, /* internal */ + CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */ + CPUCP_PACKET_TOTAL_ENERGY_GET, /* internal */ + CPUCP_PACKET_PLL_REG_GET, /* internal */ +}; + +#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5 + +#define CPUCP_PKT_CTL_RC_SHIFT 12 +#define CPUCP_PKT_CTL_RC_MASK 0x0000F000 + +#define CPUCP_PKT_CTL_OPCODE_SHIFT 16 +#define CPUCP_PKT_CTL_OPCODE_MASK 0x1FFF0000 + +struct cpucp_packet { + union { + __le64 value; /* For SET packets */ + __le64 result; /* For GET packets */ + __le64 addr; /* For PQ */ + }; + + __le32 ctl; + + __le32 fence; /* Signal to host that message is completed */ + + union { + struct {/* For temperature/current/voltage/fan/pwm get/set */ + __le16 sensor_index; + __le16 type; + }; + + struct { /* For I2C read/write */ + __u8 i2c_bus; + __u8 i2c_addr; + __u8 i2c_reg; + __u8 pad; /* unused */ + }; + + struct {/* For PLL register fetch */ + __le16 pll_type; + __le16 pll_reg; + }; +
+ /* For any general request */ + __le32 index; + + /* For frequency get/set */ + __le32 pll_index; + + /* For led set */ + __le32 led_index; + + /* For get CpuCP info/EEPROM data */ + __le32 data_max_size; + }; + + __le32 reserved; +}; + +struct cpucp_unmask_irq_arr_packet { + struct cpucp_packet cpucp_pkt; + __le32 length; + __le32 irqs[0]; +}; + +enum cpucp_packet_rc { + cpucp_packet_success, + cpucp_packet_invalid, + cpucp_packet_fault +}; + +/* + * cpucp_temp_type should adhere to hwmon_temp_attributes + * defined in the Linux kernel's hwmon.h file + */ +enum cpucp_temp_type { + cpucp_temp_input, + cpucp_temp_max = 6, + cpucp_temp_max_hyst, + cpucp_temp_crit, + cpucp_temp_crit_hyst, + cpucp_temp_offset = 19, + cpucp_temp_highest = 22, + cpucp_temp_reset_history = 23 +}; + +enum cpucp_in_attributes { + cpucp_in_input, + cpucp_in_min, + cpucp_in_max, + cpucp_in_highest = 7, + cpucp_in_reset_history +}; + +enum cpucp_curr_attributes { + cpucp_curr_input, + cpucp_curr_min, + cpucp_curr_max, + cpucp_curr_highest = 7, + cpucp_curr_reset_history +}; + +enum cpucp_fan_attributes { + cpucp_fan_input, + cpucp_fan_min = 2, + cpucp_fan_max +}; + +enum cpucp_pwm_attributes { + cpucp_pwm_input, + cpucp_pwm_enable +}; + +enum cpucp_pcie_throughput_attributes { + cpucp_pcie_throughput_tx, + cpucp_pcie_throughput_rx +}; + +enum cpucp_pll_reg_attributes { + cpucp_pll_nr_reg, + cpucp_pll_nf_reg, + cpucp_pll_od_reg, + cpucp_pll_div_factor_reg, + cpucp_pll_div_sel_reg +}; + +enum cpucp_pll_type_attributes { + cpucp_pll_cpu, + cpucp_pll_pci, +}; + +/* Event Queue Packets */ + +struct eq_generic_event { + __le64 data[7]; +}; + +/* + * CpuCP info + */ + +#define CARD_NAME_MAX_LEN 16 +#define VERSION_MAX_LEN 128 +#define CPUCP_MAX_SENSORS 128 + +struct cpucp_sensor { + __le32 type; + __le32 flags; +}; + +/** + * enum cpucp_card_types - ASIC card type. + * @cpucp_card_type_pci: PCI card. + * @cpucp_card_type_pmc: PCI Mezzanine Card. + */ +enum cpucp_card_types { + cpucp_card_type_pci, + cpucp_card_type_pmc +}; + +/** + * struct cpucp_info - Info from CpuCP that is necessary to the host's driver + * @sensors: available sensors description. + * @kernel_version: CpuCP linux kernel version. + * @reserved: reserved field. + * @card_type: card configuration type. + * @card_location: in a server, each card has a different connection topology + * depending on its location (relevant for PMC card type) + * @cpld_version: CPLD programmed F/W version. + * @infineon_version: Infineon main DC-DC version. + * @fuse_version: silicon production FUSE information. + * @thermal_version: thermald S/W version. + * @cpucp_version: CpuCP S/W version. + * @dram_size: available DRAM size.
+ * @card_name: card name that will be displayed in HWMON subsystem on the host + */ +struct cpucp_info { + struct cpucp_sensor sensors[CPUCP_MAX_SENSORS]; + __u8 kernel_version[VERSION_MAX_LEN]; + __le32 reserved; + __le32 card_type; + __le32 card_location; + __le32 cpld_version; + __le32 infineon_version; + __u8 fuse_version[VERSION_MAX_LEN]; + __u8 thermal_version[VERSION_MAX_LEN]; + __u8 cpucp_version[VERSION_MAX_LEN]; + __le32 reserved2; + __le64 dram_size; + char card_name[CARD_NAME_MAX_LEN]; +}; + +#endif /* CPUCP_IF_H */ diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h new file mode 100644 index 000000000..bb67cafc6 --- /dev/null +++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2018-2020 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef HL_BOOT_IF_H +#define HL_BOOT_IF_H + +#define LKD_HARD_RESET_MAGIC 0xED7BD694 +#define HL_POWER9_HOST_MAGIC 0x1DA30009 + +#define BOOT_FIT_SRAM_OFFSET 0x200000 + +/* + * CPU error bits in BOOT_ERROR registers + * + * CPU_BOOT_ERR0_DRAM_INIT_FAIL DRAM initialization failed. + * DRAM is not reliable to use. + * + * CPU_BOOT_ERR0_FIT_CORRUPTED FIT data integrity verification of the + * image provided by the host has failed. + * + * CPU_BOOT_ERR0_TS_INIT_FAIL Thermal Sensor initialization failed. + * Boot continues as usual, but keep in + * mind this is a warning. + * + * CPU_BOOT_ERR0_DRAM_SKIPPED DRAM initialization has been skipped. + * Skipping DRAM initialization has been + * requested (e.g. strap, command, etc.) + * and FW skipped the DRAM initialization. + * Host can initialize the DRAM. + * + * CPU_BOOT_ERR0_BMC_WAIT_SKIPPED Waiting for BMC data will be skipped. + * Meaning the BMC data might not be + * available until reset. + * + * CPU_BOOT_ERR0_NIC_DATA_NOT_RDY NIC data from BMC is not ready. + * BMC has not provided the NIC data yet. + * Once provided this bit will be cleared. + * + * CPU_BOOT_ERR0_NIC_FW_FAIL NIC FW loading failed. + * The NIC FW loading and initialization + * failed. This means NICs are not usable. + * + * CPU_BOOT_ERR0_SECURITY_NOT_RDY Chip security initialization has been + * started, but is not ready yet - chip + * cannot be accessed. + * + * CPU_BOOT_ERR0_SECURITY_FAIL Security related tasks have failed. + * The tasks are security init (root of + * trust), boot authentication (chain of + * trust), data packets authentication. + * + * CPU_BOOT_ERR0_ENABLED Error registers enabled. + * This is a main indication that the + * running FW populates the error + * registers. Meaning the error bits are + * not garbage, but actual error statuses. 
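+ *
+ * Editorial sketch, illustrative only (the BOOT_ERROR register itself is
+ * ASIC-specific, so "boot_err0_reg" is a placeholder name): since only
+ * CPU_BOOT_ERR0_ENABLED indicates that the other bits are real statuses,
+ * a reader of the register should gate on it first:
+ *
+ *	err0 = RREG32(boot_err0_reg);
+ *	if (err0 & CPU_BOOT_ERR0_ENABLED) {
+ *		if (err0 & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
+ *			- DRAM is unreliable, fail the boot
+ *		if (err0 & CPU_BOOT_ERR0_TS_INIT_FAIL)
+ *			- warning only, boot continues
+ *	}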
+ */ +#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << 0) +#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << 1) +#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << 2) +#define CPU_BOOT_ERR0_DRAM_SKIPPED (1 << 3) +#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << 4) +#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << 5) +#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << 6) +#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << 7) +#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << 8) +#define CPU_BOOT_ERR0_ENABLED (1 << 31) + +enum cpu_boot_status { + CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */ + CPU_BOOT_STATUS_IN_WFE = 1, + CPU_BOOT_STATUS_DRAM_RDY = 2, + CPU_BOOT_STATUS_SRAM_AVAIL = 3, + CPU_BOOT_STATUS_IN_BTL = 4, /* BTL is H/W FSM */ + CPU_BOOT_STATUS_IN_PREBOOT = 5, + CPU_BOOT_STATUS_IN_SPL, /* deprecated - not reported */ + CPU_BOOT_STATUS_IN_UBOOT = 7, + CPU_BOOT_STATUS_DRAM_INIT_FAIL, /* deprecated - will be removed */ + CPU_BOOT_STATUS_FIT_CORRUPTED, /* deprecated - will be removed */ + /* U-Boot console prompt activated, commands are not processed */ + CPU_BOOT_STATUS_UBOOT_NOT_READY = 10, + /* Finished NICs init, reported after DRAM and NICs */ + CPU_BOOT_STATUS_NIC_FW_RDY = 11, + CPU_BOOT_STATUS_TS_INIT_FAIL, /* deprecated - will be removed */ + CPU_BOOT_STATUS_DRAM_SKIPPED, /* deprecated - will be removed */ + CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */ + /* Last boot loader progress status, ready to receive commands */ + CPU_BOOT_STATUS_READY_TO_BOOT = 15, + /* Internal Boot finished, ready for boot-fit */ + CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT = 16, + /* Internal Security has been initialized, device can be accessed */ + CPU_BOOT_STATUS_SECURITY_READY = 17, +}; + +enum kmd_msg { + KMD_MSG_NA = 0, + KMD_MSG_GOTO_WFE, + KMD_MSG_FIT_RDY, + KMD_MSG_SKIP_BMC, +}; + +enum cpu_msg_status { + CPU_MSG_CLR = 0, + CPU_MSG_OK, + CPU_MSG_ERR, +}; + +#endif /* HL_BOOT_IF_H */ diff --git a/drivers/misc/habanalabs/include/common/qman_if.h b/drivers/misc/habanalabs/include/common/qman_if.h new file mode 100644 index 000000000..7ed773957 --- /dev/null +++ b/drivers/misc/habanalabs/include/common/qman_if.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef QMAN_IF_H +#define QMAN_IF_H + +#include <linux/types.h> + +/* + * PRIMARY QUEUE + */ + +struct hl_bd { + __le64 ptr; + __le32 len; + __le32 ctl; +}; + +#define HL_BD_SIZE sizeof(struct hl_bd) + +/* + * S/W CTL FIELDS. + * + * BD_CTL_REPEAT_VALID tells the CP whether the repeat field in the BD CTL is + * valid. 1 means the repeat field is valid, 0 means not-valid, + * i.e. 
repeat == 1 + */ +#define BD_CTL_REPEAT_VALID_SHIFT 24 +#define BD_CTL_REPEAT_VALID_MASK 0x01000000 + +#define BD_CTL_SHADOW_INDEX_SHIFT 0 +#define BD_CTL_SHADOW_INDEX_MASK 0x00000FFF + +/* + * H/W CTL FIELDS + */ + +#define BD_CTL_COMP_OFFSET_SHIFT 16 +#define BD_CTL_COMP_OFFSET_MASK 0x0FFF0000 + +#define BD_CTL_COMP_DATA_SHIFT 0 +#define BD_CTL_COMP_DATA_MASK 0x0000FFFF + +/* + * COMPLETION QUEUE + */ + +struct hl_cq_entry { + __le32 data; +}; + +#define HL_CQ_ENTRY_SIZE sizeof(struct hl_cq_entry) + +#define CQ_ENTRY_READY_SHIFT 31 +#define CQ_ENTRY_READY_MASK 0x80000000 + +#define CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT 30 +#define CQ_ENTRY_SHADOW_INDEX_VALID_MASK 0x40000000 + +#define CQ_ENTRY_SHADOW_INDEX_SHIFT BD_CTL_SHADOW_INDEX_SHIFT +#define CQ_ENTRY_SHADOW_INDEX_MASK BD_CTL_SHADOW_INDEX_MASK + + +#endif /* QMAN_IF_H */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h new file mode 100644 index 000000000..cf80e3131 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_CPU_IF_REGS_H_ +#define ASIC_REG_CPU_IF_REGS_H_ + +/* + ***************************************** + * CPU_IF (Prototype: CPU_IF) + ***************************************** + */ + +#define mmCPU_IF_ARUSER_OVR 0x442104 + +#define mmCPU_IF_ARUSER_OVR_EN 0x442108 + +#define mmCPU_IF_AWUSER_OVR 0x44210C + +#define mmCPU_IF_AWUSER_OVR_EN 0x442110 + +#define mmCPU_IF_AXCACHE_OVR 0x442114 + +#define mmCPU_IF_LOCK_OVR 0x442118 + +#define mmCPU_IF_PROT_OVR 0x44211C + +#define mmCPU_IF_MAX_OUTSTANDING 0x442120 + +#define mmCPU_IF_EARLY_BRESP_EN 0x442124 + +#define mmCPU_IF_FORCE_RSP_OK 0x442128 + +#define mmCPU_IF_CPU_MSB_ADDR 0x44212C + +#define mmCPU_IF_AXI_SPLIT_INTR 0x442130 + +#define mmCPU_IF_TOTAL_WR_CNT 0x442140 + +#define mmCPU_IF_INFLIGHT_WR_CNT 0x442144 + +#define mmCPU_IF_TOTAL_RD_CNT 0x442150 + +#define mmCPU_IF_INFLIGHT_RD_CNT 0x442154 + +#define mmCPU_IF_PF_PQ_PI 0x442200 + +#define mmCPU_IF_PQ_BASE_ADDR_LOW 0x442204 + +#define mmCPU_IF_PQ_BASE_ADDR_HIGH 0x442208 + +#define mmCPU_IF_PQ_LENGTH 0x44220C + +#define mmCPU_IF_CQ_BASE_ADDR_LOW 0x442210 + +#define mmCPU_IF_CQ_BASE_ADDR_HIGH 0x442214 + +#define mmCPU_IF_CQ_LENGTH 0x442218 + +#define mmCPU_IF_EQ_BASE_ADDR_LOW 0x442220 + +#define mmCPU_IF_EQ_BASE_ADDR_HIGH 0x442224 + +#define mmCPU_IF_EQ_LENGTH 0x442228 + +#define mmCPU_IF_EQ_RD_OFFS 0x44222C + +#define mmCPU_IF_QUEUE_INIT 0x442230 + +#define mmCPU_IF_TPC_SERR_INTR_STS 0x442300 + +#define mmCPU_IF_TPC_SERR_INTR_CLR 0x442304 + +#define mmCPU_IF_TPC_SERR_INTR_MASK 0x442308 + +#define mmCPU_IF_TPC_DERR_INTR_STS 0x442310 + +#define mmCPU_IF_TPC_DERR_INTR_CLR 0x442314 + +#define mmCPU_IF_TPC_DERR_INTR_MASK 0x442318 + +#define mmCPU_IF_DMA_SERR_INTR_STS 0x442320 + +#define mmCPU_IF_DMA_SERR_INTR_CLR 0x442324 + +#define mmCPU_IF_DMA_SERR_INTR_MASK 0x442328 + +#define mmCPU_IF_DMA_DERR_INTR_STS 0x442330 + +#define mmCPU_IF_DMA_DERR_INTR_CLR 0x442334 + +#define mmCPU_IF_DMA_DERR_INTR_MASK 0x442338 + +#define mmCPU_IF_SRAM_SERR_INTR_STS 0x442340 + +#define mmCPU_IF_SRAM_SERR_INTR_CLR 0x442344 + +#define mmCPU_IF_SRAM_SERR_INTR_MASK 0x442348 + +#define mmCPU_IF_SRAM_DERR_INTR_STS 0x442350 + +#define 
mmCPU_IF_SRAM_DERR_INTR_CLR 0x442354 + +#define mmCPU_IF_SRAM_DERR_INTR_MASK 0x442358 + +#define mmCPU_IF_NIC_SERR_INTR_STS 0x442360 + +#define mmCPU_IF_NIC_SERR_INTR_CLR 0x442364 + +#define mmCPU_IF_NIC_SERR_INTR_MASK 0x442368 + +#define mmCPU_IF_NIC_DERR_INTR_STS 0x442370 + +#define mmCPU_IF_NIC_DERR_INTR_CLR 0x442374 + +#define mmCPU_IF_NIC_DERR_INTR_MASK 0x442378 + +#define mmCPU_IF_DMA_IF_SERR_INTR_STS 0x442380 + +#define mmCPU_IF_DMA_IF_SERR_INTR_CLR 0x442384 + +#define mmCPU_IF_DMA_IF_SERR_INTR_MASK 0x442388 + +#define mmCPU_IF_DMA_IF_DERR_INTR_STS 0x442390 + +#define mmCPU_IF_DMA_IF_DERR_INTR_CLR 0x442394 + +#define mmCPU_IF_DMA_IF_DERR_INTR_MASK 0x442398 + +#define mmCPU_IF_HBM_SERR_INTR_STS 0x4423A0 + +#define mmCPU_IF_HBM_SERR_INTR_CLR 0x4423A4 + +#define mmCPU_IF_HBM_SERR_INTR_MASK 0x4423A8 + +#define mmCPU_IF_HBM_DERR_INTR_STS 0x4423B0 + +#define mmCPU_IF_HBM_DERR_INTR_CLR 0x4423B4 + +#define mmCPU_IF_HBM_DERR_INTR_MASK 0x4423B8 + +#define mmCPU_IF_PLL_SEI_INTR_STS 0x442400 + +#define mmCPU_IF_PLL_SEI_INTR_CLR 0x442404 + +#define mmCPU_IF_PLL_SEI_INTR_MASK 0x442408 + +#define mmCPU_IF_NIC_SEI_INTR_STS 0x442410 + +#define mmCPU_IF_NIC_SEI_INTR_CLR 0x442414 + +#define mmCPU_IF_NIC_SEI_INTR_MASK 0x442418 + +#define mmCPU_IF_DMA_SEI_INTR_STS 0x442420 + +#define mmCPU_IF_DMA_SEI_INTR_CLR 0x442424 + +#define mmCPU_IF_DMA_SEI_INTR_MASK 0x442428 + +#define mmCPU_IF_DMA_IF_SEI_INTR_STS 0x442430 + +#define mmCPU_IF_DMA_IF_SEI_INTR_CLR 0x442434 + +#define mmCPU_IF_DMA_IF_SEI_INTR_MASK 0x442438 + +#endif /* ASIC_REG_CPU_IF_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h new file mode 100644 index 000000000..d079a37ac --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h @@ -0,0 +1,348 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA0_CORE_MASKS_H_ +#define ASIC_REG_DMA0_CORE_MASKS_H_ + +/* + ***************************************** + * DMA0_CORE (Prototype: DMA_CORE) + ***************************************** + */ + +/* DMA0_CORE_CFG_0 */ +#define DMA0_CORE_CFG_0_EN_SHIFT 0 +#define DMA0_CORE_CFG_0_EN_MASK 0x1 + +/* DMA0_CORE_CFG_1 */ +#define DMA0_CORE_CFG_1_HALT_SHIFT 0 +#define DMA0_CORE_CFG_1_HALT_MASK 0x1 +#define DMA0_CORE_CFG_1_FLUSH_SHIFT 1 +#define DMA0_CORE_CFG_1_FLUSH_MASK 0x2 +#define DMA0_CORE_CFG_1_SB_FORCE_MISS_SHIFT 2 +#define DMA0_CORE_CFG_1_SB_FORCE_MISS_MASK 0x4 + +/* DMA0_CORE_LBW_MAX_OUTSTAND */ +#define DMA0_CORE_LBW_MAX_OUTSTAND_VAL_SHIFT 0 +#define DMA0_CORE_LBW_MAX_OUTSTAND_VAL_MASK 0x1F + +/* DMA0_CORE_SRC_BASE_LO */ +#define DMA0_CORE_SRC_BASE_LO_VAL_SHIFT 0 +#define DMA0_CORE_SRC_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_SRC_BASE_HI */ +#define DMA0_CORE_SRC_BASE_HI_VAL_SHIFT 0 +#define DMA0_CORE_SRC_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_DST_BASE_LO */ +#define DMA0_CORE_DST_BASE_LO_VAL_SHIFT 0 +#define DMA0_CORE_DST_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_DST_BASE_HI */ +#define DMA0_CORE_DST_BASE_HI_VAL_SHIFT 0 +#define DMA0_CORE_DST_BASE_HI_VAL_MASK 0xFFFFFF +#define DMA0_CORE_DST_BASE_HI_CTX_ID_HI_SHIFT 24 +#define DMA0_CORE_DST_BASE_HI_CTX_ID_HI_MASK 0xFF000000 + +/* DMA0_CORE_SRC_TSIZE_1 */ +#define DMA0_CORE_SRC_TSIZE_1_VAL_SHIFT 0 +#define DMA0_CORE_SRC_TSIZE_1_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_SRC_STRIDE_1 */ +#define DMA0_CORE_SRC_STRIDE_1_VAL_SHIFT 0 +#define DMA0_CORE_SRC_STRIDE_1_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_SRC_TSIZE_2 */ +#define DMA0_CORE_SRC_TSIZE_2_VAL_SHIFT 0 +#define DMA0_CORE_SRC_TSIZE_2_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_SRC_STRIDE_2 */ +#define DMA0_CORE_SRC_STRIDE_2_VAL_SHIFT 0 +#define DMA0_CORE_SRC_STRIDE_2_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_SRC_TSIZE_3 */ +#define DMA0_CORE_SRC_TSIZE_3_VAL_SHIFT 0 +#define DMA0_CORE_SRC_TSIZE_3_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_SRC_STRIDE_3 */ +#define DMA0_CORE_SRC_STRIDE_3_VAL_SHIFT 0 +#define DMA0_CORE_SRC_STRIDE_3_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_SRC_TSIZE_4 */ +#define DMA0_CORE_SRC_TSIZE_4_VAL_SHIFT 0 +#define DMA0_CORE_SRC_TSIZE_4_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_SRC_STRIDE_4 */ +#define DMA0_CORE_SRC_STRIDE_4_VAL_SHIFT 0 +#define DMA0_CORE_SRC_STRIDE_4_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_SRC_TSIZE_0 */ +#define DMA0_CORE_SRC_TSIZE_0_VAL_SHIFT 0 +#define DMA0_CORE_SRC_TSIZE_0_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_DST_TSIZE_1 */ +#define DMA0_CORE_DST_TSIZE_1_VAL_SHIFT 0 +#define DMA0_CORE_DST_TSIZE_1_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_DST_STRIDE_1 */ +#define DMA0_CORE_DST_STRIDE_1_VAL_SHIFT 0 +#define DMA0_CORE_DST_STRIDE_1_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_DST_TSIZE_2 */ +#define DMA0_CORE_DST_TSIZE_2_VAL_SHIFT 0 +#define DMA0_CORE_DST_TSIZE_2_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_DST_STRIDE_2 */ +#define DMA0_CORE_DST_STRIDE_2_VAL_SHIFT 0 +#define DMA0_CORE_DST_STRIDE_2_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_DST_TSIZE_3 */ +#define DMA0_CORE_DST_TSIZE_3_VAL_SHIFT 0 +#define DMA0_CORE_DST_TSIZE_3_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_DST_STRIDE_3 */ +#define DMA0_CORE_DST_STRIDE_3_VAL_SHIFT 0 +#define DMA0_CORE_DST_STRIDE_3_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_DST_TSIZE_4 */ +#define DMA0_CORE_DST_TSIZE_4_VAL_SHIFT 0 +#define DMA0_CORE_DST_TSIZE_4_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_DST_STRIDE_4 */ +#define 
DMA0_CORE_DST_STRIDE_4_VAL_SHIFT 0 +#define DMA0_CORE_DST_STRIDE_4_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_DST_TSIZE_0 */ +#define DMA0_CORE_DST_TSIZE_0_VAL_SHIFT 0 +#define DMA0_CORE_DST_TSIZE_0_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_COMMIT */ +#define DMA0_CORE_COMMIT_WR_COMP_EN_SHIFT 0 +#define DMA0_CORE_COMMIT_WR_COMP_EN_MASK 0x1 +#define DMA0_CORE_COMMIT_TRANSPOSE_SHIFT 1 +#define DMA0_CORE_COMMIT_TRANSPOSE_MASK 0x2 +#define DMA0_CORE_COMMIT_DTYPE_SHIFT 2 +#define DMA0_CORE_COMMIT_DTYPE_MASK 0x4 +#define DMA0_CORE_COMMIT_LIN_SHIFT 3 +#define DMA0_CORE_COMMIT_LIN_MASK 0x8 +#define DMA0_CORE_COMMIT_MEM_SET_SHIFT 4 +#define DMA0_CORE_COMMIT_MEM_SET_MASK 0x10 +#define DMA0_CORE_COMMIT_COMPRESS_SHIFT 5 +#define DMA0_CORE_COMMIT_COMPRESS_MASK 0x20 +#define DMA0_CORE_COMMIT_DECOMPRESS_SHIFT 6 +#define DMA0_CORE_COMMIT_DECOMPRESS_MASK 0x40 +#define DMA0_CORE_COMMIT_CTX_ID_SHIFT 16 +#define DMA0_CORE_COMMIT_CTX_ID_MASK 0xFF0000 + +/* DMA0_CORE_WR_COMP_WDATA */ +#define DMA0_CORE_WR_COMP_WDATA_VAL_SHIFT 0 +#define DMA0_CORE_WR_COMP_WDATA_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_WR_COMP_ADDR_LO */ +#define DMA0_CORE_WR_COMP_ADDR_LO_VAL_SHIFT 0 +#define DMA0_CORE_WR_COMP_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_WR_COMP_ADDR_HI */ +#define DMA0_CORE_WR_COMP_ADDR_HI_VAL_SHIFT 0 +#define DMA0_CORE_WR_COMP_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_WR_COMP_AWUSER_31_11 */ +#define DMA0_CORE_WR_COMP_AWUSER_31_11_VAL_SHIFT 0 +#define DMA0_CORE_WR_COMP_AWUSER_31_11_VAL_MASK 0x1FFFFF + +/* DMA0_CORE_TE_NUMROWS */ +#define DMA0_CORE_TE_NUMROWS_VAL_SHIFT 0 +#define DMA0_CORE_TE_NUMROWS_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_PROT */ +#define DMA0_CORE_PROT_VAL_SHIFT 0 +#define DMA0_CORE_PROT_VAL_MASK 0x1 +#define DMA0_CORE_PROT_ERR_VAL_SHIFT 1 +#define DMA0_CORE_PROT_ERR_VAL_MASK 0x2 + +/* DMA0_CORE_SECURE_PROPS */ +#define DMA0_CORE_SECURE_PROPS_ASID_SHIFT 0 +#define DMA0_CORE_SECURE_PROPS_ASID_MASK 0x3FF +#define DMA0_CORE_SECURE_PROPS_MMBP_SHIFT 10 +#define DMA0_CORE_SECURE_PROPS_MMBP_MASK 0x400 + +/* DMA0_CORE_NON_SECURE_PROPS */ +#define DMA0_CORE_NON_SECURE_PROPS_ASID_SHIFT 0 +#define DMA0_CORE_NON_SECURE_PROPS_ASID_MASK 0x3FF +#define DMA0_CORE_NON_SECURE_PROPS_MMBP_SHIFT 10 +#define DMA0_CORE_NON_SECURE_PROPS_MMBP_MASK 0x400 + +/* DMA0_CORE_RD_MAX_OUTSTAND */ +#define DMA0_CORE_RD_MAX_OUTSTAND_VAL_SHIFT 0 +#define DMA0_CORE_RD_MAX_OUTSTAND_VAL_MASK 0xFFF + +/* DMA0_CORE_RD_MAX_SIZE */ +#define DMA0_CORE_RD_MAX_SIZE_DATA_SHIFT 0 +#define DMA0_CORE_RD_MAX_SIZE_DATA_MASK 0x7FF +#define DMA0_CORE_RD_MAX_SIZE_MD_SHIFT 16 +#define DMA0_CORE_RD_MAX_SIZE_MD_MASK 0x7FF0000 + +/* DMA0_CORE_RD_ARCACHE */ +#define DMA0_CORE_RD_ARCACHE_VAL_SHIFT 0 +#define DMA0_CORE_RD_ARCACHE_VAL_MASK 0xF + +/* DMA0_CORE_RD_ARUSER_31_11 */ +#define DMA0_CORE_RD_ARUSER_31_11_VAL_SHIFT 0 +#define DMA0_CORE_RD_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* DMA0_CORE_RD_INFLIGHTS */ +#define DMA0_CORE_RD_INFLIGHTS_VAL_SHIFT 0 +#define DMA0_CORE_RD_INFLIGHTS_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_WR_MAX_OUTSTAND */ +#define DMA0_CORE_WR_MAX_OUTSTAND_VAL_SHIFT 0 +#define DMA0_CORE_WR_MAX_OUTSTAND_VAL_MASK 0xFFF + +/* DMA0_CORE_WR_MAX_AWID */ +#define DMA0_CORE_WR_MAX_AWID_VAL_SHIFT 0 +#define DMA0_CORE_WR_MAX_AWID_VAL_MASK 0xFFFF + +/* DMA0_CORE_WR_AWCACHE */ +#define DMA0_CORE_WR_AWCACHE_VAL_SHIFT 0 +#define DMA0_CORE_WR_AWCACHE_VAL_MASK 0xF + +/* DMA0_CORE_WR_AWUSER_31_11 */ +#define DMA0_CORE_WR_AWUSER_31_11_VAL_SHIFT 0 +#define DMA0_CORE_WR_AWUSER_31_11_VAL_MASK 0x1FFFFF + +/* DMA0_CORE_WR_INFLIGHTS */ +#define DMA0_CORE_WR_INFLIGHTS_VAL_SHIFT 0 
+#define DMA0_CORE_WR_INFLIGHTS_VAL_MASK 0xFFFF + +/* DMA0_CORE_RD_RATE_LIM_CFG_0 */ +#define DMA0_CORE_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0 +#define DMA0_CORE_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF +#define DMA0_CORE_RD_RATE_LIM_CFG_0_SAT_SHIFT 16 +#define DMA0_CORE_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000 + +/* DMA0_CORE_RD_RATE_LIM_CFG_1 */ +#define DMA0_CORE_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0 +#define DMA0_CORE_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF +#define DMA0_CORE_RD_RATE_LIM_CFG_1_EN_SHIFT 31 +#define DMA0_CORE_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000 + +/* DMA0_CORE_WR_RATE_LIM_CFG_0 */ +#define DMA0_CORE_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0 +#define DMA0_CORE_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF +#define DMA0_CORE_WR_RATE_LIM_CFG_0_SAT_SHIFT 16 +#define DMA0_CORE_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000 + +/* DMA0_CORE_WR_RATE_LIM_CFG_1 */ +#define DMA0_CORE_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0 +#define DMA0_CORE_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF +#define DMA0_CORE_WR_RATE_LIM_CFG_1_EN_SHIFT 31 +#define DMA0_CORE_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000 + +/* DMA0_CORE_ERR_CFG */ +#define DMA0_CORE_ERR_CFG_ERR_MSG_EN_SHIFT 0 +#define DMA0_CORE_ERR_CFG_ERR_MSG_EN_MASK 0x1 +#define DMA0_CORE_ERR_CFG_STOP_ON_ERR_SHIFT 1 +#define DMA0_CORE_ERR_CFG_STOP_ON_ERR_MASK 0x2 + +/* DMA0_CORE_ERR_CAUSE */ +#define DMA0_CORE_ERR_CAUSE_HBW_RD_ERR_SHIFT 0 +#define DMA0_CORE_ERR_CAUSE_HBW_RD_ERR_MASK 0x1 +#define DMA0_CORE_ERR_CAUSE_HBW_WR_ERR_SHIFT 1 +#define DMA0_CORE_ERR_CAUSE_HBW_WR_ERR_MASK 0x2 +#define DMA0_CORE_ERR_CAUSE_LBW_WR_ERR_SHIFT 2 +#define DMA0_CORE_ERR_CAUSE_LBW_WR_ERR_MASK 0x4 +#define DMA0_CORE_ERR_CAUSE_DESC_OVF_SHIFT 3 +#define DMA0_CORE_ERR_CAUSE_DESC_OVF_MASK 0x8 + +/* DMA0_CORE_ERRMSG_ADDR_LO */ +#define DMA0_CORE_ERRMSG_ADDR_LO_VAL_SHIFT 0 +#define DMA0_CORE_ERRMSG_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_ERRMSG_ADDR_HI */ +#define DMA0_CORE_ERRMSG_ADDR_HI_VAL_SHIFT 0 +#define DMA0_CORE_ERRMSG_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_ERRMSG_WDATA */ +#define DMA0_CORE_ERRMSG_WDATA_VAL_SHIFT 0 +#define DMA0_CORE_ERRMSG_WDATA_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_STS0 */ +#define DMA0_CORE_STS0_RD_REQ_CNT_SHIFT 0 +#define DMA0_CORE_STS0_RD_REQ_CNT_MASK 0x7FFF +#define DMA0_CORE_STS0_WR_REQ_CNT_SHIFT 16 +#define DMA0_CORE_STS0_WR_REQ_CNT_MASK 0x7FFF0000 +#define DMA0_CORE_STS0_BUSY_SHIFT 31 +#define DMA0_CORE_STS0_BUSY_MASK 0x80000000 + +/* DMA0_CORE_STS1 */ +#define DMA0_CORE_STS1_IS_HALT_SHIFT 0 +#define DMA0_CORE_STS1_IS_HALT_MASK 0x1 + +/* DMA0_CORE_RD_DBGMEM_ADD */ +#define DMA0_CORE_RD_DBGMEM_ADD_VAL_SHIFT 0 +#define DMA0_CORE_RD_DBGMEM_ADD_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_RD_DBGMEM_DATA_WR */ +#define DMA0_CORE_RD_DBGMEM_DATA_WR_VAL_SHIFT 0 +#define DMA0_CORE_RD_DBGMEM_DATA_WR_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_RD_DBGMEM_DATA_RD */ +#define DMA0_CORE_RD_DBGMEM_DATA_RD_VAL_SHIFT 0 +#define DMA0_CORE_RD_DBGMEM_DATA_RD_VAL_MASK 0xFFFFFFFF + +/* DMA0_CORE_RD_DBGMEM_CTRL */ +#define DMA0_CORE_RD_DBGMEM_CTRL_WR_NRD_SHIFT 0 +#define DMA0_CORE_RD_DBGMEM_CTRL_WR_NRD_MASK 0x1 + +/* DMA0_CORE_RD_DBGMEM_RC */ +#define DMA0_CORE_RD_DBGMEM_RC_VALID_SHIFT 0 +#define DMA0_CORE_RD_DBGMEM_RC_VALID_MASK 0x1 + +/* DMA0_CORE_DBG_HBW_AXI_AR_CNT */ + +/* DMA0_CORE_DBG_HBW_AXI_AW_CNT */ + +/* DMA0_CORE_DBG_LBW_AXI_AW_CNT */ + +/* DMA0_CORE_DBG_DESC_CNT */ +#define DMA0_CORE_DBG_DESC_CNT_RD_STS_CTX_CNT_SHIFT 0 +#define DMA0_CORE_DBG_DESC_CNT_RD_STS_CTX_CNT_MASK 0xFFFFFFFF + +/* DMA0_CORE_DBG_STS */ +#define DMA0_CORE_DBG_STS_RD_CTX_FULL_SHIFT 0 +#define DMA0_CORE_DBG_STS_RD_CTX_FULL_MASK 0x1 +#define 
DMA0_CORE_DBG_STS_WR_CTX_FULL_SHIFT 1 +#define DMA0_CORE_DBG_STS_WR_CTX_FULL_MASK 0x2 +#define DMA0_CORE_DBG_STS_WR_COMP_FULL_SHIFT 2 +#define DMA0_CORE_DBG_STS_WR_COMP_FULL_MASK 0x4 +#define DMA0_CORE_DBG_STS_RD_CTX_EMPTY_SHIFT 3 +#define DMA0_CORE_DBG_STS_RD_CTX_EMPTY_MASK 0x8 +#define DMA0_CORE_DBG_STS_WR_CTX_EMPTY_SHIFT 4 +#define DMA0_CORE_DBG_STS_WR_CTX_EMPTY_MASK 0x10 +#define DMA0_CORE_DBG_STS_WR_COMP_EMPTY_SHIFT 5 +#define DMA0_CORE_DBG_STS_WR_COMP_EMPTY_MASK 0x20 +#define DMA0_CORE_DBG_STS_TE_EMPTY_SHIFT 6 +#define DMA0_CORE_DBG_STS_TE_EMPTY_MASK 0x40 +#define DMA0_CORE_DBG_STS_TE_BUSY_SHIFT 7 +#define DMA0_CORE_DBG_STS_TE_BUSY_MASK 0x80 +#define DMA0_CORE_DBG_STS_GSKT_EMPTY_SHIFT 8 +#define DMA0_CORE_DBG_STS_GSKT_EMPTY_MASK 0x100 +#define DMA0_CORE_DBG_STS_GSKT_FULL_SHIFT 9 +#define DMA0_CORE_DBG_STS_GSKT_FULL_MASK 0x200 +#define DMA0_CORE_DBG_STS_RDBUF_FULLNESS_SHIFT 20 +#define DMA0_CORE_DBG_STS_RDBUF_FULLNESS_MASK 0x7FF00000 + +/* DMA0_CORE_DBG_RD_DESC_ID */ + +/* DMA0_CORE_DBG_WR_DESC_ID */ + +#endif /* ASIC_REG_DMA0_CORE_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h new file mode 100644 index 000000000..1fdd5d5fc --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA0_CORE_REGS_H_ +#define ASIC_REG_DMA0_CORE_REGS_H_ + +/* + ***************************************** + * DMA0_CORE (Prototype: DMA_CORE) + ***************************************** + */ + +#define mmDMA0_CORE_CFG_0 0x500000 + +#define mmDMA0_CORE_CFG_1 0x500004 + +#define mmDMA0_CORE_LBW_MAX_OUTSTAND 0x500008 + +#define mmDMA0_CORE_SRC_BASE_LO 0x500014 + +#define mmDMA0_CORE_SRC_BASE_HI 0x500018 + +#define mmDMA0_CORE_DST_BASE_LO 0x50001C + +#define mmDMA0_CORE_DST_BASE_HI 0x500020 + +#define mmDMA0_CORE_SRC_TSIZE_1 0x50002C + +#define mmDMA0_CORE_SRC_STRIDE_1 0x500030 + +#define mmDMA0_CORE_SRC_TSIZE_2 0x500034 + +#define mmDMA0_CORE_SRC_STRIDE_2 0x500038 + +#define mmDMA0_CORE_SRC_TSIZE_3 0x50003C + +#define mmDMA0_CORE_SRC_STRIDE_3 0x500040 + +#define mmDMA0_CORE_SRC_TSIZE_4 0x500044 + +#define mmDMA0_CORE_SRC_STRIDE_4 0x500048 + +#define mmDMA0_CORE_SRC_TSIZE_0 0x50004C + +#define mmDMA0_CORE_DST_TSIZE_1 0x500054 + +#define mmDMA0_CORE_DST_STRIDE_1 0x500058 + +#define mmDMA0_CORE_DST_TSIZE_2 0x50005C + +#define mmDMA0_CORE_DST_STRIDE_2 0x500060 + +#define mmDMA0_CORE_DST_TSIZE_3 0x500064 + +#define mmDMA0_CORE_DST_STRIDE_3 0x500068 + +#define mmDMA0_CORE_DST_TSIZE_4 0x50006C + +#define mmDMA0_CORE_DST_STRIDE_4 0x500070 + +#define mmDMA0_CORE_DST_TSIZE_0 0x500074 + +#define mmDMA0_CORE_COMMIT 0x500078 + +#define mmDMA0_CORE_WR_COMP_WDATA 0x50007C + +#define mmDMA0_CORE_WR_COMP_ADDR_LO 0x500080 + +#define mmDMA0_CORE_WR_COMP_ADDR_HI 0x500084 + +#define mmDMA0_CORE_WR_COMP_AWUSER_31_11 0x500088 + +#define mmDMA0_CORE_TE_NUMROWS 0x500094 + +#define mmDMA0_CORE_PROT 0x5000B8 + +#define mmDMA0_CORE_SECURE_PROPS 0x5000F0 + +#define mmDMA0_CORE_NON_SECURE_PROPS 0x5000F4 + +#define mmDMA0_CORE_RD_MAX_OUTSTAND 0x500100 + +#define mmDMA0_CORE_RD_MAX_SIZE 0x500104 + +#define mmDMA0_CORE_RD_ARCACHE 0x500108 + +#define mmDMA0_CORE_RD_ARUSER_31_11 0x500110 + +#define 
mmDMA0_CORE_RD_INFLIGHTS 0x500114 + +#define mmDMA0_CORE_WR_MAX_OUTSTAND 0x500120 + +#define mmDMA0_CORE_WR_MAX_AWID 0x500124 + +#define mmDMA0_CORE_WR_AWCACHE 0x500128 + +#define mmDMA0_CORE_WR_AWUSER_31_11 0x500130 + +#define mmDMA0_CORE_WR_INFLIGHTS 0x500134 + +#define mmDMA0_CORE_RD_RATE_LIM_CFG_0 0x500150 + +#define mmDMA0_CORE_RD_RATE_LIM_CFG_1 0x500154 + +#define mmDMA0_CORE_WR_RATE_LIM_CFG_0 0x500158 + +#define mmDMA0_CORE_WR_RATE_LIM_CFG_1 0x50015C + +#define mmDMA0_CORE_ERR_CFG 0x500160 + +#define mmDMA0_CORE_ERR_CAUSE 0x500164 + +#define mmDMA0_CORE_ERRMSG_ADDR_LO 0x500170 + +#define mmDMA0_CORE_ERRMSG_ADDR_HI 0x500174 + +#define mmDMA0_CORE_ERRMSG_WDATA 0x500178 + +#define mmDMA0_CORE_STS0 0x500190 + +#define mmDMA0_CORE_STS1 0x500194 + +#define mmDMA0_CORE_RD_DBGMEM_ADD 0x500200 + +#define mmDMA0_CORE_RD_DBGMEM_DATA_WR 0x500204 + +#define mmDMA0_CORE_RD_DBGMEM_DATA_RD 0x500208 + +#define mmDMA0_CORE_RD_DBGMEM_CTRL 0x50020C + +#define mmDMA0_CORE_RD_DBGMEM_RC 0x500210 + +#define mmDMA0_CORE_DBG_HBW_AXI_AR_CNT 0x500220 + +#define mmDMA0_CORE_DBG_HBW_AXI_AW_CNT 0x500224 + +#define mmDMA0_CORE_DBG_LBW_AXI_AW_CNT 0x500228 + +#define mmDMA0_CORE_DBG_DESC_CNT 0x50022C + +#define mmDMA0_CORE_DBG_STS 0x500230 + +#define mmDMA0_CORE_DBG_RD_DESC_ID 0x500234 + +#define mmDMA0_CORE_DBG_WR_DESC_ID 0x500238 + +#endif /* ASIC_REG_DMA0_CORE_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h new file mode 100644 index 000000000..48376aabc --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h @@ -0,0 +1,800 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA0_QM_MASKS_H_ +#define ASIC_REG_DMA0_QM_MASKS_H_ + +/* + ***************************************** + * DMA0_QM (Prototype: QMAN) + ***************************************** + */ + +/* DMA0_QM_GLBL_CFG0 */ +#define DMA0_QM_GLBL_CFG0_PQF_EN_SHIFT 0 +#define DMA0_QM_GLBL_CFG0_PQF_EN_MASK 0xF +#define DMA0_QM_GLBL_CFG0_CQF_EN_SHIFT 4 +#define DMA0_QM_GLBL_CFG0_CQF_EN_MASK 0x1F0 +#define DMA0_QM_GLBL_CFG0_CP_EN_SHIFT 9 +#define DMA0_QM_GLBL_CFG0_CP_EN_MASK 0x3E00 + +/* DMA0_QM_GLBL_CFG1 */ +#define DMA0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0 +#define DMA0_QM_GLBL_CFG1_PQF_STOP_MASK 0xF +#define DMA0_QM_GLBL_CFG1_CQF_STOP_SHIFT 4 +#define DMA0_QM_GLBL_CFG1_CQF_STOP_MASK 0x1F0 +#define DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT 9 +#define DMA0_QM_GLBL_CFG1_CP_STOP_MASK 0x3E00 +#define DMA0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 16 +#define DMA0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0xF0000 +#define DMA0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 20 +#define DMA0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x1F00000 +#define DMA0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 25 +#define DMA0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x3E000000 + +/* DMA0_QM_GLBL_PROT */ +#define DMA0_QM_GLBL_PROT_PQF_SHIFT 0 +#define DMA0_QM_GLBL_PROT_PQF_MASK 0xF +#define DMA0_QM_GLBL_PROT_CQF_SHIFT 4 +#define DMA0_QM_GLBL_PROT_CQF_MASK 0x1F0 +#define DMA0_QM_GLBL_PROT_CP_SHIFT 9 +#define DMA0_QM_GLBL_PROT_CP_MASK 0x3E00 +#define DMA0_QM_GLBL_PROT_ERR_SHIFT 14 +#define DMA0_QM_GLBL_PROT_ERR_MASK 0x4000 +#define DMA0_QM_GLBL_PROT_ARB_SHIFT 15 +#define DMA0_QM_GLBL_PROT_ARB_MASK 0x8000 + +/* DMA0_QM_GLBL_ERR_CFG */ +#define DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 0 +#define 
DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0xF +#define DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4 +#define DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x1F0 +#define DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 9 +#define DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x3E00 +#define DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 16 +#define DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0xF0000 +#define DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 20 +#define DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x1F00000 +#define DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 25 +#define DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x3E000000 +#define DMA0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT 31 +#define DMA0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK 0x80000000 + +/* DMA0_QM_GLBL_SECURE_PROPS */ +#define DMA0_QM_GLBL_SECURE_PROPS_0_ASID_SHIFT 0 +#define DMA0_QM_GLBL_SECURE_PROPS_0_ASID_MASK 0x3FF +#define DMA0_QM_GLBL_SECURE_PROPS_1_ASID_SHIFT 0 +#define DMA0_QM_GLBL_SECURE_PROPS_1_ASID_MASK 0x3FF +#define DMA0_QM_GLBL_SECURE_PROPS_2_ASID_SHIFT 0 +#define DMA0_QM_GLBL_SECURE_PROPS_2_ASID_MASK 0x3FF +#define DMA0_QM_GLBL_SECURE_PROPS_3_ASID_SHIFT 0 +#define DMA0_QM_GLBL_SECURE_PROPS_3_ASID_MASK 0x3FF +#define DMA0_QM_GLBL_SECURE_PROPS_4_ASID_SHIFT 0 +#define DMA0_QM_GLBL_SECURE_PROPS_4_ASID_MASK 0x3FF +#define DMA0_QM_GLBL_SECURE_PROPS_0_MMBP_SHIFT 10 +#define DMA0_QM_GLBL_SECURE_PROPS_0_MMBP_MASK 0x400 +#define DMA0_QM_GLBL_SECURE_PROPS_1_MMBP_SHIFT 10 +#define DMA0_QM_GLBL_SECURE_PROPS_1_MMBP_MASK 0x400 +#define DMA0_QM_GLBL_SECURE_PROPS_2_MMBP_SHIFT 10 +#define DMA0_QM_GLBL_SECURE_PROPS_2_MMBP_MASK 0x400 +#define DMA0_QM_GLBL_SECURE_PROPS_3_MMBP_SHIFT 10 +#define DMA0_QM_GLBL_SECURE_PROPS_3_MMBP_MASK 0x400 +#define DMA0_QM_GLBL_SECURE_PROPS_4_MMBP_SHIFT 10 +#define DMA0_QM_GLBL_SECURE_PROPS_4_MMBP_MASK 0x400 + +/* DMA0_QM_GLBL_NON_SECURE_PROPS */ +#define DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_SHIFT 0 +#define DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK 0x3FF +#define DMA0_QM_GLBL_NON_SECURE_PROPS_1_ASID_SHIFT 0 +#define DMA0_QM_GLBL_NON_SECURE_PROPS_1_ASID_MASK 0x3FF +#define DMA0_QM_GLBL_NON_SECURE_PROPS_2_ASID_SHIFT 0 +#define DMA0_QM_GLBL_NON_SECURE_PROPS_2_ASID_MASK 0x3FF +#define DMA0_QM_GLBL_NON_SECURE_PROPS_3_ASID_SHIFT 0 +#define DMA0_QM_GLBL_NON_SECURE_PROPS_3_ASID_MASK 0x3FF +#define DMA0_QM_GLBL_NON_SECURE_PROPS_4_ASID_SHIFT 0 +#define DMA0_QM_GLBL_NON_SECURE_PROPS_4_ASID_MASK 0x3FF +#define DMA0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_SHIFT 10 +#define DMA0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_MASK 0x400 +#define DMA0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_SHIFT 10 +#define DMA0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_MASK 0x400 +#define DMA0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_SHIFT 10 +#define DMA0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_MASK 0x400 +#define DMA0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_SHIFT 10 +#define DMA0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_MASK 0x400 +#define DMA0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_SHIFT 10 +#define DMA0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_MASK 0x400 + +/* DMA0_QM_GLBL_STS0 */ +#define DMA0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0 +#define DMA0_QM_GLBL_STS0_PQF_IDLE_MASK 0xF +#define DMA0_QM_GLBL_STS0_CQF_IDLE_SHIFT 4 +#define DMA0_QM_GLBL_STS0_CQF_IDLE_MASK 0x1F0 +#define DMA0_QM_GLBL_STS0_CP_IDLE_SHIFT 9 +#define DMA0_QM_GLBL_STS0_CP_IDLE_MASK 0x3E00 +#define DMA0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 16 +#define DMA0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0xF0000 +#define DMA0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 20 +#define DMA0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x1F00000 +#define DMA0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 25 +#define DMA0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x3E000000 
+#define DMA0_QM_GLBL_STS0_ARB_IS_STOP_SHIFT 31 +#define DMA0_QM_GLBL_STS0_ARB_IS_STOP_MASK 0x80000000 + +/* DMA0_QM_GLBL_STS1 */ +#define DMA0_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0 +#define DMA0_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1 +#define DMA0_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1 +#define DMA0_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2 +#define DMA0_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2 +#define DMA0_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4 +#define DMA0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3 +#define DMA0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8 +#define DMA0_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4 +#define DMA0_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10 +#define DMA0_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5 +#define DMA0_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20 +#define DMA0_QM_GLBL_STS1_CP_WREG_ERR_SHIFT 6 +#define DMA0_QM_GLBL_STS1_CP_WREG_ERR_MASK 0x40 +#define DMA0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_SHIFT 8 +#define DMA0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_MASK 0x100 +#define DMA0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_SHIFT 9 +#define DMA0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_MASK 0x200 +#define DMA0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_SHIFT 10 +#define DMA0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_MASK 0x400 +#define DMA0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_SHIFT 11 +#define DMA0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_MASK 0x800 +#define DMA0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_SHIFT 12 +#define DMA0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define DMA0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_SHIFT 13 +#define DMA0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define DMA0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_SHIFT 14 +#define DMA0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define DMA0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_SHIFT 15 +#define DMA0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* DMA0_QM_GLBL_STS1_4 */ +#define DMA0_QM_GLBL_STS1_4_CQF_RD_ERR_SHIFT 1 +#define DMA0_QM_GLBL_STS1_4_CQF_RD_ERR_MASK 0x2 +#define DMA0_QM_GLBL_STS1_4_CP_RD_ERR_SHIFT 2 +#define DMA0_QM_GLBL_STS1_4_CP_RD_ERR_MASK 0x4 +#define DMA0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_SHIFT 3 +#define DMA0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_MASK 0x8 +#define DMA0_QM_GLBL_STS1_4_CP_STOP_OP_SHIFT 4 +#define DMA0_QM_GLBL_STS1_4_CP_STOP_OP_MASK 0x10 +#define DMA0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_SHIFT 5 +#define DMA0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_MASK 0x20 +#define DMA0_QM_GLBL_STS1_4_CP_WREG_ERR_SHIFT 6 +#define DMA0_QM_GLBL_STS1_4_CP_WREG_ERR_MASK 0x40 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_SHIFT 8 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_MASK 0x100 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_SHIFT 9 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_MASK 0x200 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_SHIFT 10 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_MASK 0x400 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_SHIFT 11 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_MASK 0x800 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_SHIFT 12 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_SHIFT 13 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_SHIFT 14 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_SHIFT 15 +#define DMA0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* DMA0_QM_GLBL_MSG_EN */ +#define DMA0_QM_GLBL_MSG_EN_PQF_RD_ERR_SHIFT 0 +#define DMA0_QM_GLBL_MSG_EN_PQF_RD_ERR_MASK 0x1 +#define DMA0_QM_GLBL_MSG_EN_CQF_RD_ERR_SHIFT 1 +#define DMA0_QM_GLBL_MSG_EN_CQF_RD_ERR_MASK 0x2 +#define DMA0_QM_GLBL_MSG_EN_CP_RD_ERR_SHIFT 2 +#define DMA0_QM_GLBL_MSG_EN_CP_RD_ERR_MASK 0x4 
+#define DMA0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_SHIFT 3 +#define DMA0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_MASK 0x8 +#define DMA0_QM_GLBL_MSG_EN_CP_STOP_OP_SHIFT 4 +#define DMA0_QM_GLBL_MSG_EN_CP_STOP_OP_MASK 0x10 +#define DMA0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_SHIFT 5 +#define DMA0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_MASK 0x20 +#define DMA0_QM_GLBL_MSG_EN_CP_WREG_ERR_SHIFT 6 +#define DMA0_QM_GLBL_MSG_EN_CP_WREG_ERR_MASK 0x40 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_SHIFT 8 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_MASK 0x100 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_SHIFT 9 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_MASK 0x200 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_SHIFT 10 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_MASK 0x400 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_SHIFT 11 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_MASK 0x800 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_SHIFT 12 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_SHIFT 13 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_SHIFT 14 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_SHIFT 15 +#define DMA0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* DMA0_QM_GLBL_MSG_EN_4 */ +#define DMA0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_SHIFT 1 +#define DMA0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_MASK 0x2 +#define DMA0_QM_GLBL_MSG_EN_4_CP_RD_ERR_SHIFT 2 +#define DMA0_QM_GLBL_MSG_EN_4_CP_RD_ERR_MASK 0x4 +#define DMA0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_SHIFT 3 +#define DMA0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_MASK 0x8 +#define DMA0_QM_GLBL_MSG_EN_4_CP_STOP_OP_SHIFT 4 +#define DMA0_QM_GLBL_MSG_EN_4_CP_STOP_OP_MASK 0x10 +#define DMA0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_SHIFT 5 +#define DMA0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_MASK 0x20 +#define DMA0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_SHIFT 6 +#define DMA0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_MASK 0x40 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_SHIFT 8 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_MASK 0x100 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_SHIFT 9 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_MASK 0x200 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_SHIFT 10 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_MASK 0x400 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_SHIFT 11 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_MASK 0x800 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_SHIFT 12 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_SHIFT 13 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_SHIFT 14 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_SHIFT 15 +#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* DMA0_QM_PQ_BASE_LO */ +#define DMA0_QM_PQ_BASE_LO_VAL_SHIFT 0 +#define DMA0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_PQ_BASE_HI */ +#define DMA0_QM_PQ_BASE_HI_VAL_SHIFT 0 +#define DMA0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_PQ_SIZE */ +#define DMA0_QM_PQ_SIZE_VAL_SHIFT 0 +#define DMA0_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_PQ_PI */ +#define DMA0_QM_PQ_PI_VAL_SHIFT 0 +#define DMA0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_PQ_CI */ +#define DMA0_QM_PQ_CI_VAL_SHIFT 0 +#define DMA0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_PQ_CFG0 */ +#define DMA0_QM_PQ_CFG0_RESERVED_SHIFT 0 +#define 
DMA0_QM_PQ_CFG0_RESERVED_MASK 0x1 + +/* DMA0_QM_PQ_CFG1 */ +#define DMA0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0 +#define DMA0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define DMA0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define DMA0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* DMA0_QM_PQ_ARUSER_31_11 */ +#define DMA0_QM_PQ_ARUSER_31_11_VAL_SHIFT 0 +#define DMA0_QM_PQ_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* DMA0_QM_PQ_STS0 */ +#define DMA0_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0 +#define DMA0_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF +#define DMA0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16 +#define DMA0_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000 + +/* DMA0_QM_PQ_STS1 */ +#define DMA0_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0 +#define DMA0_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF +#define DMA0_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30 +#define DMA0_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000 +#define DMA0_QM_PQ_STS1_PQ_BUSY_SHIFT 31 +#define DMA0_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000 + +/* DMA0_QM_CQ_CFG0 */ +#define DMA0_QM_CQ_CFG0_RESERVED_SHIFT 0 +#define DMA0_QM_CQ_CFG0_RESERVED_MASK 0x1 + +/* DMA0_QM_CQ_CFG1 */ +#define DMA0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0 +#define DMA0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define DMA0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define DMA0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* DMA0_QM_CQ_ARUSER_31_11 */ +#define DMA0_QM_CQ_ARUSER_31_11_VAL_SHIFT 0 +#define DMA0_QM_CQ_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* DMA0_QM_CQ_STS0 */ +#define DMA0_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0 +#define DMA0_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF +#define DMA0_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16 +#define DMA0_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000 + +/* DMA0_QM_CQ_STS1 */ +#define DMA0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0 +#define DMA0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF +#define DMA0_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30 +#define DMA0_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000 +#define DMA0_QM_CQ_STS1_CQ_BUSY_SHIFT 31 +#define DMA0_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000 + +/* DMA0_QM_CQ_PTR_LO_0 */ +#define DMA0_QM_CQ_PTR_LO_0_VAL_SHIFT 0 +#define DMA0_QM_CQ_PTR_LO_0_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_PTR_HI_0 */ +#define DMA0_QM_CQ_PTR_HI_0_VAL_SHIFT 0 +#define DMA0_QM_CQ_PTR_HI_0_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_TSIZE_0 */ +#define DMA0_QM_CQ_TSIZE_0_VAL_SHIFT 0 +#define DMA0_QM_CQ_TSIZE_0_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_CTL_0 */ +#define DMA0_QM_CQ_CTL_0_RPT_SHIFT 0 +#define DMA0_QM_CQ_CTL_0_RPT_MASK 0xFFFF +#define DMA0_QM_CQ_CTL_0_CTL_SHIFT 16 +#define DMA0_QM_CQ_CTL_0_CTL_MASK 0xFFFF0000 + +/* DMA0_QM_CQ_PTR_LO_1 */ +#define DMA0_QM_CQ_PTR_LO_1_VAL_SHIFT 0 +#define DMA0_QM_CQ_PTR_LO_1_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_PTR_HI_1 */ +#define DMA0_QM_CQ_PTR_HI_1_VAL_SHIFT 0 +#define DMA0_QM_CQ_PTR_HI_1_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_TSIZE_1 */ +#define DMA0_QM_CQ_TSIZE_1_VAL_SHIFT 0 +#define DMA0_QM_CQ_TSIZE_1_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_CTL_1 */ +#define DMA0_QM_CQ_CTL_1_RPT_SHIFT 0 +#define DMA0_QM_CQ_CTL_1_RPT_MASK 0xFFFF +#define DMA0_QM_CQ_CTL_1_CTL_SHIFT 16 +#define DMA0_QM_CQ_CTL_1_CTL_MASK 0xFFFF0000 + +/* DMA0_QM_CQ_PTR_LO_2 */ +#define DMA0_QM_CQ_PTR_LO_2_VAL_SHIFT 0 +#define DMA0_QM_CQ_PTR_LO_2_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_PTR_HI_2 */ +#define DMA0_QM_CQ_PTR_HI_2_VAL_SHIFT 0 +#define DMA0_QM_CQ_PTR_HI_2_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_TSIZE_2 */ +#define DMA0_QM_CQ_TSIZE_2_VAL_SHIFT 0 +#define DMA0_QM_CQ_TSIZE_2_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_CTL_2 */ +#define DMA0_QM_CQ_CTL_2_RPT_SHIFT 0 +#define DMA0_QM_CQ_CTL_2_RPT_MASK 0xFFFF +#define DMA0_QM_CQ_CTL_2_CTL_SHIFT 16 +#define DMA0_QM_CQ_CTL_2_CTL_MASK 0xFFFF0000 
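+
+/*
+ * Editorial note, not part of the auto-generated register list: every
+ * field in this file follows the same _SHIFT/_MASK pairing, so a field
+ * is read as (reg & MASK) >> SHIFT.  A sketch, assuming a 32-bit read
+ * accessor and the matching mmDMA0_QM_CQ_STS1 offset from the _regs.h
+ * counterpart of this file:
+ *
+ *	u32 sts = RREG32(mmDMA0_QM_CQ_STS1);
+ *	u32 inflight = (sts & DMA0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK) >>
+ *			DMA0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT;
+ */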
+ +/* DMA0_QM_CQ_PTR_LO_3 */ +#define DMA0_QM_CQ_PTR_LO_3_VAL_SHIFT 0 +#define DMA0_QM_CQ_PTR_LO_3_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_PTR_HI_3 */ +#define DMA0_QM_CQ_PTR_HI_3_VAL_SHIFT 0 +#define DMA0_QM_CQ_PTR_HI_3_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_TSIZE_3 */ +#define DMA0_QM_CQ_TSIZE_3_VAL_SHIFT 0 +#define DMA0_QM_CQ_TSIZE_3_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_CTL_3 */ +#define DMA0_QM_CQ_CTL_3_RPT_SHIFT 0 +#define DMA0_QM_CQ_CTL_3_RPT_MASK 0xFFFF +#define DMA0_QM_CQ_CTL_3_CTL_SHIFT 16 +#define DMA0_QM_CQ_CTL_3_CTL_MASK 0xFFFF0000 + +/* DMA0_QM_CQ_PTR_LO_4 */ +#define DMA0_QM_CQ_PTR_LO_4_VAL_SHIFT 0 +#define DMA0_QM_CQ_PTR_LO_4_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_PTR_HI_4 */ +#define DMA0_QM_CQ_PTR_HI_4_VAL_SHIFT 0 +#define DMA0_QM_CQ_PTR_HI_4_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_TSIZE_4 */ +#define DMA0_QM_CQ_TSIZE_4_VAL_SHIFT 0 +#define DMA0_QM_CQ_TSIZE_4_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_CTL_4 */ +#define DMA0_QM_CQ_CTL_4_RPT_SHIFT 0 +#define DMA0_QM_CQ_CTL_4_RPT_MASK 0xFFFF +#define DMA0_QM_CQ_CTL_4_CTL_SHIFT 16 +#define DMA0_QM_CQ_CTL_4_CTL_MASK 0xFFFF0000 + +/* DMA0_QM_CQ_PTR_LO_STS */ +#define DMA0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0 +#define DMA0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_PTR_HI_STS */ +#define DMA0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0 +#define DMA0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_TSIZE_STS */ +#define DMA0_QM_CQ_TSIZE_STS_VAL_SHIFT 0 +#define DMA0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CQ_CTL_STS */ +#define DMA0_QM_CQ_CTL_STS_RPT_SHIFT 0 +#define DMA0_QM_CQ_CTL_STS_RPT_MASK 0xFFFF +#define DMA0_QM_CQ_CTL_STS_CTL_SHIFT 16 +#define DMA0_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000 + +/* DMA0_QM_CQ_IFIFO_CNT */ +#define DMA0_QM_CQ_IFIFO_CNT_VAL_SHIFT 0 +#define DMA0_QM_CQ_IFIFO_CNT_VAL_MASK 0x3 + +/* DMA0_QM_CP_MSG_BASE0_ADDR_LO */ +#define DMA0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0 +#define DMA0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CP_MSG_BASE0_ADDR_HI */ +#define DMA0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0 +#define DMA0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CP_MSG_BASE1_ADDR_LO */ +#define DMA0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0 +#define DMA0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CP_MSG_BASE1_ADDR_HI */ +#define DMA0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0 +#define DMA0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CP_MSG_BASE2_ADDR_LO */ +#define DMA0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0 +#define DMA0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CP_MSG_BASE2_ADDR_HI */ +#define DMA0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0 +#define DMA0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CP_MSG_BASE3_ADDR_LO */ +#define DMA0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0 +#define DMA0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CP_MSG_BASE3_ADDR_HI */ +#define DMA0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0 +#define DMA0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CP_LDMA_TSIZE_OFFSET */ +#define DMA0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0 +#define DMA0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */ +#define DMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0 +#define DMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET */ +#define DMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0 +#define DMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CP_FENCE0_RDATA */ +#define DMA0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0 +#define DMA0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF + 
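+/*
+ * Editorial note, illustrative only: writing a field is the
+ * read-modify-write mirror of the read pattern, e.g. for the
+ * fence-increment value above:
+ *
+ *	reg &= ~DMA0_QM_CP_FENCE0_RDATA_INC_VAL_MASK;
+ *	reg |= (inc << DMA0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT) &
+ *			DMA0_QM_CP_FENCE0_RDATA_INC_VAL_MASK;
+ */
+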
+/* DMA0_QM_CP_FENCE1_RDATA */ +#define DMA0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0 +#define DMA0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF + +/* DMA0_QM_CP_FENCE2_RDATA */ +#define DMA0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0 +#define DMA0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF + +/* DMA0_QM_CP_FENCE3_RDATA */ +#define DMA0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0 +#define DMA0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF + +/* DMA0_QM_CP_FENCE0_CNT */ +#define DMA0_QM_CP_FENCE0_CNT_VAL_SHIFT 0 +#define DMA0_QM_CP_FENCE0_CNT_VAL_MASK 0x3FFF + +/* DMA0_QM_CP_FENCE1_CNT */ +#define DMA0_QM_CP_FENCE1_CNT_VAL_SHIFT 0 +#define DMA0_QM_CP_FENCE1_CNT_VAL_MASK 0x3FFF + +/* DMA0_QM_CP_FENCE2_CNT */ +#define DMA0_QM_CP_FENCE2_CNT_VAL_SHIFT 0 +#define DMA0_QM_CP_FENCE2_CNT_VAL_MASK 0x3FFF + +/* DMA0_QM_CP_FENCE3_CNT */ +#define DMA0_QM_CP_FENCE3_CNT_VAL_SHIFT 0 +#define DMA0_QM_CP_FENCE3_CNT_VAL_MASK 0x3FFF + +/* DMA0_QM_CP_STS */ +#define DMA0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0 +#define DMA0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF +#define DMA0_QM_CP_STS_ERDY_SHIFT 16 +#define DMA0_QM_CP_STS_ERDY_MASK 0x10000 +#define DMA0_QM_CP_STS_RRDY_SHIFT 17 +#define DMA0_QM_CP_STS_RRDY_MASK 0x20000 +#define DMA0_QM_CP_STS_MRDY_SHIFT 18 +#define DMA0_QM_CP_STS_MRDY_MASK 0x40000 +#define DMA0_QM_CP_STS_SW_STOP_SHIFT 19 +#define DMA0_QM_CP_STS_SW_STOP_MASK 0x80000 +#define DMA0_QM_CP_STS_FENCE_ID_SHIFT 20 +#define DMA0_QM_CP_STS_FENCE_ID_MASK 0x300000 +#define DMA0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22 +#define DMA0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000 + +/* DMA0_QM_CP_CURRENT_INST_LO */ +#define DMA0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0 +#define DMA0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CP_CURRENT_INST_HI */ +#define DMA0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0 +#define DMA0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_CP_BARRIER_CFG */ +#define DMA0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0 +#define DMA0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF +#define DMA0_QM_CP_BARRIER_CFG_RBGUARD_SHIFT 16 +#define DMA0_QM_CP_BARRIER_CFG_RBGUARD_MASK 0xF0000 + +/* DMA0_QM_CP_DBG_0 */ +#define DMA0_QM_CP_DBG_0_CS_SHIFT 0 +#define DMA0_QM_CP_DBG_0_CS_MASK 0xF +#define DMA0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_SHIFT 4 +#define DMA0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_MASK 0x10 +#define DMA0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_SHIFT 5 +#define DMA0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_MASK 0x20 +#define DMA0_QM_CP_DBG_0_MREB_STALL_SHIFT 6 +#define DMA0_QM_CP_DBG_0_MREB_STALL_MASK 0x40 +#define DMA0_QM_CP_DBG_0_STALL_SHIFT 7 +#define DMA0_QM_CP_DBG_0_STALL_MASK 0x80 + +/* DMA0_QM_CP_ARUSER_31_11 */ +#define DMA0_QM_CP_ARUSER_31_11_VAL_SHIFT 0 +#define DMA0_QM_CP_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* DMA0_QM_CP_AWUSER_31_11 */ +#define DMA0_QM_CP_AWUSER_31_11_VAL_SHIFT 0 +#define DMA0_QM_CP_AWUSER_31_11_VAL_MASK 0x1FFFFF + +/* DMA0_QM_ARB_CFG_0 */ +#define DMA0_QM_ARB_CFG_0_TYPE_SHIFT 0 +#define DMA0_QM_ARB_CFG_0_TYPE_MASK 0x1 +#define DMA0_QM_ARB_CFG_0_IS_MASTER_SHIFT 4 +#define DMA0_QM_ARB_CFG_0_IS_MASTER_MASK 0x10 +#define DMA0_QM_ARB_CFG_0_EN_SHIFT 8 +#define DMA0_QM_ARB_CFG_0_EN_MASK 0x100 +#define DMA0_QM_ARB_CFG_0_MASK_SHIFT 12 +#define DMA0_QM_ARB_CFG_0_MASK_MASK 0xF000 +#define DMA0_QM_ARB_CFG_0_MST_MSG_NOSTALL_SHIFT 16 +#define DMA0_QM_ARB_CFG_0_MST_MSG_NOSTALL_MASK 0x10000 + +/* DMA0_QM_ARB_CHOISE_Q_PUSH */ +#define DMA0_QM_ARB_CHOISE_Q_PUSH_VAL_SHIFT 0 +#define DMA0_QM_ARB_CHOISE_Q_PUSH_VAL_MASK 0x3 + +/* DMA0_QM_ARB_WRR_WEIGHT */ +#define DMA0_QM_ARB_WRR_WEIGHT_VAL_SHIFT 0 +#define DMA0_QM_ARB_WRR_WEIGHT_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_ARB_CFG_1 */ +#define 
DMA0_QM_ARB_CFG_1_CLR_SHIFT 0 +#define DMA0_QM_ARB_CFG_1_CLR_MASK 0x1 + +/* DMA0_QM_ARB_MST_AVAIL_CRED */ +#define DMA0_QM_ARB_MST_AVAIL_CRED_VAL_SHIFT 0 +#define DMA0_QM_ARB_MST_AVAIL_CRED_VAL_MASK 0x7F + +/* DMA0_QM_ARB_MST_CRED_INC */ +#define DMA0_QM_ARB_MST_CRED_INC_VAL_SHIFT 0 +#define DMA0_QM_ARB_MST_CRED_INC_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_ARB_MST_CHOISE_PUSH_OFST */ +#define DMA0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_SHIFT 0 +#define DMA0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST */ +#define DMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_SHIFT 0 +#define DMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_ARB_MST_SLAVE_EN */ +#define DMA0_QM_ARB_MST_SLAVE_EN_VAL_SHIFT 0 +#define DMA0_QM_ARB_MST_SLAVE_EN_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_ARB_MST_QUIET_PER */ +#define DMA0_QM_ARB_MST_QUIET_PER_VAL_SHIFT 0 +#define DMA0_QM_ARB_MST_QUIET_PER_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_ARB_SLV_CHOISE_WDT */ +#define DMA0_QM_ARB_SLV_CHOISE_WDT_VAL_SHIFT 0 +#define DMA0_QM_ARB_SLV_CHOISE_WDT_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_ARB_SLV_ID */ +#define DMA0_QM_ARB_SLV_ID_VAL_SHIFT 0 +#define DMA0_QM_ARB_SLV_ID_VAL_MASK 0x1F + +/* DMA0_QM_ARB_MSG_MAX_INFLIGHT */ +#define DMA0_QM_ARB_MSG_MAX_INFLIGHT_VAL_SHIFT 0 +#define DMA0_QM_ARB_MSG_MAX_INFLIGHT_VAL_MASK 0x3F + +/* DMA0_QM_ARB_MSG_AWUSER_31_11 */ +#define DMA0_QM_ARB_MSG_AWUSER_31_11_VAL_SHIFT 0 +#define DMA0_QM_ARB_MSG_AWUSER_31_11_VAL_MASK 0x1FFFFF + +/* DMA0_QM_ARB_MSG_AWUSER_SEC_PROP */ +#define DMA0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_SHIFT 0 +#define DMA0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_MASK 0x3FF +#define DMA0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_SHIFT 10 +#define DMA0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_MASK 0x400 + +/* DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP */ +#define DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_SHIFT 0 +#define DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_MASK 0x3FF +#define DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_SHIFT 10 +#define DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_MASK 0x400 + +/* DMA0_QM_ARB_BASE_LO */ +#define DMA0_QM_ARB_BASE_LO_VAL_SHIFT 0 +#define DMA0_QM_ARB_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_ARB_BASE_HI */ +#define DMA0_QM_ARB_BASE_HI_VAL_SHIFT 0 +#define DMA0_QM_ARB_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_ARB_STATE_STS */ +#define DMA0_QM_ARB_STATE_STS_VAL_SHIFT 0 +#define DMA0_QM_ARB_STATE_STS_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_ARB_CHOISE_FULLNESS_STS */ +#define DMA0_QM_ARB_CHOISE_FULLNESS_STS_VAL_SHIFT 0 +#define DMA0_QM_ARB_CHOISE_FULLNESS_STS_VAL_MASK 0x7F + +/* DMA0_QM_ARB_MSG_STS */ +#define DMA0_QM_ARB_MSG_STS_FULL_SHIFT 0 +#define DMA0_QM_ARB_MSG_STS_FULL_MASK 0x1 +#define DMA0_QM_ARB_MSG_STS_NO_INFLIGHT_SHIFT 1 +#define DMA0_QM_ARB_MSG_STS_NO_INFLIGHT_MASK 0x2 + +/* DMA0_QM_ARB_SLV_CHOISE_Q_HEAD */ +#define DMA0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_SHIFT 0 +#define DMA0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_MASK 0x3 + +/* DMA0_QM_ARB_ERR_CAUSE */ +#define DMA0_QM_ARB_ERR_CAUSE_CHOISE_OVF_SHIFT 0 +#define DMA0_QM_ARB_ERR_CAUSE_CHOISE_OVF_MASK 0x1 +#define DMA0_QM_ARB_ERR_CAUSE_CHOISE_WDT_SHIFT 1 +#define DMA0_QM_ARB_ERR_CAUSE_CHOISE_WDT_MASK 0x2 +#define DMA0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_SHIFT 2 +#define DMA0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_MASK 0x4 + +/* DMA0_QM_ARB_ERR_MSG_EN */ +#define DMA0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_SHIFT 0 +#define DMA0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1 +#define DMA0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_SHIFT 1 +#define DMA0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2 +#define DMA0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_SHIFT 2 +#define 
DMA0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4 + +/* DMA0_QM_ARB_ERR_STS_DRP */ +#define DMA0_QM_ARB_ERR_STS_DRP_VAL_SHIFT 0 +#define DMA0_QM_ARB_ERR_STS_DRP_VAL_MASK 0x3 + +/* DMA0_QM_ARB_MST_CRED_STS */ +#define DMA0_QM_ARB_MST_CRED_STS_VAL_SHIFT 0 +#define DMA0_QM_ARB_MST_CRED_STS_VAL_MASK 0x7F + +/* DMA0_QM_CGM_CFG */ +#define DMA0_QM_CGM_CFG_IDLE_TH_SHIFT 0 +#define DMA0_QM_CGM_CFG_IDLE_TH_MASK 0xFFF +#define DMA0_QM_CGM_CFG_G2F_TH_SHIFT 16 +#define DMA0_QM_CGM_CFG_G2F_TH_MASK 0xFF0000 +#define DMA0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT 24 +#define DMA0_QM_CGM_CFG_CP_IDLE_MASK_MASK 0x1F000000 +#define DMA0_QM_CGM_CFG_EN_SHIFT 31 +#define DMA0_QM_CGM_CFG_EN_MASK 0x80000000 + +/* DMA0_QM_CGM_STS */ +#define DMA0_QM_CGM_STS_ST_SHIFT 0 +#define DMA0_QM_CGM_STS_ST_MASK 0x3 +#define DMA0_QM_CGM_STS_CG_SHIFT 4 +#define DMA0_QM_CGM_STS_CG_MASK 0x10 +#define DMA0_QM_CGM_STS_AGENT_IDLE_SHIFT 8 +#define DMA0_QM_CGM_STS_AGENT_IDLE_MASK 0x100 +#define DMA0_QM_CGM_STS_AXI_IDLE_SHIFT 9 +#define DMA0_QM_CGM_STS_AXI_IDLE_MASK 0x200 +#define DMA0_QM_CGM_STS_CP_IDLE_SHIFT 10 +#define DMA0_QM_CGM_STS_CP_IDLE_MASK 0x400 + +/* DMA0_QM_CGM_CFG1 */ +#define DMA0_QM_CGM_CFG1_MASK_TH_SHIFT 0 +#define DMA0_QM_CGM_CFG1_MASK_TH_MASK 0xFF + +/* DMA0_QM_LOCAL_RANGE_BASE */ +#define DMA0_QM_LOCAL_RANGE_BASE_VAL_SHIFT 0 +#define DMA0_QM_LOCAL_RANGE_BASE_VAL_MASK 0xFFFF + +/* DMA0_QM_LOCAL_RANGE_SIZE */ +#define DMA0_QM_LOCAL_RANGE_SIZE_VAL_SHIFT 0 +#define DMA0_QM_LOCAL_RANGE_SIZE_VAL_MASK 0xFFFF + +/* DMA0_QM_CSMR_STRICT_PRIO_CFG */ +#define DMA0_QM_CSMR_STRICT_PRIO_CFG_TYPE_SHIFT 0 +#define DMA0_QM_CSMR_STRICT_PRIO_CFG_TYPE_MASK 0x1 + +/* DMA0_QM_HBW_RD_RATE_LIM_CFG_1 */ +#define DMA0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0 +#define DMA0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF +#define DMA0_QM_HBW_RD_RATE_LIM_CFG_1_EN_SHIFT 31 +#define DMA0_QM_HBW_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000 + +/* DMA0_QM_LBW_WR_RATE_LIM_CFG_0 */ +#define DMA0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0 +#define DMA0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF +#define DMA0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_SHIFT 16 +#define DMA0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000 + +/* DMA0_QM_LBW_WR_RATE_LIM_CFG_1 */ +#define DMA0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0 +#define DMA0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF +#define DMA0_QM_LBW_WR_RATE_LIM_CFG_1_EN_SHIFT 31 +#define DMA0_QM_LBW_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000 + +/* DMA0_QM_HBW_RD_RATE_LIM_CFG_0 */ +#define DMA0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0 +#define DMA0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF +#define DMA0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_SHIFT 16 +#define DMA0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000 + +/* DMA0_QM_GLBL_AXCACHE */ +#define DMA0_QM_GLBL_AXCACHE_AR_SHIFT 0 +#define DMA0_QM_GLBL_AXCACHE_AR_MASK 0xF +#define DMA0_QM_GLBL_AXCACHE_AW_SHIFT 16 +#define DMA0_QM_GLBL_AXCACHE_AW_MASK 0xF0000 + +/* DMA0_QM_IND_GW_APB_CFG */ +#define DMA0_QM_IND_GW_APB_CFG_ADDR_SHIFT 0 +#define DMA0_QM_IND_GW_APB_CFG_ADDR_MASK 0x7FFFFFFF +#define DMA0_QM_IND_GW_APB_CFG_CMD_SHIFT 31 +#define DMA0_QM_IND_GW_APB_CFG_CMD_MASK 0x80000000 + +/* DMA0_QM_IND_GW_APB_WDATA */ +#define DMA0_QM_IND_GW_APB_WDATA_VAL_SHIFT 0 +#define DMA0_QM_IND_GW_APB_WDATA_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_IND_GW_APB_RDATA */ +#define DMA0_QM_IND_GW_APB_RDATA_VAL_SHIFT 0 +#define DMA0_QM_IND_GW_APB_RDATA_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_IND_GW_APB_STATUS */ +#define DMA0_QM_IND_GW_APB_STATUS_RDY_SHIFT 0 +#define DMA0_QM_IND_GW_APB_STATUS_RDY_MASK 0x1 +#define DMA0_QM_IND_GW_APB_STATUS_ERR_SHIFT 1 +#define 
DMA0_QM_IND_GW_APB_STATUS_ERR_MASK 0x2 + +/* DMA0_QM_GLBL_ERR_ADDR_LO */ +#define DMA0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0 +#define DMA0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_GLBL_ERR_ADDR_HI */ +#define DMA0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0 +#define DMA0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_GLBL_ERR_WDATA */ +#define DMA0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0 +#define DMA0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF + +/* DMA0_QM_GLBL_MEM_INIT_BUSY */ +#define DMA0_QM_GLBL_MEM_INIT_BUSY_RBUF_SHIFT 0 +#define DMA0_QM_GLBL_MEM_INIT_BUSY_RBUF_MASK 0xF + +#endif /* ASIC_REG_DMA0_QM_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h new file mode 100644 index 000000000..8e56a93d8 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA0_QM_REGS_H_ +#define ASIC_REG_DMA0_QM_REGS_H_ + +/* + ***************************************** + * DMA0_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA0_QM_GLBL_CFG0 0x508000 + +#define mmDMA0_QM_GLBL_CFG1 0x508004 + +#define mmDMA0_QM_GLBL_PROT 0x508008 + +#define mmDMA0_QM_GLBL_ERR_CFG 0x50800C + +#define mmDMA0_QM_GLBL_SECURE_PROPS_0 0x508010 + +#define mmDMA0_QM_GLBL_SECURE_PROPS_1 0x508014 + +#define mmDMA0_QM_GLBL_SECURE_PROPS_2 0x508018 + +#define mmDMA0_QM_GLBL_SECURE_PROPS_3 0x50801C + +#define mmDMA0_QM_GLBL_SECURE_PROPS_4 0x508020 + +#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_0 0x508024 + +#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_1 0x508028 + +#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_2 0x50802C + +#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_3 0x508030 + +#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_4 0x508034 + +#define mmDMA0_QM_GLBL_STS0 0x508038 + +#define mmDMA0_QM_GLBL_STS1_0 0x508040 + +#define mmDMA0_QM_GLBL_STS1_1 0x508044 + +#define mmDMA0_QM_GLBL_STS1_2 0x508048 + +#define mmDMA0_QM_GLBL_STS1_3 0x50804C + +#define mmDMA0_QM_GLBL_STS1_4 0x508050 + +#define mmDMA0_QM_GLBL_MSG_EN_0 0x508054 + +#define mmDMA0_QM_GLBL_MSG_EN_1 0x508058 + +#define mmDMA0_QM_GLBL_MSG_EN_2 0x50805C + +#define mmDMA0_QM_GLBL_MSG_EN_3 0x508060 + +#define mmDMA0_QM_GLBL_MSG_EN_4 0x508068 + +#define mmDMA0_QM_PQ_BASE_LO_0 0x508070 + +#define mmDMA0_QM_PQ_BASE_LO_1 0x508074 + +#define mmDMA0_QM_PQ_BASE_LO_2 0x508078 + +#define mmDMA0_QM_PQ_BASE_LO_3 0x50807C + +#define mmDMA0_QM_PQ_BASE_HI_0 0x508080 + +#define mmDMA0_QM_PQ_BASE_HI_1 0x508084 + +#define mmDMA0_QM_PQ_BASE_HI_2 0x508088 + +#define mmDMA0_QM_PQ_BASE_HI_3 0x50808C + +#define mmDMA0_QM_PQ_SIZE_0 0x508090 + +#define mmDMA0_QM_PQ_SIZE_1 0x508094 + +#define mmDMA0_QM_PQ_SIZE_2 0x508098 + +#define mmDMA0_QM_PQ_SIZE_3 0x50809C + +#define mmDMA0_QM_PQ_PI_0 0x5080A0 + +#define mmDMA0_QM_PQ_PI_1 0x5080A4 + +#define mmDMA0_QM_PQ_PI_2 0x5080A8 + +#define mmDMA0_QM_PQ_PI_3 0x5080AC + +#define mmDMA0_QM_PQ_CI_0 0x5080B0 + +#define mmDMA0_QM_PQ_CI_1 0x5080B4 + +#define mmDMA0_QM_PQ_CI_2 0x5080B8 + +#define mmDMA0_QM_PQ_CI_3 0x5080BC + +#define mmDMA0_QM_PQ_CFG0_0 0x5080C0 + +#define mmDMA0_QM_PQ_CFG0_1 0x5080C4 + +#define mmDMA0_QM_PQ_CFG0_2 0x5080C8 + +#define mmDMA0_QM_PQ_CFG0_3 0x5080CC + +#define mmDMA0_QM_PQ_CFG1_0 0x5080D0 + +#define mmDMA0_QM_PQ_CFG1_1 
0x5080D4 + +#define mmDMA0_QM_PQ_CFG1_2 0x5080D8 + +#define mmDMA0_QM_PQ_CFG1_3 0x5080DC + +#define mmDMA0_QM_PQ_ARUSER_31_11_0 0x5080E0 + +#define mmDMA0_QM_PQ_ARUSER_31_11_1 0x5080E4 + +#define mmDMA0_QM_PQ_ARUSER_31_11_2 0x5080E8 + +#define mmDMA0_QM_PQ_ARUSER_31_11_3 0x5080EC + +#define mmDMA0_QM_PQ_STS0_0 0x5080F0 + +#define mmDMA0_QM_PQ_STS0_1 0x5080F4 + +#define mmDMA0_QM_PQ_STS0_2 0x5080F8 + +#define mmDMA0_QM_PQ_STS0_3 0x5080FC + +#define mmDMA0_QM_PQ_STS1_0 0x508100 + +#define mmDMA0_QM_PQ_STS1_1 0x508104 + +#define mmDMA0_QM_PQ_STS1_2 0x508108 + +#define mmDMA0_QM_PQ_STS1_3 0x50810C + +#define mmDMA0_QM_CQ_CFG0_0 0x508110 + +#define mmDMA0_QM_CQ_CFG0_1 0x508114 + +#define mmDMA0_QM_CQ_CFG0_2 0x508118 + +#define mmDMA0_QM_CQ_CFG0_3 0x50811C + +#define mmDMA0_QM_CQ_CFG0_4 0x508120 + +#define mmDMA0_QM_CQ_CFG1_0 0x508124 + +#define mmDMA0_QM_CQ_CFG1_1 0x508128 + +#define mmDMA0_QM_CQ_CFG1_2 0x50812C + +#define mmDMA0_QM_CQ_CFG1_3 0x508130 + +#define mmDMA0_QM_CQ_CFG1_4 0x508134 + +#define mmDMA0_QM_CQ_ARUSER_31_11_0 0x508138 + +#define mmDMA0_QM_CQ_ARUSER_31_11_1 0x50813C + +#define mmDMA0_QM_CQ_ARUSER_31_11_2 0x508140 + +#define mmDMA0_QM_CQ_ARUSER_31_11_3 0x508144 + +#define mmDMA0_QM_CQ_ARUSER_31_11_4 0x508148 + +#define mmDMA0_QM_CQ_STS0_0 0x50814C + +#define mmDMA0_QM_CQ_STS0_1 0x508150 + +#define mmDMA0_QM_CQ_STS0_2 0x508154 + +#define mmDMA0_QM_CQ_STS0_3 0x508158 + +#define mmDMA0_QM_CQ_STS0_4 0x50815C + +#define mmDMA0_QM_CQ_STS1_0 0x508160 + +#define mmDMA0_QM_CQ_STS1_1 0x508164 + +#define mmDMA0_QM_CQ_STS1_2 0x508168 + +#define mmDMA0_QM_CQ_STS1_3 0x50816C + +#define mmDMA0_QM_CQ_STS1_4 0x508170 + +#define mmDMA0_QM_CQ_PTR_LO_0 0x508174 + +#define mmDMA0_QM_CQ_PTR_HI_0 0x508178 + +#define mmDMA0_QM_CQ_TSIZE_0 0x50817C + +#define mmDMA0_QM_CQ_CTL_0 0x508180 + +#define mmDMA0_QM_CQ_PTR_LO_1 0x508184 + +#define mmDMA0_QM_CQ_PTR_HI_1 0x508188 + +#define mmDMA0_QM_CQ_TSIZE_1 0x50818C + +#define mmDMA0_QM_CQ_CTL_1 0x508190 + +#define mmDMA0_QM_CQ_PTR_LO_2 0x508194 + +#define mmDMA0_QM_CQ_PTR_HI_2 0x508198 + +#define mmDMA0_QM_CQ_TSIZE_2 0x50819C + +#define mmDMA0_QM_CQ_CTL_2 0x5081A0 + +#define mmDMA0_QM_CQ_PTR_LO_3 0x5081A4 + +#define mmDMA0_QM_CQ_PTR_HI_3 0x5081A8 + +#define mmDMA0_QM_CQ_TSIZE_3 0x5081AC + +#define mmDMA0_QM_CQ_CTL_3 0x5081B0 + +#define mmDMA0_QM_CQ_PTR_LO_4 0x5081B4 + +#define mmDMA0_QM_CQ_PTR_HI_4 0x5081B8 + +#define mmDMA0_QM_CQ_TSIZE_4 0x5081BC + +#define mmDMA0_QM_CQ_CTL_4 0x5081C0 + +#define mmDMA0_QM_CQ_PTR_LO_STS_0 0x5081C4 + +#define mmDMA0_QM_CQ_PTR_LO_STS_1 0x5081C8 + +#define mmDMA0_QM_CQ_PTR_LO_STS_2 0x5081CC + +#define mmDMA0_QM_CQ_PTR_LO_STS_3 0x5081D0 + +#define mmDMA0_QM_CQ_PTR_LO_STS_4 0x5081D4 + +#define mmDMA0_QM_CQ_PTR_HI_STS_0 0x5081D8 + +#define mmDMA0_QM_CQ_PTR_HI_STS_1 0x5081DC + +#define mmDMA0_QM_CQ_PTR_HI_STS_2 0x5081E0 + +#define mmDMA0_QM_CQ_PTR_HI_STS_3 0x5081E4 + +#define mmDMA0_QM_CQ_PTR_HI_STS_4 0x5081E8 + +#define mmDMA0_QM_CQ_TSIZE_STS_0 0x5081EC + +#define mmDMA0_QM_CQ_TSIZE_STS_1 0x5081F0 + +#define mmDMA0_QM_CQ_TSIZE_STS_2 0x5081F4 + +#define mmDMA0_QM_CQ_TSIZE_STS_3 0x5081F8 + +#define mmDMA0_QM_CQ_TSIZE_STS_4 0x5081FC + +#define mmDMA0_QM_CQ_CTL_STS_0 0x508200 + +#define mmDMA0_QM_CQ_CTL_STS_1 0x508204 + +#define mmDMA0_QM_CQ_CTL_STS_2 0x508208 + +#define mmDMA0_QM_CQ_CTL_STS_3 0x50820C + +#define mmDMA0_QM_CQ_CTL_STS_4 0x508210 + +#define mmDMA0_QM_CQ_IFIFO_CNT_0 0x508214 + +#define mmDMA0_QM_CQ_IFIFO_CNT_1 0x508218 + +#define mmDMA0_QM_CQ_IFIFO_CNT_2 0x50821C + +#define mmDMA0_QM_CQ_IFIFO_CNT_3 0x508220 + +#define 
mmDMA0_QM_CQ_IFIFO_CNT_4 0x508224 + +#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 0x508228 + +#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_1 0x50822C + +#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_2 0x508230 + +#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_3 0x508234 + +#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_4 0x508238 + +#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 0x50823C + +#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_1 0x508240 + +#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_2 0x508244 + +#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_3 0x508248 + +#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_4 0x50824C + +#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 0x508250 + +#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_1 0x508254 + +#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_2 0x508258 + +#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_3 0x50825C + +#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_4 0x508260 + +#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 0x508264 + +#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_1 0x508268 + +#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_2 0x50826C + +#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_3 0x508270 + +#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_4 0x508274 + +#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 0x508278 + +#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_1 0x50827C + +#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 0x508280 + +#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_3 0x508284 + +#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_4 0x508288 + +#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 0x50828C + +#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_1 0x508290 + +#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_2 0x508294 + +#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_3 0x508298 + +#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_4 0x50829C + +#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 0x5082A0 + +#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_1 0x5082A4 + +#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_2 0x5082A8 + +#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_3 0x5082AC + +#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_4 0x5082B0 + +#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 0x5082B4 + +#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_1 0x5082B8 + +#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_2 0x5082BC + +#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_3 0x5082C0 + +#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_4 0x5082C4 + +#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 0x5082C8 + +#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_1 0x5082CC + +#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_2 0x5082D0 + +#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_3 0x5082D4 + +#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_4 0x5082D8 + +#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5082E0 + +#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5082E4 + +#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5082E8 + +#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5082EC + +#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5082F0 + +#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5082F4 + +#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5082F8 + +#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5082FC + +#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x508300 + +#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x508304 + +#define mmDMA0_QM_CP_FENCE0_RDATA_0 0x508308 + +#define mmDMA0_QM_CP_FENCE0_RDATA_1 0x50830C + +#define mmDMA0_QM_CP_FENCE0_RDATA_2 0x508310 + +#define mmDMA0_QM_CP_FENCE0_RDATA_3 0x508314 + +#define mmDMA0_QM_CP_FENCE0_RDATA_4 0x508318 + +#define mmDMA0_QM_CP_FENCE1_RDATA_0 0x50831C + +#define mmDMA0_QM_CP_FENCE1_RDATA_1 0x508320 + +#define mmDMA0_QM_CP_FENCE1_RDATA_2 0x508324 + +#define mmDMA0_QM_CP_FENCE1_RDATA_3 0x508328 + +#define mmDMA0_QM_CP_FENCE1_RDATA_4 0x50832C + +#define mmDMA0_QM_CP_FENCE2_RDATA_0 0x508330 + +#define 
mmDMA0_QM_CP_FENCE2_RDATA_1 0x508334 + +#define mmDMA0_QM_CP_FENCE2_RDATA_2 0x508338 + +#define mmDMA0_QM_CP_FENCE2_RDATA_3 0x50833C + +#define mmDMA0_QM_CP_FENCE2_RDATA_4 0x508340 + +#define mmDMA0_QM_CP_FENCE3_RDATA_0 0x508344 + +#define mmDMA0_QM_CP_FENCE3_RDATA_1 0x508348 + +#define mmDMA0_QM_CP_FENCE3_RDATA_2 0x50834C + +#define mmDMA0_QM_CP_FENCE3_RDATA_3 0x508350 + +#define mmDMA0_QM_CP_FENCE3_RDATA_4 0x508354 + +#define mmDMA0_QM_CP_FENCE0_CNT_0 0x508358 + +#define mmDMA0_QM_CP_FENCE0_CNT_1 0x50835C + +#define mmDMA0_QM_CP_FENCE0_CNT_2 0x508360 + +#define mmDMA0_QM_CP_FENCE0_CNT_3 0x508364 + +#define mmDMA0_QM_CP_FENCE0_CNT_4 0x508368 + +#define mmDMA0_QM_CP_FENCE1_CNT_0 0x50836C + +#define mmDMA0_QM_CP_FENCE1_CNT_1 0x508370 + +#define mmDMA0_QM_CP_FENCE1_CNT_2 0x508374 + +#define mmDMA0_QM_CP_FENCE1_CNT_3 0x508378 + +#define mmDMA0_QM_CP_FENCE1_CNT_4 0x50837C + +#define mmDMA0_QM_CP_FENCE2_CNT_0 0x508380 + +#define mmDMA0_QM_CP_FENCE2_CNT_1 0x508384 + +#define mmDMA0_QM_CP_FENCE2_CNT_2 0x508388 + +#define mmDMA0_QM_CP_FENCE2_CNT_3 0x50838C + +#define mmDMA0_QM_CP_FENCE2_CNT_4 0x508390 + +#define mmDMA0_QM_CP_FENCE3_CNT_0 0x508394 + +#define mmDMA0_QM_CP_FENCE3_CNT_1 0x508398 + +#define mmDMA0_QM_CP_FENCE3_CNT_2 0x50839C + +#define mmDMA0_QM_CP_FENCE3_CNT_3 0x5083A0 + +#define mmDMA0_QM_CP_FENCE3_CNT_4 0x5083A4 + +#define mmDMA0_QM_CP_STS_0 0x5083A8 + +#define mmDMA0_QM_CP_STS_1 0x5083AC + +#define mmDMA0_QM_CP_STS_2 0x5083B0 + +#define mmDMA0_QM_CP_STS_3 0x5083B4 + +#define mmDMA0_QM_CP_STS_4 0x5083B8 + +#define mmDMA0_QM_CP_CURRENT_INST_LO_0 0x5083BC + +#define mmDMA0_QM_CP_CURRENT_INST_LO_1 0x5083C0 + +#define mmDMA0_QM_CP_CURRENT_INST_LO_2 0x5083C4 + +#define mmDMA0_QM_CP_CURRENT_INST_LO_3 0x5083C8 + +#define mmDMA0_QM_CP_CURRENT_INST_LO_4 0x5083CC + +#define mmDMA0_QM_CP_CURRENT_INST_HI_0 0x5083D0 + +#define mmDMA0_QM_CP_CURRENT_INST_HI_1 0x5083D4 + +#define mmDMA0_QM_CP_CURRENT_INST_HI_2 0x5083D8 + +#define mmDMA0_QM_CP_CURRENT_INST_HI_3 0x5083DC + +#define mmDMA0_QM_CP_CURRENT_INST_HI_4 0x5083E0 + +#define mmDMA0_QM_CP_BARRIER_CFG_0 0x5083F4 + +#define mmDMA0_QM_CP_BARRIER_CFG_1 0x5083F8 + +#define mmDMA0_QM_CP_BARRIER_CFG_2 0x5083FC + +#define mmDMA0_QM_CP_BARRIER_CFG_3 0x508400 + +#define mmDMA0_QM_CP_BARRIER_CFG_4 0x508404 + +#define mmDMA0_QM_CP_DBG_0_0 0x508408 + +#define mmDMA0_QM_CP_DBG_0_1 0x50840C + +#define mmDMA0_QM_CP_DBG_0_2 0x508410 + +#define mmDMA0_QM_CP_DBG_0_3 0x508414 + +#define mmDMA0_QM_CP_DBG_0_4 0x508418 + +#define mmDMA0_QM_CP_ARUSER_31_11_0 0x50841C + +#define mmDMA0_QM_CP_ARUSER_31_11_1 0x508420 + +#define mmDMA0_QM_CP_ARUSER_31_11_2 0x508424 + +#define mmDMA0_QM_CP_ARUSER_31_11_3 0x508428 + +#define mmDMA0_QM_CP_ARUSER_31_11_4 0x50842C + +#define mmDMA0_QM_CP_AWUSER_31_11_0 0x508430 + +#define mmDMA0_QM_CP_AWUSER_31_11_1 0x508434 + +#define mmDMA0_QM_CP_AWUSER_31_11_2 0x508438 + +#define mmDMA0_QM_CP_AWUSER_31_11_3 0x50843C + +#define mmDMA0_QM_CP_AWUSER_31_11_4 0x508440 + +#define mmDMA0_QM_ARB_CFG_0 0x508A00 + +#define mmDMA0_QM_ARB_CHOISE_Q_PUSH 0x508A04 + +#define mmDMA0_QM_ARB_WRR_WEIGHT_0 0x508A08 + +#define mmDMA0_QM_ARB_WRR_WEIGHT_1 0x508A0C + +#define mmDMA0_QM_ARB_WRR_WEIGHT_2 0x508A10 + +#define mmDMA0_QM_ARB_WRR_WEIGHT_3 0x508A14 + +#define mmDMA0_QM_ARB_CFG_1 0x508A18 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_0 0x508A20 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_1 0x508A24 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_2 0x508A28 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_3 0x508A2C + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_4 0x508A30 + +#define 
mmDMA0_QM_ARB_MST_AVAIL_CRED_5 0x508A34 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_6 0x508A38 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_7 0x508A3C + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_8 0x508A40 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_9 0x508A44 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_10 0x508A48 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_11 0x508A4C + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_12 0x508A50 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_13 0x508A54 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_14 0x508A58 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_15 0x508A5C + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_16 0x508A60 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_17 0x508A64 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_18 0x508A68 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_19 0x508A6C + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_20 0x508A70 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_21 0x508A74 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_22 0x508A78 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_23 0x508A7C + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_24 0x508A80 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_25 0x508A84 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_26 0x508A88 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_27 0x508A8C + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_28 0x508A90 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_29 0x508A94 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_30 0x508A98 + +#define mmDMA0_QM_ARB_MST_AVAIL_CRED_31 0x508A9C + +#define mmDMA0_QM_ARB_MST_CRED_INC 0x508AA0 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x508AA4 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x508AA8 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x508AAC + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x508AB0 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x508AB4 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x508AB8 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x508ABC + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x508AC0 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x508AC4 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x508AC8 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x508ACC + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x508AD0 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x508AD4 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x508AD8 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x508ADC + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x508AE0 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x508AE4 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x508AE8 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x508AEC + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x508AF0 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x508AF4 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x508AF8 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x508AFC + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x508B00 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x508B04 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x508B08 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x508B0C + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x508B10 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x508B14 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x508B18 + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x508B1C + +#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x508B20 + +#define mmDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x508B28 + +#define mmDMA0_QM_ARB_MST_SLAVE_EN 0x508B2C + +#define mmDMA0_QM_ARB_MST_QUIET_PER 0x508B34 + +#define mmDMA0_QM_ARB_SLV_CHOISE_WDT 0x508B38 + +#define mmDMA0_QM_ARB_SLV_ID 0x508B3C + +#define 
mmDMA0_QM_ARB_MSG_MAX_INFLIGHT 0x508B44 + +#define mmDMA0_QM_ARB_MSG_AWUSER_31_11 0x508B48 + +#define mmDMA0_QM_ARB_MSG_AWUSER_SEC_PROP 0x508B4C + +#define mmDMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x508B50 + +#define mmDMA0_QM_ARB_BASE_LO 0x508B54 + +#define mmDMA0_QM_ARB_BASE_HI 0x508B58 + +#define mmDMA0_QM_ARB_STATE_STS 0x508B80 + +#define mmDMA0_QM_ARB_CHOISE_FULLNESS_STS 0x508B84 + +#define mmDMA0_QM_ARB_MSG_STS 0x508B88 + +#define mmDMA0_QM_ARB_SLV_CHOISE_Q_HEAD 0x508B8C + +#define mmDMA0_QM_ARB_ERR_CAUSE 0x508B9C + +#define mmDMA0_QM_ARB_ERR_MSG_EN 0x508BA0 + +#define mmDMA0_QM_ARB_ERR_STS_DRP 0x508BA8 + +#define mmDMA0_QM_ARB_MST_CRED_STS_0 0x508BB0 + +#define mmDMA0_QM_ARB_MST_CRED_STS_1 0x508BB4 + +#define mmDMA0_QM_ARB_MST_CRED_STS_2 0x508BB8 + +#define mmDMA0_QM_ARB_MST_CRED_STS_3 0x508BBC + +#define mmDMA0_QM_ARB_MST_CRED_STS_4 0x508BC0 + +#define mmDMA0_QM_ARB_MST_CRED_STS_5 0x508BC4 + +#define mmDMA0_QM_ARB_MST_CRED_STS_6 0x508BC8 + +#define mmDMA0_QM_ARB_MST_CRED_STS_7 0x508BCC + +#define mmDMA0_QM_ARB_MST_CRED_STS_8 0x508BD0 + +#define mmDMA0_QM_ARB_MST_CRED_STS_9 0x508BD4 + +#define mmDMA0_QM_ARB_MST_CRED_STS_10 0x508BD8 + +#define mmDMA0_QM_ARB_MST_CRED_STS_11 0x508BDC + +#define mmDMA0_QM_ARB_MST_CRED_STS_12 0x508BE0 + +#define mmDMA0_QM_ARB_MST_CRED_STS_13 0x508BE4 + +#define mmDMA0_QM_ARB_MST_CRED_STS_14 0x508BE8 + +#define mmDMA0_QM_ARB_MST_CRED_STS_15 0x508BEC + +#define mmDMA0_QM_ARB_MST_CRED_STS_16 0x508BF0 + +#define mmDMA0_QM_ARB_MST_CRED_STS_17 0x508BF4 + +#define mmDMA0_QM_ARB_MST_CRED_STS_18 0x508BF8 + +#define mmDMA0_QM_ARB_MST_CRED_STS_19 0x508BFC + +#define mmDMA0_QM_ARB_MST_CRED_STS_20 0x508C00 + +#define mmDMA0_QM_ARB_MST_CRED_STS_21 0x508C04 + +#define mmDMA0_QM_ARB_MST_CRED_STS_22 0x508C08 + +#define mmDMA0_QM_ARB_MST_CRED_STS_23 0x508C0C + +#define mmDMA0_QM_ARB_MST_CRED_STS_24 0x508C10 + +#define mmDMA0_QM_ARB_MST_CRED_STS_25 0x508C14 + +#define mmDMA0_QM_ARB_MST_CRED_STS_26 0x508C18 + +#define mmDMA0_QM_ARB_MST_CRED_STS_27 0x508C1C + +#define mmDMA0_QM_ARB_MST_CRED_STS_28 0x508C20 + +#define mmDMA0_QM_ARB_MST_CRED_STS_29 0x508C24 + +#define mmDMA0_QM_ARB_MST_CRED_STS_30 0x508C28 + +#define mmDMA0_QM_ARB_MST_CRED_STS_31 0x508C2C + +#define mmDMA0_QM_CGM_CFG 0x508C70 + +#define mmDMA0_QM_CGM_STS 0x508C74 + +#define mmDMA0_QM_CGM_CFG1 0x508C78 + +#define mmDMA0_QM_LOCAL_RANGE_BASE 0x508C80 + +#define mmDMA0_QM_LOCAL_RANGE_SIZE 0x508C84 + +#define mmDMA0_QM_CSMR_STRICT_PRIO_CFG 0x508C90 + +#define mmDMA0_QM_HBW_RD_RATE_LIM_CFG_1 0x508C94 + +#define mmDMA0_QM_LBW_WR_RATE_LIM_CFG_0 0x508C98 + +#define mmDMA0_QM_LBW_WR_RATE_LIM_CFG_1 0x508C9C + +#define mmDMA0_QM_HBW_RD_RATE_LIM_CFG_0 0x508CA0 + +#define mmDMA0_QM_GLBL_AXCACHE 0x508CA4 + +#define mmDMA0_QM_IND_GW_APB_CFG 0x508CB0 + +#define mmDMA0_QM_IND_GW_APB_WDATA 0x508CB4 + +#define mmDMA0_QM_IND_GW_APB_RDATA 0x508CB8 + +#define mmDMA0_QM_IND_GW_APB_STATUS 0x508CBC + +#define mmDMA0_QM_GLBL_ERR_ADDR_LO 0x508CD0 + +#define mmDMA0_QM_GLBL_ERR_ADDR_HI 0x508CD4 + +#define mmDMA0_QM_GLBL_ERR_WDATA 0x508CD8 + +#define mmDMA0_QM_GLBL_MEM_INIT_BUSY 0x508D00 + +#endif /* ASIC_REG_DMA0_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h new file mode 100644 index 000000000..4d8d8f26c --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA1_CORE_REGS_H_ +#define ASIC_REG_DMA1_CORE_REGS_H_ + +/* + ***************************************** + * DMA1_CORE (Prototype: DMA_CORE) + ***************************************** + */ + +#define mmDMA1_CORE_CFG_0 0x520000 + +#define mmDMA1_CORE_CFG_1 0x520004 + +#define mmDMA1_CORE_LBW_MAX_OUTSTAND 0x520008 + +#define mmDMA1_CORE_SRC_BASE_LO 0x520014 + +#define mmDMA1_CORE_SRC_BASE_HI 0x520018 + +#define mmDMA1_CORE_DST_BASE_LO 0x52001C + +#define mmDMA1_CORE_DST_BASE_HI 0x520020 + +#define mmDMA1_CORE_SRC_TSIZE_1 0x52002C + +#define mmDMA1_CORE_SRC_STRIDE_1 0x520030 + +#define mmDMA1_CORE_SRC_TSIZE_2 0x520034 + +#define mmDMA1_CORE_SRC_STRIDE_2 0x520038 + +#define mmDMA1_CORE_SRC_TSIZE_3 0x52003C + +#define mmDMA1_CORE_SRC_STRIDE_3 0x520040 + +#define mmDMA1_CORE_SRC_TSIZE_4 0x520044 + +#define mmDMA1_CORE_SRC_STRIDE_4 0x520048 + +#define mmDMA1_CORE_SRC_TSIZE_0 0x52004C + +#define mmDMA1_CORE_DST_TSIZE_1 0x520054 + +#define mmDMA1_CORE_DST_STRIDE_1 0x520058 + +#define mmDMA1_CORE_DST_TSIZE_2 0x52005C + +#define mmDMA1_CORE_DST_STRIDE_2 0x520060 + +#define mmDMA1_CORE_DST_TSIZE_3 0x520064 + +#define mmDMA1_CORE_DST_STRIDE_3 0x520068 + +#define mmDMA1_CORE_DST_TSIZE_4 0x52006C + +#define mmDMA1_CORE_DST_STRIDE_4 0x520070 + +#define mmDMA1_CORE_DST_TSIZE_0 0x520074 + +#define mmDMA1_CORE_COMMIT 0x520078 + +#define mmDMA1_CORE_WR_COMP_WDATA 0x52007C + +#define mmDMA1_CORE_WR_COMP_ADDR_LO 0x520080 + +#define mmDMA1_CORE_WR_COMP_ADDR_HI 0x520084 + +#define mmDMA1_CORE_WR_COMP_AWUSER_31_11 0x520088 + +#define mmDMA1_CORE_TE_NUMROWS 0x520094 + +#define mmDMA1_CORE_PROT 0x5200B8 + +#define mmDMA1_CORE_SECURE_PROPS 0x5200F0 + +#define mmDMA1_CORE_NON_SECURE_PROPS 0x5200F4 + +#define mmDMA1_CORE_RD_MAX_OUTSTAND 0x520100 + +#define mmDMA1_CORE_RD_MAX_SIZE 0x520104 + +#define mmDMA1_CORE_RD_ARCACHE 0x520108 + +#define mmDMA1_CORE_RD_ARUSER_31_11 0x520110 + +#define mmDMA1_CORE_RD_INFLIGHTS 0x520114 + +#define mmDMA1_CORE_WR_MAX_OUTSTAND 0x520120 + +#define mmDMA1_CORE_WR_MAX_AWID 0x520124 + +#define mmDMA1_CORE_WR_AWCACHE 0x520128 + +#define mmDMA1_CORE_WR_AWUSER_31_11 0x520130 + +#define mmDMA1_CORE_WR_INFLIGHTS 0x520134 + +#define mmDMA1_CORE_RD_RATE_LIM_CFG_0 0x520150 + +#define mmDMA1_CORE_RD_RATE_LIM_CFG_1 0x520154 + +#define mmDMA1_CORE_WR_RATE_LIM_CFG_0 0x520158 + +#define mmDMA1_CORE_WR_RATE_LIM_CFG_1 0x52015C + +#define mmDMA1_CORE_ERR_CFG 0x520160 + +#define mmDMA1_CORE_ERR_CAUSE 0x520164 + +#define mmDMA1_CORE_ERRMSG_ADDR_LO 0x520170 + +#define mmDMA1_CORE_ERRMSG_ADDR_HI 0x520174 + +#define mmDMA1_CORE_ERRMSG_WDATA 0x520178 + +#define mmDMA1_CORE_STS0 0x520190 + +#define mmDMA1_CORE_STS1 0x520194 + +#define mmDMA1_CORE_RD_DBGMEM_ADD 0x520200 + +#define mmDMA1_CORE_RD_DBGMEM_DATA_WR 0x520204 + +#define mmDMA1_CORE_RD_DBGMEM_DATA_RD 0x520208 + +#define mmDMA1_CORE_RD_DBGMEM_CTRL 0x52020C + +#define mmDMA1_CORE_RD_DBGMEM_RC 0x520210 + +#define mmDMA1_CORE_DBG_HBW_AXI_AR_CNT 0x520220 + +#define mmDMA1_CORE_DBG_HBW_AXI_AW_CNT 0x520224 + +#define mmDMA1_CORE_DBG_LBW_AXI_AW_CNT 0x520228 + +#define mmDMA1_CORE_DBG_DESC_CNT 0x52022C + +#define mmDMA1_CORE_DBG_STS 0x520230 + +#define mmDMA1_CORE_DBG_RD_DESC_ID 0x520234 + +#define mmDMA1_CORE_DBG_WR_DESC_ID 0x520238 + +#endif /* ASIC_REG_DMA1_CORE_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h 
b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h new file mode 100644 index 000000000..c3ef30084 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA1_QM_REGS_H_ +#define ASIC_REG_DMA1_QM_REGS_H_ + +/* + ***************************************** + * DMA1_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA1_QM_GLBL_CFG0 0x528000 + +#define mmDMA1_QM_GLBL_CFG1 0x528004 + +#define mmDMA1_QM_GLBL_PROT 0x528008 + +#define mmDMA1_QM_GLBL_ERR_CFG 0x52800C + +#define mmDMA1_QM_GLBL_SECURE_PROPS_0 0x528010 + +#define mmDMA1_QM_GLBL_SECURE_PROPS_1 0x528014 + +#define mmDMA1_QM_GLBL_SECURE_PROPS_2 0x528018 + +#define mmDMA1_QM_GLBL_SECURE_PROPS_3 0x52801C + +#define mmDMA1_QM_GLBL_SECURE_PROPS_4 0x528020 + +#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_0 0x528024 + +#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_1 0x528028 + +#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_2 0x52802C + +#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_3 0x528030 + +#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_4 0x528034 + +#define mmDMA1_QM_GLBL_STS0 0x528038 + +#define mmDMA1_QM_GLBL_STS1_0 0x528040 + +#define mmDMA1_QM_GLBL_STS1_1 0x528044 + +#define mmDMA1_QM_GLBL_STS1_2 0x528048 + +#define mmDMA1_QM_GLBL_STS1_3 0x52804C + +#define mmDMA1_QM_GLBL_STS1_4 0x528050 + +#define mmDMA1_QM_GLBL_MSG_EN_0 0x528054 + +#define mmDMA1_QM_GLBL_MSG_EN_1 0x528058 + +#define mmDMA1_QM_GLBL_MSG_EN_2 0x52805C + +#define mmDMA1_QM_GLBL_MSG_EN_3 0x528060 + +#define mmDMA1_QM_GLBL_MSG_EN_4 0x528068 + +#define mmDMA1_QM_PQ_BASE_LO_0 0x528070 + +#define mmDMA1_QM_PQ_BASE_LO_1 0x528074 + +#define mmDMA1_QM_PQ_BASE_LO_2 0x528078 + +#define mmDMA1_QM_PQ_BASE_LO_3 0x52807C + +#define mmDMA1_QM_PQ_BASE_HI_0 0x528080 + +#define mmDMA1_QM_PQ_BASE_HI_1 0x528084 + +#define mmDMA1_QM_PQ_BASE_HI_2 0x528088 + +#define mmDMA1_QM_PQ_BASE_HI_3 0x52808C + +#define mmDMA1_QM_PQ_SIZE_0 0x528090 + +#define mmDMA1_QM_PQ_SIZE_1 0x528094 + +#define mmDMA1_QM_PQ_SIZE_2 0x528098 + +#define mmDMA1_QM_PQ_SIZE_3 0x52809C + +#define mmDMA1_QM_PQ_PI_0 0x5280A0 + +#define mmDMA1_QM_PQ_PI_1 0x5280A4 + +#define mmDMA1_QM_PQ_PI_2 0x5280A8 + +#define mmDMA1_QM_PQ_PI_3 0x5280AC + +#define mmDMA1_QM_PQ_CI_0 0x5280B0 + +#define mmDMA1_QM_PQ_CI_1 0x5280B4 + +#define mmDMA1_QM_PQ_CI_2 0x5280B8 + +#define mmDMA1_QM_PQ_CI_3 0x5280BC + +#define mmDMA1_QM_PQ_CFG0_0 0x5280C0 + +#define mmDMA1_QM_PQ_CFG0_1 0x5280C4 + +#define mmDMA1_QM_PQ_CFG0_2 0x5280C8 + +#define mmDMA1_QM_PQ_CFG0_3 0x5280CC + +#define mmDMA1_QM_PQ_CFG1_0 0x5280D0 + +#define mmDMA1_QM_PQ_CFG1_1 0x5280D4 + +#define mmDMA1_QM_PQ_CFG1_2 0x5280D8 + +#define mmDMA1_QM_PQ_CFG1_3 0x5280DC + +#define mmDMA1_QM_PQ_ARUSER_31_11_0 0x5280E0 + +#define mmDMA1_QM_PQ_ARUSER_31_11_1 0x5280E4 + +#define mmDMA1_QM_PQ_ARUSER_31_11_2 0x5280E8 + +#define mmDMA1_QM_PQ_ARUSER_31_11_3 0x5280EC + +#define mmDMA1_QM_PQ_STS0_0 0x5280F0 + +#define mmDMA1_QM_PQ_STS0_1 0x5280F4 + +#define mmDMA1_QM_PQ_STS0_2 0x5280F8 + +#define mmDMA1_QM_PQ_STS0_3 0x5280FC + +#define mmDMA1_QM_PQ_STS1_0 0x528100 + +#define mmDMA1_QM_PQ_STS1_1 0x528104 + +#define mmDMA1_QM_PQ_STS1_2 0x528108 + +#define mmDMA1_QM_PQ_STS1_3 0x52810C + +#define mmDMA1_QM_CQ_CFG0_0 0x528110 + +#define mmDMA1_QM_CQ_CFG0_1 0x528114 + 
+#define mmDMA1_QM_CQ_CFG0_2 0x528118 + +#define mmDMA1_QM_CQ_CFG0_3 0x52811C + +#define mmDMA1_QM_CQ_CFG0_4 0x528120 + +#define mmDMA1_QM_CQ_CFG1_0 0x528124 + +#define mmDMA1_QM_CQ_CFG1_1 0x528128 + +#define mmDMA1_QM_CQ_CFG1_2 0x52812C + +#define mmDMA1_QM_CQ_CFG1_3 0x528130 + +#define mmDMA1_QM_CQ_CFG1_4 0x528134 + +#define mmDMA1_QM_CQ_ARUSER_31_11_0 0x528138 + +#define mmDMA1_QM_CQ_ARUSER_31_11_1 0x52813C + +#define mmDMA1_QM_CQ_ARUSER_31_11_2 0x528140 + +#define mmDMA1_QM_CQ_ARUSER_31_11_3 0x528144 + +#define mmDMA1_QM_CQ_ARUSER_31_11_4 0x528148 + +#define mmDMA1_QM_CQ_STS0_0 0x52814C + +#define mmDMA1_QM_CQ_STS0_1 0x528150 + +#define mmDMA1_QM_CQ_STS0_2 0x528154 + +#define mmDMA1_QM_CQ_STS0_3 0x528158 + +#define mmDMA1_QM_CQ_STS0_4 0x52815C + +#define mmDMA1_QM_CQ_STS1_0 0x528160 + +#define mmDMA1_QM_CQ_STS1_1 0x528164 + +#define mmDMA1_QM_CQ_STS1_2 0x528168 + +#define mmDMA1_QM_CQ_STS1_3 0x52816C + +#define mmDMA1_QM_CQ_STS1_4 0x528170 + +#define mmDMA1_QM_CQ_PTR_LO_0 0x528174 + +#define mmDMA1_QM_CQ_PTR_HI_0 0x528178 + +#define mmDMA1_QM_CQ_TSIZE_0 0x52817C + +#define mmDMA1_QM_CQ_CTL_0 0x528180 + +#define mmDMA1_QM_CQ_PTR_LO_1 0x528184 + +#define mmDMA1_QM_CQ_PTR_HI_1 0x528188 + +#define mmDMA1_QM_CQ_TSIZE_1 0x52818C + +#define mmDMA1_QM_CQ_CTL_1 0x528190 + +#define mmDMA1_QM_CQ_PTR_LO_2 0x528194 + +#define mmDMA1_QM_CQ_PTR_HI_2 0x528198 + +#define mmDMA1_QM_CQ_TSIZE_2 0x52819C + +#define mmDMA1_QM_CQ_CTL_2 0x5281A0 + +#define mmDMA1_QM_CQ_PTR_LO_3 0x5281A4 + +#define mmDMA1_QM_CQ_PTR_HI_3 0x5281A8 + +#define mmDMA1_QM_CQ_TSIZE_3 0x5281AC + +#define mmDMA1_QM_CQ_CTL_3 0x5281B0 + +#define mmDMA1_QM_CQ_PTR_LO_4 0x5281B4 + +#define mmDMA1_QM_CQ_PTR_HI_4 0x5281B8 + +#define mmDMA1_QM_CQ_TSIZE_4 0x5281BC + +#define mmDMA1_QM_CQ_CTL_4 0x5281C0 + +#define mmDMA1_QM_CQ_PTR_LO_STS_0 0x5281C4 + +#define mmDMA1_QM_CQ_PTR_LO_STS_1 0x5281C8 + +#define mmDMA1_QM_CQ_PTR_LO_STS_2 0x5281CC + +#define mmDMA1_QM_CQ_PTR_LO_STS_3 0x5281D0 + +#define mmDMA1_QM_CQ_PTR_LO_STS_4 0x5281D4 + +#define mmDMA1_QM_CQ_PTR_HI_STS_0 0x5281D8 + +#define mmDMA1_QM_CQ_PTR_HI_STS_1 0x5281DC + +#define mmDMA1_QM_CQ_PTR_HI_STS_2 0x5281E0 + +#define mmDMA1_QM_CQ_PTR_HI_STS_3 0x5281E4 + +#define mmDMA1_QM_CQ_PTR_HI_STS_4 0x5281E8 + +#define mmDMA1_QM_CQ_TSIZE_STS_0 0x5281EC + +#define mmDMA1_QM_CQ_TSIZE_STS_1 0x5281F0 + +#define mmDMA1_QM_CQ_TSIZE_STS_2 0x5281F4 + +#define mmDMA1_QM_CQ_TSIZE_STS_3 0x5281F8 + +#define mmDMA1_QM_CQ_TSIZE_STS_4 0x5281FC + +#define mmDMA1_QM_CQ_CTL_STS_0 0x528200 + +#define mmDMA1_QM_CQ_CTL_STS_1 0x528204 + +#define mmDMA1_QM_CQ_CTL_STS_2 0x528208 + +#define mmDMA1_QM_CQ_CTL_STS_3 0x52820C + +#define mmDMA1_QM_CQ_CTL_STS_4 0x528210 + +#define mmDMA1_QM_CQ_IFIFO_CNT_0 0x528214 + +#define mmDMA1_QM_CQ_IFIFO_CNT_1 0x528218 + +#define mmDMA1_QM_CQ_IFIFO_CNT_2 0x52821C + +#define mmDMA1_QM_CQ_IFIFO_CNT_3 0x528220 + +#define mmDMA1_QM_CQ_IFIFO_CNT_4 0x528224 + +#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_0 0x528228 + +#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_1 0x52822C + +#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_2 0x528230 + +#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_3 0x528234 + +#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_4 0x528238 + +#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_0 0x52823C + +#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_1 0x528240 + +#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_2 0x528244 + +#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_3 0x528248 + +#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_4 0x52824C + +#define mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_0 0x528250 + +#define mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_1 0x528254 + +#define 
mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_2 0x528258 + +#define mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_3 0x52825C + +#define mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_4 0x528260 + +#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_0 0x528264 + +#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_1 0x528268 + +#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_2 0x52826C + +#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_3 0x528270 + +#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_4 0x528274 + +#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_0 0x528278 + +#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_1 0x52827C + +#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 0x528280 + +#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_3 0x528284 + +#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_4 0x528288 + +#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_0 0x52828C + +#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_1 0x528290 + +#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_2 0x528294 + +#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_3 0x528298 + +#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_4 0x52829C + +#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_0 0x5282A0 + +#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_1 0x5282A4 + +#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_2 0x5282A8 + +#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_3 0x5282AC + +#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_4 0x5282B0 + +#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_0 0x5282B4 + +#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_1 0x5282B8 + +#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_2 0x5282BC + +#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_3 0x5282C0 + +#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_4 0x5282C4 + +#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_0 0x5282C8 + +#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_1 0x5282CC + +#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_2 0x5282D0 + +#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_3 0x5282D4 + +#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_4 0x5282D8 + +#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5282E0 + +#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5282E4 + +#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5282E8 + +#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5282EC + +#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5282F0 + +#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5282F4 + +#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5282F8 + +#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5282FC + +#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x528300 + +#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x528304 + +#define mmDMA1_QM_CP_FENCE0_RDATA_0 0x528308 + +#define mmDMA1_QM_CP_FENCE0_RDATA_1 0x52830C + +#define mmDMA1_QM_CP_FENCE0_RDATA_2 0x528310 + +#define mmDMA1_QM_CP_FENCE0_RDATA_3 0x528314 + +#define mmDMA1_QM_CP_FENCE0_RDATA_4 0x528318 + +#define mmDMA1_QM_CP_FENCE1_RDATA_0 0x52831C + +#define mmDMA1_QM_CP_FENCE1_RDATA_1 0x528320 + +#define mmDMA1_QM_CP_FENCE1_RDATA_2 0x528324 + +#define mmDMA1_QM_CP_FENCE1_RDATA_3 0x528328 + +#define mmDMA1_QM_CP_FENCE1_RDATA_4 0x52832C + +#define mmDMA1_QM_CP_FENCE2_RDATA_0 0x528330 + +#define mmDMA1_QM_CP_FENCE2_RDATA_1 0x528334 + +#define mmDMA1_QM_CP_FENCE2_RDATA_2 0x528338 + +#define mmDMA1_QM_CP_FENCE2_RDATA_3 0x52833C + +#define mmDMA1_QM_CP_FENCE2_RDATA_4 0x528340 + +#define mmDMA1_QM_CP_FENCE3_RDATA_0 0x528344 + +#define mmDMA1_QM_CP_FENCE3_RDATA_1 0x528348 + +#define mmDMA1_QM_CP_FENCE3_RDATA_2 0x52834C + +#define mmDMA1_QM_CP_FENCE3_RDATA_3 0x528350 + +#define mmDMA1_QM_CP_FENCE3_RDATA_4 0x528354 + +#define mmDMA1_QM_CP_FENCE0_CNT_0 0x528358 + +#define mmDMA1_QM_CP_FENCE0_CNT_1 0x52835C + +#define mmDMA1_QM_CP_FENCE0_CNT_2 0x528360 + +#define mmDMA1_QM_CP_FENCE0_CNT_3 0x528364 + +#define mmDMA1_QM_CP_FENCE0_CNT_4 0x528368 + +#define mmDMA1_QM_CP_FENCE1_CNT_0 
0x52836C + +#define mmDMA1_QM_CP_FENCE1_CNT_1 0x528370 + +#define mmDMA1_QM_CP_FENCE1_CNT_2 0x528374 + +#define mmDMA1_QM_CP_FENCE1_CNT_3 0x528378 + +#define mmDMA1_QM_CP_FENCE1_CNT_4 0x52837C + +#define mmDMA1_QM_CP_FENCE2_CNT_0 0x528380 + +#define mmDMA1_QM_CP_FENCE2_CNT_1 0x528384 + +#define mmDMA1_QM_CP_FENCE2_CNT_2 0x528388 + +#define mmDMA1_QM_CP_FENCE2_CNT_3 0x52838C + +#define mmDMA1_QM_CP_FENCE2_CNT_4 0x528390 + +#define mmDMA1_QM_CP_FENCE3_CNT_0 0x528394 + +#define mmDMA1_QM_CP_FENCE3_CNT_1 0x528398 + +#define mmDMA1_QM_CP_FENCE3_CNT_2 0x52839C + +#define mmDMA1_QM_CP_FENCE3_CNT_3 0x5283A0 + +#define mmDMA1_QM_CP_FENCE3_CNT_4 0x5283A4 + +#define mmDMA1_QM_CP_STS_0 0x5283A8 + +#define mmDMA1_QM_CP_STS_1 0x5283AC + +#define mmDMA1_QM_CP_STS_2 0x5283B0 + +#define mmDMA1_QM_CP_STS_3 0x5283B4 + +#define mmDMA1_QM_CP_STS_4 0x5283B8 + +#define mmDMA1_QM_CP_CURRENT_INST_LO_0 0x5283BC + +#define mmDMA1_QM_CP_CURRENT_INST_LO_1 0x5283C0 + +#define mmDMA1_QM_CP_CURRENT_INST_LO_2 0x5283C4 + +#define mmDMA1_QM_CP_CURRENT_INST_LO_3 0x5283C8 + +#define mmDMA1_QM_CP_CURRENT_INST_LO_4 0x5283CC + +#define mmDMA1_QM_CP_CURRENT_INST_HI_0 0x5283D0 + +#define mmDMA1_QM_CP_CURRENT_INST_HI_1 0x5283D4 + +#define mmDMA1_QM_CP_CURRENT_INST_HI_2 0x5283D8 + +#define mmDMA1_QM_CP_CURRENT_INST_HI_3 0x5283DC + +#define mmDMA1_QM_CP_CURRENT_INST_HI_4 0x5283E0 + +#define mmDMA1_QM_CP_BARRIER_CFG_0 0x5283F4 + +#define mmDMA1_QM_CP_BARRIER_CFG_1 0x5283F8 + +#define mmDMA1_QM_CP_BARRIER_CFG_2 0x5283FC + +#define mmDMA1_QM_CP_BARRIER_CFG_3 0x528400 + +#define mmDMA1_QM_CP_BARRIER_CFG_4 0x528404 + +#define mmDMA1_QM_CP_DBG_0_0 0x528408 + +#define mmDMA1_QM_CP_DBG_0_1 0x52840C + +#define mmDMA1_QM_CP_DBG_0_2 0x528410 + +#define mmDMA1_QM_CP_DBG_0_3 0x528414 + +#define mmDMA1_QM_CP_DBG_0_4 0x528418 + +#define mmDMA1_QM_CP_ARUSER_31_11_0 0x52841C + +#define mmDMA1_QM_CP_ARUSER_31_11_1 0x528420 + +#define mmDMA1_QM_CP_ARUSER_31_11_2 0x528424 + +#define mmDMA1_QM_CP_ARUSER_31_11_3 0x528428 + +#define mmDMA1_QM_CP_ARUSER_31_11_4 0x52842C + +#define mmDMA1_QM_CP_AWUSER_31_11_0 0x528430 + +#define mmDMA1_QM_CP_AWUSER_31_11_1 0x528434 + +#define mmDMA1_QM_CP_AWUSER_31_11_2 0x528438 + +#define mmDMA1_QM_CP_AWUSER_31_11_3 0x52843C + +#define mmDMA1_QM_CP_AWUSER_31_11_4 0x528440 + +#define mmDMA1_QM_ARB_CFG_0 0x528A00 + +#define mmDMA1_QM_ARB_CHOISE_Q_PUSH 0x528A04 + +#define mmDMA1_QM_ARB_WRR_WEIGHT_0 0x528A08 + +#define mmDMA1_QM_ARB_WRR_WEIGHT_1 0x528A0C + +#define mmDMA1_QM_ARB_WRR_WEIGHT_2 0x528A10 + +#define mmDMA1_QM_ARB_WRR_WEIGHT_3 0x528A14 + +#define mmDMA1_QM_ARB_CFG_1 0x528A18 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_0 0x528A20 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_1 0x528A24 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_2 0x528A28 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_3 0x528A2C + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_4 0x528A30 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_5 0x528A34 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_6 0x528A38 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_7 0x528A3C + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_8 0x528A40 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_9 0x528A44 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_10 0x528A48 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_11 0x528A4C + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_12 0x528A50 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_13 0x528A54 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_14 0x528A58 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_15 0x528A5C + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_16 0x528A60 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_17 0x528A64 + +#define 
mmDMA1_QM_ARB_MST_AVAIL_CRED_18 0x528A68 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_19 0x528A6C + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_20 0x528A70 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_21 0x528A74 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_22 0x528A78 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_23 0x528A7C + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_24 0x528A80 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_25 0x528A84 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_26 0x528A88 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_27 0x528A8C + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_28 0x528A90 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_29 0x528A94 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_30 0x528A98 + +#define mmDMA1_QM_ARB_MST_AVAIL_CRED_31 0x528A9C + +#define mmDMA1_QM_ARB_MST_CRED_INC 0x528AA0 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x528AA4 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x528AA8 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x528AAC + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x528AB0 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x528AB4 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x528AB8 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x528ABC + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x528AC0 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x528AC4 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x528AC8 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x528ACC + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x528AD0 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x528AD4 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x528AD8 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x528ADC + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x528AE0 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x528AE4 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x528AE8 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x528AEC + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x528AF0 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x528AF4 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x528AF8 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x528AFC + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x528B00 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x528B04 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x528B08 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x528B0C + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x528B10 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x528B14 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x528B18 + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x528B1C + +#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x528B20 + +#define mmDMA1_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x528B28 + +#define mmDMA1_QM_ARB_MST_SLAVE_EN 0x528B2C + +#define mmDMA1_QM_ARB_MST_QUIET_PER 0x528B34 + +#define mmDMA1_QM_ARB_SLV_CHOISE_WDT 0x528B38 + +#define mmDMA1_QM_ARB_SLV_ID 0x528B3C + +#define mmDMA1_QM_ARB_MSG_MAX_INFLIGHT 0x528B44 + +#define mmDMA1_QM_ARB_MSG_AWUSER_31_11 0x528B48 + +#define mmDMA1_QM_ARB_MSG_AWUSER_SEC_PROP 0x528B4C + +#define mmDMA1_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x528B50 + +#define mmDMA1_QM_ARB_BASE_LO 0x528B54 + +#define mmDMA1_QM_ARB_BASE_HI 0x528B58 + +#define mmDMA1_QM_ARB_STATE_STS 0x528B80 + +#define mmDMA1_QM_ARB_CHOISE_FULLNESS_STS 0x528B84 + +#define mmDMA1_QM_ARB_MSG_STS 0x528B88 + +#define mmDMA1_QM_ARB_SLV_CHOISE_Q_HEAD 0x528B8C + +#define mmDMA1_QM_ARB_ERR_CAUSE 0x528B9C + +#define mmDMA1_QM_ARB_ERR_MSG_EN 0x528BA0 + +#define mmDMA1_QM_ARB_ERR_STS_DRP 0x528BA8 + +#define mmDMA1_QM_ARB_MST_CRED_STS_0 0x528BB0 + +#define 
mmDMA1_QM_ARB_MST_CRED_STS_1 0x528BB4 + +#define mmDMA1_QM_ARB_MST_CRED_STS_2 0x528BB8 + +#define mmDMA1_QM_ARB_MST_CRED_STS_3 0x528BBC + +#define mmDMA1_QM_ARB_MST_CRED_STS_4 0x528BC0 + +#define mmDMA1_QM_ARB_MST_CRED_STS_5 0x528BC4 + +#define mmDMA1_QM_ARB_MST_CRED_STS_6 0x528BC8 + +#define mmDMA1_QM_ARB_MST_CRED_STS_7 0x528BCC + +#define mmDMA1_QM_ARB_MST_CRED_STS_8 0x528BD0 + +#define mmDMA1_QM_ARB_MST_CRED_STS_9 0x528BD4 + +#define mmDMA1_QM_ARB_MST_CRED_STS_10 0x528BD8 + +#define mmDMA1_QM_ARB_MST_CRED_STS_11 0x528BDC + +#define mmDMA1_QM_ARB_MST_CRED_STS_12 0x528BE0 + +#define mmDMA1_QM_ARB_MST_CRED_STS_13 0x528BE4 + +#define mmDMA1_QM_ARB_MST_CRED_STS_14 0x528BE8 + +#define mmDMA1_QM_ARB_MST_CRED_STS_15 0x528BEC + +#define mmDMA1_QM_ARB_MST_CRED_STS_16 0x528BF0 + +#define mmDMA1_QM_ARB_MST_CRED_STS_17 0x528BF4 + +#define mmDMA1_QM_ARB_MST_CRED_STS_18 0x528BF8 + +#define mmDMA1_QM_ARB_MST_CRED_STS_19 0x528BFC + +#define mmDMA1_QM_ARB_MST_CRED_STS_20 0x528C00 + +#define mmDMA1_QM_ARB_MST_CRED_STS_21 0x528C04 + +#define mmDMA1_QM_ARB_MST_CRED_STS_22 0x528C08 + +#define mmDMA1_QM_ARB_MST_CRED_STS_23 0x528C0C + +#define mmDMA1_QM_ARB_MST_CRED_STS_24 0x528C10 + +#define mmDMA1_QM_ARB_MST_CRED_STS_25 0x528C14 + +#define mmDMA1_QM_ARB_MST_CRED_STS_26 0x528C18 + +#define mmDMA1_QM_ARB_MST_CRED_STS_27 0x528C1C + +#define mmDMA1_QM_ARB_MST_CRED_STS_28 0x528C20 + +#define mmDMA1_QM_ARB_MST_CRED_STS_29 0x528C24 + +#define mmDMA1_QM_ARB_MST_CRED_STS_30 0x528C28 + +#define mmDMA1_QM_ARB_MST_CRED_STS_31 0x528C2C + +#define mmDMA1_QM_CGM_CFG 0x528C70 + +#define mmDMA1_QM_CGM_STS 0x528C74 + +#define mmDMA1_QM_CGM_CFG1 0x528C78 + +#define mmDMA1_QM_LOCAL_RANGE_BASE 0x528C80 + +#define mmDMA1_QM_LOCAL_RANGE_SIZE 0x528C84 + +#define mmDMA1_QM_CSMR_STRICT_PRIO_CFG 0x528C90 + +#define mmDMA1_QM_HBW_RD_RATE_LIM_CFG_1 0x528C94 + +#define mmDMA1_QM_LBW_WR_RATE_LIM_CFG_0 0x528C98 + +#define mmDMA1_QM_LBW_WR_RATE_LIM_CFG_1 0x528C9C + +#define mmDMA1_QM_HBW_RD_RATE_LIM_CFG_0 0x528CA0 + +#define mmDMA1_QM_GLBL_AXCACHE 0x528CA4 + +#define mmDMA1_QM_IND_GW_APB_CFG 0x528CB0 + +#define mmDMA1_QM_IND_GW_APB_WDATA 0x528CB4 + +#define mmDMA1_QM_IND_GW_APB_RDATA 0x528CB8 + +#define mmDMA1_QM_IND_GW_APB_STATUS 0x528CBC + +#define mmDMA1_QM_GLBL_ERR_ADDR_LO 0x528CD0 + +#define mmDMA1_QM_GLBL_ERR_ADDR_HI 0x528CD4 + +#define mmDMA1_QM_GLBL_ERR_WDATA 0x528CD8 + +#define mmDMA1_QM_GLBL_MEM_INIT_BUSY 0x528D00 + +#endif /* ASIC_REG_DMA1_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h new file mode 100644 index 000000000..a42862cd5 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA2_CORE_REGS_H_ +#define ASIC_REG_DMA2_CORE_REGS_H_ + +/* + ***************************************** + * DMA2_CORE (Prototype: DMA_CORE) + ***************************************** + */ + +#define mmDMA2_CORE_CFG_0 0x540000 + +#define mmDMA2_CORE_CFG_1 0x540004 + +#define mmDMA2_CORE_LBW_MAX_OUTSTAND 0x540008 + +#define mmDMA2_CORE_SRC_BASE_LO 0x540014 + +#define mmDMA2_CORE_SRC_BASE_HI 0x540018 + +#define mmDMA2_CORE_DST_BASE_LO 0x54001C + +#define mmDMA2_CORE_DST_BASE_HI 0x540020 + +#define mmDMA2_CORE_SRC_TSIZE_1 0x54002C + +#define mmDMA2_CORE_SRC_STRIDE_1 0x540030 + +#define mmDMA2_CORE_SRC_TSIZE_2 0x540034 + +#define mmDMA2_CORE_SRC_STRIDE_2 0x540038 + +#define mmDMA2_CORE_SRC_TSIZE_3 0x54003C + +#define mmDMA2_CORE_SRC_STRIDE_3 0x540040 + +#define mmDMA2_CORE_SRC_TSIZE_4 0x540044 + +#define mmDMA2_CORE_SRC_STRIDE_4 0x540048 + +#define mmDMA2_CORE_SRC_TSIZE_0 0x54004C + +#define mmDMA2_CORE_DST_TSIZE_1 0x540054 + +#define mmDMA2_CORE_DST_STRIDE_1 0x540058 + +#define mmDMA2_CORE_DST_TSIZE_2 0x54005C + +#define mmDMA2_CORE_DST_STRIDE_2 0x540060 + +#define mmDMA2_CORE_DST_TSIZE_3 0x540064 + +#define mmDMA2_CORE_DST_STRIDE_3 0x540068 + +#define mmDMA2_CORE_DST_TSIZE_4 0x54006C + +#define mmDMA2_CORE_DST_STRIDE_4 0x540070 + +#define mmDMA2_CORE_DST_TSIZE_0 0x540074 + +#define mmDMA2_CORE_COMMIT 0x540078 + +#define mmDMA2_CORE_WR_COMP_WDATA 0x54007C + +#define mmDMA2_CORE_WR_COMP_ADDR_LO 0x540080 + +#define mmDMA2_CORE_WR_COMP_ADDR_HI 0x540084 + +#define mmDMA2_CORE_WR_COMP_AWUSER_31_11 0x540088 + +#define mmDMA2_CORE_TE_NUMROWS 0x540094 + +#define mmDMA2_CORE_PROT 0x5400B8 + +#define mmDMA2_CORE_SECURE_PROPS 0x5400F0 + +#define mmDMA2_CORE_NON_SECURE_PROPS 0x5400F4 + +#define mmDMA2_CORE_RD_MAX_OUTSTAND 0x540100 + +#define mmDMA2_CORE_RD_MAX_SIZE 0x540104 + +#define mmDMA2_CORE_RD_ARCACHE 0x540108 + +#define mmDMA2_CORE_RD_ARUSER_31_11 0x540110 + +#define mmDMA2_CORE_RD_INFLIGHTS 0x540114 + +#define mmDMA2_CORE_WR_MAX_OUTSTAND 0x540120 + +#define mmDMA2_CORE_WR_MAX_AWID 0x540124 + +#define mmDMA2_CORE_WR_AWCACHE 0x540128 + +#define mmDMA2_CORE_WR_AWUSER_31_11 0x540130 + +#define mmDMA2_CORE_WR_INFLIGHTS 0x540134 + +#define mmDMA2_CORE_RD_RATE_LIM_CFG_0 0x540150 + +#define mmDMA2_CORE_RD_RATE_LIM_CFG_1 0x540154 + +#define mmDMA2_CORE_WR_RATE_LIM_CFG_0 0x540158 + +#define mmDMA2_CORE_WR_RATE_LIM_CFG_1 0x54015C + +#define mmDMA2_CORE_ERR_CFG 0x540160 + +#define mmDMA2_CORE_ERR_CAUSE 0x540164 + +#define mmDMA2_CORE_ERRMSG_ADDR_LO 0x540170 + +#define mmDMA2_CORE_ERRMSG_ADDR_HI 0x540174 + +#define mmDMA2_CORE_ERRMSG_WDATA 0x540178 + +#define mmDMA2_CORE_STS0 0x540190 + +#define mmDMA2_CORE_STS1 0x540194 + +#define mmDMA2_CORE_RD_DBGMEM_ADD 0x540200 + +#define mmDMA2_CORE_RD_DBGMEM_DATA_WR 0x540204 + +#define mmDMA2_CORE_RD_DBGMEM_DATA_RD 0x540208 + +#define mmDMA2_CORE_RD_DBGMEM_CTRL 0x54020C + +#define mmDMA2_CORE_RD_DBGMEM_RC 0x540210 + +#define mmDMA2_CORE_DBG_HBW_AXI_AR_CNT 0x540220 + +#define mmDMA2_CORE_DBG_HBW_AXI_AW_CNT 0x540224 + +#define mmDMA2_CORE_DBG_LBW_AXI_AW_CNT 0x540228 + +#define mmDMA2_CORE_DBG_DESC_CNT 0x54022C + +#define mmDMA2_CORE_DBG_STS 0x540230 + +#define mmDMA2_CORE_DBG_RD_DESC_ID 0x540234 + +#define mmDMA2_CORE_DBG_WR_DESC_ID 0x540238 + +#endif /* ASIC_REG_DMA2_CORE_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h 
b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h new file mode 100644 index 000000000..8c4d4e016 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA2_QM_REGS_H_ +#define ASIC_REG_DMA2_QM_REGS_H_ + +/* + ***************************************** + * DMA2_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA2_QM_GLBL_CFG0 0x548000 + +#define mmDMA2_QM_GLBL_CFG1 0x548004 + +#define mmDMA2_QM_GLBL_PROT 0x548008 + +#define mmDMA2_QM_GLBL_ERR_CFG 0x54800C + +#define mmDMA2_QM_GLBL_SECURE_PROPS_0 0x548010 + +#define mmDMA2_QM_GLBL_SECURE_PROPS_1 0x548014 + +#define mmDMA2_QM_GLBL_SECURE_PROPS_2 0x548018 + +#define mmDMA2_QM_GLBL_SECURE_PROPS_3 0x54801C + +#define mmDMA2_QM_GLBL_SECURE_PROPS_4 0x548020 + +#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_0 0x548024 + +#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_1 0x548028 + +#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_2 0x54802C + +#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_3 0x548030 + +#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_4 0x548034 + +#define mmDMA2_QM_GLBL_STS0 0x548038 + +#define mmDMA2_QM_GLBL_STS1_0 0x548040 + +#define mmDMA2_QM_GLBL_STS1_1 0x548044 + +#define mmDMA2_QM_GLBL_STS1_2 0x548048 + +#define mmDMA2_QM_GLBL_STS1_3 0x54804C + +#define mmDMA2_QM_GLBL_STS1_4 0x548050 + +#define mmDMA2_QM_GLBL_MSG_EN_0 0x548054 + +#define mmDMA2_QM_GLBL_MSG_EN_1 0x548058 + +#define mmDMA2_QM_GLBL_MSG_EN_2 0x54805C + +#define mmDMA2_QM_GLBL_MSG_EN_3 0x548060 + +#define mmDMA2_QM_GLBL_MSG_EN_4 0x548068 + +#define mmDMA2_QM_PQ_BASE_LO_0 0x548070 + +#define mmDMA2_QM_PQ_BASE_LO_1 0x548074 + +#define mmDMA2_QM_PQ_BASE_LO_2 0x548078 + +#define mmDMA2_QM_PQ_BASE_LO_3 0x54807C + +#define mmDMA2_QM_PQ_BASE_HI_0 0x548080 + +#define mmDMA2_QM_PQ_BASE_HI_1 0x548084 + +#define mmDMA2_QM_PQ_BASE_HI_2 0x548088 + +#define mmDMA2_QM_PQ_BASE_HI_3 0x54808C + +#define mmDMA2_QM_PQ_SIZE_0 0x548090 + +#define mmDMA2_QM_PQ_SIZE_1 0x548094 + +#define mmDMA2_QM_PQ_SIZE_2 0x548098 + +#define mmDMA2_QM_PQ_SIZE_3 0x54809C + +#define mmDMA2_QM_PQ_PI_0 0x5480A0 + +#define mmDMA2_QM_PQ_PI_1 0x5480A4 + +#define mmDMA2_QM_PQ_PI_2 0x5480A8 + +#define mmDMA2_QM_PQ_PI_3 0x5480AC + +#define mmDMA2_QM_PQ_CI_0 0x5480B0 + +#define mmDMA2_QM_PQ_CI_1 0x5480B4 + +#define mmDMA2_QM_PQ_CI_2 0x5480B8 + +#define mmDMA2_QM_PQ_CI_3 0x5480BC + +#define mmDMA2_QM_PQ_CFG0_0 0x5480C0 + +#define mmDMA2_QM_PQ_CFG0_1 0x5480C4 + +#define mmDMA2_QM_PQ_CFG0_2 0x5480C8 + +#define mmDMA2_QM_PQ_CFG0_3 0x5480CC + +#define mmDMA2_QM_PQ_CFG1_0 0x5480D0 + +#define mmDMA2_QM_PQ_CFG1_1 0x5480D4 + +#define mmDMA2_QM_PQ_CFG1_2 0x5480D8 + +#define mmDMA2_QM_PQ_CFG1_3 0x5480DC + +#define mmDMA2_QM_PQ_ARUSER_31_11_0 0x5480E0 + +#define mmDMA2_QM_PQ_ARUSER_31_11_1 0x5480E4 + +#define mmDMA2_QM_PQ_ARUSER_31_11_2 0x5480E8 + +#define mmDMA2_QM_PQ_ARUSER_31_11_3 0x5480EC + +#define mmDMA2_QM_PQ_STS0_0 0x5480F0 + +#define mmDMA2_QM_PQ_STS0_1 0x5480F4 + +#define mmDMA2_QM_PQ_STS0_2 0x5480F8 + +#define mmDMA2_QM_PQ_STS0_3 0x5480FC + +#define mmDMA2_QM_PQ_STS1_0 0x548100 + +#define mmDMA2_QM_PQ_STS1_1 0x548104 + +#define mmDMA2_QM_PQ_STS1_2 0x548108 + +#define mmDMA2_QM_PQ_STS1_3 0x54810C + +#define mmDMA2_QM_CQ_CFG0_0 0x548110 + +#define mmDMA2_QM_CQ_CFG0_1 0x548114 + 
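+/*
+ * Layout note (illustrative comment, not part of the generated file):
+ * the QMAN instances repeat at a fixed 0x20000 stride -- DMA0_QM at
+ * 0x508000, DMA1_QM at 0x528000, DMA2_QM at 0x548000 -- so a per-engine
+ * register address can be derived from the DMA0 definition, e.g.
+ * (assuming a hypothetical dma_id engine index):
+ *
+ *	#define DMA_QM_STRIDE	0x20000
+ *	u64 qm_reg = mmDMA0_QM_CQ_CFG0_1 + dma_id * DMA_QM_STRIDE;
+ */
+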
+#define mmDMA2_QM_CQ_CFG0_2 0x548118 + +#define mmDMA2_QM_CQ_CFG0_3 0x54811C + +#define mmDMA2_QM_CQ_CFG0_4 0x548120 + +#define mmDMA2_QM_CQ_CFG1_0 0x548124 + +#define mmDMA2_QM_CQ_CFG1_1 0x548128 + +#define mmDMA2_QM_CQ_CFG1_2 0x54812C + +#define mmDMA2_QM_CQ_CFG1_3 0x548130 + +#define mmDMA2_QM_CQ_CFG1_4 0x548134 + +#define mmDMA2_QM_CQ_ARUSER_31_11_0 0x548138 + +#define mmDMA2_QM_CQ_ARUSER_31_11_1 0x54813C + +#define mmDMA2_QM_CQ_ARUSER_31_11_2 0x548140 + +#define mmDMA2_QM_CQ_ARUSER_31_11_3 0x548144 + +#define mmDMA2_QM_CQ_ARUSER_31_11_4 0x548148 + +#define mmDMA2_QM_CQ_STS0_0 0x54814C + +#define mmDMA2_QM_CQ_STS0_1 0x548150 + +#define mmDMA2_QM_CQ_STS0_2 0x548154 + +#define mmDMA2_QM_CQ_STS0_3 0x548158 + +#define mmDMA2_QM_CQ_STS0_4 0x54815C + +#define mmDMA2_QM_CQ_STS1_0 0x548160 + +#define mmDMA2_QM_CQ_STS1_1 0x548164 + +#define mmDMA2_QM_CQ_STS1_2 0x548168 + +#define mmDMA2_QM_CQ_STS1_3 0x54816C + +#define mmDMA2_QM_CQ_STS1_4 0x548170 + +#define mmDMA2_QM_CQ_PTR_LO_0 0x548174 + +#define mmDMA2_QM_CQ_PTR_HI_0 0x548178 + +#define mmDMA2_QM_CQ_TSIZE_0 0x54817C + +#define mmDMA2_QM_CQ_CTL_0 0x548180 + +#define mmDMA2_QM_CQ_PTR_LO_1 0x548184 + +#define mmDMA2_QM_CQ_PTR_HI_1 0x548188 + +#define mmDMA2_QM_CQ_TSIZE_1 0x54818C + +#define mmDMA2_QM_CQ_CTL_1 0x548190 + +#define mmDMA2_QM_CQ_PTR_LO_2 0x548194 + +#define mmDMA2_QM_CQ_PTR_HI_2 0x548198 + +#define mmDMA2_QM_CQ_TSIZE_2 0x54819C + +#define mmDMA2_QM_CQ_CTL_2 0x5481A0 + +#define mmDMA2_QM_CQ_PTR_LO_3 0x5481A4 + +#define mmDMA2_QM_CQ_PTR_HI_3 0x5481A8 + +#define mmDMA2_QM_CQ_TSIZE_3 0x5481AC + +#define mmDMA2_QM_CQ_CTL_3 0x5481B0 + +#define mmDMA2_QM_CQ_PTR_LO_4 0x5481B4 + +#define mmDMA2_QM_CQ_PTR_HI_4 0x5481B8 + +#define mmDMA2_QM_CQ_TSIZE_4 0x5481BC + +#define mmDMA2_QM_CQ_CTL_4 0x5481C0 + +#define mmDMA2_QM_CQ_PTR_LO_STS_0 0x5481C4 + +#define mmDMA2_QM_CQ_PTR_LO_STS_1 0x5481C8 + +#define mmDMA2_QM_CQ_PTR_LO_STS_2 0x5481CC + +#define mmDMA2_QM_CQ_PTR_LO_STS_3 0x5481D0 + +#define mmDMA2_QM_CQ_PTR_LO_STS_4 0x5481D4 + +#define mmDMA2_QM_CQ_PTR_HI_STS_0 0x5481D8 + +#define mmDMA2_QM_CQ_PTR_HI_STS_1 0x5481DC + +#define mmDMA2_QM_CQ_PTR_HI_STS_2 0x5481E0 + +#define mmDMA2_QM_CQ_PTR_HI_STS_3 0x5481E4 + +#define mmDMA2_QM_CQ_PTR_HI_STS_4 0x5481E8 + +#define mmDMA2_QM_CQ_TSIZE_STS_0 0x5481EC + +#define mmDMA2_QM_CQ_TSIZE_STS_1 0x5481F0 + +#define mmDMA2_QM_CQ_TSIZE_STS_2 0x5481F4 + +#define mmDMA2_QM_CQ_TSIZE_STS_3 0x5481F8 + +#define mmDMA2_QM_CQ_TSIZE_STS_4 0x5481FC + +#define mmDMA2_QM_CQ_CTL_STS_0 0x548200 + +#define mmDMA2_QM_CQ_CTL_STS_1 0x548204 + +#define mmDMA2_QM_CQ_CTL_STS_2 0x548208 + +#define mmDMA2_QM_CQ_CTL_STS_3 0x54820C + +#define mmDMA2_QM_CQ_CTL_STS_4 0x548210 + +#define mmDMA2_QM_CQ_IFIFO_CNT_0 0x548214 + +#define mmDMA2_QM_CQ_IFIFO_CNT_1 0x548218 + +#define mmDMA2_QM_CQ_IFIFO_CNT_2 0x54821C + +#define mmDMA2_QM_CQ_IFIFO_CNT_3 0x548220 + +#define mmDMA2_QM_CQ_IFIFO_CNT_4 0x548224 + +#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_0 0x548228 + +#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_1 0x54822C + +#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_2 0x548230 + +#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_3 0x548234 + +#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_4 0x548238 + +#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_0 0x54823C + +#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_1 0x548240 + +#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_2 0x548244 + +#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_3 0x548248 + +#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_4 0x54824C + +#define mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_0 0x548250 + +#define mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_1 0x548254 + +#define 
mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_2 0x548258 + +#define mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_3 0x54825C + +#define mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_4 0x548260 + +#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_0 0x548264 + +#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_1 0x548268 + +#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_2 0x54826C + +#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_3 0x548270 + +#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_4 0x548274 + +#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_0 0x548278 + +#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_1 0x54827C + +#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 0x548280 + +#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_3 0x548284 + +#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_4 0x548288 + +#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_0 0x54828C + +#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_1 0x548290 + +#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_2 0x548294 + +#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_3 0x548298 + +#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_4 0x54829C + +#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_0 0x5482A0 + +#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_1 0x5482A4 + +#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_2 0x5482A8 + +#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_3 0x5482AC + +#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_4 0x5482B0 + +#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_0 0x5482B4 + +#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_1 0x5482B8 + +#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_2 0x5482BC + +#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_3 0x5482C0 + +#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_4 0x5482C4 + +#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_0 0x5482C8 + +#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_1 0x5482CC + +#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_2 0x5482D0 + +#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_3 0x5482D4 + +#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_4 0x5482D8 + +#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5482E0 + +#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5482E4 + +#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5482E8 + +#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5482EC + +#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5482F0 + +#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5482F4 + +#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5482F8 + +#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5482FC + +#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x548300 + +#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x548304 + +#define mmDMA2_QM_CP_FENCE0_RDATA_0 0x548308 + +#define mmDMA2_QM_CP_FENCE0_RDATA_1 0x54830C + +#define mmDMA2_QM_CP_FENCE0_RDATA_2 0x548310 + +#define mmDMA2_QM_CP_FENCE0_RDATA_3 0x548314 + +#define mmDMA2_QM_CP_FENCE0_RDATA_4 0x548318 + +#define mmDMA2_QM_CP_FENCE1_RDATA_0 0x54831C + +#define mmDMA2_QM_CP_FENCE1_RDATA_1 0x548320 + +#define mmDMA2_QM_CP_FENCE1_RDATA_2 0x548324 + +#define mmDMA2_QM_CP_FENCE1_RDATA_3 0x548328 + +#define mmDMA2_QM_CP_FENCE1_RDATA_4 0x54832C + +#define mmDMA2_QM_CP_FENCE2_RDATA_0 0x548330 + +#define mmDMA2_QM_CP_FENCE2_RDATA_1 0x548334 + +#define mmDMA2_QM_CP_FENCE2_RDATA_2 0x548338 + +#define mmDMA2_QM_CP_FENCE2_RDATA_3 0x54833C + +#define mmDMA2_QM_CP_FENCE2_RDATA_4 0x548340 + +#define mmDMA2_QM_CP_FENCE3_RDATA_0 0x548344 + +#define mmDMA2_QM_CP_FENCE3_RDATA_1 0x548348 + +#define mmDMA2_QM_CP_FENCE3_RDATA_2 0x54834C + +#define mmDMA2_QM_CP_FENCE3_RDATA_3 0x548350 + +#define mmDMA2_QM_CP_FENCE3_RDATA_4 0x548354 + +#define mmDMA2_QM_CP_FENCE0_CNT_0 0x548358 + +#define mmDMA2_QM_CP_FENCE0_CNT_1 0x54835C + +#define mmDMA2_QM_CP_FENCE0_CNT_2 0x548360 + +#define mmDMA2_QM_CP_FENCE0_CNT_3 0x548364 + +#define mmDMA2_QM_CP_FENCE0_CNT_4 0x548368 + +#define mmDMA2_QM_CP_FENCE1_CNT_0 
0x54836C + +#define mmDMA2_QM_CP_FENCE1_CNT_1 0x548370 + +#define mmDMA2_QM_CP_FENCE1_CNT_2 0x548374 + +#define mmDMA2_QM_CP_FENCE1_CNT_3 0x548378 + +#define mmDMA2_QM_CP_FENCE1_CNT_4 0x54837C + +#define mmDMA2_QM_CP_FENCE2_CNT_0 0x548380 + +#define mmDMA2_QM_CP_FENCE2_CNT_1 0x548384 + +#define mmDMA2_QM_CP_FENCE2_CNT_2 0x548388 + +#define mmDMA2_QM_CP_FENCE2_CNT_3 0x54838C + +#define mmDMA2_QM_CP_FENCE2_CNT_4 0x548390 + +#define mmDMA2_QM_CP_FENCE3_CNT_0 0x548394 + +#define mmDMA2_QM_CP_FENCE3_CNT_1 0x548398 + +#define mmDMA2_QM_CP_FENCE3_CNT_2 0x54839C + +#define mmDMA2_QM_CP_FENCE3_CNT_3 0x5483A0 + +#define mmDMA2_QM_CP_FENCE3_CNT_4 0x5483A4 + +#define mmDMA2_QM_CP_STS_0 0x5483A8 + +#define mmDMA2_QM_CP_STS_1 0x5483AC + +#define mmDMA2_QM_CP_STS_2 0x5483B0 + +#define mmDMA2_QM_CP_STS_3 0x5483B4 + +#define mmDMA2_QM_CP_STS_4 0x5483B8 + +#define mmDMA2_QM_CP_CURRENT_INST_LO_0 0x5483BC + +#define mmDMA2_QM_CP_CURRENT_INST_LO_1 0x5483C0 + +#define mmDMA2_QM_CP_CURRENT_INST_LO_2 0x5483C4 + +#define mmDMA2_QM_CP_CURRENT_INST_LO_3 0x5483C8 + +#define mmDMA2_QM_CP_CURRENT_INST_LO_4 0x5483CC + +#define mmDMA2_QM_CP_CURRENT_INST_HI_0 0x5483D0 + +#define mmDMA2_QM_CP_CURRENT_INST_HI_1 0x5483D4 + +#define mmDMA2_QM_CP_CURRENT_INST_HI_2 0x5483D8 + +#define mmDMA2_QM_CP_CURRENT_INST_HI_3 0x5483DC + +#define mmDMA2_QM_CP_CURRENT_INST_HI_4 0x5483E0 + +#define mmDMA2_QM_CP_BARRIER_CFG_0 0x5483F4 + +#define mmDMA2_QM_CP_BARRIER_CFG_1 0x5483F8 + +#define mmDMA2_QM_CP_BARRIER_CFG_2 0x5483FC + +#define mmDMA2_QM_CP_BARRIER_CFG_3 0x548400 + +#define mmDMA2_QM_CP_BARRIER_CFG_4 0x548404 + +#define mmDMA2_QM_CP_DBG_0_0 0x548408 + +#define mmDMA2_QM_CP_DBG_0_1 0x54840C + +#define mmDMA2_QM_CP_DBG_0_2 0x548410 + +#define mmDMA2_QM_CP_DBG_0_3 0x548414 + +#define mmDMA2_QM_CP_DBG_0_4 0x548418 + +#define mmDMA2_QM_CP_ARUSER_31_11_0 0x54841C + +#define mmDMA2_QM_CP_ARUSER_31_11_1 0x548420 + +#define mmDMA2_QM_CP_ARUSER_31_11_2 0x548424 + +#define mmDMA2_QM_CP_ARUSER_31_11_3 0x548428 + +#define mmDMA2_QM_CP_ARUSER_31_11_4 0x54842C + +#define mmDMA2_QM_CP_AWUSER_31_11_0 0x548430 + +#define mmDMA2_QM_CP_AWUSER_31_11_1 0x548434 + +#define mmDMA2_QM_CP_AWUSER_31_11_2 0x548438 + +#define mmDMA2_QM_CP_AWUSER_31_11_3 0x54843C + +#define mmDMA2_QM_CP_AWUSER_31_11_4 0x548440 + +#define mmDMA2_QM_ARB_CFG_0 0x548A00 + +#define mmDMA2_QM_ARB_CHOISE_Q_PUSH 0x548A04 + +#define mmDMA2_QM_ARB_WRR_WEIGHT_0 0x548A08 + +#define mmDMA2_QM_ARB_WRR_WEIGHT_1 0x548A0C + +#define mmDMA2_QM_ARB_WRR_WEIGHT_2 0x548A10 + +#define mmDMA2_QM_ARB_WRR_WEIGHT_3 0x548A14 + +#define mmDMA2_QM_ARB_CFG_1 0x548A18 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_0 0x548A20 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_1 0x548A24 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_2 0x548A28 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_3 0x548A2C + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_4 0x548A30 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_5 0x548A34 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_6 0x548A38 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_7 0x548A3C + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_8 0x548A40 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_9 0x548A44 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_10 0x548A48 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_11 0x548A4C + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_12 0x548A50 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_13 0x548A54 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_14 0x548A58 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_15 0x548A5C + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_16 0x548A60 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_17 0x548A64 + +#define 
mmDMA2_QM_ARB_MST_AVAIL_CRED_18 0x548A68 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_19 0x548A6C + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_20 0x548A70 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_21 0x548A74 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_22 0x548A78 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_23 0x548A7C + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_24 0x548A80 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_25 0x548A84 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_26 0x548A88 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_27 0x548A8C + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_28 0x548A90 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_29 0x548A94 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_30 0x548A98 + +#define mmDMA2_QM_ARB_MST_AVAIL_CRED_31 0x548A9C + +#define mmDMA2_QM_ARB_MST_CRED_INC 0x548AA0 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x548AA4 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x548AA8 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x548AAC + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x548AB0 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x548AB4 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x548AB8 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x548ABC + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x548AC0 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x548AC4 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x548AC8 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x548ACC + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x548AD0 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x548AD4 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x548AD8 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x548ADC + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x548AE0 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x548AE4 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x548AE8 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x548AEC + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x548AF0 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x548AF4 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x548AF8 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x548AFC + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x548B00 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x548B04 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x548B08 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x548B0C + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x548B10 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x548B14 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x548B18 + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x548B1C + +#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x548B20 + +#define mmDMA2_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x548B28 + +#define mmDMA2_QM_ARB_MST_SLAVE_EN 0x548B2C + +#define mmDMA2_QM_ARB_MST_QUIET_PER 0x548B34 + +#define mmDMA2_QM_ARB_SLV_CHOISE_WDT 0x548B38 + +#define mmDMA2_QM_ARB_SLV_ID 0x548B3C + +#define mmDMA2_QM_ARB_MSG_MAX_INFLIGHT 0x548B44 + +#define mmDMA2_QM_ARB_MSG_AWUSER_31_11 0x548B48 + +#define mmDMA2_QM_ARB_MSG_AWUSER_SEC_PROP 0x548B4C + +#define mmDMA2_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x548B50 + +#define mmDMA2_QM_ARB_BASE_LO 0x548B54 + +#define mmDMA2_QM_ARB_BASE_HI 0x548B58 + +#define mmDMA2_QM_ARB_STATE_STS 0x548B80 + +#define mmDMA2_QM_ARB_CHOISE_FULLNESS_STS 0x548B84 + +#define mmDMA2_QM_ARB_MSG_STS 0x548B88 + +#define mmDMA2_QM_ARB_SLV_CHOISE_Q_HEAD 0x548B8C + +#define mmDMA2_QM_ARB_ERR_CAUSE 0x548B9C + +#define mmDMA2_QM_ARB_ERR_MSG_EN 0x548BA0 + +#define mmDMA2_QM_ARB_ERR_STS_DRP 0x548BA8 + +#define mmDMA2_QM_ARB_MST_CRED_STS_0 0x548BB0 + +#define 
mmDMA2_QM_ARB_MST_CRED_STS_1 0x548BB4 + +#define mmDMA2_QM_ARB_MST_CRED_STS_2 0x548BB8 + +#define mmDMA2_QM_ARB_MST_CRED_STS_3 0x548BBC + +#define mmDMA2_QM_ARB_MST_CRED_STS_4 0x548BC0 + +#define mmDMA2_QM_ARB_MST_CRED_STS_5 0x548BC4 + +#define mmDMA2_QM_ARB_MST_CRED_STS_6 0x548BC8 + +#define mmDMA2_QM_ARB_MST_CRED_STS_7 0x548BCC + +#define mmDMA2_QM_ARB_MST_CRED_STS_8 0x548BD0 + +#define mmDMA2_QM_ARB_MST_CRED_STS_9 0x548BD4 + +#define mmDMA2_QM_ARB_MST_CRED_STS_10 0x548BD8 + +#define mmDMA2_QM_ARB_MST_CRED_STS_11 0x548BDC + +#define mmDMA2_QM_ARB_MST_CRED_STS_12 0x548BE0 + +#define mmDMA2_QM_ARB_MST_CRED_STS_13 0x548BE4 + +#define mmDMA2_QM_ARB_MST_CRED_STS_14 0x548BE8 + +#define mmDMA2_QM_ARB_MST_CRED_STS_15 0x548BEC + +#define mmDMA2_QM_ARB_MST_CRED_STS_16 0x548BF0 + +#define mmDMA2_QM_ARB_MST_CRED_STS_17 0x548BF4 + +#define mmDMA2_QM_ARB_MST_CRED_STS_18 0x548BF8 + +#define mmDMA2_QM_ARB_MST_CRED_STS_19 0x548BFC + +#define mmDMA2_QM_ARB_MST_CRED_STS_20 0x548C00 + +#define mmDMA2_QM_ARB_MST_CRED_STS_21 0x548C04 + +#define mmDMA2_QM_ARB_MST_CRED_STS_22 0x548C08 + +#define mmDMA2_QM_ARB_MST_CRED_STS_23 0x548C0C + +#define mmDMA2_QM_ARB_MST_CRED_STS_24 0x548C10 + +#define mmDMA2_QM_ARB_MST_CRED_STS_25 0x548C14 + +#define mmDMA2_QM_ARB_MST_CRED_STS_26 0x548C18 + +#define mmDMA2_QM_ARB_MST_CRED_STS_27 0x548C1C + +#define mmDMA2_QM_ARB_MST_CRED_STS_28 0x548C20 + +#define mmDMA2_QM_ARB_MST_CRED_STS_29 0x548C24 + +#define mmDMA2_QM_ARB_MST_CRED_STS_30 0x548C28 + +#define mmDMA2_QM_ARB_MST_CRED_STS_31 0x548C2C + +#define mmDMA2_QM_CGM_CFG 0x548C70 + +#define mmDMA2_QM_CGM_STS 0x548C74 + +#define mmDMA2_QM_CGM_CFG1 0x548C78 + +#define mmDMA2_QM_LOCAL_RANGE_BASE 0x548C80 + +#define mmDMA2_QM_LOCAL_RANGE_SIZE 0x548C84 + +#define mmDMA2_QM_CSMR_STRICT_PRIO_CFG 0x548C90 + +#define mmDMA2_QM_HBW_RD_RATE_LIM_CFG_1 0x548C94 + +#define mmDMA2_QM_LBW_WR_RATE_LIM_CFG_0 0x548C98 + +#define mmDMA2_QM_LBW_WR_RATE_LIM_CFG_1 0x548C9C + +#define mmDMA2_QM_HBW_RD_RATE_LIM_CFG_0 0x548CA0 + +#define mmDMA2_QM_GLBL_AXCACHE 0x548CA4 + +#define mmDMA2_QM_IND_GW_APB_CFG 0x548CB0 + +#define mmDMA2_QM_IND_GW_APB_WDATA 0x548CB4 + +#define mmDMA2_QM_IND_GW_APB_RDATA 0x548CB8 + +#define mmDMA2_QM_IND_GW_APB_STATUS 0x548CBC + +#define mmDMA2_QM_GLBL_ERR_ADDR_LO 0x548CD0 + +#define mmDMA2_QM_GLBL_ERR_ADDR_HI 0x548CD4 + +#define mmDMA2_QM_GLBL_ERR_WDATA 0x548CD8 + +#define mmDMA2_QM_GLBL_MEM_INIT_BUSY 0x548D00 + +#endif /* ASIC_REG_DMA2_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h new file mode 100644 index 000000000..fb145f416 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA3_CORE_REGS_H_ +#define ASIC_REG_DMA3_CORE_REGS_H_ + +/* + ***************************************** + * DMA3_CORE (Prototype: DMA_CORE) + ***************************************** + */ + +#define mmDMA3_CORE_CFG_0 0x560000 + +#define mmDMA3_CORE_CFG_1 0x560004 + +#define mmDMA3_CORE_LBW_MAX_OUTSTAND 0x560008 + +#define mmDMA3_CORE_SRC_BASE_LO 0x560014 + +#define mmDMA3_CORE_SRC_BASE_HI 0x560018 + +#define mmDMA3_CORE_DST_BASE_LO 0x56001C + +#define mmDMA3_CORE_DST_BASE_HI 0x560020 + +#define mmDMA3_CORE_SRC_TSIZE_1 0x56002C + +#define mmDMA3_CORE_SRC_STRIDE_1 0x560030 + +#define mmDMA3_CORE_SRC_TSIZE_2 0x560034 + +#define mmDMA3_CORE_SRC_STRIDE_2 0x560038 + +#define mmDMA3_CORE_SRC_TSIZE_3 0x56003C + +#define mmDMA3_CORE_SRC_STRIDE_3 0x560040 + +#define mmDMA3_CORE_SRC_TSIZE_4 0x560044 + +#define mmDMA3_CORE_SRC_STRIDE_4 0x560048 + +#define mmDMA3_CORE_SRC_TSIZE_0 0x56004C + +#define mmDMA3_CORE_DST_TSIZE_1 0x560054 + +#define mmDMA3_CORE_DST_STRIDE_1 0x560058 + +#define mmDMA3_CORE_DST_TSIZE_2 0x56005C + +#define mmDMA3_CORE_DST_STRIDE_2 0x560060 + +#define mmDMA3_CORE_DST_TSIZE_3 0x560064 + +#define mmDMA3_CORE_DST_STRIDE_3 0x560068 + +#define mmDMA3_CORE_DST_TSIZE_4 0x56006C + +#define mmDMA3_CORE_DST_STRIDE_4 0x560070 + +#define mmDMA3_CORE_DST_TSIZE_0 0x560074 + +#define mmDMA3_CORE_COMMIT 0x560078 + +#define mmDMA3_CORE_WR_COMP_WDATA 0x56007C + +#define mmDMA3_CORE_WR_COMP_ADDR_LO 0x560080 + +#define mmDMA3_CORE_WR_COMP_ADDR_HI 0x560084 + +#define mmDMA3_CORE_WR_COMP_AWUSER_31_11 0x560088 + +#define mmDMA3_CORE_TE_NUMROWS 0x560094 + +#define mmDMA3_CORE_PROT 0x5600B8 + +#define mmDMA3_CORE_SECURE_PROPS 0x5600F0 + +#define mmDMA3_CORE_NON_SECURE_PROPS 0x5600F4 + +#define mmDMA3_CORE_RD_MAX_OUTSTAND 0x560100 + +#define mmDMA3_CORE_RD_MAX_SIZE 0x560104 + +#define mmDMA3_CORE_RD_ARCACHE 0x560108 + +#define mmDMA3_CORE_RD_ARUSER_31_11 0x560110 + +#define mmDMA3_CORE_RD_INFLIGHTS 0x560114 + +#define mmDMA3_CORE_WR_MAX_OUTSTAND 0x560120 + +#define mmDMA3_CORE_WR_MAX_AWID 0x560124 + +#define mmDMA3_CORE_WR_AWCACHE 0x560128 + +#define mmDMA3_CORE_WR_AWUSER_31_11 0x560130 + +#define mmDMA3_CORE_WR_INFLIGHTS 0x560134 + +#define mmDMA3_CORE_RD_RATE_LIM_CFG_0 0x560150 + +#define mmDMA3_CORE_RD_RATE_LIM_CFG_1 0x560154 + +#define mmDMA3_CORE_WR_RATE_LIM_CFG_0 0x560158 + +#define mmDMA3_CORE_WR_RATE_LIM_CFG_1 0x56015C + +#define mmDMA3_CORE_ERR_CFG 0x560160 + +#define mmDMA3_CORE_ERR_CAUSE 0x560164 + +#define mmDMA3_CORE_ERRMSG_ADDR_LO 0x560170 + +#define mmDMA3_CORE_ERRMSG_ADDR_HI 0x560174 + +#define mmDMA3_CORE_ERRMSG_WDATA 0x560178 + +#define mmDMA3_CORE_STS0 0x560190 + +#define mmDMA3_CORE_STS1 0x560194 + +#define mmDMA3_CORE_RD_DBGMEM_ADD 0x560200 + +#define mmDMA3_CORE_RD_DBGMEM_DATA_WR 0x560204 + +#define mmDMA3_CORE_RD_DBGMEM_DATA_RD 0x560208 + +#define mmDMA3_CORE_RD_DBGMEM_CTRL 0x56020C + +#define mmDMA3_CORE_RD_DBGMEM_RC 0x560210 + +#define mmDMA3_CORE_DBG_HBW_AXI_AR_CNT 0x560220 + +#define mmDMA3_CORE_DBG_HBW_AXI_AW_CNT 0x560224 + +#define mmDMA3_CORE_DBG_LBW_AXI_AW_CNT 0x560228 + +#define mmDMA3_CORE_DBG_DESC_CNT 0x56022C + +#define mmDMA3_CORE_DBG_STS 0x560230 + +#define mmDMA3_CORE_DBG_RD_DESC_ID 0x560234 + +#define mmDMA3_CORE_DBG_WR_DESC_ID 0x560238 + +#endif /* ASIC_REG_DMA3_CORE_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h 
b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h new file mode 100644 index 000000000..a4b461ca3 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA3_QM_REGS_H_ +#define ASIC_REG_DMA3_QM_REGS_H_ + +/* + ***************************************** + * DMA3_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA3_QM_GLBL_CFG0 0x568000 + +#define mmDMA3_QM_GLBL_CFG1 0x568004 + +#define mmDMA3_QM_GLBL_PROT 0x568008 + +#define mmDMA3_QM_GLBL_ERR_CFG 0x56800C + +#define mmDMA3_QM_GLBL_SECURE_PROPS_0 0x568010 + +#define mmDMA3_QM_GLBL_SECURE_PROPS_1 0x568014 + +#define mmDMA3_QM_GLBL_SECURE_PROPS_2 0x568018 + +#define mmDMA3_QM_GLBL_SECURE_PROPS_3 0x56801C + +#define mmDMA3_QM_GLBL_SECURE_PROPS_4 0x568020 + +#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_0 0x568024 + +#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_1 0x568028 + +#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_2 0x56802C + +#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_3 0x568030 + +#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_4 0x568034 + +#define mmDMA3_QM_GLBL_STS0 0x568038 + +#define mmDMA3_QM_GLBL_STS1_0 0x568040 + +#define mmDMA3_QM_GLBL_STS1_1 0x568044 + +#define mmDMA3_QM_GLBL_STS1_2 0x568048 + +#define mmDMA3_QM_GLBL_STS1_3 0x56804C + +#define mmDMA3_QM_GLBL_STS1_4 0x568050 + +#define mmDMA3_QM_GLBL_MSG_EN_0 0x568054 + +#define mmDMA3_QM_GLBL_MSG_EN_1 0x568058 + +#define mmDMA3_QM_GLBL_MSG_EN_2 0x56805C + +#define mmDMA3_QM_GLBL_MSG_EN_3 0x568060 + +#define mmDMA3_QM_GLBL_MSG_EN_4 0x568068 + +#define mmDMA3_QM_PQ_BASE_LO_0 0x568070 + +#define mmDMA3_QM_PQ_BASE_LO_1 0x568074 + +#define mmDMA3_QM_PQ_BASE_LO_2 0x568078 + +#define mmDMA3_QM_PQ_BASE_LO_3 0x56807C + +#define mmDMA3_QM_PQ_BASE_HI_0 0x568080 + +#define mmDMA3_QM_PQ_BASE_HI_1 0x568084 + +#define mmDMA3_QM_PQ_BASE_HI_2 0x568088 + +#define mmDMA3_QM_PQ_BASE_HI_3 0x56808C + +#define mmDMA3_QM_PQ_SIZE_0 0x568090 + +#define mmDMA3_QM_PQ_SIZE_1 0x568094 + +#define mmDMA3_QM_PQ_SIZE_2 0x568098 + +#define mmDMA3_QM_PQ_SIZE_3 0x56809C + +#define mmDMA3_QM_PQ_PI_0 0x5680A0 + +#define mmDMA3_QM_PQ_PI_1 0x5680A4 + +#define mmDMA3_QM_PQ_PI_2 0x5680A8 + +#define mmDMA3_QM_PQ_PI_3 0x5680AC + +#define mmDMA3_QM_PQ_CI_0 0x5680B0 + +#define mmDMA3_QM_PQ_CI_1 0x5680B4 + +#define mmDMA3_QM_PQ_CI_2 0x5680B8 + +#define mmDMA3_QM_PQ_CI_3 0x5680BC + +#define mmDMA3_QM_PQ_CFG0_0 0x5680C0 + +#define mmDMA3_QM_PQ_CFG0_1 0x5680C4 + +#define mmDMA3_QM_PQ_CFG0_2 0x5680C8 + +#define mmDMA3_QM_PQ_CFG0_3 0x5680CC + +#define mmDMA3_QM_PQ_CFG1_0 0x5680D0 + +#define mmDMA3_QM_PQ_CFG1_1 0x5680D4 + +#define mmDMA3_QM_PQ_CFG1_2 0x5680D8 + +#define mmDMA3_QM_PQ_CFG1_3 0x5680DC + +#define mmDMA3_QM_PQ_ARUSER_31_11_0 0x5680E0 + +#define mmDMA3_QM_PQ_ARUSER_31_11_1 0x5680E4 + +#define mmDMA3_QM_PQ_ARUSER_31_11_2 0x5680E8 + +#define mmDMA3_QM_PQ_ARUSER_31_11_3 0x5680EC + +#define mmDMA3_QM_PQ_STS0_0 0x5680F0 + +#define mmDMA3_QM_PQ_STS0_1 0x5680F4 + +#define mmDMA3_QM_PQ_STS0_2 0x5680F8 + +#define mmDMA3_QM_PQ_STS0_3 0x5680FC + +#define mmDMA3_QM_PQ_STS1_0 0x568100 + +#define mmDMA3_QM_PQ_STS1_1 0x568104 + +#define mmDMA3_QM_PQ_STS1_2 0x568108 + +#define mmDMA3_QM_PQ_STS1_3 0x56810C + +#define mmDMA3_QM_CQ_CFG0_0 0x568110 + +#define mmDMA3_QM_CQ_CFG0_1 0x568114 + 
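[Aside: comparing the DMA2 and DMA3 blocks shows the generator's layout rule for the engines listed in this patch: each DMA engine occupies a fixed 0x20000 window (DMA2_CORE at 0x540000, DMA3_CORE at 0x560000, DMA4_CORE at 0x580000), with the QMAN registers at engine base + 0x8000. If that stride holds, an engine-indexed accessor can stand in for the per-engine names; a hedged sketch, with macro names that are illustrative rather than taken from these headers:]

	/* Observed gap between the DMA2/DMA3/DMA4 register blocks. */
	#define GAUDI_DMA_ENGINE_STRIDE	0x20000

	/* Map a DMA2-relative register define to engine n (n >= 2 as shown here). */
	#define DMA_QM_REG(n, dma2_reg) \
		((dma2_reg) + ((n) - 2) * GAUDI_DMA_ENGINE_STRIDE)

	/* Example: DMA_QM_REG(4, mmDMA2_QM_GLBL_CFG0) == 0x588000 == mmDMA4_QM_GLBL_CFG0 */

[The register listing continues below.]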
+#define mmDMA3_QM_CQ_CFG0_2 0x568118 + +#define mmDMA3_QM_CQ_CFG0_3 0x56811C + +#define mmDMA3_QM_CQ_CFG0_4 0x568120 + +#define mmDMA3_QM_CQ_CFG1_0 0x568124 + +#define mmDMA3_QM_CQ_CFG1_1 0x568128 + +#define mmDMA3_QM_CQ_CFG1_2 0x56812C + +#define mmDMA3_QM_CQ_CFG1_3 0x568130 + +#define mmDMA3_QM_CQ_CFG1_4 0x568134 + +#define mmDMA3_QM_CQ_ARUSER_31_11_0 0x568138 + +#define mmDMA3_QM_CQ_ARUSER_31_11_1 0x56813C + +#define mmDMA3_QM_CQ_ARUSER_31_11_2 0x568140 + +#define mmDMA3_QM_CQ_ARUSER_31_11_3 0x568144 + +#define mmDMA3_QM_CQ_ARUSER_31_11_4 0x568148 + +#define mmDMA3_QM_CQ_STS0_0 0x56814C + +#define mmDMA3_QM_CQ_STS0_1 0x568150 + +#define mmDMA3_QM_CQ_STS0_2 0x568154 + +#define mmDMA3_QM_CQ_STS0_3 0x568158 + +#define mmDMA3_QM_CQ_STS0_4 0x56815C + +#define mmDMA3_QM_CQ_STS1_0 0x568160 + +#define mmDMA3_QM_CQ_STS1_1 0x568164 + +#define mmDMA3_QM_CQ_STS1_2 0x568168 + +#define mmDMA3_QM_CQ_STS1_3 0x56816C + +#define mmDMA3_QM_CQ_STS1_4 0x568170 + +#define mmDMA3_QM_CQ_PTR_LO_0 0x568174 + +#define mmDMA3_QM_CQ_PTR_HI_0 0x568178 + +#define mmDMA3_QM_CQ_TSIZE_0 0x56817C + +#define mmDMA3_QM_CQ_CTL_0 0x568180 + +#define mmDMA3_QM_CQ_PTR_LO_1 0x568184 + +#define mmDMA3_QM_CQ_PTR_HI_1 0x568188 + +#define mmDMA3_QM_CQ_TSIZE_1 0x56818C + +#define mmDMA3_QM_CQ_CTL_1 0x568190 + +#define mmDMA3_QM_CQ_PTR_LO_2 0x568194 + +#define mmDMA3_QM_CQ_PTR_HI_2 0x568198 + +#define mmDMA3_QM_CQ_TSIZE_2 0x56819C + +#define mmDMA3_QM_CQ_CTL_2 0x5681A0 + +#define mmDMA3_QM_CQ_PTR_LO_3 0x5681A4 + +#define mmDMA3_QM_CQ_PTR_HI_3 0x5681A8 + +#define mmDMA3_QM_CQ_TSIZE_3 0x5681AC + +#define mmDMA3_QM_CQ_CTL_3 0x5681B0 + +#define mmDMA3_QM_CQ_PTR_LO_4 0x5681B4 + +#define mmDMA3_QM_CQ_PTR_HI_4 0x5681B8 + +#define mmDMA3_QM_CQ_TSIZE_4 0x5681BC + +#define mmDMA3_QM_CQ_CTL_4 0x5681C0 + +#define mmDMA3_QM_CQ_PTR_LO_STS_0 0x5681C4 + +#define mmDMA3_QM_CQ_PTR_LO_STS_1 0x5681C8 + +#define mmDMA3_QM_CQ_PTR_LO_STS_2 0x5681CC + +#define mmDMA3_QM_CQ_PTR_LO_STS_3 0x5681D0 + +#define mmDMA3_QM_CQ_PTR_LO_STS_4 0x5681D4 + +#define mmDMA3_QM_CQ_PTR_HI_STS_0 0x5681D8 + +#define mmDMA3_QM_CQ_PTR_HI_STS_1 0x5681DC + +#define mmDMA3_QM_CQ_PTR_HI_STS_2 0x5681E0 + +#define mmDMA3_QM_CQ_PTR_HI_STS_3 0x5681E4 + +#define mmDMA3_QM_CQ_PTR_HI_STS_4 0x5681E8 + +#define mmDMA3_QM_CQ_TSIZE_STS_0 0x5681EC + +#define mmDMA3_QM_CQ_TSIZE_STS_1 0x5681F0 + +#define mmDMA3_QM_CQ_TSIZE_STS_2 0x5681F4 + +#define mmDMA3_QM_CQ_TSIZE_STS_3 0x5681F8 + +#define mmDMA3_QM_CQ_TSIZE_STS_4 0x5681FC + +#define mmDMA3_QM_CQ_CTL_STS_0 0x568200 + +#define mmDMA3_QM_CQ_CTL_STS_1 0x568204 + +#define mmDMA3_QM_CQ_CTL_STS_2 0x568208 + +#define mmDMA3_QM_CQ_CTL_STS_3 0x56820C + +#define mmDMA3_QM_CQ_CTL_STS_4 0x568210 + +#define mmDMA3_QM_CQ_IFIFO_CNT_0 0x568214 + +#define mmDMA3_QM_CQ_IFIFO_CNT_1 0x568218 + +#define mmDMA3_QM_CQ_IFIFO_CNT_2 0x56821C + +#define mmDMA3_QM_CQ_IFIFO_CNT_3 0x568220 + +#define mmDMA3_QM_CQ_IFIFO_CNT_4 0x568224 + +#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_0 0x568228 + +#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_1 0x56822C + +#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_2 0x568230 + +#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_3 0x568234 + +#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_4 0x568238 + +#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_0 0x56823C + +#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_1 0x568240 + +#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_2 0x568244 + +#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_3 0x568248 + +#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_4 0x56824C + +#define mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_0 0x568250 + +#define mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_1 0x568254 + +#define 
mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_2 0x568258 + +#define mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_3 0x56825C + +#define mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_4 0x568260 + +#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_0 0x568264 + +#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_1 0x568268 + +#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_2 0x56826C + +#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_3 0x568270 + +#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_4 0x568274 + +#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_0 0x568278 + +#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_1 0x56827C + +#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 0x568280 + +#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_3 0x568284 + +#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_4 0x568288 + +#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_0 0x56828C + +#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_1 0x568290 + +#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_2 0x568294 + +#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_3 0x568298 + +#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_4 0x56829C + +#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_0 0x5682A0 + +#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_1 0x5682A4 + +#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_2 0x5682A8 + +#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_3 0x5682AC + +#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_4 0x5682B0 + +#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_0 0x5682B4 + +#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_1 0x5682B8 + +#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_2 0x5682BC + +#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_3 0x5682C0 + +#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_4 0x5682C4 + +#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_0 0x5682C8 + +#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_1 0x5682CC + +#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_2 0x5682D0 + +#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_3 0x5682D4 + +#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_4 0x5682D8 + +#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5682E0 + +#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5682E4 + +#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5682E8 + +#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5682EC + +#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5682F0 + +#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5682F4 + +#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5682F8 + +#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5682FC + +#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x568300 + +#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x568304 + +#define mmDMA3_QM_CP_FENCE0_RDATA_0 0x568308 + +#define mmDMA3_QM_CP_FENCE0_RDATA_1 0x56830C + +#define mmDMA3_QM_CP_FENCE0_RDATA_2 0x568310 + +#define mmDMA3_QM_CP_FENCE0_RDATA_3 0x568314 + +#define mmDMA3_QM_CP_FENCE0_RDATA_4 0x568318 + +#define mmDMA3_QM_CP_FENCE1_RDATA_0 0x56831C + +#define mmDMA3_QM_CP_FENCE1_RDATA_1 0x568320 + +#define mmDMA3_QM_CP_FENCE1_RDATA_2 0x568324 + +#define mmDMA3_QM_CP_FENCE1_RDATA_3 0x568328 + +#define mmDMA3_QM_CP_FENCE1_RDATA_4 0x56832C + +#define mmDMA3_QM_CP_FENCE2_RDATA_0 0x568330 + +#define mmDMA3_QM_CP_FENCE2_RDATA_1 0x568334 + +#define mmDMA3_QM_CP_FENCE2_RDATA_2 0x568338 + +#define mmDMA3_QM_CP_FENCE2_RDATA_3 0x56833C + +#define mmDMA3_QM_CP_FENCE2_RDATA_4 0x568340 + +#define mmDMA3_QM_CP_FENCE3_RDATA_0 0x568344 + +#define mmDMA3_QM_CP_FENCE3_RDATA_1 0x568348 + +#define mmDMA3_QM_CP_FENCE3_RDATA_2 0x56834C + +#define mmDMA3_QM_CP_FENCE3_RDATA_3 0x568350 + +#define mmDMA3_QM_CP_FENCE3_RDATA_4 0x568354 + +#define mmDMA3_QM_CP_FENCE0_CNT_0 0x568358 + +#define mmDMA3_QM_CP_FENCE0_CNT_1 0x56835C + +#define mmDMA3_QM_CP_FENCE0_CNT_2 0x568360 + +#define mmDMA3_QM_CP_FENCE0_CNT_3 0x568364 + +#define mmDMA3_QM_CP_FENCE0_CNT_4 0x568368 + +#define mmDMA3_QM_CP_FENCE1_CNT_0 
0x56836C + +#define mmDMA3_QM_CP_FENCE1_CNT_1 0x568370 + +#define mmDMA3_QM_CP_FENCE1_CNT_2 0x568374 + +#define mmDMA3_QM_CP_FENCE1_CNT_3 0x568378 + +#define mmDMA3_QM_CP_FENCE1_CNT_4 0x56837C + +#define mmDMA3_QM_CP_FENCE2_CNT_0 0x568380 + +#define mmDMA3_QM_CP_FENCE2_CNT_1 0x568384 + +#define mmDMA3_QM_CP_FENCE2_CNT_2 0x568388 + +#define mmDMA3_QM_CP_FENCE2_CNT_3 0x56838C + +#define mmDMA3_QM_CP_FENCE2_CNT_4 0x568390 + +#define mmDMA3_QM_CP_FENCE3_CNT_0 0x568394 + +#define mmDMA3_QM_CP_FENCE3_CNT_1 0x568398 + +#define mmDMA3_QM_CP_FENCE3_CNT_2 0x56839C + +#define mmDMA3_QM_CP_FENCE3_CNT_3 0x5683A0 + +#define mmDMA3_QM_CP_FENCE3_CNT_4 0x5683A4 + +#define mmDMA3_QM_CP_STS_0 0x5683A8 + +#define mmDMA3_QM_CP_STS_1 0x5683AC + +#define mmDMA3_QM_CP_STS_2 0x5683B0 + +#define mmDMA3_QM_CP_STS_3 0x5683B4 + +#define mmDMA3_QM_CP_STS_4 0x5683B8 + +#define mmDMA3_QM_CP_CURRENT_INST_LO_0 0x5683BC + +#define mmDMA3_QM_CP_CURRENT_INST_LO_1 0x5683C0 + +#define mmDMA3_QM_CP_CURRENT_INST_LO_2 0x5683C4 + +#define mmDMA3_QM_CP_CURRENT_INST_LO_3 0x5683C8 + +#define mmDMA3_QM_CP_CURRENT_INST_LO_4 0x5683CC + +#define mmDMA3_QM_CP_CURRENT_INST_HI_0 0x5683D0 + +#define mmDMA3_QM_CP_CURRENT_INST_HI_1 0x5683D4 + +#define mmDMA3_QM_CP_CURRENT_INST_HI_2 0x5683D8 + +#define mmDMA3_QM_CP_CURRENT_INST_HI_3 0x5683DC + +#define mmDMA3_QM_CP_CURRENT_INST_HI_4 0x5683E0 + +#define mmDMA3_QM_CP_BARRIER_CFG_0 0x5683F4 + +#define mmDMA3_QM_CP_BARRIER_CFG_1 0x5683F8 + +#define mmDMA3_QM_CP_BARRIER_CFG_2 0x5683FC + +#define mmDMA3_QM_CP_BARRIER_CFG_3 0x568400 + +#define mmDMA3_QM_CP_BARRIER_CFG_4 0x568404 + +#define mmDMA3_QM_CP_DBG_0_0 0x568408 + +#define mmDMA3_QM_CP_DBG_0_1 0x56840C + +#define mmDMA3_QM_CP_DBG_0_2 0x568410 + +#define mmDMA3_QM_CP_DBG_0_3 0x568414 + +#define mmDMA3_QM_CP_DBG_0_4 0x568418 + +#define mmDMA3_QM_CP_ARUSER_31_11_0 0x56841C + +#define mmDMA3_QM_CP_ARUSER_31_11_1 0x568420 + +#define mmDMA3_QM_CP_ARUSER_31_11_2 0x568424 + +#define mmDMA3_QM_CP_ARUSER_31_11_3 0x568428 + +#define mmDMA3_QM_CP_ARUSER_31_11_4 0x56842C + +#define mmDMA3_QM_CP_AWUSER_31_11_0 0x568430 + +#define mmDMA3_QM_CP_AWUSER_31_11_1 0x568434 + +#define mmDMA3_QM_CP_AWUSER_31_11_2 0x568438 + +#define mmDMA3_QM_CP_AWUSER_31_11_3 0x56843C + +#define mmDMA3_QM_CP_AWUSER_31_11_4 0x568440 + +#define mmDMA3_QM_ARB_CFG_0 0x568A00 + +#define mmDMA3_QM_ARB_CHOISE_Q_PUSH 0x568A04 + +#define mmDMA3_QM_ARB_WRR_WEIGHT_0 0x568A08 + +#define mmDMA3_QM_ARB_WRR_WEIGHT_1 0x568A0C + +#define mmDMA3_QM_ARB_WRR_WEIGHT_2 0x568A10 + +#define mmDMA3_QM_ARB_WRR_WEIGHT_3 0x568A14 + +#define mmDMA3_QM_ARB_CFG_1 0x568A18 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_0 0x568A20 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_1 0x568A24 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_2 0x568A28 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_3 0x568A2C + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_4 0x568A30 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_5 0x568A34 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_6 0x568A38 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_7 0x568A3C + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_8 0x568A40 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_9 0x568A44 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_10 0x568A48 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_11 0x568A4C + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_12 0x568A50 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_13 0x568A54 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_14 0x568A58 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_15 0x568A5C + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_16 0x568A60 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_17 0x568A64 + +#define 
mmDMA3_QM_ARB_MST_AVAIL_CRED_18 0x568A68 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_19 0x568A6C + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_20 0x568A70 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_21 0x568A74 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_22 0x568A78 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_23 0x568A7C + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_24 0x568A80 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_25 0x568A84 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_26 0x568A88 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_27 0x568A8C + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_28 0x568A90 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_29 0x568A94 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_30 0x568A98 + +#define mmDMA3_QM_ARB_MST_AVAIL_CRED_31 0x568A9C + +#define mmDMA3_QM_ARB_MST_CRED_INC 0x568AA0 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x568AA4 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x568AA8 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x568AAC + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x568AB0 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x568AB4 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x568AB8 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x568ABC + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x568AC0 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x568AC4 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x568AC8 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x568ACC + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x568AD0 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x568AD4 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x568AD8 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x568ADC + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x568AE0 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x568AE4 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x568AE8 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x568AEC + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x568AF0 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x568AF4 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x568AF8 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x568AFC + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x568B00 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x568B04 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x568B08 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x568B0C + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x568B10 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x568B14 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x568B18 + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x568B1C + +#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x568B20 + +#define mmDMA3_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x568B28 + +#define mmDMA3_QM_ARB_MST_SLAVE_EN 0x568B2C + +#define mmDMA3_QM_ARB_MST_QUIET_PER 0x568B34 + +#define mmDMA3_QM_ARB_SLV_CHOISE_WDT 0x568B38 + +#define mmDMA3_QM_ARB_SLV_ID 0x568B3C + +#define mmDMA3_QM_ARB_MSG_MAX_INFLIGHT 0x568B44 + +#define mmDMA3_QM_ARB_MSG_AWUSER_31_11 0x568B48 + +#define mmDMA3_QM_ARB_MSG_AWUSER_SEC_PROP 0x568B4C + +#define mmDMA3_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x568B50 + +#define mmDMA3_QM_ARB_BASE_LO 0x568B54 + +#define mmDMA3_QM_ARB_BASE_HI 0x568B58 + +#define mmDMA3_QM_ARB_STATE_STS 0x568B80 + +#define mmDMA3_QM_ARB_CHOISE_FULLNESS_STS 0x568B84 + +#define mmDMA3_QM_ARB_MSG_STS 0x568B88 + +#define mmDMA3_QM_ARB_SLV_CHOISE_Q_HEAD 0x568B8C + +#define mmDMA3_QM_ARB_ERR_CAUSE 0x568B9C + +#define mmDMA3_QM_ARB_ERR_MSG_EN 0x568BA0 + +#define mmDMA3_QM_ARB_ERR_STS_DRP 0x568BA8 + +#define mmDMA3_QM_ARB_MST_CRED_STS_0 0x568BB0 + +#define 
mmDMA3_QM_ARB_MST_CRED_STS_1 0x568BB4 + +#define mmDMA3_QM_ARB_MST_CRED_STS_2 0x568BB8 + +#define mmDMA3_QM_ARB_MST_CRED_STS_3 0x568BBC + +#define mmDMA3_QM_ARB_MST_CRED_STS_4 0x568BC0 + +#define mmDMA3_QM_ARB_MST_CRED_STS_5 0x568BC4 + +#define mmDMA3_QM_ARB_MST_CRED_STS_6 0x568BC8 + +#define mmDMA3_QM_ARB_MST_CRED_STS_7 0x568BCC + +#define mmDMA3_QM_ARB_MST_CRED_STS_8 0x568BD0 + +#define mmDMA3_QM_ARB_MST_CRED_STS_9 0x568BD4 + +#define mmDMA3_QM_ARB_MST_CRED_STS_10 0x568BD8 + +#define mmDMA3_QM_ARB_MST_CRED_STS_11 0x568BDC + +#define mmDMA3_QM_ARB_MST_CRED_STS_12 0x568BE0 + +#define mmDMA3_QM_ARB_MST_CRED_STS_13 0x568BE4 + +#define mmDMA3_QM_ARB_MST_CRED_STS_14 0x568BE8 + +#define mmDMA3_QM_ARB_MST_CRED_STS_15 0x568BEC + +#define mmDMA3_QM_ARB_MST_CRED_STS_16 0x568BF0 + +#define mmDMA3_QM_ARB_MST_CRED_STS_17 0x568BF4 + +#define mmDMA3_QM_ARB_MST_CRED_STS_18 0x568BF8 + +#define mmDMA3_QM_ARB_MST_CRED_STS_19 0x568BFC + +#define mmDMA3_QM_ARB_MST_CRED_STS_20 0x568C00 + +#define mmDMA3_QM_ARB_MST_CRED_STS_21 0x568C04 + +#define mmDMA3_QM_ARB_MST_CRED_STS_22 0x568C08 + +#define mmDMA3_QM_ARB_MST_CRED_STS_23 0x568C0C + +#define mmDMA3_QM_ARB_MST_CRED_STS_24 0x568C10 + +#define mmDMA3_QM_ARB_MST_CRED_STS_25 0x568C14 + +#define mmDMA3_QM_ARB_MST_CRED_STS_26 0x568C18 + +#define mmDMA3_QM_ARB_MST_CRED_STS_27 0x568C1C + +#define mmDMA3_QM_ARB_MST_CRED_STS_28 0x568C20 + +#define mmDMA3_QM_ARB_MST_CRED_STS_29 0x568C24 + +#define mmDMA3_QM_ARB_MST_CRED_STS_30 0x568C28 + +#define mmDMA3_QM_ARB_MST_CRED_STS_31 0x568C2C + +#define mmDMA3_QM_CGM_CFG 0x568C70 + +#define mmDMA3_QM_CGM_STS 0x568C74 + +#define mmDMA3_QM_CGM_CFG1 0x568C78 + +#define mmDMA3_QM_LOCAL_RANGE_BASE 0x568C80 + +#define mmDMA3_QM_LOCAL_RANGE_SIZE 0x568C84 + +#define mmDMA3_QM_CSMR_STRICT_PRIO_CFG 0x568C90 + +#define mmDMA3_QM_HBW_RD_RATE_LIM_CFG_1 0x568C94 + +#define mmDMA3_QM_LBW_WR_RATE_LIM_CFG_0 0x568C98 + +#define mmDMA3_QM_LBW_WR_RATE_LIM_CFG_1 0x568C9C + +#define mmDMA3_QM_HBW_RD_RATE_LIM_CFG_0 0x568CA0 + +#define mmDMA3_QM_GLBL_AXCACHE 0x568CA4 + +#define mmDMA3_QM_IND_GW_APB_CFG 0x568CB0 + +#define mmDMA3_QM_IND_GW_APB_WDATA 0x568CB4 + +#define mmDMA3_QM_IND_GW_APB_RDATA 0x568CB8 + +#define mmDMA3_QM_IND_GW_APB_STATUS 0x568CBC + +#define mmDMA3_QM_GLBL_ERR_ADDR_LO 0x568CD0 + +#define mmDMA3_QM_GLBL_ERR_ADDR_HI 0x568CD4 + +#define mmDMA3_QM_GLBL_ERR_WDATA 0x568CD8 + +#define mmDMA3_QM_GLBL_MEM_INIT_BUSY 0x568D00 + +#endif /* ASIC_REG_DMA3_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h new file mode 100644 index 000000000..192d11404 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA4_CORE_REGS_H_ +#define ASIC_REG_DMA4_CORE_REGS_H_ + +/* + ***************************************** + * DMA4_CORE (Prototype: DMA_CORE) + ***************************************** + */ + +#define mmDMA4_CORE_CFG_0 0x580000 + +#define mmDMA4_CORE_CFG_1 0x580004 + +#define mmDMA4_CORE_LBW_MAX_OUTSTAND 0x580008 + +#define mmDMA4_CORE_SRC_BASE_LO 0x580014 + +#define mmDMA4_CORE_SRC_BASE_HI 0x580018 + +#define mmDMA4_CORE_DST_BASE_LO 0x58001C + +#define mmDMA4_CORE_DST_BASE_HI 0x580020 + +#define mmDMA4_CORE_SRC_TSIZE_1 0x58002C + +#define mmDMA4_CORE_SRC_STRIDE_1 0x580030 + +#define mmDMA4_CORE_SRC_TSIZE_2 0x580034 + +#define mmDMA4_CORE_SRC_STRIDE_2 0x580038 + +#define mmDMA4_CORE_SRC_TSIZE_3 0x58003C + +#define mmDMA4_CORE_SRC_STRIDE_3 0x580040 + +#define mmDMA4_CORE_SRC_TSIZE_4 0x580044 + +#define mmDMA4_CORE_SRC_STRIDE_4 0x580048 + +#define mmDMA4_CORE_SRC_TSIZE_0 0x58004C + +#define mmDMA4_CORE_DST_TSIZE_1 0x580054 + +#define mmDMA4_CORE_DST_STRIDE_1 0x580058 + +#define mmDMA4_CORE_DST_TSIZE_2 0x58005C + +#define mmDMA4_CORE_DST_STRIDE_2 0x580060 + +#define mmDMA4_CORE_DST_TSIZE_3 0x580064 + +#define mmDMA4_CORE_DST_STRIDE_3 0x580068 + +#define mmDMA4_CORE_DST_TSIZE_4 0x58006C + +#define mmDMA4_CORE_DST_STRIDE_4 0x580070 + +#define mmDMA4_CORE_DST_TSIZE_0 0x580074 + +#define mmDMA4_CORE_COMMIT 0x580078 + +#define mmDMA4_CORE_WR_COMP_WDATA 0x58007C + +#define mmDMA4_CORE_WR_COMP_ADDR_LO 0x580080 + +#define mmDMA4_CORE_WR_COMP_ADDR_HI 0x580084 + +#define mmDMA4_CORE_WR_COMP_AWUSER_31_11 0x580088 + +#define mmDMA4_CORE_TE_NUMROWS 0x580094 + +#define mmDMA4_CORE_PROT 0x5800B8 + +#define mmDMA4_CORE_SECURE_PROPS 0x5800F0 + +#define mmDMA4_CORE_NON_SECURE_PROPS 0x5800F4 + +#define mmDMA4_CORE_RD_MAX_OUTSTAND 0x580100 + +#define mmDMA4_CORE_RD_MAX_SIZE 0x580104 + +#define mmDMA4_CORE_RD_ARCACHE 0x580108 + +#define mmDMA4_CORE_RD_ARUSER_31_11 0x580110 + +#define mmDMA4_CORE_RD_INFLIGHTS 0x580114 + +#define mmDMA4_CORE_WR_MAX_OUTSTAND 0x580120 + +#define mmDMA4_CORE_WR_MAX_AWID 0x580124 + +#define mmDMA4_CORE_WR_AWCACHE 0x580128 + +#define mmDMA4_CORE_WR_AWUSER_31_11 0x580130 + +#define mmDMA4_CORE_WR_INFLIGHTS 0x580134 + +#define mmDMA4_CORE_RD_RATE_LIM_CFG_0 0x580150 + +#define mmDMA4_CORE_RD_RATE_LIM_CFG_1 0x580154 + +#define mmDMA4_CORE_WR_RATE_LIM_CFG_0 0x580158 + +#define mmDMA4_CORE_WR_RATE_LIM_CFG_1 0x58015C + +#define mmDMA4_CORE_ERR_CFG 0x580160 + +#define mmDMA4_CORE_ERR_CAUSE 0x580164 + +#define mmDMA4_CORE_ERRMSG_ADDR_LO 0x580170 + +#define mmDMA4_CORE_ERRMSG_ADDR_HI 0x580174 + +#define mmDMA4_CORE_ERRMSG_WDATA 0x580178 + +#define mmDMA4_CORE_STS0 0x580190 + +#define mmDMA4_CORE_STS1 0x580194 + +#define mmDMA4_CORE_RD_DBGMEM_ADD 0x580200 + +#define mmDMA4_CORE_RD_DBGMEM_DATA_WR 0x580204 + +#define mmDMA4_CORE_RD_DBGMEM_DATA_RD 0x580208 + +#define mmDMA4_CORE_RD_DBGMEM_CTRL 0x58020C + +#define mmDMA4_CORE_RD_DBGMEM_RC 0x580210 + +#define mmDMA4_CORE_DBG_HBW_AXI_AR_CNT 0x580220 + +#define mmDMA4_CORE_DBG_HBW_AXI_AW_CNT 0x580224 + +#define mmDMA4_CORE_DBG_LBW_AXI_AW_CNT 0x580228 + +#define mmDMA4_CORE_DBG_DESC_CNT 0x58022C + +#define mmDMA4_CORE_DBG_STS 0x580230 + +#define mmDMA4_CORE_DBG_RD_DESC_ID 0x580234 + +#define mmDMA4_CORE_DBG_WR_DESC_ID 0x580238 + +#endif /* ASIC_REG_DMA4_CORE_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h 
b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h new file mode 100644 index 000000000..f0cbda0d1 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA4_QM_REGS_H_ +#define ASIC_REG_DMA4_QM_REGS_H_ + +/* + ***************************************** + * DMA4_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA4_QM_GLBL_CFG0 0x588000 + +#define mmDMA4_QM_GLBL_CFG1 0x588004 + +#define mmDMA4_QM_GLBL_PROT 0x588008 + +#define mmDMA4_QM_GLBL_ERR_CFG 0x58800C + +#define mmDMA4_QM_GLBL_SECURE_PROPS_0 0x588010 + +#define mmDMA4_QM_GLBL_SECURE_PROPS_1 0x588014 + +#define mmDMA4_QM_GLBL_SECURE_PROPS_2 0x588018 + +#define mmDMA4_QM_GLBL_SECURE_PROPS_3 0x58801C + +#define mmDMA4_QM_GLBL_SECURE_PROPS_4 0x588020 + +#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_0 0x588024 + +#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_1 0x588028 + +#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_2 0x58802C + +#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_3 0x588030 + +#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_4 0x588034 + +#define mmDMA4_QM_GLBL_STS0 0x588038 + +#define mmDMA4_QM_GLBL_STS1_0 0x588040 + +#define mmDMA4_QM_GLBL_STS1_1 0x588044 + +#define mmDMA4_QM_GLBL_STS1_2 0x588048 + +#define mmDMA4_QM_GLBL_STS1_3 0x58804C + +#define mmDMA4_QM_GLBL_STS1_4 0x588050 + +#define mmDMA4_QM_GLBL_MSG_EN_0 0x588054 + +#define mmDMA4_QM_GLBL_MSG_EN_1 0x588058 + +#define mmDMA4_QM_GLBL_MSG_EN_2 0x58805C + +#define mmDMA4_QM_GLBL_MSG_EN_3 0x588060 + +#define mmDMA4_QM_GLBL_MSG_EN_4 0x588068 + +#define mmDMA4_QM_PQ_BASE_LO_0 0x588070 + +#define mmDMA4_QM_PQ_BASE_LO_1 0x588074 + +#define mmDMA4_QM_PQ_BASE_LO_2 0x588078 + +#define mmDMA4_QM_PQ_BASE_LO_3 0x58807C + +#define mmDMA4_QM_PQ_BASE_HI_0 0x588080 + +#define mmDMA4_QM_PQ_BASE_HI_1 0x588084 + +#define mmDMA4_QM_PQ_BASE_HI_2 0x588088 + +#define mmDMA4_QM_PQ_BASE_HI_3 0x58808C + +#define mmDMA4_QM_PQ_SIZE_0 0x588090 + +#define mmDMA4_QM_PQ_SIZE_1 0x588094 + +#define mmDMA4_QM_PQ_SIZE_2 0x588098 + +#define mmDMA4_QM_PQ_SIZE_3 0x58809C + +#define mmDMA4_QM_PQ_PI_0 0x5880A0 + +#define mmDMA4_QM_PQ_PI_1 0x5880A4 + +#define mmDMA4_QM_PQ_PI_2 0x5880A8 + +#define mmDMA4_QM_PQ_PI_3 0x5880AC + +#define mmDMA4_QM_PQ_CI_0 0x5880B0 + +#define mmDMA4_QM_PQ_CI_1 0x5880B4 + +#define mmDMA4_QM_PQ_CI_2 0x5880B8 + +#define mmDMA4_QM_PQ_CI_3 0x5880BC + +#define mmDMA4_QM_PQ_CFG0_0 0x5880C0 + +#define mmDMA4_QM_PQ_CFG0_1 0x5880C4 + +#define mmDMA4_QM_PQ_CFG0_2 0x5880C8 + +#define mmDMA4_QM_PQ_CFG0_3 0x5880CC + +#define mmDMA4_QM_PQ_CFG1_0 0x5880D0 + +#define mmDMA4_QM_PQ_CFG1_1 0x5880D4 + +#define mmDMA4_QM_PQ_CFG1_2 0x5880D8 + +#define mmDMA4_QM_PQ_CFG1_3 0x5880DC + +#define mmDMA4_QM_PQ_ARUSER_31_11_0 0x5880E0 + +#define mmDMA4_QM_PQ_ARUSER_31_11_1 0x5880E4 + +#define mmDMA4_QM_PQ_ARUSER_31_11_2 0x5880E8 + +#define mmDMA4_QM_PQ_ARUSER_31_11_3 0x5880EC + +#define mmDMA4_QM_PQ_STS0_0 0x5880F0 + +#define mmDMA4_QM_PQ_STS0_1 0x5880F4 + +#define mmDMA4_QM_PQ_STS0_2 0x5880F8 + +#define mmDMA4_QM_PQ_STS0_3 0x5880FC + +#define mmDMA4_QM_PQ_STS1_0 0x588100 + +#define mmDMA4_QM_PQ_STS1_1 0x588104 + +#define mmDMA4_QM_PQ_STS1_2 0x588108 + +#define mmDMA4_QM_PQ_STS1_3 0x58810C + +#define mmDMA4_QM_CQ_CFG0_0 0x588110 + +#define mmDMA4_QM_CQ_CFG0_1 0x588114 + 
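[Aside on the paired PQ_PI_n / PQ_CI_n registers above: they hold the producer and consumer indices of queue n's ring, so the queue has drained when they match. A small idle check built on that reading is sketched below; the helper name and the assumption that both indices can be read back through these registers are illustrative, not taken from the driver.]

	#include <linux/io.h>
	#include <linux/types.h>

	/*
	 * Hypothetical: returns true when queue q (0..3) of DMA4's QMAN has no
	 * outstanding entries, i.e. the consumer index has caught up with the
	 * producer index. Assumes `regs` maps the device configuration space.
	 */
	static bool dma4_qm_pq_idle(void __iomem *regs, unsigned int q)
	{
		u32 pi = readl(regs + mmDMA4_QM_PQ_PI_0 + q * 4);
		u32 ci = readl(regs + mmDMA4_QM_PQ_CI_0 + q * 4);

		return pi == ci;
	}

[The register listing continues below.]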
+#define mmDMA4_QM_CQ_CFG0_2 0x588118 + +#define mmDMA4_QM_CQ_CFG0_3 0x58811C + +#define mmDMA4_QM_CQ_CFG0_4 0x588120 + +#define mmDMA4_QM_CQ_CFG1_0 0x588124 + +#define mmDMA4_QM_CQ_CFG1_1 0x588128 + +#define mmDMA4_QM_CQ_CFG1_2 0x58812C + +#define mmDMA4_QM_CQ_CFG1_3 0x588130 + +#define mmDMA4_QM_CQ_CFG1_4 0x588134 + +#define mmDMA4_QM_CQ_ARUSER_31_11_0 0x588138 + +#define mmDMA4_QM_CQ_ARUSER_31_11_1 0x58813C + +#define mmDMA4_QM_CQ_ARUSER_31_11_2 0x588140 + +#define mmDMA4_QM_CQ_ARUSER_31_11_3 0x588144 + +#define mmDMA4_QM_CQ_ARUSER_31_11_4 0x588148 + +#define mmDMA4_QM_CQ_STS0_0 0x58814C + +#define mmDMA4_QM_CQ_STS0_1 0x588150 + +#define mmDMA4_QM_CQ_STS0_2 0x588154 + +#define mmDMA4_QM_CQ_STS0_3 0x588158 + +#define mmDMA4_QM_CQ_STS0_4 0x58815C + +#define mmDMA4_QM_CQ_STS1_0 0x588160 + +#define mmDMA4_QM_CQ_STS1_1 0x588164 + +#define mmDMA4_QM_CQ_STS1_2 0x588168 + +#define mmDMA4_QM_CQ_STS1_3 0x58816C + +#define mmDMA4_QM_CQ_STS1_4 0x588170 + +#define mmDMA4_QM_CQ_PTR_LO_0 0x588174 + +#define mmDMA4_QM_CQ_PTR_HI_0 0x588178 + +#define mmDMA4_QM_CQ_TSIZE_0 0x58817C + +#define mmDMA4_QM_CQ_CTL_0 0x588180 + +#define mmDMA4_QM_CQ_PTR_LO_1 0x588184 + +#define mmDMA4_QM_CQ_PTR_HI_1 0x588188 + +#define mmDMA4_QM_CQ_TSIZE_1 0x58818C + +#define mmDMA4_QM_CQ_CTL_1 0x588190 + +#define mmDMA4_QM_CQ_PTR_LO_2 0x588194 + +#define mmDMA4_QM_CQ_PTR_HI_2 0x588198 + +#define mmDMA4_QM_CQ_TSIZE_2 0x58819C + +#define mmDMA4_QM_CQ_CTL_2 0x5881A0 + +#define mmDMA4_QM_CQ_PTR_LO_3 0x5881A4 + +#define mmDMA4_QM_CQ_PTR_HI_3 0x5881A8 + +#define mmDMA4_QM_CQ_TSIZE_3 0x5881AC + +#define mmDMA4_QM_CQ_CTL_3 0x5881B0 + +#define mmDMA4_QM_CQ_PTR_LO_4 0x5881B4 + +#define mmDMA4_QM_CQ_PTR_HI_4 0x5881B8 + +#define mmDMA4_QM_CQ_TSIZE_4 0x5881BC + +#define mmDMA4_QM_CQ_CTL_4 0x5881C0 + +#define mmDMA4_QM_CQ_PTR_LO_STS_0 0x5881C4 + +#define mmDMA4_QM_CQ_PTR_LO_STS_1 0x5881C8 + +#define mmDMA4_QM_CQ_PTR_LO_STS_2 0x5881CC + +#define mmDMA4_QM_CQ_PTR_LO_STS_3 0x5881D0 + +#define mmDMA4_QM_CQ_PTR_LO_STS_4 0x5881D4 + +#define mmDMA4_QM_CQ_PTR_HI_STS_0 0x5881D8 + +#define mmDMA4_QM_CQ_PTR_HI_STS_1 0x5881DC + +#define mmDMA4_QM_CQ_PTR_HI_STS_2 0x5881E0 + +#define mmDMA4_QM_CQ_PTR_HI_STS_3 0x5881E4 + +#define mmDMA4_QM_CQ_PTR_HI_STS_4 0x5881E8 + +#define mmDMA4_QM_CQ_TSIZE_STS_0 0x5881EC + +#define mmDMA4_QM_CQ_TSIZE_STS_1 0x5881F0 + +#define mmDMA4_QM_CQ_TSIZE_STS_2 0x5881F4 + +#define mmDMA4_QM_CQ_TSIZE_STS_3 0x5881F8 + +#define mmDMA4_QM_CQ_TSIZE_STS_4 0x5881FC + +#define mmDMA4_QM_CQ_CTL_STS_0 0x588200 + +#define mmDMA4_QM_CQ_CTL_STS_1 0x588204 + +#define mmDMA4_QM_CQ_CTL_STS_2 0x588208 + +#define mmDMA4_QM_CQ_CTL_STS_3 0x58820C + +#define mmDMA4_QM_CQ_CTL_STS_4 0x588210 + +#define mmDMA4_QM_CQ_IFIFO_CNT_0 0x588214 + +#define mmDMA4_QM_CQ_IFIFO_CNT_1 0x588218 + +#define mmDMA4_QM_CQ_IFIFO_CNT_2 0x58821C + +#define mmDMA4_QM_CQ_IFIFO_CNT_3 0x588220 + +#define mmDMA4_QM_CQ_IFIFO_CNT_4 0x588224 + +#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_0 0x588228 + +#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_1 0x58822C + +#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_2 0x588230 + +#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_3 0x588234 + +#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_4 0x588238 + +#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_0 0x58823C + +#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_1 0x588240 + +#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_2 0x588244 + +#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_3 0x588248 + +#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_4 0x58824C + +#define mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_0 0x588250 + +#define mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_1 0x588254 + +#define 
mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_2 0x588258 + +#define mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_3 0x58825C + +#define mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_4 0x588260 + +#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_0 0x588264 + +#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_1 0x588268 + +#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_2 0x58826C + +#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_3 0x588270 + +#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_4 0x588274 + +#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_0 0x588278 + +#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_1 0x58827C + +#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 0x588280 + +#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_3 0x588284 + +#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_4 0x588288 + +#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_0 0x58828C + +#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_1 0x588290 + +#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_2 0x588294 + +#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_3 0x588298 + +#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_4 0x58829C + +#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_0 0x5882A0 + +#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_1 0x5882A4 + +#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_2 0x5882A8 + +#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_3 0x5882AC + +#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_4 0x5882B0 + +#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_0 0x5882B4 + +#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_1 0x5882B8 + +#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_2 0x5882BC + +#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_3 0x5882C0 + +#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_4 0x5882C4 + +#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_0 0x5882C8 + +#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_1 0x5882CC + +#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_2 0x5882D0 + +#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_3 0x5882D4 + +#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_4 0x5882D8 + +#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5882E0 + +#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5882E4 + +#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5882E8 + +#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5882EC + +#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5882F0 + +#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5882F4 + +#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5882F8 + +#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5882FC + +#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x588300 + +#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x588304 + +#define mmDMA4_QM_CP_FENCE0_RDATA_0 0x588308 + +#define mmDMA4_QM_CP_FENCE0_RDATA_1 0x58830C + +#define mmDMA4_QM_CP_FENCE0_RDATA_2 0x588310 + +#define mmDMA4_QM_CP_FENCE0_RDATA_3 0x588314 + +#define mmDMA4_QM_CP_FENCE0_RDATA_4 0x588318 + +#define mmDMA4_QM_CP_FENCE1_RDATA_0 0x58831C + +#define mmDMA4_QM_CP_FENCE1_RDATA_1 0x588320 + +#define mmDMA4_QM_CP_FENCE1_RDATA_2 0x588324 + +#define mmDMA4_QM_CP_FENCE1_RDATA_3 0x588328 + +#define mmDMA4_QM_CP_FENCE1_RDATA_4 0x58832C + +#define mmDMA4_QM_CP_FENCE2_RDATA_0 0x588330 + +#define mmDMA4_QM_CP_FENCE2_RDATA_1 0x588334 + +#define mmDMA4_QM_CP_FENCE2_RDATA_2 0x588338 + +#define mmDMA4_QM_CP_FENCE2_RDATA_3 0x58833C + +#define mmDMA4_QM_CP_FENCE2_RDATA_4 0x588340 + +#define mmDMA4_QM_CP_FENCE3_RDATA_0 0x588344 + +#define mmDMA4_QM_CP_FENCE3_RDATA_1 0x588348 + +#define mmDMA4_QM_CP_FENCE3_RDATA_2 0x58834C + +#define mmDMA4_QM_CP_FENCE3_RDATA_3 0x588350 + +#define mmDMA4_QM_CP_FENCE3_RDATA_4 0x588354 + +#define mmDMA4_QM_CP_FENCE0_CNT_0 0x588358 + +#define mmDMA4_QM_CP_FENCE0_CNT_1 0x58835C + +#define mmDMA4_QM_CP_FENCE0_CNT_2 0x588360 + +#define mmDMA4_QM_CP_FENCE0_CNT_3 0x588364 + +#define mmDMA4_QM_CP_FENCE0_CNT_4 0x588368 + +#define mmDMA4_QM_CP_FENCE1_CNT_0 
0x58836C + +#define mmDMA4_QM_CP_FENCE1_CNT_1 0x588370 + +#define mmDMA4_QM_CP_FENCE1_CNT_2 0x588374 + +#define mmDMA4_QM_CP_FENCE1_CNT_3 0x588378 + +#define mmDMA4_QM_CP_FENCE1_CNT_4 0x58837C + +#define mmDMA4_QM_CP_FENCE2_CNT_0 0x588380 + +#define mmDMA4_QM_CP_FENCE2_CNT_1 0x588384 + +#define mmDMA4_QM_CP_FENCE2_CNT_2 0x588388 + +#define mmDMA4_QM_CP_FENCE2_CNT_3 0x58838C + +#define mmDMA4_QM_CP_FENCE2_CNT_4 0x588390 + +#define mmDMA4_QM_CP_FENCE3_CNT_0 0x588394 + +#define mmDMA4_QM_CP_FENCE3_CNT_1 0x588398 + +#define mmDMA4_QM_CP_FENCE3_CNT_2 0x58839C + +#define mmDMA4_QM_CP_FENCE3_CNT_3 0x5883A0 + +#define mmDMA4_QM_CP_FENCE3_CNT_4 0x5883A4 + +#define mmDMA4_QM_CP_STS_0 0x5883A8 + +#define mmDMA4_QM_CP_STS_1 0x5883AC + +#define mmDMA4_QM_CP_STS_2 0x5883B0 + +#define mmDMA4_QM_CP_STS_3 0x5883B4 + +#define mmDMA4_QM_CP_STS_4 0x5883B8 + +#define mmDMA4_QM_CP_CURRENT_INST_LO_0 0x5883BC + +#define mmDMA4_QM_CP_CURRENT_INST_LO_1 0x5883C0 + +#define mmDMA4_QM_CP_CURRENT_INST_LO_2 0x5883C4 + +#define mmDMA4_QM_CP_CURRENT_INST_LO_3 0x5883C8 + +#define mmDMA4_QM_CP_CURRENT_INST_LO_4 0x5883CC + +#define mmDMA4_QM_CP_CURRENT_INST_HI_0 0x5883D0 + +#define mmDMA4_QM_CP_CURRENT_INST_HI_1 0x5883D4 + +#define mmDMA4_QM_CP_CURRENT_INST_HI_2 0x5883D8 + +#define mmDMA4_QM_CP_CURRENT_INST_HI_3 0x5883DC + +#define mmDMA4_QM_CP_CURRENT_INST_HI_4 0x5883E0 + +#define mmDMA4_QM_CP_BARRIER_CFG_0 0x5883F4 + +#define mmDMA4_QM_CP_BARRIER_CFG_1 0x5883F8 + +#define mmDMA4_QM_CP_BARRIER_CFG_2 0x5883FC + +#define mmDMA4_QM_CP_BARRIER_CFG_3 0x588400 + +#define mmDMA4_QM_CP_BARRIER_CFG_4 0x588404 + +#define mmDMA4_QM_CP_DBG_0_0 0x588408 + +#define mmDMA4_QM_CP_DBG_0_1 0x58840C + +#define mmDMA4_QM_CP_DBG_0_2 0x588410 + +#define mmDMA4_QM_CP_DBG_0_3 0x588414 + +#define mmDMA4_QM_CP_DBG_0_4 0x588418 + +#define mmDMA4_QM_CP_ARUSER_31_11_0 0x58841C + +#define mmDMA4_QM_CP_ARUSER_31_11_1 0x588420 + +#define mmDMA4_QM_CP_ARUSER_31_11_2 0x588424 + +#define mmDMA4_QM_CP_ARUSER_31_11_3 0x588428 + +#define mmDMA4_QM_CP_ARUSER_31_11_4 0x58842C + +#define mmDMA4_QM_CP_AWUSER_31_11_0 0x588430 + +#define mmDMA4_QM_CP_AWUSER_31_11_1 0x588434 + +#define mmDMA4_QM_CP_AWUSER_31_11_2 0x588438 + +#define mmDMA4_QM_CP_AWUSER_31_11_3 0x58843C + +#define mmDMA4_QM_CP_AWUSER_31_11_4 0x588440 + +#define mmDMA4_QM_ARB_CFG_0 0x588A00 + +#define mmDMA4_QM_ARB_CHOISE_Q_PUSH 0x588A04 + +#define mmDMA4_QM_ARB_WRR_WEIGHT_0 0x588A08 + +#define mmDMA4_QM_ARB_WRR_WEIGHT_1 0x588A0C + +#define mmDMA4_QM_ARB_WRR_WEIGHT_2 0x588A10 + +#define mmDMA4_QM_ARB_WRR_WEIGHT_3 0x588A14 + +#define mmDMA4_QM_ARB_CFG_1 0x588A18 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_0 0x588A20 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_1 0x588A24 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_2 0x588A28 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_3 0x588A2C + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_4 0x588A30 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_5 0x588A34 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_6 0x588A38 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_7 0x588A3C + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_8 0x588A40 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_9 0x588A44 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_10 0x588A48 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_11 0x588A4C + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_12 0x588A50 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_13 0x588A54 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_14 0x588A58 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_15 0x588A5C + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_16 0x588A60 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_17 0x588A64 + +#define 
mmDMA4_QM_ARB_MST_AVAIL_CRED_18 0x588A68 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_19 0x588A6C + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_20 0x588A70 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_21 0x588A74 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_22 0x588A78 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_23 0x588A7C + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_24 0x588A80 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_25 0x588A84 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_26 0x588A88 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_27 0x588A8C + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_28 0x588A90 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_29 0x588A94 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_30 0x588A98 + +#define mmDMA4_QM_ARB_MST_AVAIL_CRED_31 0x588A9C + +#define mmDMA4_QM_ARB_MST_CRED_INC 0x588AA0 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x588AA4 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x588AA8 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x588AAC + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x588AB0 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x588AB4 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x588AB8 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x588ABC + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x588AC0 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x588AC4 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x588AC8 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x588ACC + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x588AD0 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x588AD4 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x588AD8 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x588ADC + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x588AE0 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x588AE4 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x588AE8 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x588AEC + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x588AF0 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x588AF4 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x588AF8 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x588AFC + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x588B00 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x588B04 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x588B08 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x588B0C + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x588B10 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x588B14 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x588B18 + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x588B1C + +#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x588B20 + +#define mmDMA4_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x588B28 + +#define mmDMA4_QM_ARB_MST_SLAVE_EN 0x588B2C + +#define mmDMA4_QM_ARB_MST_QUIET_PER 0x588B34 + +#define mmDMA4_QM_ARB_SLV_CHOISE_WDT 0x588B38 + +#define mmDMA4_QM_ARB_SLV_ID 0x588B3C + +#define mmDMA4_QM_ARB_MSG_MAX_INFLIGHT 0x588B44 + +#define mmDMA4_QM_ARB_MSG_AWUSER_31_11 0x588B48 + +#define mmDMA4_QM_ARB_MSG_AWUSER_SEC_PROP 0x588B4C + +#define mmDMA4_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x588B50 + +#define mmDMA4_QM_ARB_BASE_LO 0x588B54 + +#define mmDMA4_QM_ARB_BASE_HI 0x588B58 + +#define mmDMA4_QM_ARB_STATE_STS 0x588B80 + +#define mmDMA4_QM_ARB_CHOISE_FULLNESS_STS 0x588B84 + +#define mmDMA4_QM_ARB_MSG_STS 0x588B88 + +#define mmDMA4_QM_ARB_SLV_CHOISE_Q_HEAD 0x588B8C + +#define mmDMA4_QM_ARB_ERR_CAUSE 0x588B9C + +#define mmDMA4_QM_ARB_ERR_MSG_EN 0x588BA0 + +#define mmDMA4_QM_ARB_ERR_STS_DRP 0x588BA8 + +#define mmDMA4_QM_ARB_MST_CRED_STS_0 0x588BB0 + +#define 
mmDMA4_QM_ARB_MST_CRED_STS_1 0x588BB4 + +#define mmDMA4_QM_ARB_MST_CRED_STS_2 0x588BB8 + +#define mmDMA4_QM_ARB_MST_CRED_STS_3 0x588BBC + +#define mmDMA4_QM_ARB_MST_CRED_STS_4 0x588BC0 + +#define mmDMA4_QM_ARB_MST_CRED_STS_5 0x588BC4 + +#define mmDMA4_QM_ARB_MST_CRED_STS_6 0x588BC8 + +#define mmDMA4_QM_ARB_MST_CRED_STS_7 0x588BCC + +#define mmDMA4_QM_ARB_MST_CRED_STS_8 0x588BD0 + +#define mmDMA4_QM_ARB_MST_CRED_STS_9 0x588BD4 + +#define mmDMA4_QM_ARB_MST_CRED_STS_10 0x588BD8 + +#define mmDMA4_QM_ARB_MST_CRED_STS_11 0x588BDC + +#define mmDMA4_QM_ARB_MST_CRED_STS_12 0x588BE0 + +#define mmDMA4_QM_ARB_MST_CRED_STS_13 0x588BE4 + +#define mmDMA4_QM_ARB_MST_CRED_STS_14 0x588BE8 + +#define mmDMA4_QM_ARB_MST_CRED_STS_15 0x588BEC + +#define mmDMA4_QM_ARB_MST_CRED_STS_16 0x588BF0 + +#define mmDMA4_QM_ARB_MST_CRED_STS_17 0x588BF4 + +#define mmDMA4_QM_ARB_MST_CRED_STS_18 0x588BF8 + +#define mmDMA4_QM_ARB_MST_CRED_STS_19 0x588BFC + +#define mmDMA4_QM_ARB_MST_CRED_STS_20 0x588C00 + +#define mmDMA4_QM_ARB_MST_CRED_STS_21 0x588C04 + +#define mmDMA4_QM_ARB_MST_CRED_STS_22 0x588C08 + +#define mmDMA4_QM_ARB_MST_CRED_STS_23 0x588C0C + +#define mmDMA4_QM_ARB_MST_CRED_STS_24 0x588C10 + +#define mmDMA4_QM_ARB_MST_CRED_STS_25 0x588C14 + +#define mmDMA4_QM_ARB_MST_CRED_STS_26 0x588C18 + +#define mmDMA4_QM_ARB_MST_CRED_STS_27 0x588C1C + +#define mmDMA4_QM_ARB_MST_CRED_STS_28 0x588C20 + +#define mmDMA4_QM_ARB_MST_CRED_STS_29 0x588C24 + +#define mmDMA4_QM_ARB_MST_CRED_STS_30 0x588C28 + +#define mmDMA4_QM_ARB_MST_CRED_STS_31 0x588C2C + +#define mmDMA4_QM_CGM_CFG 0x588C70 + +#define mmDMA4_QM_CGM_STS 0x588C74 + +#define mmDMA4_QM_CGM_CFG1 0x588C78 + +#define mmDMA4_QM_LOCAL_RANGE_BASE 0x588C80 + +#define mmDMA4_QM_LOCAL_RANGE_SIZE 0x588C84 + +#define mmDMA4_QM_CSMR_STRICT_PRIO_CFG 0x588C90 + +#define mmDMA4_QM_HBW_RD_RATE_LIM_CFG_1 0x588C94 + +#define mmDMA4_QM_LBW_WR_RATE_LIM_CFG_0 0x588C98 + +#define mmDMA4_QM_LBW_WR_RATE_LIM_CFG_1 0x588C9C + +#define mmDMA4_QM_HBW_RD_RATE_LIM_CFG_0 0x588CA0 + +#define mmDMA4_QM_GLBL_AXCACHE 0x588CA4 + +#define mmDMA4_QM_IND_GW_APB_CFG 0x588CB0 + +#define mmDMA4_QM_IND_GW_APB_WDATA 0x588CB4 + +#define mmDMA4_QM_IND_GW_APB_RDATA 0x588CB8 + +#define mmDMA4_QM_IND_GW_APB_STATUS 0x588CBC + +#define mmDMA4_QM_GLBL_ERR_ADDR_LO 0x588CD0 + +#define mmDMA4_QM_GLBL_ERR_ADDR_HI 0x588CD4 + +#define mmDMA4_QM_GLBL_ERR_WDATA 0x588CD8 + +#define mmDMA4_QM_GLBL_MEM_INIT_BUSY 0x588D00 + +#endif /* ASIC_REG_DMA4_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h new file mode 100644 index 000000000..6e07c6fb6 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA5_CORE_REGS_H_
+#define ASIC_REG_DMA5_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA5_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA5_CORE_CFG_0 0x5A0000
+
+#define mmDMA5_CORE_CFG_1 0x5A0004
+
+#define mmDMA5_CORE_LBW_MAX_OUTSTAND 0x5A0008
+
+#define mmDMA5_CORE_SRC_BASE_LO 0x5A0014
+
+#define mmDMA5_CORE_SRC_BASE_HI 0x5A0018
+
+#define mmDMA5_CORE_DST_BASE_LO 0x5A001C
+
+#define mmDMA5_CORE_DST_BASE_HI 0x5A0020
+
+#define mmDMA5_CORE_SRC_TSIZE_1 0x5A002C
+
+#define mmDMA5_CORE_SRC_STRIDE_1 0x5A0030
+
+#define mmDMA5_CORE_SRC_TSIZE_2 0x5A0034
+
+#define mmDMA5_CORE_SRC_STRIDE_2 0x5A0038
+
+#define mmDMA5_CORE_SRC_TSIZE_3 0x5A003C
+
+#define mmDMA5_CORE_SRC_STRIDE_3 0x5A0040
+
+#define mmDMA5_CORE_SRC_TSIZE_4 0x5A0044
+
+#define mmDMA5_CORE_SRC_STRIDE_4 0x5A0048
+
+#define mmDMA5_CORE_SRC_TSIZE_0 0x5A004C
+
+#define mmDMA5_CORE_DST_TSIZE_1 0x5A0054
+
+#define mmDMA5_CORE_DST_STRIDE_1 0x5A0058
+
+#define mmDMA5_CORE_DST_TSIZE_2 0x5A005C
+
+#define mmDMA5_CORE_DST_STRIDE_2 0x5A0060
+
+#define mmDMA5_CORE_DST_TSIZE_3 0x5A0064
+
+#define mmDMA5_CORE_DST_STRIDE_3 0x5A0068
+
+#define mmDMA5_CORE_DST_TSIZE_4 0x5A006C
+
+#define mmDMA5_CORE_DST_STRIDE_4 0x5A0070
+
+#define mmDMA5_CORE_DST_TSIZE_0 0x5A0074
+
+#define mmDMA5_CORE_COMMIT 0x5A0078
+
+#define mmDMA5_CORE_WR_COMP_WDATA 0x5A007C
+
+#define mmDMA5_CORE_WR_COMP_ADDR_LO 0x5A0080
+
+#define mmDMA5_CORE_WR_COMP_ADDR_HI 0x5A0084
+
+#define mmDMA5_CORE_WR_COMP_AWUSER_31_11 0x5A0088
+
+#define mmDMA5_CORE_TE_NUMROWS 0x5A0094
+
+#define mmDMA5_CORE_PROT 0x5A00B8
+
+#define mmDMA5_CORE_SECURE_PROPS 0x5A00F0
+
+#define mmDMA5_CORE_NON_SECURE_PROPS 0x5A00F4
+
+#define mmDMA5_CORE_RD_MAX_OUTSTAND 0x5A0100
+
+#define mmDMA5_CORE_RD_MAX_SIZE 0x5A0104
+
+#define mmDMA5_CORE_RD_ARCACHE 0x5A0108
+
+#define mmDMA5_CORE_RD_ARUSER_31_11 0x5A0110
+
+#define mmDMA5_CORE_RD_INFLIGHTS 0x5A0114
+
+#define mmDMA5_CORE_WR_MAX_OUTSTAND 0x5A0120
+
+#define mmDMA5_CORE_WR_MAX_AWID 0x5A0124
+
+#define mmDMA5_CORE_WR_AWCACHE 0x5A0128
+
+#define mmDMA5_CORE_WR_AWUSER_31_11 0x5A0130
+
+#define mmDMA5_CORE_WR_INFLIGHTS 0x5A0134
+
+#define mmDMA5_CORE_RD_RATE_LIM_CFG_0 0x5A0150
+
+#define mmDMA5_CORE_RD_RATE_LIM_CFG_1 0x5A0154
+
+#define mmDMA5_CORE_WR_RATE_LIM_CFG_0 0x5A0158
+
+#define mmDMA5_CORE_WR_RATE_LIM_CFG_1 0x5A015C
+
+#define mmDMA5_CORE_ERR_CFG 0x5A0160
+
+#define mmDMA5_CORE_ERR_CAUSE 0x5A0164
+
+#define mmDMA5_CORE_ERRMSG_ADDR_LO 0x5A0170
+
+#define mmDMA5_CORE_ERRMSG_ADDR_HI 0x5A0174
+
+#define mmDMA5_CORE_ERRMSG_WDATA 0x5A0178
+
+#define mmDMA5_CORE_STS0 0x5A0190
+
+#define mmDMA5_CORE_STS1 0x5A0194
+
+#define mmDMA5_CORE_RD_DBGMEM_ADD 0x5A0200
+
+#define mmDMA5_CORE_RD_DBGMEM_DATA_WR 0x5A0204
+
+#define mmDMA5_CORE_RD_DBGMEM_DATA_RD 0x5A0208
+
+#define mmDMA5_CORE_RD_DBGMEM_CTRL 0x5A020C
+
+#define mmDMA5_CORE_RD_DBGMEM_RC 0x5A0210
+
+#define mmDMA5_CORE_DBG_HBW_AXI_AR_CNT 0x5A0220
+
+#define mmDMA5_CORE_DBG_HBW_AXI_AW_CNT 0x5A0224
+
+#define mmDMA5_CORE_DBG_LBW_AXI_AW_CNT 0x5A0228
+
+#define mmDMA5_CORE_DBG_DESC_CNT 0x5A022C
+
+#define mmDMA5_CORE_DBG_STS 0x5A0230
+
+#define mmDMA5_CORE_DBG_RD_DESC_ID 0x5A0234
+
+#define mmDMA5_CORE_DBG_WR_DESC_ID 0x5A0238
+
+#endif /* ASIC_REG_DMA5_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h
b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h new file mode 100644 index 000000000..0faea2175 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA5_QM_REGS_H_ +#define ASIC_REG_DMA5_QM_REGS_H_ + +/* + ***************************************** + * DMA5_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA5_QM_GLBL_CFG0 0x5A8000 + +#define mmDMA5_QM_GLBL_CFG1 0x5A8004 + +#define mmDMA5_QM_GLBL_PROT 0x5A8008 + +#define mmDMA5_QM_GLBL_ERR_CFG 0x5A800C + +#define mmDMA5_QM_GLBL_SECURE_PROPS_0 0x5A8010 + +#define mmDMA5_QM_GLBL_SECURE_PROPS_1 0x5A8014 + +#define mmDMA5_QM_GLBL_SECURE_PROPS_2 0x5A8018 + +#define mmDMA5_QM_GLBL_SECURE_PROPS_3 0x5A801C + +#define mmDMA5_QM_GLBL_SECURE_PROPS_4 0x5A8020 + +#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_0 0x5A8024 + +#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_1 0x5A8028 + +#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_2 0x5A802C + +#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_3 0x5A8030 + +#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_4 0x5A8034 + +#define mmDMA5_QM_GLBL_STS0 0x5A8038 + +#define mmDMA5_QM_GLBL_STS1_0 0x5A8040 + +#define mmDMA5_QM_GLBL_STS1_1 0x5A8044 + +#define mmDMA5_QM_GLBL_STS1_2 0x5A8048 + +#define mmDMA5_QM_GLBL_STS1_3 0x5A804C + +#define mmDMA5_QM_GLBL_STS1_4 0x5A8050 + +#define mmDMA5_QM_GLBL_MSG_EN_0 0x5A8054 + +#define mmDMA5_QM_GLBL_MSG_EN_1 0x5A8058 + +#define mmDMA5_QM_GLBL_MSG_EN_2 0x5A805C + +#define mmDMA5_QM_GLBL_MSG_EN_3 0x5A8060 + +#define mmDMA5_QM_GLBL_MSG_EN_4 0x5A8068 + +#define mmDMA5_QM_PQ_BASE_LO_0 0x5A8070 + +#define mmDMA5_QM_PQ_BASE_LO_1 0x5A8074 + +#define mmDMA5_QM_PQ_BASE_LO_2 0x5A8078 + +#define mmDMA5_QM_PQ_BASE_LO_3 0x5A807C + +#define mmDMA5_QM_PQ_BASE_HI_0 0x5A8080 + +#define mmDMA5_QM_PQ_BASE_HI_1 0x5A8084 + +#define mmDMA5_QM_PQ_BASE_HI_2 0x5A8088 + +#define mmDMA5_QM_PQ_BASE_HI_3 0x5A808C + +#define mmDMA5_QM_PQ_SIZE_0 0x5A8090 + +#define mmDMA5_QM_PQ_SIZE_1 0x5A8094 + +#define mmDMA5_QM_PQ_SIZE_2 0x5A8098 + +#define mmDMA5_QM_PQ_SIZE_3 0x5A809C + +#define mmDMA5_QM_PQ_PI_0 0x5A80A0 + +#define mmDMA5_QM_PQ_PI_1 0x5A80A4 + +#define mmDMA5_QM_PQ_PI_2 0x5A80A8 + +#define mmDMA5_QM_PQ_PI_3 0x5A80AC + +#define mmDMA5_QM_PQ_CI_0 0x5A80B0 + +#define mmDMA5_QM_PQ_CI_1 0x5A80B4 + +#define mmDMA5_QM_PQ_CI_2 0x5A80B8 + +#define mmDMA5_QM_PQ_CI_3 0x5A80BC + +#define mmDMA5_QM_PQ_CFG0_0 0x5A80C0 + +#define mmDMA5_QM_PQ_CFG0_1 0x5A80C4 + +#define mmDMA5_QM_PQ_CFG0_2 0x5A80C8 + +#define mmDMA5_QM_PQ_CFG0_3 0x5A80CC + +#define mmDMA5_QM_PQ_CFG1_0 0x5A80D0 + +#define mmDMA5_QM_PQ_CFG1_1 0x5A80D4 + +#define mmDMA5_QM_PQ_CFG1_2 0x5A80D8 + +#define mmDMA5_QM_PQ_CFG1_3 0x5A80DC + +#define mmDMA5_QM_PQ_ARUSER_31_11_0 0x5A80E0 + +#define mmDMA5_QM_PQ_ARUSER_31_11_1 0x5A80E4 + +#define mmDMA5_QM_PQ_ARUSER_31_11_2 0x5A80E8 + +#define mmDMA5_QM_PQ_ARUSER_31_11_3 0x5A80EC + +#define mmDMA5_QM_PQ_STS0_0 0x5A80F0 + +#define mmDMA5_QM_PQ_STS0_1 0x5A80F4 + +#define mmDMA5_QM_PQ_STS0_2 0x5A80F8 + +#define mmDMA5_QM_PQ_STS0_3 0x5A80FC + +#define mmDMA5_QM_PQ_STS1_0 0x5A8100 + +#define mmDMA5_QM_PQ_STS1_1 0x5A8104 + +#define mmDMA5_QM_PQ_STS1_2 0x5A8108 + +#define mmDMA5_QM_PQ_STS1_3 0x5A810C + +#define mmDMA5_QM_CQ_CFG0_0 0x5A8110 + +#define mmDMA5_QM_CQ_CFG0_1 0x5A8114 + 
+#define mmDMA5_QM_CQ_CFG0_2 0x5A8118 + +#define mmDMA5_QM_CQ_CFG0_3 0x5A811C + +#define mmDMA5_QM_CQ_CFG0_4 0x5A8120 + +#define mmDMA5_QM_CQ_CFG1_0 0x5A8124 + +#define mmDMA5_QM_CQ_CFG1_1 0x5A8128 + +#define mmDMA5_QM_CQ_CFG1_2 0x5A812C + +#define mmDMA5_QM_CQ_CFG1_3 0x5A8130 + +#define mmDMA5_QM_CQ_CFG1_4 0x5A8134 + +#define mmDMA5_QM_CQ_ARUSER_31_11_0 0x5A8138 + +#define mmDMA5_QM_CQ_ARUSER_31_11_1 0x5A813C + +#define mmDMA5_QM_CQ_ARUSER_31_11_2 0x5A8140 + +#define mmDMA5_QM_CQ_ARUSER_31_11_3 0x5A8144 + +#define mmDMA5_QM_CQ_ARUSER_31_11_4 0x5A8148 + +#define mmDMA5_QM_CQ_STS0_0 0x5A814C + +#define mmDMA5_QM_CQ_STS0_1 0x5A8150 + +#define mmDMA5_QM_CQ_STS0_2 0x5A8154 + +#define mmDMA5_QM_CQ_STS0_3 0x5A8158 + +#define mmDMA5_QM_CQ_STS0_4 0x5A815C + +#define mmDMA5_QM_CQ_STS1_0 0x5A8160 + +#define mmDMA5_QM_CQ_STS1_1 0x5A8164 + +#define mmDMA5_QM_CQ_STS1_2 0x5A8168 + +#define mmDMA5_QM_CQ_STS1_3 0x5A816C + +#define mmDMA5_QM_CQ_STS1_4 0x5A8170 + +#define mmDMA5_QM_CQ_PTR_LO_0 0x5A8174 + +#define mmDMA5_QM_CQ_PTR_HI_0 0x5A8178 + +#define mmDMA5_QM_CQ_TSIZE_0 0x5A817C + +#define mmDMA5_QM_CQ_CTL_0 0x5A8180 + +#define mmDMA5_QM_CQ_PTR_LO_1 0x5A8184 + +#define mmDMA5_QM_CQ_PTR_HI_1 0x5A8188 + +#define mmDMA5_QM_CQ_TSIZE_1 0x5A818C + +#define mmDMA5_QM_CQ_CTL_1 0x5A8190 + +#define mmDMA5_QM_CQ_PTR_LO_2 0x5A8194 + +#define mmDMA5_QM_CQ_PTR_HI_2 0x5A8198 + +#define mmDMA5_QM_CQ_TSIZE_2 0x5A819C + +#define mmDMA5_QM_CQ_CTL_2 0x5A81A0 + +#define mmDMA5_QM_CQ_PTR_LO_3 0x5A81A4 + +#define mmDMA5_QM_CQ_PTR_HI_3 0x5A81A8 + +#define mmDMA5_QM_CQ_TSIZE_3 0x5A81AC + +#define mmDMA5_QM_CQ_CTL_3 0x5A81B0 + +#define mmDMA5_QM_CQ_PTR_LO_4 0x5A81B4 + +#define mmDMA5_QM_CQ_PTR_HI_4 0x5A81B8 + +#define mmDMA5_QM_CQ_TSIZE_4 0x5A81BC + +#define mmDMA5_QM_CQ_CTL_4 0x5A81C0 + +#define mmDMA5_QM_CQ_PTR_LO_STS_0 0x5A81C4 + +#define mmDMA5_QM_CQ_PTR_LO_STS_1 0x5A81C8 + +#define mmDMA5_QM_CQ_PTR_LO_STS_2 0x5A81CC + +#define mmDMA5_QM_CQ_PTR_LO_STS_3 0x5A81D0 + +#define mmDMA5_QM_CQ_PTR_LO_STS_4 0x5A81D4 + +#define mmDMA5_QM_CQ_PTR_HI_STS_0 0x5A81D8 + +#define mmDMA5_QM_CQ_PTR_HI_STS_1 0x5A81DC + +#define mmDMA5_QM_CQ_PTR_HI_STS_2 0x5A81E0 + +#define mmDMA5_QM_CQ_PTR_HI_STS_3 0x5A81E4 + +#define mmDMA5_QM_CQ_PTR_HI_STS_4 0x5A81E8 + +#define mmDMA5_QM_CQ_TSIZE_STS_0 0x5A81EC + +#define mmDMA5_QM_CQ_TSIZE_STS_1 0x5A81F0 + +#define mmDMA5_QM_CQ_TSIZE_STS_2 0x5A81F4 + +#define mmDMA5_QM_CQ_TSIZE_STS_3 0x5A81F8 + +#define mmDMA5_QM_CQ_TSIZE_STS_4 0x5A81FC + +#define mmDMA5_QM_CQ_CTL_STS_0 0x5A8200 + +#define mmDMA5_QM_CQ_CTL_STS_1 0x5A8204 + +#define mmDMA5_QM_CQ_CTL_STS_2 0x5A8208 + +#define mmDMA5_QM_CQ_CTL_STS_3 0x5A820C + +#define mmDMA5_QM_CQ_CTL_STS_4 0x5A8210 + +#define mmDMA5_QM_CQ_IFIFO_CNT_0 0x5A8214 + +#define mmDMA5_QM_CQ_IFIFO_CNT_1 0x5A8218 + +#define mmDMA5_QM_CQ_IFIFO_CNT_2 0x5A821C + +#define mmDMA5_QM_CQ_IFIFO_CNT_3 0x5A8220 + +#define mmDMA5_QM_CQ_IFIFO_CNT_4 0x5A8224 + +#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_0 0x5A8228 + +#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_1 0x5A822C + +#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_2 0x5A8230 + +#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_3 0x5A8234 + +#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_4 0x5A8238 + +#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_0 0x5A823C + +#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_1 0x5A8240 + +#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_2 0x5A8244 + +#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_3 0x5A8248 + +#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_4 0x5A824C + +#define mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_0 0x5A8250 + +#define mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_1 0x5A8254 + +#define 
mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_2 0x5A8258 + +#define mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_3 0x5A825C + +#define mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_4 0x5A8260 + +#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_0 0x5A8264 + +#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_1 0x5A8268 + +#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_2 0x5A826C + +#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_3 0x5A8270 + +#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_4 0x5A8274 + +#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_0 0x5A8278 + +#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_1 0x5A827C + +#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 0x5A8280 + +#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_3 0x5A8284 + +#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_4 0x5A8288 + +#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_0 0x5A828C + +#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_1 0x5A8290 + +#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_2 0x5A8294 + +#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_3 0x5A8298 + +#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_4 0x5A829C + +#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_0 0x5A82A0 + +#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_1 0x5A82A4 + +#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_2 0x5A82A8 + +#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_3 0x5A82AC + +#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_4 0x5A82B0 + +#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_0 0x5A82B4 + +#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_1 0x5A82B8 + +#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_2 0x5A82BC + +#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_3 0x5A82C0 + +#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_4 0x5A82C4 + +#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_0 0x5A82C8 + +#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_1 0x5A82CC + +#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_2 0x5A82D0 + +#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_3 0x5A82D4 + +#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_4 0x5A82D8 + +#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5A82E0 + +#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5A82E4 + +#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5A82E8 + +#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5A82EC + +#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5A82F0 + +#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5A82F4 + +#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5A82F8 + +#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5A82FC + +#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x5A8300 + +#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x5A8304 + +#define mmDMA5_QM_CP_FENCE0_RDATA_0 0x5A8308 + +#define mmDMA5_QM_CP_FENCE0_RDATA_1 0x5A830C + +#define mmDMA5_QM_CP_FENCE0_RDATA_2 0x5A8310 + +#define mmDMA5_QM_CP_FENCE0_RDATA_3 0x5A8314 + +#define mmDMA5_QM_CP_FENCE0_RDATA_4 0x5A8318 + +#define mmDMA5_QM_CP_FENCE1_RDATA_0 0x5A831C + +#define mmDMA5_QM_CP_FENCE1_RDATA_1 0x5A8320 + +#define mmDMA5_QM_CP_FENCE1_RDATA_2 0x5A8324 + +#define mmDMA5_QM_CP_FENCE1_RDATA_3 0x5A8328 + +#define mmDMA5_QM_CP_FENCE1_RDATA_4 0x5A832C + +#define mmDMA5_QM_CP_FENCE2_RDATA_0 0x5A8330 + +#define mmDMA5_QM_CP_FENCE2_RDATA_1 0x5A8334 + +#define mmDMA5_QM_CP_FENCE2_RDATA_2 0x5A8338 + +#define mmDMA5_QM_CP_FENCE2_RDATA_3 0x5A833C + +#define mmDMA5_QM_CP_FENCE2_RDATA_4 0x5A8340 + +#define mmDMA5_QM_CP_FENCE3_RDATA_0 0x5A8344 + +#define mmDMA5_QM_CP_FENCE3_RDATA_1 0x5A8348 + +#define mmDMA5_QM_CP_FENCE3_RDATA_2 0x5A834C + +#define mmDMA5_QM_CP_FENCE3_RDATA_3 0x5A8350 + +#define mmDMA5_QM_CP_FENCE3_RDATA_4 0x5A8354 + +#define mmDMA5_QM_CP_FENCE0_CNT_0 0x5A8358 + +#define mmDMA5_QM_CP_FENCE0_CNT_1 0x5A835C + +#define mmDMA5_QM_CP_FENCE0_CNT_2 0x5A8360 + +#define mmDMA5_QM_CP_FENCE0_CNT_3 0x5A8364 + +#define mmDMA5_QM_CP_FENCE0_CNT_4 0x5A8368 + +#define mmDMA5_QM_CP_FENCE1_CNT_0 
0x5A836C + +#define mmDMA5_QM_CP_FENCE1_CNT_1 0x5A8370 + +#define mmDMA5_QM_CP_FENCE1_CNT_2 0x5A8374 + +#define mmDMA5_QM_CP_FENCE1_CNT_3 0x5A8378 + +#define mmDMA5_QM_CP_FENCE1_CNT_4 0x5A837C + +#define mmDMA5_QM_CP_FENCE2_CNT_0 0x5A8380 + +#define mmDMA5_QM_CP_FENCE2_CNT_1 0x5A8384 + +#define mmDMA5_QM_CP_FENCE2_CNT_2 0x5A8388 + +#define mmDMA5_QM_CP_FENCE2_CNT_3 0x5A838C + +#define mmDMA5_QM_CP_FENCE2_CNT_4 0x5A8390 + +#define mmDMA5_QM_CP_FENCE3_CNT_0 0x5A8394 + +#define mmDMA5_QM_CP_FENCE3_CNT_1 0x5A8398 + +#define mmDMA5_QM_CP_FENCE3_CNT_2 0x5A839C + +#define mmDMA5_QM_CP_FENCE3_CNT_3 0x5A83A0 + +#define mmDMA5_QM_CP_FENCE3_CNT_4 0x5A83A4 + +#define mmDMA5_QM_CP_STS_0 0x5A83A8 + +#define mmDMA5_QM_CP_STS_1 0x5A83AC + +#define mmDMA5_QM_CP_STS_2 0x5A83B0 + +#define mmDMA5_QM_CP_STS_3 0x5A83B4 + +#define mmDMA5_QM_CP_STS_4 0x5A83B8 + +#define mmDMA5_QM_CP_CURRENT_INST_LO_0 0x5A83BC + +#define mmDMA5_QM_CP_CURRENT_INST_LO_1 0x5A83C0 + +#define mmDMA5_QM_CP_CURRENT_INST_LO_2 0x5A83C4 + +#define mmDMA5_QM_CP_CURRENT_INST_LO_3 0x5A83C8 + +#define mmDMA5_QM_CP_CURRENT_INST_LO_4 0x5A83CC + +#define mmDMA5_QM_CP_CURRENT_INST_HI_0 0x5A83D0 + +#define mmDMA5_QM_CP_CURRENT_INST_HI_1 0x5A83D4 + +#define mmDMA5_QM_CP_CURRENT_INST_HI_2 0x5A83D8 + +#define mmDMA5_QM_CP_CURRENT_INST_HI_3 0x5A83DC + +#define mmDMA5_QM_CP_CURRENT_INST_HI_4 0x5A83E0 + +#define mmDMA5_QM_CP_BARRIER_CFG_0 0x5A83F4 + +#define mmDMA5_QM_CP_BARRIER_CFG_1 0x5A83F8 + +#define mmDMA5_QM_CP_BARRIER_CFG_2 0x5A83FC + +#define mmDMA5_QM_CP_BARRIER_CFG_3 0x5A8400 + +#define mmDMA5_QM_CP_BARRIER_CFG_4 0x5A8404 + +#define mmDMA5_QM_CP_DBG_0_0 0x5A8408 + +#define mmDMA5_QM_CP_DBG_0_1 0x5A840C + +#define mmDMA5_QM_CP_DBG_0_2 0x5A8410 + +#define mmDMA5_QM_CP_DBG_0_3 0x5A8414 + +#define mmDMA5_QM_CP_DBG_0_4 0x5A8418 + +#define mmDMA5_QM_CP_ARUSER_31_11_0 0x5A841C + +#define mmDMA5_QM_CP_ARUSER_31_11_1 0x5A8420 + +#define mmDMA5_QM_CP_ARUSER_31_11_2 0x5A8424 + +#define mmDMA5_QM_CP_ARUSER_31_11_3 0x5A8428 + +#define mmDMA5_QM_CP_ARUSER_31_11_4 0x5A842C + +#define mmDMA5_QM_CP_AWUSER_31_11_0 0x5A8430 + +#define mmDMA5_QM_CP_AWUSER_31_11_1 0x5A8434 + +#define mmDMA5_QM_CP_AWUSER_31_11_2 0x5A8438 + +#define mmDMA5_QM_CP_AWUSER_31_11_3 0x5A843C + +#define mmDMA5_QM_CP_AWUSER_31_11_4 0x5A8440 + +#define mmDMA5_QM_ARB_CFG_0 0x5A8A00 + +#define mmDMA5_QM_ARB_CHOISE_Q_PUSH 0x5A8A04 + +#define mmDMA5_QM_ARB_WRR_WEIGHT_0 0x5A8A08 + +#define mmDMA5_QM_ARB_WRR_WEIGHT_1 0x5A8A0C + +#define mmDMA5_QM_ARB_WRR_WEIGHT_2 0x5A8A10 + +#define mmDMA5_QM_ARB_WRR_WEIGHT_3 0x5A8A14 + +#define mmDMA5_QM_ARB_CFG_1 0x5A8A18 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_0 0x5A8A20 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_1 0x5A8A24 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_2 0x5A8A28 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_3 0x5A8A2C + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_4 0x5A8A30 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_5 0x5A8A34 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_6 0x5A8A38 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_7 0x5A8A3C + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_8 0x5A8A40 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_9 0x5A8A44 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_10 0x5A8A48 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_11 0x5A8A4C + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_12 0x5A8A50 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_13 0x5A8A54 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_14 0x5A8A58 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_15 0x5A8A5C + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_16 0x5A8A60 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_17 0x5A8A64 + +#define 
mmDMA5_QM_ARB_MST_AVAIL_CRED_18 0x5A8A68 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_19 0x5A8A6C + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_20 0x5A8A70 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_21 0x5A8A74 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_22 0x5A8A78 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_23 0x5A8A7C + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_24 0x5A8A80 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_25 0x5A8A84 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_26 0x5A8A88 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_27 0x5A8A8C + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_28 0x5A8A90 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_29 0x5A8A94 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_30 0x5A8A98 + +#define mmDMA5_QM_ARB_MST_AVAIL_CRED_31 0x5A8A9C + +#define mmDMA5_QM_ARB_MST_CRED_INC 0x5A8AA0 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x5A8AA4 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x5A8AA8 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x5A8AAC + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x5A8AB0 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x5A8AB4 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x5A8AB8 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x5A8ABC + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x5A8AC0 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x5A8AC4 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x5A8AC8 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x5A8ACC + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x5A8AD0 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x5A8AD4 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x5A8AD8 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x5A8ADC + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x5A8AE0 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x5A8AE4 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x5A8AE8 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x5A8AEC + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x5A8AF0 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x5A8AF4 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x5A8AF8 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x5A8AFC + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x5A8B00 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x5A8B04 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x5A8B08 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x5A8B0C + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x5A8B10 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x5A8B14 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x5A8B18 + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x5A8B1C + +#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x5A8B20 + +#define mmDMA5_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x5A8B28 + +#define mmDMA5_QM_ARB_MST_SLAVE_EN 0x5A8B2C + +#define mmDMA5_QM_ARB_MST_QUIET_PER 0x5A8B34 + +#define mmDMA5_QM_ARB_SLV_CHOISE_WDT 0x5A8B38 + +#define mmDMA5_QM_ARB_SLV_ID 0x5A8B3C + +#define mmDMA5_QM_ARB_MSG_MAX_INFLIGHT 0x5A8B44 + +#define mmDMA5_QM_ARB_MSG_AWUSER_31_11 0x5A8B48 + +#define mmDMA5_QM_ARB_MSG_AWUSER_SEC_PROP 0x5A8B4C + +#define mmDMA5_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x5A8B50 + +#define mmDMA5_QM_ARB_BASE_LO 0x5A8B54 + +#define mmDMA5_QM_ARB_BASE_HI 0x5A8B58 + +#define mmDMA5_QM_ARB_STATE_STS 0x5A8B80 + +#define mmDMA5_QM_ARB_CHOISE_FULLNESS_STS 0x5A8B84 + +#define mmDMA5_QM_ARB_MSG_STS 0x5A8B88 + +#define mmDMA5_QM_ARB_SLV_CHOISE_Q_HEAD 0x5A8B8C + +#define mmDMA5_QM_ARB_ERR_CAUSE 0x5A8B9C + +#define mmDMA5_QM_ARB_ERR_MSG_EN 0x5A8BA0 + +#define mmDMA5_QM_ARB_ERR_STS_DRP 0x5A8BA8 + +#define mmDMA5_QM_ARB_MST_CRED_STS_0 0x5A8BB0 + +#define 
mmDMA5_QM_ARB_MST_CRED_STS_1 0x5A8BB4 + +#define mmDMA5_QM_ARB_MST_CRED_STS_2 0x5A8BB8 + +#define mmDMA5_QM_ARB_MST_CRED_STS_3 0x5A8BBC + +#define mmDMA5_QM_ARB_MST_CRED_STS_4 0x5A8BC0 + +#define mmDMA5_QM_ARB_MST_CRED_STS_5 0x5A8BC4 + +#define mmDMA5_QM_ARB_MST_CRED_STS_6 0x5A8BC8 + +#define mmDMA5_QM_ARB_MST_CRED_STS_7 0x5A8BCC + +#define mmDMA5_QM_ARB_MST_CRED_STS_8 0x5A8BD0 + +#define mmDMA5_QM_ARB_MST_CRED_STS_9 0x5A8BD4 + +#define mmDMA5_QM_ARB_MST_CRED_STS_10 0x5A8BD8 + +#define mmDMA5_QM_ARB_MST_CRED_STS_11 0x5A8BDC + +#define mmDMA5_QM_ARB_MST_CRED_STS_12 0x5A8BE0 + +#define mmDMA5_QM_ARB_MST_CRED_STS_13 0x5A8BE4 + +#define mmDMA5_QM_ARB_MST_CRED_STS_14 0x5A8BE8 + +#define mmDMA5_QM_ARB_MST_CRED_STS_15 0x5A8BEC + +#define mmDMA5_QM_ARB_MST_CRED_STS_16 0x5A8BF0 + +#define mmDMA5_QM_ARB_MST_CRED_STS_17 0x5A8BF4 + +#define mmDMA5_QM_ARB_MST_CRED_STS_18 0x5A8BF8 + +#define mmDMA5_QM_ARB_MST_CRED_STS_19 0x5A8BFC + +#define mmDMA5_QM_ARB_MST_CRED_STS_20 0x5A8C00 + +#define mmDMA5_QM_ARB_MST_CRED_STS_21 0x5A8C04 + +#define mmDMA5_QM_ARB_MST_CRED_STS_22 0x5A8C08 + +#define mmDMA5_QM_ARB_MST_CRED_STS_23 0x5A8C0C + +#define mmDMA5_QM_ARB_MST_CRED_STS_24 0x5A8C10 + +#define mmDMA5_QM_ARB_MST_CRED_STS_25 0x5A8C14 + +#define mmDMA5_QM_ARB_MST_CRED_STS_26 0x5A8C18 + +#define mmDMA5_QM_ARB_MST_CRED_STS_27 0x5A8C1C + +#define mmDMA5_QM_ARB_MST_CRED_STS_28 0x5A8C20 + +#define mmDMA5_QM_ARB_MST_CRED_STS_29 0x5A8C24 + +#define mmDMA5_QM_ARB_MST_CRED_STS_30 0x5A8C28 + +#define mmDMA5_QM_ARB_MST_CRED_STS_31 0x5A8C2C + +#define mmDMA5_QM_CGM_CFG 0x5A8C70 + +#define mmDMA5_QM_CGM_STS 0x5A8C74 + +#define mmDMA5_QM_CGM_CFG1 0x5A8C78 + +#define mmDMA5_QM_LOCAL_RANGE_BASE 0x5A8C80 + +#define mmDMA5_QM_LOCAL_RANGE_SIZE 0x5A8C84 + +#define mmDMA5_QM_CSMR_STRICT_PRIO_CFG 0x5A8C90 + +#define mmDMA5_QM_HBW_RD_RATE_LIM_CFG_1 0x5A8C94 + +#define mmDMA5_QM_LBW_WR_RATE_LIM_CFG_0 0x5A8C98 + +#define mmDMA5_QM_LBW_WR_RATE_LIM_CFG_1 0x5A8C9C + +#define mmDMA5_QM_HBW_RD_RATE_LIM_CFG_0 0x5A8CA0 + +#define mmDMA5_QM_GLBL_AXCACHE 0x5A8CA4 + +#define mmDMA5_QM_IND_GW_APB_CFG 0x5A8CB0 + +#define mmDMA5_QM_IND_GW_APB_WDATA 0x5A8CB4 + +#define mmDMA5_QM_IND_GW_APB_RDATA 0x5A8CB8 + +#define mmDMA5_QM_IND_GW_APB_STATUS 0x5A8CBC + +#define mmDMA5_QM_GLBL_ERR_ADDR_LO 0x5A8CD0 + +#define mmDMA5_QM_GLBL_ERR_ADDR_HI 0x5A8CD4 + +#define mmDMA5_QM_GLBL_ERR_WDATA 0x5A8CD8 + +#define mmDMA5_QM_GLBL_MEM_INIT_BUSY 0x5A8D00 + +#endif /* ASIC_REG_DMA5_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h new file mode 100644 index 000000000..4962c13e2 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA6_CORE_REGS_H_
+#define ASIC_REG_DMA6_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA6_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA6_CORE_CFG_0 0x5C0000
+
+#define mmDMA6_CORE_CFG_1 0x5C0004
+
+#define mmDMA6_CORE_LBW_MAX_OUTSTAND 0x5C0008
+
+#define mmDMA6_CORE_SRC_BASE_LO 0x5C0014
+
+#define mmDMA6_CORE_SRC_BASE_HI 0x5C0018
+
+#define mmDMA6_CORE_DST_BASE_LO 0x5C001C
+
+#define mmDMA6_CORE_DST_BASE_HI 0x5C0020
+
+#define mmDMA6_CORE_SRC_TSIZE_1 0x5C002C
+
+#define mmDMA6_CORE_SRC_STRIDE_1 0x5C0030
+
+#define mmDMA6_CORE_SRC_TSIZE_2 0x5C0034
+
+#define mmDMA6_CORE_SRC_STRIDE_2 0x5C0038
+
+#define mmDMA6_CORE_SRC_TSIZE_3 0x5C003C
+
+#define mmDMA6_CORE_SRC_STRIDE_3 0x5C0040
+
+#define mmDMA6_CORE_SRC_TSIZE_4 0x5C0044
+
+#define mmDMA6_CORE_SRC_STRIDE_4 0x5C0048
+
+#define mmDMA6_CORE_SRC_TSIZE_0 0x5C004C
+
+#define mmDMA6_CORE_DST_TSIZE_1 0x5C0054
+
+#define mmDMA6_CORE_DST_STRIDE_1 0x5C0058
+
+#define mmDMA6_CORE_DST_TSIZE_2 0x5C005C
+
+#define mmDMA6_CORE_DST_STRIDE_2 0x5C0060
+
+#define mmDMA6_CORE_DST_TSIZE_3 0x5C0064
+
+#define mmDMA6_CORE_DST_STRIDE_3 0x5C0068
+
+#define mmDMA6_CORE_DST_TSIZE_4 0x5C006C
+
+#define mmDMA6_CORE_DST_STRIDE_4 0x5C0070
+
+#define mmDMA6_CORE_DST_TSIZE_0 0x5C0074
+
+#define mmDMA6_CORE_COMMIT 0x5C0078
+
+#define mmDMA6_CORE_WR_COMP_WDATA 0x5C007C
+
+#define mmDMA6_CORE_WR_COMP_ADDR_LO 0x5C0080
+
+#define mmDMA6_CORE_WR_COMP_ADDR_HI 0x5C0084
+
+#define mmDMA6_CORE_WR_COMP_AWUSER_31_11 0x5C0088
+
+#define mmDMA6_CORE_TE_NUMROWS 0x5C0094
+
+#define mmDMA6_CORE_PROT 0x5C00B8
+
+#define mmDMA6_CORE_SECURE_PROPS 0x5C00F0
+
+#define mmDMA6_CORE_NON_SECURE_PROPS 0x5C00F4
+
+#define mmDMA6_CORE_RD_MAX_OUTSTAND 0x5C0100
+
+#define mmDMA6_CORE_RD_MAX_SIZE 0x5C0104
+
+#define mmDMA6_CORE_RD_ARCACHE 0x5C0108
+
+#define mmDMA6_CORE_RD_ARUSER_31_11 0x5C0110
+
+#define mmDMA6_CORE_RD_INFLIGHTS 0x5C0114
+
+#define mmDMA6_CORE_WR_MAX_OUTSTAND 0x5C0120
+
+#define mmDMA6_CORE_WR_MAX_AWID 0x5C0124
+
+#define mmDMA6_CORE_WR_AWCACHE 0x5C0128
+
+#define mmDMA6_CORE_WR_AWUSER_31_11 0x5C0130
+
+#define mmDMA6_CORE_WR_INFLIGHTS 0x5C0134
+
+#define mmDMA6_CORE_RD_RATE_LIM_CFG_0 0x5C0150
+
+#define mmDMA6_CORE_RD_RATE_LIM_CFG_1 0x5C0154
+
+#define mmDMA6_CORE_WR_RATE_LIM_CFG_0 0x5C0158
+
+#define mmDMA6_CORE_WR_RATE_LIM_CFG_1 0x5C015C
+
+#define mmDMA6_CORE_ERR_CFG 0x5C0160
+
+#define mmDMA6_CORE_ERR_CAUSE 0x5C0164
+
+#define mmDMA6_CORE_ERRMSG_ADDR_LO 0x5C0170
+
+#define mmDMA6_CORE_ERRMSG_ADDR_HI 0x5C0174
+
+#define mmDMA6_CORE_ERRMSG_WDATA 0x5C0178
+
+#define mmDMA6_CORE_STS0 0x5C0190
+
+#define mmDMA6_CORE_STS1 0x5C0194
+
+#define mmDMA6_CORE_RD_DBGMEM_ADD 0x5C0200
+
+#define mmDMA6_CORE_RD_DBGMEM_DATA_WR 0x5C0204
+
+#define mmDMA6_CORE_RD_DBGMEM_DATA_RD 0x5C0208
+
+#define mmDMA6_CORE_RD_DBGMEM_CTRL 0x5C020C
+
+#define mmDMA6_CORE_RD_DBGMEM_RC 0x5C0210
+
+#define mmDMA6_CORE_DBG_HBW_AXI_AR_CNT 0x5C0220
+
+#define mmDMA6_CORE_DBG_HBW_AXI_AW_CNT 0x5C0224
+
+#define mmDMA6_CORE_DBG_LBW_AXI_AW_CNT 0x5C0228
+
+#define mmDMA6_CORE_DBG_DESC_CNT 0x5C022C
+
+#define mmDMA6_CORE_DBG_STS 0x5C0230
+
+#define mmDMA6_CORE_DBG_RD_DESC_ID 0x5C0234
+
+#define mmDMA6_CORE_DBG_WR_DESC_ID 0x5C0238
+
+#endif /* ASIC_REG_DMA6_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h
b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h new file mode 100644 index 000000000..af87adb94 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA6_QM_REGS_H_ +#define ASIC_REG_DMA6_QM_REGS_H_ + +/* + ***************************************** + * DMA6_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA6_QM_GLBL_CFG0 0x5C8000 + +#define mmDMA6_QM_GLBL_CFG1 0x5C8004 + +#define mmDMA6_QM_GLBL_PROT 0x5C8008 + +#define mmDMA6_QM_GLBL_ERR_CFG 0x5C800C + +#define mmDMA6_QM_GLBL_SECURE_PROPS_0 0x5C8010 + +#define mmDMA6_QM_GLBL_SECURE_PROPS_1 0x5C8014 + +#define mmDMA6_QM_GLBL_SECURE_PROPS_2 0x5C8018 + +#define mmDMA6_QM_GLBL_SECURE_PROPS_3 0x5C801C + +#define mmDMA6_QM_GLBL_SECURE_PROPS_4 0x5C8020 + +#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_0 0x5C8024 + +#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_1 0x5C8028 + +#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_2 0x5C802C + +#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_3 0x5C8030 + +#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_4 0x5C8034 + +#define mmDMA6_QM_GLBL_STS0 0x5C8038 + +#define mmDMA6_QM_GLBL_STS1_0 0x5C8040 + +#define mmDMA6_QM_GLBL_STS1_1 0x5C8044 + +#define mmDMA6_QM_GLBL_STS1_2 0x5C8048 + +#define mmDMA6_QM_GLBL_STS1_3 0x5C804C + +#define mmDMA6_QM_GLBL_STS1_4 0x5C8050 + +#define mmDMA6_QM_GLBL_MSG_EN_0 0x5C8054 + +#define mmDMA6_QM_GLBL_MSG_EN_1 0x5C8058 + +#define mmDMA6_QM_GLBL_MSG_EN_2 0x5C805C + +#define mmDMA6_QM_GLBL_MSG_EN_3 0x5C8060 + +#define mmDMA6_QM_GLBL_MSG_EN_4 0x5C8068 + +#define mmDMA6_QM_PQ_BASE_LO_0 0x5C8070 + +#define mmDMA6_QM_PQ_BASE_LO_1 0x5C8074 + +#define mmDMA6_QM_PQ_BASE_LO_2 0x5C8078 + +#define mmDMA6_QM_PQ_BASE_LO_3 0x5C807C + +#define mmDMA6_QM_PQ_BASE_HI_0 0x5C8080 + +#define mmDMA6_QM_PQ_BASE_HI_1 0x5C8084 + +#define mmDMA6_QM_PQ_BASE_HI_2 0x5C8088 + +#define mmDMA6_QM_PQ_BASE_HI_3 0x5C808C + +#define mmDMA6_QM_PQ_SIZE_0 0x5C8090 + +#define mmDMA6_QM_PQ_SIZE_1 0x5C8094 + +#define mmDMA6_QM_PQ_SIZE_2 0x5C8098 + +#define mmDMA6_QM_PQ_SIZE_3 0x5C809C + +#define mmDMA6_QM_PQ_PI_0 0x5C80A0 + +#define mmDMA6_QM_PQ_PI_1 0x5C80A4 + +#define mmDMA6_QM_PQ_PI_2 0x5C80A8 + +#define mmDMA6_QM_PQ_PI_3 0x5C80AC + +#define mmDMA6_QM_PQ_CI_0 0x5C80B0 + +#define mmDMA6_QM_PQ_CI_1 0x5C80B4 + +#define mmDMA6_QM_PQ_CI_2 0x5C80B8 + +#define mmDMA6_QM_PQ_CI_3 0x5C80BC + +#define mmDMA6_QM_PQ_CFG0_0 0x5C80C0 + +#define mmDMA6_QM_PQ_CFG0_1 0x5C80C4 + +#define mmDMA6_QM_PQ_CFG0_2 0x5C80C8 + +#define mmDMA6_QM_PQ_CFG0_3 0x5C80CC + +#define mmDMA6_QM_PQ_CFG1_0 0x5C80D0 + +#define mmDMA6_QM_PQ_CFG1_1 0x5C80D4 + +#define mmDMA6_QM_PQ_CFG1_2 0x5C80D8 + +#define mmDMA6_QM_PQ_CFG1_3 0x5C80DC + +#define mmDMA6_QM_PQ_ARUSER_31_11_0 0x5C80E0 + +#define mmDMA6_QM_PQ_ARUSER_31_11_1 0x5C80E4 + +#define mmDMA6_QM_PQ_ARUSER_31_11_2 0x5C80E8 + +#define mmDMA6_QM_PQ_ARUSER_31_11_3 0x5C80EC + +#define mmDMA6_QM_PQ_STS0_0 0x5C80F0 + +#define mmDMA6_QM_PQ_STS0_1 0x5C80F4 + +#define mmDMA6_QM_PQ_STS0_2 0x5C80F8 + +#define mmDMA6_QM_PQ_STS0_3 0x5C80FC + +#define mmDMA6_QM_PQ_STS1_0 0x5C8100 + +#define mmDMA6_QM_PQ_STS1_1 0x5C8104 + +#define mmDMA6_QM_PQ_STS1_2 0x5C8108 + +#define mmDMA6_QM_PQ_STS1_3 0x5C810C + +#define mmDMA6_QM_CQ_CFG0_0 0x5C8110 + +#define mmDMA6_QM_CQ_CFG0_1 0x5C8114 + 
+#define mmDMA6_QM_CQ_CFG0_2 0x5C8118 + +#define mmDMA6_QM_CQ_CFG0_3 0x5C811C + +#define mmDMA6_QM_CQ_CFG0_4 0x5C8120 + +#define mmDMA6_QM_CQ_CFG1_0 0x5C8124 + +#define mmDMA6_QM_CQ_CFG1_1 0x5C8128 + +#define mmDMA6_QM_CQ_CFG1_2 0x5C812C + +#define mmDMA6_QM_CQ_CFG1_3 0x5C8130 + +#define mmDMA6_QM_CQ_CFG1_4 0x5C8134 + +#define mmDMA6_QM_CQ_ARUSER_31_11_0 0x5C8138 + +#define mmDMA6_QM_CQ_ARUSER_31_11_1 0x5C813C + +#define mmDMA6_QM_CQ_ARUSER_31_11_2 0x5C8140 + +#define mmDMA6_QM_CQ_ARUSER_31_11_3 0x5C8144 + +#define mmDMA6_QM_CQ_ARUSER_31_11_4 0x5C8148 + +#define mmDMA6_QM_CQ_STS0_0 0x5C814C + +#define mmDMA6_QM_CQ_STS0_1 0x5C8150 + +#define mmDMA6_QM_CQ_STS0_2 0x5C8154 + +#define mmDMA6_QM_CQ_STS0_3 0x5C8158 + +#define mmDMA6_QM_CQ_STS0_4 0x5C815C + +#define mmDMA6_QM_CQ_STS1_0 0x5C8160 + +#define mmDMA6_QM_CQ_STS1_1 0x5C8164 + +#define mmDMA6_QM_CQ_STS1_2 0x5C8168 + +#define mmDMA6_QM_CQ_STS1_3 0x5C816C + +#define mmDMA6_QM_CQ_STS1_4 0x5C8170 + +#define mmDMA6_QM_CQ_PTR_LO_0 0x5C8174 + +#define mmDMA6_QM_CQ_PTR_HI_0 0x5C8178 + +#define mmDMA6_QM_CQ_TSIZE_0 0x5C817C + +#define mmDMA6_QM_CQ_CTL_0 0x5C8180 + +#define mmDMA6_QM_CQ_PTR_LO_1 0x5C8184 + +#define mmDMA6_QM_CQ_PTR_HI_1 0x5C8188 + +#define mmDMA6_QM_CQ_TSIZE_1 0x5C818C + +#define mmDMA6_QM_CQ_CTL_1 0x5C8190 + +#define mmDMA6_QM_CQ_PTR_LO_2 0x5C8194 + +#define mmDMA6_QM_CQ_PTR_HI_2 0x5C8198 + +#define mmDMA6_QM_CQ_TSIZE_2 0x5C819C + +#define mmDMA6_QM_CQ_CTL_2 0x5C81A0 + +#define mmDMA6_QM_CQ_PTR_LO_3 0x5C81A4 + +#define mmDMA6_QM_CQ_PTR_HI_3 0x5C81A8 + +#define mmDMA6_QM_CQ_TSIZE_3 0x5C81AC + +#define mmDMA6_QM_CQ_CTL_3 0x5C81B0 + +#define mmDMA6_QM_CQ_PTR_LO_4 0x5C81B4 + +#define mmDMA6_QM_CQ_PTR_HI_4 0x5C81B8 + +#define mmDMA6_QM_CQ_TSIZE_4 0x5C81BC + +#define mmDMA6_QM_CQ_CTL_4 0x5C81C0 + +#define mmDMA6_QM_CQ_PTR_LO_STS_0 0x5C81C4 + +#define mmDMA6_QM_CQ_PTR_LO_STS_1 0x5C81C8 + +#define mmDMA6_QM_CQ_PTR_LO_STS_2 0x5C81CC + +#define mmDMA6_QM_CQ_PTR_LO_STS_3 0x5C81D0 + +#define mmDMA6_QM_CQ_PTR_LO_STS_4 0x5C81D4 + +#define mmDMA6_QM_CQ_PTR_HI_STS_0 0x5C81D8 + +#define mmDMA6_QM_CQ_PTR_HI_STS_1 0x5C81DC + +#define mmDMA6_QM_CQ_PTR_HI_STS_2 0x5C81E0 + +#define mmDMA6_QM_CQ_PTR_HI_STS_3 0x5C81E4 + +#define mmDMA6_QM_CQ_PTR_HI_STS_4 0x5C81E8 + +#define mmDMA6_QM_CQ_TSIZE_STS_0 0x5C81EC + +#define mmDMA6_QM_CQ_TSIZE_STS_1 0x5C81F0 + +#define mmDMA6_QM_CQ_TSIZE_STS_2 0x5C81F4 + +#define mmDMA6_QM_CQ_TSIZE_STS_3 0x5C81F8 + +#define mmDMA6_QM_CQ_TSIZE_STS_4 0x5C81FC + +#define mmDMA6_QM_CQ_CTL_STS_0 0x5C8200 + +#define mmDMA6_QM_CQ_CTL_STS_1 0x5C8204 + +#define mmDMA6_QM_CQ_CTL_STS_2 0x5C8208 + +#define mmDMA6_QM_CQ_CTL_STS_3 0x5C820C + +#define mmDMA6_QM_CQ_CTL_STS_4 0x5C8210 + +#define mmDMA6_QM_CQ_IFIFO_CNT_0 0x5C8214 + +#define mmDMA6_QM_CQ_IFIFO_CNT_1 0x5C8218 + +#define mmDMA6_QM_CQ_IFIFO_CNT_2 0x5C821C + +#define mmDMA6_QM_CQ_IFIFO_CNT_3 0x5C8220 + +#define mmDMA6_QM_CQ_IFIFO_CNT_4 0x5C8224 + +#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_0 0x5C8228 + +#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_1 0x5C822C + +#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_2 0x5C8230 + +#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_3 0x5C8234 + +#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_4 0x5C8238 + +#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_0 0x5C823C + +#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_1 0x5C8240 + +#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_2 0x5C8244 + +#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_3 0x5C8248 + +#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_4 0x5C824C + +#define mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_0 0x5C8250 + +#define mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_1 0x5C8254 + +#define 
mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_2 0x5C8258 + +#define mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_3 0x5C825C + +#define mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_4 0x5C8260 + +#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_0 0x5C8264 + +#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_1 0x5C8268 + +#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_2 0x5C826C + +#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_3 0x5C8270 + +#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_4 0x5C8274 + +#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_0 0x5C8278 + +#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_1 0x5C827C + +#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 0x5C8280 + +#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_3 0x5C8284 + +#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_4 0x5C8288 + +#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_0 0x5C828C + +#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_1 0x5C8290 + +#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_2 0x5C8294 + +#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_3 0x5C8298 + +#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_4 0x5C829C + +#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_0 0x5C82A0 + +#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_1 0x5C82A4 + +#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_2 0x5C82A8 + +#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_3 0x5C82AC + +#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_4 0x5C82B0 + +#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_0 0x5C82B4 + +#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_1 0x5C82B8 + +#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_2 0x5C82BC + +#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_3 0x5C82C0 + +#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_4 0x5C82C4 + +#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_0 0x5C82C8 + +#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_1 0x5C82CC + +#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_2 0x5C82D0 + +#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_3 0x5C82D4 + +#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_4 0x5C82D8 + +#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5C82E0 + +#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5C82E4 + +#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5C82E8 + +#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5C82EC + +#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5C82F0 + +#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5C82F4 + +#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5C82F8 + +#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5C82FC + +#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x5C8300 + +#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x5C8304 + +#define mmDMA6_QM_CP_FENCE0_RDATA_0 0x5C8308 + +#define mmDMA6_QM_CP_FENCE0_RDATA_1 0x5C830C + +#define mmDMA6_QM_CP_FENCE0_RDATA_2 0x5C8310 + +#define mmDMA6_QM_CP_FENCE0_RDATA_3 0x5C8314 + +#define mmDMA6_QM_CP_FENCE0_RDATA_4 0x5C8318 + +#define mmDMA6_QM_CP_FENCE1_RDATA_0 0x5C831C + +#define mmDMA6_QM_CP_FENCE1_RDATA_1 0x5C8320 + +#define mmDMA6_QM_CP_FENCE1_RDATA_2 0x5C8324 + +#define mmDMA6_QM_CP_FENCE1_RDATA_3 0x5C8328 + +#define mmDMA6_QM_CP_FENCE1_RDATA_4 0x5C832C + +#define mmDMA6_QM_CP_FENCE2_RDATA_0 0x5C8330 + +#define mmDMA6_QM_CP_FENCE2_RDATA_1 0x5C8334 + +#define mmDMA6_QM_CP_FENCE2_RDATA_2 0x5C8338 + +#define mmDMA6_QM_CP_FENCE2_RDATA_3 0x5C833C + +#define mmDMA6_QM_CP_FENCE2_RDATA_4 0x5C8340 + +#define mmDMA6_QM_CP_FENCE3_RDATA_0 0x5C8344 + +#define mmDMA6_QM_CP_FENCE3_RDATA_1 0x5C8348 + +#define mmDMA6_QM_CP_FENCE3_RDATA_2 0x5C834C + +#define mmDMA6_QM_CP_FENCE3_RDATA_3 0x5C8350 + +#define mmDMA6_QM_CP_FENCE3_RDATA_4 0x5C8354 + +#define mmDMA6_QM_CP_FENCE0_CNT_0 0x5C8358 + +#define mmDMA6_QM_CP_FENCE0_CNT_1 0x5C835C + +#define mmDMA6_QM_CP_FENCE0_CNT_2 0x5C8360 + +#define mmDMA6_QM_CP_FENCE0_CNT_3 0x5C8364 + +#define mmDMA6_QM_CP_FENCE0_CNT_4 0x5C8368 + +#define mmDMA6_QM_CP_FENCE1_CNT_0 
0x5C836C + +#define mmDMA6_QM_CP_FENCE1_CNT_1 0x5C8370 + +#define mmDMA6_QM_CP_FENCE1_CNT_2 0x5C8374 + +#define mmDMA6_QM_CP_FENCE1_CNT_3 0x5C8378 + +#define mmDMA6_QM_CP_FENCE1_CNT_4 0x5C837C + +#define mmDMA6_QM_CP_FENCE2_CNT_0 0x5C8380 + +#define mmDMA6_QM_CP_FENCE2_CNT_1 0x5C8384 + +#define mmDMA6_QM_CP_FENCE2_CNT_2 0x5C8388 + +#define mmDMA6_QM_CP_FENCE2_CNT_3 0x5C838C + +#define mmDMA6_QM_CP_FENCE2_CNT_4 0x5C8390 + +#define mmDMA6_QM_CP_FENCE3_CNT_0 0x5C8394 + +#define mmDMA6_QM_CP_FENCE3_CNT_1 0x5C8398 + +#define mmDMA6_QM_CP_FENCE3_CNT_2 0x5C839C + +#define mmDMA6_QM_CP_FENCE3_CNT_3 0x5C83A0 + +#define mmDMA6_QM_CP_FENCE3_CNT_4 0x5C83A4 + +#define mmDMA6_QM_CP_STS_0 0x5C83A8 + +#define mmDMA6_QM_CP_STS_1 0x5C83AC + +#define mmDMA6_QM_CP_STS_2 0x5C83B0 + +#define mmDMA6_QM_CP_STS_3 0x5C83B4 + +#define mmDMA6_QM_CP_STS_4 0x5C83B8 + +#define mmDMA6_QM_CP_CURRENT_INST_LO_0 0x5C83BC + +#define mmDMA6_QM_CP_CURRENT_INST_LO_1 0x5C83C0 + +#define mmDMA6_QM_CP_CURRENT_INST_LO_2 0x5C83C4 + +#define mmDMA6_QM_CP_CURRENT_INST_LO_3 0x5C83C8 + +#define mmDMA6_QM_CP_CURRENT_INST_LO_4 0x5C83CC + +#define mmDMA6_QM_CP_CURRENT_INST_HI_0 0x5C83D0 + +#define mmDMA6_QM_CP_CURRENT_INST_HI_1 0x5C83D4 + +#define mmDMA6_QM_CP_CURRENT_INST_HI_2 0x5C83D8 + +#define mmDMA6_QM_CP_CURRENT_INST_HI_3 0x5C83DC + +#define mmDMA6_QM_CP_CURRENT_INST_HI_4 0x5C83E0 + +#define mmDMA6_QM_CP_BARRIER_CFG_0 0x5C83F4 + +#define mmDMA6_QM_CP_BARRIER_CFG_1 0x5C83F8 + +#define mmDMA6_QM_CP_BARRIER_CFG_2 0x5C83FC + +#define mmDMA6_QM_CP_BARRIER_CFG_3 0x5C8400 + +#define mmDMA6_QM_CP_BARRIER_CFG_4 0x5C8404 + +#define mmDMA6_QM_CP_DBG_0_0 0x5C8408 + +#define mmDMA6_QM_CP_DBG_0_1 0x5C840C + +#define mmDMA6_QM_CP_DBG_0_2 0x5C8410 + +#define mmDMA6_QM_CP_DBG_0_3 0x5C8414 + +#define mmDMA6_QM_CP_DBG_0_4 0x5C8418 + +#define mmDMA6_QM_CP_ARUSER_31_11_0 0x5C841C + +#define mmDMA6_QM_CP_ARUSER_31_11_1 0x5C8420 + +#define mmDMA6_QM_CP_ARUSER_31_11_2 0x5C8424 + +#define mmDMA6_QM_CP_ARUSER_31_11_3 0x5C8428 + +#define mmDMA6_QM_CP_ARUSER_31_11_4 0x5C842C + +#define mmDMA6_QM_CP_AWUSER_31_11_0 0x5C8430 + +#define mmDMA6_QM_CP_AWUSER_31_11_1 0x5C8434 + +#define mmDMA6_QM_CP_AWUSER_31_11_2 0x5C8438 + +#define mmDMA6_QM_CP_AWUSER_31_11_3 0x5C843C + +#define mmDMA6_QM_CP_AWUSER_31_11_4 0x5C8440 + +#define mmDMA6_QM_ARB_CFG_0 0x5C8A00 + +#define mmDMA6_QM_ARB_CHOISE_Q_PUSH 0x5C8A04 + +#define mmDMA6_QM_ARB_WRR_WEIGHT_0 0x5C8A08 + +#define mmDMA6_QM_ARB_WRR_WEIGHT_1 0x5C8A0C + +#define mmDMA6_QM_ARB_WRR_WEIGHT_2 0x5C8A10 + +#define mmDMA6_QM_ARB_WRR_WEIGHT_3 0x5C8A14 + +#define mmDMA6_QM_ARB_CFG_1 0x5C8A18 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_0 0x5C8A20 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_1 0x5C8A24 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_2 0x5C8A28 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_3 0x5C8A2C + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_4 0x5C8A30 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_5 0x5C8A34 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_6 0x5C8A38 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_7 0x5C8A3C + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_8 0x5C8A40 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_9 0x5C8A44 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_10 0x5C8A48 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_11 0x5C8A4C + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_12 0x5C8A50 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_13 0x5C8A54 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_14 0x5C8A58 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_15 0x5C8A5C + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_16 0x5C8A60 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_17 0x5C8A64 + +#define 
mmDMA6_QM_ARB_MST_AVAIL_CRED_18 0x5C8A68 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_19 0x5C8A6C + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_20 0x5C8A70 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_21 0x5C8A74 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_22 0x5C8A78 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_23 0x5C8A7C + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_24 0x5C8A80 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_25 0x5C8A84 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_26 0x5C8A88 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_27 0x5C8A8C + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_28 0x5C8A90 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_29 0x5C8A94 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_30 0x5C8A98 + +#define mmDMA6_QM_ARB_MST_AVAIL_CRED_31 0x5C8A9C + +#define mmDMA6_QM_ARB_MST_CRED_INC 0x5C8AA0 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x5C8AA4 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x5C8AA8 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x5C8AAC + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x5C8AB0 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x5C8AB4 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x5C8AB8 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x5C8ABC + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x5C8AC0 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x5C8AC4 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x5C8AC8 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x5C8ACC + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x5C8AD0 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x5C8AD4 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x5C8AD8 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x5C8ADC + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x5C8AE0 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x5C8AE4 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x5C8AE8 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x5C8AEC + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x5C8AF0 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x5C8AF4 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x5C8AF8 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x5C8AFC + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x5C8B00 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x5C8B04 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x5C8B08 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x5C8B0C + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x5C8B10 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x5C8B14 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x5C8B18 + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x5C8B1C + +#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x5C8B20 + +#define mmDMA6_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x5C8B28 + +#define mmDMA6_QM_ARB_MST_SLAVE_EN 0x5C8B2C + +#define mmDMA6_QM_ARB_MST_QUIET_PER 0x5C8B34 + +#define mmDMA6_QM_ARB_SLV_CHOISE_WDT 0x5C8B38 + +#define mmDMA6_QM_ARB_SLV_ID 0x5C8B3C + +#define mmDMA6_QM_ARB_MSG_MAX_INFLIGHT 0x5C8B44 + +#define mmDMA6_QM_ARB_MSG_AWUSER_31_11 0x5C8B48 + +#define mmDMA6_QM_ARB_MSG_AWUSER_SEC_PROP 0x5C8B4C + +#define mmDMA6_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x5C8B50 + +#define mmDMA6_QM_ARB_BASE_LO 0x5C8B54 + +#define mmDMA6_QM_ARB_BASE_HI 0x5C8B58 + +#define mmDMA6_QM_ARB_STATE_STS 0x5C8B80 + +#define mmDMA6_QM_ARB_CHOISE_FULLNESS_STS 0x5C8B84 + +#define mmDMA6_QM_ARB_MSG_STS 0x5C8B88 + +#define mmDMA6_QM_ARB_SLV_CHOISE_Q_HEAD 0x5C8B8C + +#define mmDMA6_QM_ARB_ERR_CAUSE 0x5C8B9C + +#define mmDMA6_QM_ARB_ERR_MSG_EN 0x5C8BA0 + +#define mmDMA6_QM_ARB_ERR_STS_DRP 0x5C8BA8 + +#define mmDMA6_QM_ARB_MST_CRED_STS_0 0x5C8BB0 + +#define 
mmDMA6_QM_ARB_MST_CRED_STS_1 0x5C8BB4 + +#define mmDMA6_QM_ARB_MST_CRED_STS_2 0x5C8BB8 + +#define mmDMA6_QM_ARB_MST_CRED_STS_3 0x5C8BBC + +#define mmDMA6_QM_ARB_MST_CRED_STS_4 0x5C8BC0 + +#define mmDMA6_QM_ARB_MST_CRED_STS_5 0x5C8BC4 + +#define mmDMA6_QM_ARB_MST_CRED_STS_6 0x5C8BC8 + +#define mmDMA6_QM_ARB_MST_CRED_STS_7 0x5C8BCC + +#define mmDMA6_QM_ARB_MST_CRED_STS_8 0x5C8BD0 + +#define mmDMA6_QM_ARB_MST_CRED_STS_9 0x5C8BD4 + +#define mmDMA6_QM_ARB_MST_CRED_STS_10 0x5C8BD8 + +#define mmDMA6_QM_ARB_MST_CRED_STS_11 0x5C8BDC + +#define mmDMA6_QM_ARB_MST_CRED_STS_12 0x5C8BE0 + +#define mmDMA6_QM_ARB_MST_CRED_STS_13 0x5C8BE4 + +#define mmDMA6_QM_ARB_MST_CRED_STS_14 0x5C8BE8 + +#define mmDMA6_QM_ARB_MST_CRED_STS_15 0x5C8BEC + +#define mmDMA6_QM_ARB_MST_CRED_STS_16 0x5C8BF0 + +#define mmDMA6_QM_ARB_MST_CRED_STS_17 0x5C8BF4 + +#define mmDMA6_QM_ARB_MST_CRED_STS_18 0x5C8BF8 + +#define mmDMA6_QM_ARB_MST_CRED_STS_19 0x5C8BFC + +#define mmDMA6_QM_ARB_MST_CRED_STS_20 0x5C8C00 + +#define mmDMA6_QM_ARB_MST_CRED_STS_21 0x5C8C04 + +#define mmDMA6_QM_ARB_MST_CRED_STS_22 0x5C8C08 + +#define mmDMA6_QM_ARB_MST_CRED_STS_23 0x5C8C0C + +#define mmDMA6_QM_ARB_MST_CRED_STS_24 0x5C8C10 + +#define mmDMA6_QM_ARB_MST_CRED_STS_25 0x5C8C14 + +#define mmDMA6_QM_ARB_MST_CRED_STS_26 0x5C8C18 + +#define mmDMA6_QM_ARB_MST_CRED_STS_27 0x5C8C1C + +#define mmDMA6_QM_ARB_MST_CRED_STS_28 0x5C8C20 + +#define mmDMA6_QM_ARB_MST_CRED_STS_29 0x5C8C24 + +#define mmDMA6_QM_ARB_MST_CRED_STS_30 0x5C8C28 + +#define mmDMA6_QM_ARB_MST_CRED_STS_31 0x5C8C2C + +#define mmDMA6_QM_CGM_CFG 0x5C8C70 + +#define mmDMA6_QM_CGM_STS 0x5C8C74 + +#define mmDMA6_QM_CGM_CFG1 0x5C8C78 + +#define mmDMA6_QM_LOCAL_RANGE_BASE 0x5C8C80 + +#define mmDMA6_QM_LOCAL_RANGE_SIZE 0x5C8C84 + +#define mmDMA6_QM_CSMR_STRICT_PRIO_CFG 0x5C8C90 + +#define mmDMA6_QM_HBW_RD_RATE_LIM_CFG_1 0x5C8C94 + +#define mmDMA6_QM_LBW_WR_RATE_LIM_CFG_0 0x5C8C98 + +#define mmDMA6_QM_LBW_WR_RATE_LIM_CFG_1 0x5C8C9C + +#define mmDMA6_QM_HBW_RD_RATE_LIM_CFG_0 0x5C8CA0 + +#define mmDMA6_QM_GLBL_AXCACHE 0x5C8CA4 + +#define mmDMA6_QM_IND_GW_APB_CFG 0x5C8CB0 + +#define mmDMA6_QM_IND_GW_APB_WDATA 0x5C8CB4 + +#define mmDMA6_QM_IND_GW_APB_RDATA 0x5C8CB8 + +#define mmDMA6_QM_IND_GW_APB_STATUS 0x5C8CBC + +#define mmDMA6_QM_GLBL_ERR_ADDR_LO 0x5C8CD0 + +#define mmDMA6_QM_GLBL_ERR_ADDR_HI 0x5C8CD4 + +#define mmDMA6_QM_GLBL_ERR_WDATA 0x5C8CD8 + +#define mmDMA6_QM_GLBL_MEM_INIT_BUSY 0x5C8D00 + +#endif /* ASIC_REG_DMA6_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h new file mode 100644 index 000000000..8dd705d20 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA7_CORE_REGS_H_
+#define ASIC_REG_DMA7_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA7_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA7_CORE_CFG_0 0x5E0000
+
+#define mmDMA7_CORE_CFG_1 0x5E0004
+
+#define mmDMA7_CORE_LBW_MAX_OUTSTAND 0x5E0008
+
+#define mmDMA7_CORE_SRC_BASE_LO 0x5E0014
+
+#define mmDMA7_CORE_SRC_BASE_HI 0x5E0018
+
+#define mmDMA7_CORE_DST_BASE_LO 0x5E001C
+
+#define mmDMA7_CORE_DST_BASE_HI 0x5E0020
+
+#define mmDMA7_CORE_SRC_TSIZE_1 0x5E002C
+
+#define mmDMA7_CORE_SRC_STRIDE_1 0x5E0030
+
+#define mmDMA7_CORE_SRC_TSIZE_2 0x5E0034
+
+#define mmDMA7_CORE_SRC_STRIDE_2 0x5E0038
+
+#define mmDMA7_CORE_SRC_TSIZE_3 0x5E003C
+
+#define mmDMA7_CORE_SRC_STRIDE_3 0x5E0040
+
+#define mmDMA7_CORE_SRC_TSIZE_4 0x5E0044
+
+#define mmDMA7_CORE_SRC_STRIDE_4 0x5E0048
+
+#define mmDMA7_CORE_SRC_TSIZE_0 0x5E004C
+
+#define mmDMA7_CORE_DST_TSIZE_1 0x5E0054
+
+#define mmDMA7_CORE_DST_STRIDE_1 0x5E0058
+
+#define mmDMA7_CORE_DST_TSIZE_2 0x5E005C
+
+#define mmDMA7_CORE_DST_STRIDE_2 0x5E0060
+
+#define mmDMA7_CORE_DST_TSIZE_3 0x5E0064
+
+#define mmDMA7_CORE_DST_STRIDE_3 0x5E0068
+
+#define mmDMA7_CORE_DST_TSIZE_4 0x5E006C
+
+#define mmDMA7_CORE_DST_STRIDE_4 0x5E0070
+
+#define mmDMA7_CORE_DST_TSIZE_0 0x5E0074
+
+#define mmDMA7_CORE_COMMIT 0x5E0078
+
+#define mmDMA7_CORE_WR_COMP_WDATA 0x5E007C
+
+#define mmDMA7_CORE_WR_COMP_ADDR_LO 0x5E0080
+
+#define mmDMA7_CORE_WR_COMP_ADDR_HI 0x5E0084
+
+#define mmDMA7_CORE_WR_COMP_AWUSER_31_11 0x5E0088
+
+#define mmDMA7_CORE_TE_NUMROWS 0x5E0094
+
+#define mmDMA7_CORE_PROT 0x5E00B8
+
+#define mmDMA7_CORE_SECURE_PROPS 0x5E00F0
+
+#define mmDMA7_CORE_NON_SECURE_PROPS 0x5E00F4
+
+#define mmDMA7_CORE_RD_MAX_OUTSTAND 0x5E0100
+
+#define mmDMA7_CORE_RD_MAX_SIZE 0x5E0104
+
+#define mmDMA7_CORE_RD_ARCACHE 0x5E0108
+
+#define mmDMA7_CORE_RD_ARUSER_31_11 0x5E0110
+
+#define mmDMA7_CORE_RD_INFLIGHTS 0x5E0114
+
+#define mmDMA7_CORE_WR_MAX_OUTSTAND 0x5E0120
+
+#define mmDMA7_CORE_WR_MAX_AWID 0x5E0124
+
+#define mmDMA7_CORE_WR_AWCACHE 0x5E0128
+
+#define mmDMA7_CORE_WR_AWUSER_31_11 0x5E0130
+
+#define mmDMA7_CORE_WR_INFLIGHTS 0x5E0134
+
+#define mmDMA7_CORE_RD_RATE_LIM_CFG_0 0x5E0150
+
+#define mmDMA7_CORE_RD_RATE_LIM_CFG_1 0x5E0154
+
+#define mmDMA7_CORE_WR_RATE_LIM_CFG_0 0x5E0158
+
+#define mmDMA7_CORE_WR_RATE_LIM_CFG_1 0x5E015C
+
+#define mmDMA7_CORE_ERR_CFG 0x5E0160
+
+#define mmDMA7_CORE_ERR_CAUSE 0x5E0164
+
+#define mmDMA7_CORE_ERRMSG_ADDR_LO 0x5E0170
+
+#define mmDMA7_CORE_ERRMSG_ADDR_HI 0x5E0174
+
+#define mmDMA7_CORE_ERRMSG_WDATA 0x5E0178
+
+#define mmDMA7_CORE_STS0 0x5E0190
+
+#define mmDMA7_CORE_STS1 0x5E0194
+
+#define mmDMA7_CORE_RD_DBGMEM_ADD 0x5E0200
+
+#define mmDMA7_CORE_RD_DBGMEM_DATA_WR 0x5E0204
+
+#define mmDMA7_CORE_RD_DBGMEM_DATA_RD 0x5E0208
+
+#define mmDMA7_CORE_RD_DBGMEM_CTRL 0x5E020C
+
+#define mmDMA7_CORE_RD_DBGMEM_RC 0x5E0210
+
+#define mmDMA7_CORE_DBG_HBW_AXI_AR_CNT 0x5E0220
+
+#define mmDMA7_CORE_DBG_HBW_AXI_AW_CNT 0x5E0224
+
+#define mmDMA7_CORE_DBG_LBW_AXI_AW_CNT 0x5E0228
+
+#define mmDMA7_CORE_DBG_DESC_CNT 0x5E022C
+
+#define mmDMA7_CORE_DBG_STS 0x5E0230
+
+#define mmDMA7_CORE_DBG_RD_DESC_ID 0x5E0234
+
+#define mmDMA7_CORE_DBG_WR_DESC_ID 0x5E0238
+
+#endif /* ASIC_REG_DMA7_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h
b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h new file mode 100644 index 000000000..d6c631f63 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA7_QM_REGS_H_ +#define ASIC_REG_DMA7_QM_REGS_H_ + +/* + ***************************************** + * DMA7_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA7_QM_GLBL_CFG0 0x5E8000 + +#define mmDMA7_QM_GLBL_CFG1 0x5E8004 + +#define mmDMA7_QM_GLBL_PROT 0x5E8008 + +#define mmDMA7_QM_GLBL_ERR_CFG 0x5E800C + +#define mmDMA7_QM_GLBL_SECURE_PROPS_0 0x5E8010 + +#define mmDMA7_QM_GLBL_SECURE_PROPS_1 0x5E8014 + +#define mmDMA7_QM_GLBL_SECURE_PROPS_2 0x5E8018 + +#define mmDMA7_QM_GLBL_SECURE_PROPS_3 0x5E801C + +#define mmDMA7_QM_GLBL_SECURE_PROPS_4 0x5E8020 + +#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_0 0x5E8024 + +#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_1 0x5E8028 + +#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_2 0x5E802C + +#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_3 0x5E8030 + +#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_4 0x5E8034 + +#define mmDMA7_QM_GLBL_STS0 0x5E8038 + +#define mmDMA7_QM_GLBL_STS1_0 0x5E8040 + +#define mmDMA7_QM_GLBL_STS1_1 0x5E8044 + +#define mmDMA7_QM_GLBL_STS1_2 0x5E8048 + +#define mmDMA7_QM_GLBL_STS1_3 0x5E804C + +#define mmDMA7_QM_GLBL_STS1_4 0x5E8050 + +#define mmDMA7_QM_GLBL_MSG_EN_0 0x5E8054 + +#define mmDMA7_QM_GLBL_MSG_EN_1 0x5E8058 + +#define mmDMA7_QM_GLBL_MSG_EN_2 0x5E805C + +#define mmDMA7_QM_GLBL_MSG_EN_3 0x5E8060 + +#define mmDMA7_QM_GLBL_MSG_EN_4 0x5E8068 + +#define mmDMA7_QM_PQ_BASE_LO_0 0x5E8070 + +#define mmDMA7_QM_PQ_BASE_LO_1 0x5E8074 + +#define mmDMA7_QM_PQ_BASE_LO_2 0x5E8078 + +#define mmDMA7_QM_PQ_BASE_LO_3 0x5E807C + +#define mmDMA7_QM_PQ_BASE_HI_0 0x5E8080 + +#define mmDMA7_QM_PQ_BASE_HI_1 0x5E8084 + +#define mmDMA7_QM_PQ_BASE_HI_2 0x5E8088 + +#define mmDMA7_QM_PQ_BASE_HI_3 0x5E808C + +#define mmDMA7_QM_PQ_SIZE_0 0x5E8090 + +#define mmDMA7_QM_PQ_SIZE_1 0x5E8094 + +#define mmDMA7_QM_PQ_SIZE_2 0x5E8098 + +#define mmDMA7_QM_PQ_SIZE_3 0x5E809C + +#define mmDMA7_QM_PQ_PI_0 0x5E80A0 + +#define mmDMA7_QM_PQ_PI_1 0x5E80A4 + +#define mmDMA7_QM_PQ_PI_2 0x5E80A8 + +#define mmDMA7_QM_PQ_PI_3 0x5E80AC + +#define mmDMA7_QM_PQ_CI_0 0x5E80B0 + +#define mmDMA7_QM_PQ_CI_1 0x5E80B4 + +#define mmDMA7_QM_PQ_CI_2 0x5E80B8 + +#define mmDMA7_QM_PQ_CI_3 0x5E80BC + +#define mmDMA7_QM_PQ_CFG0_0 0x5E80C0 + +#define mmDMA7_QM_PQ_CFG0_1 0x5E80C4 + +#define mmDMA7_QM_PQ_CFG0_2 0x5E80C8 + +#define mmDMA7_QM_PQ_CFG0_3 0x5E80CC + +#define mmDMA7_QM_PQ_CFG1_0 0x5E80D0 + +#define mmDMA7_QM_PQ_CFG1_1 0x5E80D4 + +#define mmDMA7_QM_PQ_CFG1_2 0x5E80D8 + +#define mmDMA7_QM_PQ_CFG1_3 0x5E80DC + +#define mmDMA7_QM_PQ_ARUSER_31_11_0 0x5E80E0 + +#define mmDMA7_QM_PQ_ARUSER_31_11_1 0x5E80E4 + +#define mmDMA7_QM_PQ_ARUSER_31_11_2 0x5E80E8 + +#define mmDMA7_QM_PQ_ARUSER_31_11_3 0x5E80EC + +#define mmDMA7_QM_PQ_STS0_0 0x5E80F0 + +#define mmDMA7_QM_PQ_STS0_1 0x5E80F4 + +#define mmDMA7_QM_PQ_STS0_2 0x5E80F8 + +#define mmDMA7_QM_PQ_STS0_3 0x5E80FC + +#define mmDMA7_QM_PQ_STS1_0 0x5E8100 + +#define mmDMA7_QM_PQ_STS1_1 0x5E8104 + +#define mmDMA7_QM_PQ_STS1_2 0x5E8108 + +#define mmDMA7_QM_PQ_STS1_3 0x5E810C + +#define mmDMA7_QM_CQ_CFG0_0 0x5E8110 + +#define mmDMA7_QM_CQ_CFG0_1 0x5E8114 + 
+#define mmDMA7_QM_CQ_CFG0_2 0x5E8118 + +#define mmDMA7_QM_CQ_CFG0_3 0x5E811C + +#define mmDMA7_QM_CQ_CFG0_4 0x5E8120 + +#define mmDMA7_QM_CQ_CFG1_0 0x5E8124 + +#define mmDMA7_QM_CQ_CFG1_1 0x5E8128 + +#define mmDMA7_QM_CQ_CFG1_2 0x5E812C + +#define mmDMA7_QM_CQ_CFG1_3 0x5E8130 + +#define mmDMA7_QM_CQ_CFG1_4 0x5E8134 + +#define mmDMA7_QM_CQ_ARUSER_31_11_0 0x5E8138 + +#define mmDMA7_QM_CQ_ARUSER_31_11_1 0x5E813C + +#define mmDMA7_QM_CQ_ARUSER_31_11_2 0x5E8140 + +#define mmDMA7_QM_CQ_ARUSER_31_11_3 0x5E8144 + +#define mmDMA7_QM_CQ_ARUSER_31_11_4 0x5E8148 + +#define mmDMA7_QM_CQ_STS0_0 0x5E814C + +#define mmDMA7_QM_CQ_STS0_1 0x5E8150 + +#define mmDMA7_QM_CQ_STS0_2 0x5E8154 + +#define mmDMA7_QM_CQ_STS0_3 0x5E8158 + +#define mmDMA7_QM_CQ_STS0_4 0x5E815C + +#define mmDMA7_QM_CQ_STS1_0 0x5E8160 + +#define mmDMA7_QM_CQ_STS1_1 0x5E8164 + +#define mmDMA7_QM_CQ_STS1_2 0x5E8168 + +#define mmDMA7_QM_CQ_STS1_3 0x5E816C + +#define mmDMA7_QM_CQ_STS1_4 0x5E8170 + +#define mmDMA7_QM_CQ_PTR_LO_0 0x5E8174 + +#define mmDMA7_QM_CQ_PTR_HI_0 0x5E8178 + +#define mmDMA7_QM_CQ_TSIZE_0 0x5E817C + +#define mmDMA7_QM_CQ_CTL_0 0x5E8180 + +#define mmDMA7_QM_CQ_PTR_LO_1 0x5E8184 + +#define mmDMA7_QM_CQ_PTR_HI_1 0x5E8188 + +#define mmDMA7_QM_CQ_TSIZE_1 0x5E818C + +#define mmDMA7_QM_CQ_CTL_1 0x5E8190 + +#define mmDMA7_QM_CQ_PTR_LO_2 0x5E8194 + +#define mmDMA7_QM_CQ_PTR_HI_2 0x5E8198 + +#define mmDMA7_QM_CQ_TSIZE_2 0x5E819C + +#define mmDMA7_QM_CQ_CTL_2 0x5E81A0 + +#define mmDMA7_QM_CQ_PTR_LO_3 0x5E81A4 + +#define mmDMA7_QM_CQ_PTR_HI_3 0x5E81A8 + +#define mmDMA7_QM_CQ_TSIZE_3 0x5E81AC + +#define mmDMA7_QM_CQ_CTL_3 0x5E81B0 + +#define mmDMA7_QM_CQ_PTR_LO_4 0x5E81B4 + +#define mmDMA7_QM_CQ_PTR_HI_4 0x5E81B8 + +#define mmDMA7_QM_CQ_TSIZE_4 0x5E81BC + +#define mmDMA7_QM_CQ_CTL_4 0x5E81C0 + +#define mmDMA7_QM_CQ_PTR_LO_STS_0 0x5E81C4 + +#define mmDMA7_QM_CQ_PTR_LO_STS_1 0x5E81C8 + +#define mmDMA7_QM_CQ_PTR_LO_STS_2 0x5E81CC + +#define mmDMA7_QM_CQ_PTR_LO_STS_3 0x5E81D0 + +#define mmDMA7_QM_CQ_PTR_LO_STS_4 0x5E81D4 + +#define mmDMA7_QM_CQ_PTR_HI_STS_0 0x5E81D8 + +#define mmDMA7_QM_CQ_PTR_HI_STS_1 0x5E81DC + +#define mmDMA7_QM_CQ_PTR_HI_STS_2 0x5E81E0 + +#define mmDMA7_QM_CQ_PTR_HI_STS_3 0x5E81E4 + +#define mmDMA7_QM_CQ_PTR_HI_STS_4 0x5E81E8 + +#define mmDMA7_QM_CQ_TSIZE_STS_0 0x5E81EC + +#define mmDMA7_QM_CQ_TSIZE_STS_1 0x5E81F0 + +#define mmDMA7_QM_CQ_TSIZE_STS_2 0x5E81F4 + +#define mmDMA7_QM_CQ_TSIZE_STS_3 0x5E81F8 + +#define mmDMA7_QM_CQ_TSIZE_STS_4 0x5E81FC + +#define mmDMA7_QM_CQ_CTL_STS_0 0x5E8200 + +#define mmDMA7_QM_CQ_CTL_STS_1 0x5E8204 + +#define mmDMA7_QM_CQ_CTL_STS_2 0x5E8208 + +#define mmDMA7_QM_CQ_CTL_STS_3 0x5E820C + +#define mmDMA7_QM_CQ_CTL_STS_4 0x5E8210 + +#define mmDMA7_QM_CQ_IFIFO_CNT_0 0x5E8214 + +#define mmDMA7_QM_CQ_IFIFO_CNT_1 0x5E8218 + +#define mmDMA7_QM_CQ_IFIFO_CNT_2 0x5E821C + +#define mmDMA7_QM_CQ_IFIFO_CNT_3 0x5E8220 + +#define mmDMA7_QM_CQ_IFIFO_CNT_4 0x5E8224 + +#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_0 0x5E8228 + +#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_1 0x5E822C + +#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_2 0x5E8230 + +#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_3 0x5E8234 + +#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_4 0x5E8238 + +#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_0 0x5E823C + +#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_1 0x5E8240 + +#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_2 0x5E8244 + +#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_3 0x5E8248 + +#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_4 0x5E824C + +#define mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_0 0x5E8250 + +#define mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_1 0x5E8254 + +#define 
mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_2 0x5E8258 + +#define mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_3 0x5E825C + +#define mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_4 0x5E8260 + +#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_0 0x5E8264 + +#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_1 0x5E8268 + +#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_2 0x5E826C + +#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_3 0x5E8270 + +#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_4 0x5E8274 + +#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_0 0x5E8278 + +#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_1 0x5E827C + +#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 0x5E8280 + +#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_3 0x5E8284 + +#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_4 0x5E8288 + +#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_0 0x5E828C + +#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_1 0x5E8290 + +#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_2 0x5E8294 + +#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_3 0x5E8298 + +#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_4 0x5E829C + +#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_0 0x5E82A0 + +#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_1 0x5E82A4 + +#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_2 0x5E82A8 + +#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_3 0x5E82AC + +#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_4 0x5E82B0 + +#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_0 0x5E82B4 + +#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_1 0x5E82B8 + +#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_2 0x5E82BC + +#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_3 0x5E82C0 + +#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_4 0x5E82C4 + +#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_0 0x5E82C8 + +#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_1 0x5E82CC + +#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_2 0x5E82D0 + +#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_3 0x5E82D4 + +#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_4 0x5E82D8 + +#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5E82E0 + +#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5E82E4 + +#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5E82E8 + +#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5E82EC + +#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5E82F0 + +#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5E82F4 + +#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5E82F8 + +#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5E82FC + +#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x5E8300 + +#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x5E8304 + +#define mmDMA7_QM_CP_FENCE0_RDATA_0 0x5E8308 + +#define mmDMA7_QM_CP_FENCE0_RDATA_1 0x5E830C + +#define mmDMA7_QM_CP_FENCE0_RDATA_2 0x5E8310 + +#define mmDMA7_QM_CP_FENCE0_RDATA_3 0x5E8314 + +#define mmDMA7_QM_CP_FENCE0_RDATA_4 0x5E8318 + +#define mmDMA7_QM_CP_FENCE1_RDATA_0 0x5E831C + +#define mmDMA7_QM_CP_FENCE1_RDATA_1 0x5E8320 + +#define mmDMA7_QM_CP_FENCE1_RDATA_2 0x5E8324 + +#define mmDMA7_QM_CP_FENCE1_RDATA_3 0x5E8328 + +#define mmDMA7_QM_CP_FENCE1_RDATA_4 0x5E832C + +#define mmDMA7_QM_CP_FENCE2_RDATA_0 0x5E8330 + +#define mmDMA7_QM_CP_FENCE2_RDATA_1 0x5E8334 + +#define mmDMA7_QM_CP_FENCE2_RDATA_2 0x5E8338 + +#define mmDMA7_QM_CP_FENCE2_RDATA_3 0x5E833C + +#define mmDMA7_QM_CP_FENCE2_RDATA_4 0x5E8340 + +#define mmDMA7_QM_CP_FENCE3_RDATA_0 0x5E8344 + +#define mmDMA7_QM_CP_FENCE3_RDATA_1 0x5E8348 + +#define mmDMA7_QM_CP_FENCE3_RDATA_2 0x5E834C + +#define mmDMA7_QM_CP_FENCE3_RDATA_3 0x5E8350 + +#define mmDMA7_QM_CP_FENCE3_RDATA_4 0x5E8354 + +#define mmDMA7_QM_CP_FENCE0_CNT_0 0x5E8358 + +#define mmDMA7_QM_CP_FENCE0_CNT_1 0x5E835C + +#define mmDMA7_QM_CP_FENCE0_CNT_2 0x5E8360 + +#define mmDMA7_QM_CP_FENCE0_CNT_3 0x5E8364 + +#define mmDMA7_QM_CP_FENCE0_CNT_4 0x5E8368 + +#define mmDMA7_QM_CP_FENCE1_CNT_0 
0x5E836C + +#define mmDMA7_QM_CP_FENCE1_CNT_1 0x5E8370 + +#define mmDMA7_QM_CP_FENCE1_CNT_2 0x5E8374 + +#define mmDMA7_QM_CP_FENCE1_CNT_3 0x5E8378 + +#define mmDMA7_QM_CP_FENCE1_CNT_4 0x5E837C + +#define mmDMA7_QM_CP_FENCE2_CNT_0 0x5E8380 + +#define mmDMA7_QM_CP_FENCE2_CNT_1 0x5E8384 + +#define mmDMA7_QM_CP_FENCE2_CNT_2 0x5E8388 + +#define mmDMA7_QM_CP_FENCE2_CNT_3 0x5E838C + +#define mmDMA7_QM_CP_FENCE2_CNT_4 0x5E8390 + +#define mmDMA7_QM_CP_FENCE3_CNT_0 0x5E8394 + +#define mmDMA7_QM_CP_FENCE3_CNT_1 0x5E8398 + +#define mmDMA7_QM_CP_FENCE3_CNT_2 0x5E839C + +#define mmDMA7_QM_CP_FENCE3_CNT_3 0x5E83A0 + +#define mmDMA7_QM_CP_FENCE3_CNT_4 0x5E83A4 + +#define mmDMA7_QM_CP_STS_0 0x5E83A8 + +#define mmDMA7_QM_CP_STS_1 0x5E83AC + +#define mmDMA7_QM_CP_STS_2 0x5E83B0 + +#define mmDMA7_QM_CP_STS_3 0x5E83B4 + +#define mmDMA7_QM_CP_STS_4 0x5E83B8 + +#define mmDMA7_QM_CP_CURRENT_INST_LO_0 0x5E83BC + +#define mmDMA7_QM_CP_CURRENT_INST_LO_1 0x5E83C0 + +#define mmDMA7_QM_CP_CURRENT_INST_LO_2 0x5E83C4 + +#define mmDMA7_QM_CP_CURRENT_INST_LO_3 0x5E83C8 + +#define mmDMA7_QM_CP_CURRENT_INST_LO_4 0x5E83CC + +#define mmDMA7_QM_CP_CURRENT_INST_HI_0 0x5E83D0 + +#define mmDMA7_QM_CP_CURRENT_INST_HI_1 0x5E83D4 + +#define mmDMA7_QM_CP_CURRENT_INST_HI_2 0x5E83D8 + +#define mmDMA7_QM_CP_CURRENT_INST_HI_3 0x5E83DC + +#define mmDMA7_QM_CP_CURRENT_INST_HI_4 0x5E83E0 + +#define mmDMA7_QM_CP_BARRIER_CFG_0 0x5E83F4 + +#define mmDMA7_QM_CP_BARRIER_CFG_1 0x5E83F8 + +#define mmDMA7_QM_CP_BARRIER_CFG_2 0x5E83FC + +#define mmDMA7_QM_CP_BARRIER_CFG_3 0x5E8400 + +#define mmDMA7_QM_CP_BARRIER_CFG_4 0x5E8404 + +#define mmDMA7_QM_CP_DBG_0_0 0x5E8408 + +#define mmDMA7_QM_CP_DBG_0_1 0x5E840C + +#define mmDMA7_QM_CP_DBG_0_2 0x5E8410 + +#define mmDMA7_QM_CP_DBG_0_3 0x5E8414 + +#define mmDMA7_QM_CP_DBG_0_4 0x5E8418 + +#define mmDMA7_QM_CP_ARUSER_31_11_0 0x5E841C + +#define mmDMA7_QM_CP_ARUSER_31_11_1 0x5E8420 + +#define mmDMA7_QM_CP_ARUSER_31_11_2 0x5E8424 + +#define mmDMA7_QM_CP_ARUSER_31_11_3 0x5E8428 + +#define mmDMA7_QM_CP_ARUSER_31_11_4 0x5E842C + +#define mmDMA7_QM_CP_AWUSER_31_11_0 0x5E8430 + +#define mmDMA7_QM_CP_AWUSER_31_11_1 0x5E8434 + +#define mmDMA7_QM_CP_AWUSER_31_11_2 0x5E8438 + +#define mmDMA7_QM_CP_AWUSER_31_11_3 0x5E843C + +#define mmDMA7_QM_CP_AWUSER_31_11_4 0x5E8440 + +#define mmDMA7_QM_ARB_CFG_0 0x5E8A00 + +#define mmDMA7_QM_ARB_CHOISE_Q_PUSH 0x5E8A04 + +#define mmDMA7_QM_ARB_WRR_WEIGHT_0 0x5E8A08 + +#define mmDMA7_QM_ARB_WRR_WEIGHT_1 0x5E8A0C + +#define mmDMA7_QM_ARB_WRR_WEIGHT_2 0x5E8A10 + +#define mmDMA7_QM_ARB_WRR_WEIGHT_3 0x5E8A14 + +#define mmDMA7_QM_ARB_CFG_1 0x5E8A18 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_0 0x5E8A20 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_1 0x5E8A24 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_2 0x5E8A28 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_3 0x5E8A2C + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_4 0x5E8A30 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_5 0x5E8A34 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_6 0x5E8A38 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_7 0x5E8A3C + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_8 0x5E8A40 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_9 0x5E8A44 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_10 0x5E8A48 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_11 0x5E8A4C + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_12 0x5E8A50 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_13 0x5E8A54 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_14 0x5E8A58 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_15 0x5E8A5C + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_16 0x5E8A60 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_17 0x5E8A64 + +#define 
mmDMA7_QM_ARB_MST_AVAIL_CRED_18 0x5E8A68 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_19 0x5E8A6C + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_20 0x5E8A70 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_21 0x5E8A74 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_22 0x5E8A78 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_23 0x5E8A7C + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_24 0x5E8A80 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_25 0x5E8A84 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_26 0x5E8A88 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_27 0x5E8A8C + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_28 0x5E8A90 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_29 0x5E8A94 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_30 0x5E8A98 + +#define mmDMA7_QM_ARB_MST_AVAIL_CRED_31 0x5E8A9C + +#define mmDMA7_QM_ARB_MST_CRED_INC 0x5E8AA0 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x5E8AA4 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x5E8AA8 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x5E8AAC + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x5E8AB0 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x5E8AB4 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x5E8AB8 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x5E8ABC + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x5E8AC0 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x5E8AC4 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x5E8AC8 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x5E8ACC + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x5E8AD0 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x5E8AD4 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x5E8AD8 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x5E8ADC + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x5E8AE0 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x5E8AE4 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x5E8AE8 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x5E8AEC + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x5E8AF0 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x5E8AF4 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x5E8AF8 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x5E8AFC + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x5E8B00 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x5E8B04 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x5E8B08 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x5E8B0C + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x5E8B10 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x5E8B14 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x5E8B18 + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x5E8B1C + +#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x5E8B20 + +#define mmDMA7_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x5E8B28 + +#define mmDMA7_QM_ARB_MST_SLAVE_EN 0x5E8B2C + +#define mmDMA7_QM_ARB_MST_QUIET_PER 0x5E8B34 + +#define mmDMA7_QM_ARB_SLV_CHOISE_WDT 0x5E8B38 + +#define mmDMA7_QM_ARB_SLV_ID 0x5E8B3C + +#define mmDMA7_QM_ARB_MSG_MAX_INFLIGHT 0x5E8B44 + +#define mmDMA7_QM_ARB_MSG_AWUSER_31_11 0x5E8B48 + +#define mmDMA7_QM_ARB_MSG_AWUSER_SEC_PROP 0x5E8B4C + +#define mmDMA7_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x5E8B50 + +#define mmDMA7_QM_ARB_BASE_LO 0x5E8B54 + +#define mmDMA7_QM_ARB_BASE_HI 0x5E8B58 + +#define mmDMA7_QM_ARB_STATE_STS 0x5E8B80 + +#define mmDMA7_QM_ARB_CHOISE_FULLNESS_STS 0x5E8B84 + +#define mmDMA7_QM_ARB_MSG_STS 0x5E8B88 + +#define mmDMA7_QM_ARB_SLV_CHOISE_Q_HEAD 0x5E8B8C + +#define mmDMA7_QM_ARB_ERR_CAUSE 0x5E8B9C + +#define mmDMA7_QM_ARB_ERR_MSG_EN 0x5E8BA0 + +#define mmDMA7_QM_ARB_ERR_STS_DRP 0x5E8BA8 + +#define mmDMA7_QM_ARB_MST_CRED_STS_0 0x5E8BB0 + +#define 
mmDMA7_QM_ARB_MST_CRED_STS_1 0x5E8BB4 + +#define mmDMA7_QM_ARB_MST_CRED_STS_2 0x5E8BB8 + +#define mmDMA7_QM_ARB_MST_CRED_STS_3 0x5E8BBC + +#define mmDMA7_QM_ARB_MST_CRED_STS_4 0x5E8BC0 + +#define mmDMA7_QM_ARB_MST_CRED_STS_5 0x5E8BC4 + +#define mmDMA7_QM_ARB_MST_CRED_STS_6 0x5E8BC8 + +#define mmDMA7_QM_ARB_MST_CRED_STS_7 0x5E8BCC + +#define mmDMA7_QM_ARB_MST_CRED_STS_8 0x5E8BD0 + +#define mmDMA7_QM_ARB_MST_CRED_STS_9 0x5E8BD4 + +#define mmDMA7_QM_ARB_MST_CRED_STS_10 0x5E8BD8 + +#define mmDMA7_QM_ARB_MST_CRED_STS_11 0x5E8BDC + +#define mmDMA7_QM_ARB_MST_CRED_STS_12 0x5E8BE0 + +#define mmDMA7_QM_ARB_MST_CRED_STS_13 0x5E8BE4 + +#define mmDMA7_QM_ARB_MST_CRED_STS_14 0x5E8BE8 + +#define mmDMA7_QM_ARB_MST_CRED_STS_15 0x5E8BEC + +#define mmDMA7_QM_ARB_MST_CRED_STS_16 0x5E8BF0 + +#define mmDMA7_QM_ARB_MST_CRED_STS_17 0x5E8BF4 + +#define mmDMA7_QM_ARB_MST_CRED_STS_18 0x5E8BF8 + +#define mmDMA7_QM_ARB_MST_CRED_STS_19 0x5E8BFC + +#define mmDMA7_QM_ARB_MST_CRED_STS_20 0x5E8C00 + +#define mmDMA7_QM_ARB_MST_CRED_STS_21 0x5E8C04 + +#define mmDMA7_QM_ARB_MST_CRED_STS_22 0x5E8C08 + +#define mmDMA7_QM_ARB_MST_CRED_STS_23 0x5E8C0C + +#define mmDMA7_QM_ARB_MST_CRED_STS_24 0x5E8C10 + +#define mmDMA7_QM_ARB_MST_CRED_STS_25 0x5E8C14 + +#define mmDMA7_QM_ARB_MST_CRED_STS_26 0x5E8C18 + +#define mmDMA7_QM_ARB_MST_CRED_STS_27 0x5E8C1C + +#define mmDMA7_QM_ARB_MST_CRED_STS_28 0x5E8C20 + +#define mmDMA7_QM_ARB_MST_CRED_STS_29 0x5E8C24 + +#define mmDMA7_QM_ARB_MST_CRED_STS_30 0x5E8C28 + +#define mmDMA7_QM_ARB_MST_CRED_STS_31 0x5E8C2C + +#define mmDMA7_QM_CGM_CFG 0x5E8C70 + +#define mmDMA7_QM_CGM_STS 0x5E8C74 + +#define mmDMA7_QM_CGM_CFG1 0x5E8C78 + +#define mmDMA7_QM_LOCAL_RANGE_BASE 0x5E8C80 + +#define mmDMA7_QM_LOCAL_RANGE_SIZE 0x5E8C84 + +#define mmDMA7_QM_CSMR_STRICT_PRIO_CFG 0x5E8C90 + +#define mmDMA7_QM_HBW_RD_RATE_LIM_CFG_1 0x5E8C94 + +#define mmDMA7_QM_LBW_WR_RATE_LIM_CFG_0 0x5E8C98 + +#define mmDMA7_QM_LBW_WR_RATE_LIM_CFG_1 0x5E8C9C + +#define mmDMA7_QM_HBW_RD_RATE_LIM_CFG_0 0x5E8CA0 + +#define mmDMA7_QM_GLBL_AXCACHE 0x5E8CA4 + +#define mmDMA7_QM_IND_GW_APB_CFG 0x5E8CB0 + +#define mmDMA7_QM_IND_GW_APB_WDATA 0x5E8CB4 + +#define mmDMA7_QM_IND_GW_APB_RDATA 0x5E8CB8 + +#define mmDMA7_QM_IND_GW_APB_STATUS 0x5E8CBC + +#define mmDMA7_QM_GLBL_ERR_ADDR_LO 0x5E8CD0 + +#define mmDMA7_QM_GLBL_ERR_ADDR_HI 0x5E8CD4 + +#define mmDMA7_QM_GLBL_ERR_WDATA 0x5E8CD8 + +#define mmDMA7_QM_GLBL_MEM_INIT_BUSY 0x5E8D00 + +#endif /* ASIC_REG_DMA7_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h new file mode 100644 index 000000000..8c1c72df4 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h @@ -0,0 +1,896 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_IF_E_N_DOWN_CH0_REGS_H_ +#define ASIC_REG_DMA_IF_E_N_DOWN_CH0_REGS_H_ + +/* + ***************************************** + * DMA_IF_E_N_DOWN_CH0 (Prototype: RTR_CTRL) + ***************************************** + */ + +#define mmDMA_IF_E_N_DOWN_CH0_PERM_SEL 0x4E1108 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_0 0x4E1114 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_1 0x4E1118 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_2 0x4E111C + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_3 0x4E1120 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_4 0x4E1124 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_5 0x4E1128 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_6 0x4E112C + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_7 0x4E1130 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_8 0x4E1134 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_9 0x4E1138 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_10 0x4E113C + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_11 0x4E1140 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_12 0x4E1144 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_13 0x4E1148 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_14 0x4E114C + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_15 0x4E1150 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_16 0x4E1154 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_17 0x4E1158 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_18 0x4E115C + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_19 0x4E1160 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_20 0x4E1164 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_21 0x4E1168 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_22 0x4E116C + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_23 0x4E1170 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_24 0x4E1174 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_25 0x4E1178 + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_26 0x4E117C + +#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_27 0x4E1180 + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_0 0x4E1184 + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_1 0x4E1188 + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_2 0x4E118C + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_3 0x4E1190 + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_4 0x4E1194 + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_5 0x4E1198 + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_6 0x4E119C + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_7 0x4E11A0 + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_8 0x4E11A4 + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_9 0x4E11A8 + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_10 0x4E11AC + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_11 0x4E11B0 + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_12 0x4E11B4 + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_13 0x4E11B8 + +#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_14 0x4E11BC + +#define mmDMA_IF_E_N_DOWN_CH0_SCRAM_SRAM_EN 0x4E126C + +#define mmDMA_IF_E_N_DOWN_CH0_RL_HBM_EN 0x4E1274 + +#define mmDMA_IF_E_N_DOWN_CH0_RL_HBM_SAT 0x4E1278 + +#define mmDMA_IF_E_N_DOWN_CH0_RL_HBM_RST 0x4E127C + +#define mmDMA_IF_E_N_DOWN_CH0_RL_HBM_TIMEOUT 0x4E1280 + +#define mmDMA_IF_E_N_DOWN_CH0_SCRAM_HBM_EN 0x4E1284 + +#define mmDMA_IF_E_N_DOWN_CH0_RL_PCI_EN 0x4E1288 + +#define mmDMA_IF_E_N_DOWN_CH0_RL_PCI_SAT 0x4E128C + +#define mmDMA_IF_E_N_DOWN_CH0_RL_PCI_RST 0x4E1290 + +#define mmDMA_IF_E_N_DOWN_CH0_RL_PCI_TIMEOUT 0x4E1294 + +#define mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_EN 0x4E129C + +#define mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_SAT 0x4E12A0 + +#define 
mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_RST 0x4E12A4 + +#define mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_TIMEOUT 0x4E12AC + +#define mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_RED 0x4E12B4 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_EN 0x4E12EC + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_EN 0x4E12F0 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_WR_SIZE 0x4E12F4 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_WR_SIZE 0x4E12F8 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_PCI_CTR_SET_EN 0x4E1404 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_PCI_CTR_SET 0x4E1408 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_PCI_CTR_WRAP 0x4E140C + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_PCI_CTR_CNT 0x4E1410 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM_CTR_SET_EN 0x4E1414 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM_CTR_SET 0x4E1418 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_RD_SIZE 0x4E141C + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_RD_SIZE 0x4E1420 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_PCI_CTR_SET_EN 0x4E1424 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_PCI_CTR_SET 0x4E1428 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_PCI_CTR_WRAP 0x4E142C + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_PCI_CTR_CNT 0x4E1430 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM_CTR_SET_EN 0x4E1434 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM_CTR_SET 0x4E1438 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_0 0x4E1450 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_1 0x4E1454 + +#define mmDMA_IF_E_N_DOWN_CH0_NON_LIN_EN 0x4E1480 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_0 0x4E1500 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_1 0x4E1504 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_2 0x4E1508 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_3 0x4E150C + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_4 0x4E1510 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_0 0x4E1514 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_1 0x4E1520 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_2 0x4E1524 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_3 0x4E1528 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_4 0x4E152C + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_5 0x4E1530 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_6 0x4E1534 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_7 0x4E1538 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_8 0x4E153C + +#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_9 0x4E1540 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_0 0x4E1550 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_1 0x4E1554 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_2 0x4E1558 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_3 0x4E155C + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_4 0x4E1560 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_5 0x4E1564 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_6 0x4E1568 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_7 0x4E156C + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_8 0x4E1570 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_9 0x4E1574 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_10 0x4E1578 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_11 0x4E157C + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_12 0x4E1580 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_13 0x4E1584 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_14 0x4E1588 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_15 0x4E158C + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_16 0x4E1590 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_17 0x4E1594 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_18 0x4E1598 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0 0x4E15E4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_1 0x4E15E8 + +#define 
mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_2 0x4E15EC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_3 0x4E15F0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_4 0x4E15F4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_5 0x4E15F8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_6 0x4E15FC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_7 0x4E1600 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_8 0x4E1604 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_9 0x4E1608 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_10 0x4E160C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_11 0x4E1610 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_12 0x4E1614 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_13 0x4E1618 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_14 0x4E161C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_15 0x4E1620 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0 0x4E1624 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_1 0x4E1628 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_2 0x4E162C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_3 0x4E1630 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_4 0x4E1634 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_5 0x4E1638 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_6 0x4E163C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_7 0x4E1640 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_8 0x4E1644 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_9 0x4E1648 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_10 0x4E164C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_11 0x4E1650 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_12 0x4E1654 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_13 0x4E1658 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_14 0x4E165C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_15 0x4E1660 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0 0x4E1664 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_1 0x4E1668 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_2 0x4E166C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_3 0x4E1670 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_4 0x4E1674 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_5 0x4E1678 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_6 0x4E167C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_7 0x4E1680 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_8 0x4E1684 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_9 0x4E1688 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_10 0x4E168C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_11 0x4E1690 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_12 0x4E1694 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_13 0x4E1698 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_14 0x4E169C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_15 0x4E16A0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0 0x4E16A4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_1 0x4E16A8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_2 0x4E16AC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_3 0x4E16B0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_4 0x4E16B4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_5 0x4E16B8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_6 0x4E16BC + +#define 
mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_7 0x4E16C0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_8 0x4E16C4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_9 0x4E16C8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_10 0x4E16CC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_11 0x4E16D0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_12 0x4E16D4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_13 0x4E16D8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_14 0x4E16DC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_15 0x4E16E0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_0 0x4E16E4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_1 0x4E16E8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_2 0x4E16EC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_3 0x4E16F0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_4 0x4E16F4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_5 0x4E16F8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_6 0x4E16FC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_7 0x4E1700 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_8 0x4E1704 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_9 0x4E1708 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_10 0x4E170C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_11 0x4E1710 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_12 0x4E1714 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_13 0x4E1718 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_14 0x4E171C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_15 0x4E1720 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_0 0x4E1724 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_1 0x4E1728 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_2 0x4E172C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_3 0x4E1730 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_4 0x4E1734 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_5 0x4E1738 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_6 0x4E173C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_7 0x4E1740 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_8 0x4E1744 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_9 0x4E1748 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_10 0x4E174C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_11 0x4E1750 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_12 0x4E1754 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_13 0x4E1758 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_14 0x4E175C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_15 0x4E1760 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_0 0x4E1764 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_1 0x4E1768 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_2 0x4E176C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_3 0x4E1770 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_4 0x4E1774 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_5 0x4E1778 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_6 0x4E177C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_7 0x4E1780 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_8 0x4E1784 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_9 0x4E1788 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_10 0x4E178C + +#define 
mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_11 0x4E1790 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_12 0x4E1794 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_13 0x4E1798 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_14 0x4E179C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_15 0x4E17A0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_0 0x4E17A4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_1 0x4E17A8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_2 0x4E17AC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_3 0x4E17B0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_4 0x4E17B4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_5 0x4E17B8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_6 0x4E17BC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_7 0x4E17C0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_8 0x4E17C4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_9 0x4E17C8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_10 0x4E17CC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_11 0x4E17D0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_12 0x4E17D4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_13 0x4E17D8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_14 0x4E17DC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_15 0x4E17E0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0 0x4E1824 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_1 0x4E1828 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_2 0x4E182C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_3 0x4E1830 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_4 0x4E1834 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_5 0x4E1838 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_6 0x4E183C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_7 0x4E1840 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_8 0x4E1844 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_9 0x4E1848 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_10 0x4E184C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_11 0x4E1850 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_12 0x4E1854 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_13 0x4E1858 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_14 0x4E185C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_15 0x4E1860 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0 0x4E1864 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_1 0x4E1868 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_2 0x4E186C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_3 0x4E1870 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_4 0x4E1874 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_5 0x4E1878 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_6 0x4E187C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_7 0x4E1880 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_8 0x4E1884 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_9 0x4E1888 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_10 0x4E188C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_11 0x4E1890 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_12 0x4E1894 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_13 0x4E1898 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_14 0x4E189C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_15 
0x4E18A0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0 0x4E18A4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_1 0x4E18A8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_2 0x4E18AC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_3 0x4E18B0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_4 0x4E18B4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_5 0x4E18B8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_6 0x4E18BC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_7 0x4E18C0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_8 0x4E18C4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_9 0x4E18C8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_10 0x4E18CC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_11 0x4E18D0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_12 0x4E18D4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_13 0x4E18D8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_14 0x4E18DC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_15 0x4E18E0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0 0x4E18E4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_1 0x4E18E8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_2 0x4E18EC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_3 0x4E18F0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_4 0x4E18F4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_5 0x4E18F8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_6 0x4E18FC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_7 0x4E1900 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_8 0x4E1904 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_9 0x4E1908 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_10 0x4E190C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_11 0x4E1910 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_12 0x4E1914 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_13 0x4E1918 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_14 0x4E191C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_15 0x4E1920 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_0 0x4E1924 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_1 0x4E1928 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_2 0x4E192C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_3 0x4E1930 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_4 0x4E1934 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_5 0x4E1938 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_6 0x4E193C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_7 0x4E1940 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_8 0x4E1944 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_9 0x4E1948 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_10 0x4E194C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_11 0x4E1950 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_12 0x4E1954 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_13 0x4E1958 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_14 0x4E195C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_15 0x4E1960 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_0 0x4E1964 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_1 0x4E1968 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_2 0x4E196C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_3 0x4E1970 + +#define 
mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_4 0x4E1974 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_5 0x4E1978 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_6 0x4E197C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_7 0x4E1980 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_8 0x4E1984 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_9 0x4E1988 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_10 0x4E198C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_11 0x4E1990 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_12 0x4E1994 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_13 0x4E1998 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_14 0x4E199C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_15 0x4E19A0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_0 0x4E19A4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_1 0x4E19A8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_2 0x4E19AC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_3 0x4E19B0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_4 0x4E19B4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_5 0x4E19B8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_6 0x4E19BC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_7 0x4E19C0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_8 0x4E19C4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_9 0x4E19C8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_10 0x4E19CC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_11 0x4E19D0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_12 0x4E19D4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_13 0x4E19D8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_14 0x4E19DC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_15 0x4E19E0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_0 0x4E19E4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_1 0x4E19E8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_2 0x4E19EC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_3 0x4E19F0 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_4 0x4E19F4 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_5 0x4E19F8 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_6 0x4E19FC + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_7 0x4E1A00 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_8 0x4E1A04 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_9 0x4E1A08 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_10 0x4E1A0C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_11 0x4E1A10 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_12 0x4E1A14 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_13 0x4E1A18 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_14 0x4E1A1C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_15 0x4E1A20 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_HIT_AW 0x4E1A64 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_HIT_AR 0x4E1A68 + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_HIT_AW 0x4E1A6C + +#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_HIT_AR 0x4E1A70 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_CFG 0x4E1B64 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_SHIFT 0x4E1B68 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_0 0x4E1B6C + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_1 0x4E1B70 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_2 0x4E1B74 + +#define 
mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_3 0x4E1B78 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_4 0x4E1B7C + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_5 0x4E1B80 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_6 0x4E1B84 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_7 0x4E1B88 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_0 0x4E1BAC + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_1 0x4E1BB0 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_2 0x4E1BB4 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_3 0x4E1BB8 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_4 0x4E1BBC + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_5 0x4E1BC0 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_6 0x4E1BC4 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_7 0x4E1BC8 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_0 0x4E1BEC + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_1 0x4E1BF0 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_2 0x4E1BF4 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_3 0x4E1BF8 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_4 0x4E1BFC + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_5 0x4E1C00 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_6 0x4E1C04 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_7 0x4E1C08 + +#define mmDMA_IF_E_N_DOWN_CH0_RGL_WDT 0x4E1C2C + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_WRAP 0x4E1C30 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_WRAP 0x4E1C34 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_WRAP 0x4E1C38 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_WRAP 0x4E1C3C + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_WRAP 0x4E1C40 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_WRAP 0x4E1C44 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_WRAP 0x4E1C48 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_WRAP 0x4E1C4C + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_CNT 0x4E1C50 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_CNT 0x4E1C54 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_CNT 0x4E1C58 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_CNT 0x4E1C5C + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_CNT 0x4E1C60 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_CNT 0x4E1C64 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_CNT 0x4E1C68 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_CNT 0x4E1C6C + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_WRAP 0x4E1C70 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_WRAP 0x4E1C74 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_WRAP 0x4E1C78 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_WRAP 0x4E1C7C + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_WRAP 0x4E1C80 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_WRAP 0x4E1C84 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_WRAP 0x4E1C88 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_WRAP 0x4E1C8C + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_CNT 0x4E1C90 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_CNT 0x4E1C94 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_CNT 0x4E1C98 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_CNT 0x4E1C9C + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_CNT 0x4E1CA0 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_CNT 0x4E1CA4 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_CNT 0x4E1CA8 + +#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_CNT 0x4E1CAC + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_0 0x4E1CB0 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_1 0x4E1CB4 + +#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_2 0x4E1CB8 + +#define 
mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_3 0x4E1CBC + +#endif /* ASIC_REG_DMA_IF_E_N_DOWN_CH0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h new file mode 100644 index 000000000..b2b593fce --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h @@ -0,0 +1,896 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_IF_E_N_DOWN_CH1_REGS_H_ +#define ASIC_REG_DMA_IF_E_N_DOWN_CH1_REGS_H_ + +/* + ***************************************** + * DMA_IF_E_N_DOWN_CH1 (Prototype: RTR_CTRL) + ***************************************** + */ + +#define mmDMA_IF_E_N_DOWN_CH1_PERM_SEL 0x4E2108 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_0 0x4E2114 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_1 0x4E2118 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_2 0x4E211C + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_3 0x4E2120 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_4 0x4E2124 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_5 0x4E2128 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_6 0x4E212C + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_7 0x4E2130 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_8 0x4E2134 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_9 0x4E2138 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_10 0x4E213C + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_11 0x4E2140 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_12 0x4E2144 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_13 0x4E2148 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_14 0x4E214C + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_15 0x4E2150 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_16 0x4E2154 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_17 0x4E2158 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_18 0x4E215C + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_19 0x4E2160 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_20 0x4E2164 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_21 0x4E2168 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_22 0x4E216C + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_23 0x4E2170 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_24 0x4E2174 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_25 0x4E2178 + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_26 0x4E217C + +#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_27 0x4E2180 + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_0 0x4E2184 + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_1 0x4E2188 + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_2 0x4E218C + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_3 0x4E2190 + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_4 0x4E2194 + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_5 0x4E2198 + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_6 0x4E219C + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_7 0x4E21A0 + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_8 0x4E21A4 + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_9 0x4E21A8 + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_10 0x4E21AC + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_11 0x4E21B0 + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_12 0x4E21B4 + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_13 0x4E21B8 + +#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_14 0x4E21BC + +#define mmDMA_IF_E_N_DOWN_CH1_SCRAM_SRAM_EN 0x4E226C + +#define mmDMA_IF_E_N_DOWN_CH1_RL_HBM_EN 0x4E2274 + +#define 
mmDMA_IF_E_N_DOWN_CH1_RL_HBM_SAT 0x4E2278 + +#define mmDMA_IF_E_N_DOWN_CH1_RL_HBM_RST 0x4E227C + +#define mmDMA_IF_E_N_DOWN_CH1_RL_HBM_TIMEOUT 0x4E2280 + +#define mmDMA_IF_E_N_DOWN_CH1_SCRAM_HBM_EN 0x4E2284 + +#define mmDMA_IF_E_N_DOWN_CH1_RL_PCI_EN 0x4E2288 + +#define mmDMA_IF_E_N_DOWN_CH1_RL_PCI_SAT 0x4E228C + +#define mmDMA_IF_E_N_DOWN_CH1_RL_PCI_RST 0x4E2290 + +#define mmDMA_IF_E_N_DOWN_CH1_RL_PCI_TIMEOUT 0x4E2294 + +#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_EN 0x4E229C + +#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_SAT 0x4E22A0 + +#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_RST 0x4E22A4 + +#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_TIMEOUT 0x4E22AC + +#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_RED 0x4E22B4 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_EN 0x4E22EC + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_EN 0x4E22F0 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_WR_SIZE 0x4E22F4 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_WR_SIZE 0x4E22F8 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_PCI_CTR_SET_EN 0x4E2404 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_PCI_CTR_SET 0x4E2408 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_PCI_CTR_WRAP 0x4E240C + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_PCI_CTR_CNT 0x4E2410 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM_CTR_SET_EN 0x4E2414 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM_CTR_SET 0x4E2418 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_RD_SIZE 0x4E241C + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_RD_SIZE 0x4E2420 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_PCI_CTR_SET_EN 0x4E2424 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_PCI_CTR_SET 0x4E2428 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_PCI_CTR_WRAP 0x4E242C + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_PCI_CTR_CNT 0x4E2430 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM_CTR_SET_EN 0x4E2434 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM_CTR_SET 0x4E2438 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_0 0x4E2450 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_1 0x4E2454 + +#define mmDMA_IF_E_N_DOWN_CH1_NON_LIN_EN 0x4E2480 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_0 0x4E2500 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_1 0x4E2504 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_2 0x4E2508 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_3 0x4E250C + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_4 0x4E2510 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_0 0x4E2514 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_1 0x4E2520 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_2 0x4E2524 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_3 0x4E2528 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_4 0x4E252C + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_5 0x4E2530 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_6 0x4E2534 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_7 0x4E2538 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_8 0x4E253C + +#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_9 0x4E2540 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_0 0x4E2550 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_1 0x4E2554 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_2 0x4E2558 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_3 0x4E255C + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_4 0x4E2560 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_5 0x4E2564 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_6 0x4E2568 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_7 0x4E256C + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_8 0x4E2570 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_9 0x4E2574 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_10 0x4E2578 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_11 0x4E257C + +#define 
mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_12 0x4E2580 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_13 0x4E2584 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_14 0x4E2588 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_15 0x4E258C + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_16 0x4E2590 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_17 0x4E2594 + +#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_18 0x4E2598 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0 0x4E25E4 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_1 0x4E25E8 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_2 0x4E25EC + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_3 0x4E25F0 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_4 0x4E25F4 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_5 0x4E25F8 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_6 0x4E25FC + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_7 0x4E2600 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_8 0x4E2604 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_9 0x4E2608 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_10 0x4E260C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_11 0x4E2610 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_12 0x4E2614 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_13 0x4E2618 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_14 0x4E261C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_15 0x4E2620 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0 0x4E2624 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_1 0x4E2628 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_2 0x4E262C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_3 0x4E2630 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_4 0x4E2634 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_5 0x4E2638 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_6 0x4E263C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_7 0x4E2640 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_8 0x4E2644 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_9 0x4E2648 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_10 0x4E264C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_11 0x4E2650 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_12 0x4E2654 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_13 0x4E2658 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_14 0x4E265C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_15 0x4E2660 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0 0x4E2664 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_1 0x4E2668 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_2 0x4E266C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_3 0x4E2670 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_4 0x4E2674 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_5 0x4E2678 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_6 0x4E267C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_7 0x4E2680 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_8 0x4E2684 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_9 0x4E2688 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_10 0x4E268C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_11 0x4E2690 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_12 0x4E2694 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_13 0x4E2698 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_14 0x4E269C + +#define 
mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_15 0x4E26A0 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0 0x4E26A4 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_1 0x4E26A8 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_2 0x4E26AC + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_3 0x4E26B0 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_4 0x4E26B4 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_5 0x4E26B8 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_6 0x4E26BC + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_7 0x4E26C0 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_8 0x4E26C4 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_9 0x4E26C8 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_10 0x4E26CC + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_11 0x4E26D0 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_12 0x4E26D4 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_13 0x4E26D8 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_14 0x4E26DC + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_15 0x4E26E0 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_0 0x4E26E4 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_1 0x4E26E8 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_2 0x4E26EC + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_3 0x4E26F0 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_4 0x4E26F4 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_5 0x4E26F8 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_6 0x4E26FC + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_7 0x4E2700 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_8 0x4E2704 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_9 0x4E2708 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_10 0x4E270C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_11 0x4E2710 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_12 0x4E2714 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_13 0x4E2718 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_14 0x4E271C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_15 0x4E2720 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_0 0x4E2724 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_1 0x4E2728 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_2 0x4E272C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_3 0x4E2730 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_4 0x4E2734 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_5 0x4E2738 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_6 0x4E273C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_7 0x4E2740 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_8 0x4E2744 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_9 0x4E2748 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_10 0x4E274C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_11 0x4E2750 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_12 0x4E2754 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_13 0x4E2758 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_14 0x4E275C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_15 0x4E2760 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_0 0x4E2764 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_1 0x4E2768 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_2 0x4E276C + +#define 
mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_3 0x4E2770
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_4 0x4E2774
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_5 0x4E2778
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_6 0x4E277C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_7 0x4E2780
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_8 0x4E2784
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_9 0x4E2788
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_10 0x4E278C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_11 0x4E2790
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_12 0x4E2794
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_13 0x4E2798
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_14 0x4E279C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_15 0x4E27A0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_0 0x4E27A4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_1 0x4E27A8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_2 0x4E27AC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_3 0x4E27B0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_4 0x4E27B4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_5 0x4E27B8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_6 0x4E27BC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_7 0x4E27C0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_8 0x4E27C4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_9 0x4E27C8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_10 0x4E27CC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_11 0x4E27D0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_12 0x4E27D4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_13 0x4E27D8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_14 0x4E27DC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_15 0x4E27E0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0 0x4E2824
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_1 0x4E2828
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_2 0x4E282C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_3 0x4E2830
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_4 0x4E2834
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_5 0x4E2838
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_6 0x4E283C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_7 0x4E2840
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_8 0x4E2844
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_9 0x4E2848
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_10 0x4E284C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_11 0x4E2850
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_12 0x4E2854
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_13 0x4E2858
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_14 0x4E285C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_15 0x4E2860
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0 0x4E2864
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_1 0x4E2868
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_2 0x4E286C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_3 0x4E2870
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_4 0x4E2874
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_5 0x4E2878
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_6 0x4E287C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_7 0x4E2880
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_8 0x4E2884
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_9 0x4E2888
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_10 0x4E288C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_11 0x4E2890
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_12 0x4E2894
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_13 0x4E2898
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_14 0x4E289C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_15 0x4E28A0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0 0x4E28A4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_1 0x4E28A8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_2 0x4E28AC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_3 0x4E28B0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_4 0x4E28B4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_5 0x4E28B8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_6 0x4E28BC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_7 0x4E28C0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_8 0x4E28C4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_9 0x4E28C8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_10 0x4E28CC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_11 0x4E28D0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_12 0x4E28D4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_13 0x4E28D8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_14 0x4E28DC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_15 0x4E28E0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0 0x4E28E4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_1 0x4E28E8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_2 0x4E28EC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_3 0x4E28F0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_4 0x4E28F4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_5 0x4E28F8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_6 0x4E28FC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_7 0x4E2900
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_8 0x4E2904
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_9 0x4E2908
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_10 0x4E290C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_11 0x4E2910
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_12 0x4E2914
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_13 0x4E2918
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_14 0x4E291C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_15 0x4E2920
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_0 0x4E2924
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_1 0x4E2928
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_2 0x4E292C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_3 0x4E2930
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_4 0x4E2934
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_5 0x4E2938
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_6 0x4E293C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_7 0x4E2940
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_8 0x4E2944
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_9 0x4E2948
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_10 0x4E294C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_11 0x4E2950
+
+#define
mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_12 0x4E2954 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_13 0x4E2958 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_14 0x4E295C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_15 0x4E2960 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_0 0x4E2964 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_1 0x4E2968 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_2 0x4E296C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_3 0x4E2970 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_4 0x4E2974 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_5 0x4E2978 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_6 0x4E297C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_7 0x4E2980 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_8 0x4E2984 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_9 0x4E2988 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_10 0x4E298C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_11 0x4E2990 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_12 0x4E2994 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_13 0x4E2998 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_14 0x4E299C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_15 0x4E29A0 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_0 0x4E29A4 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_1 0x4E29A8 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_2 0x4E29AC + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_3 0x4E29B0 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_4 0x4E29B4 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_5 0x4E29B8 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_6 0x4E29BC + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_7 0x4E29C0 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_8 0x4E29C4 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_9 0x4E29C8 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_10 0x4E29CC + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_11 0x4E29D0 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_12 0x4E29D4 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_13 0x4E29D8 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_14 0x4E29DC + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_15 0x4E29E0 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_0 0x4E29E4 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_1 0x4E29E8 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_2 0x4E29EC + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_3 0x4E29F0 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_4 0x4E29F4 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_5 0x4E29F8 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_6 0x4E29FC + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_7 0x4E2A00 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_8 0x4E2A04 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_9 0x4E2A08 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_10 0x4E2A0C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_11 0x4E2A10 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_12 0x4E2A14 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_13 0x4E2A18 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_14 0x4E2A1C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_15 0x4E2A20 + +#define 
mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_HIT_AW 0x4E2A64 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_HIT_AR 0x4E2A68 + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_HIT_AW 0x4E2A6C + +#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_HIT_AR 0x4E2A70 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_CFG 0x4E2B64 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_SHIFT 0x4E2B68 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_0 0x4E2B6C + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_1 0x4E2B70 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_2 0x4E2B74 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_3 0x4E2B78 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_4 0x4E2B7C + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_5 0x4E2B80 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_6 0x4E2B84 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_7 0x4E2B88 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_0 0x4E2BAC + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_1 0x4E2BB0 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_2 0x4E2BB4 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_3 0x4E2BB8 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_4 0x4E2BBC + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_5 0x4E2BC0 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_6 0x4E2BC4 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_7 0x4E2BC8 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_0 0x4E2BEC + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_1 0x4E2BF0 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_2 0x4E2BF4 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_3 0x4E2BF8 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_4 0x4E2BFC + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_5 0x4E2C00 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_6 0x4E2C04 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_7 0x4E2C08 + +#define mmDMA_IF_E_N_DOWN_CH1_RGL_WDT 0x4E2C2C + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_WRAP 0x4E2C30 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_WRAP 0x4E2C34 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_WRAP 0x4E2C38 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_WRAP 0x4E2C3C + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_WRAP 0x4E2C40 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_WRAP 0x4E2C44 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_WRAP 0x4E2C48 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_WRAP 0x4E2C4C + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_CNT 0x4E2C50 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_CNT 0x4E2C54 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_CNT 0x4E2C58 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_CNT 0x4E2C5C + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_CNT 0x4E2C60 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_CNT 0x4E2C64 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_CNT 0x4E2C68 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_CNT 0x4E2C6C + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_WRAP 0x4E2C70 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_WRAP 0x4E2C74 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_WRAP 0x4E2C78 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_WRAP 0x4E2C7C + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_WRAP 0x4E2C80 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_WRAP 0x4E2C84 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_WRAP 0x4E2C88 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_WRAP 0x4E2C8C + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_CNT 0x4E2C90 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_CNT 0x4E2C94 + +#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_CNT 0x4E2C98 + 
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_CNT 0x4E2C9C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_CNT 0x4E2CA0
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_CNT 0x4E2CA4
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_CNT 0x4E2CA8
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_CNT 0x4E2CAC
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_0 0x4E2CB0
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_1 0x4E2CB4
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_2 0x4E2CB8
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_3 0x4E2CBC
+
+#endif /* ASIC_REG_DMA_IF_E_N_DOWN_CH1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h
new file mode 100644
index 000000000..8a10c6a76
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h
@@ -0,0 +1,860 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_E_N_REGS_H_
+#define ASIC_REG_DMA_IF_E_N_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_E_N (Prototype: DMA_IF)
+ *****************************************
+ */
+
+#define mmDMA_IF_E_N_HBM0_WR_CRED_CNT 0x4E0000
+
+#define mmDMA_IF_E_N_HBM1_WR_CRED_CNT 0x4E0004
+
+#define mmDMA_IF_E_N_HBM0_RD_CRED_CNT 0x4E0008
+
+#define mmDMA_IF_E_N_HBM1_RD_CRED_CNT 0x4E000C
+
+#define mmDMA_IF_E_N_HBM_LIMITER_0 0x4E0030
+
+#define mmDMA_IF_E_N_HBM_LIMITER_1 0x4E0034
+
+#define mmDMA_IF_E_N_HBM_LIMITER_2 0x4E0038
+
+#define mmDMA_IF_E_N_HBM_LIMITER_3 0x4E003C
+
+#define mmDMA_IF_E_N_HBM_ALMOST_EN_0 0x4E0040
+
+#define mmDMA_IF_E_N_HBM_ALMOST_EN_1 0x4E0044
+
+#define mmDMA_IF_E_N_HBM_CRED_EN_0 0x4E0050
+
+#define mmDMA_IF_E_N_HBM_CRED_EN_1 0x4E0054
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_0 0x4E0100
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_1 0x4E0104
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_2 0x4E0108
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_3 0x4E010C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_4 0x4E0110
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_5 0x4E0114
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_6 0x4E0118
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_7 0x4E011C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_8 0x4E0120
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_9 0x4E0124
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_10 0x4E0128
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_11 0x4E012C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_12 0x4E0130
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_13 0x4E0134
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_14 0x4E0138
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_15 0x4E013C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_0 0x4E0140
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_1 0x4E0144
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_2 0x4E0148
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_3 0x4E014C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_4 0x4E0150
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_5 0x4E0154
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_6 0x4E0158
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_7 0x4E015C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_8 0x4E0160
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_9 0x4E0164
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_10 0x4E0168
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_11 0x4E016C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_12 0x4E0170
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_13 0x4E0174
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_14 0x4E0178
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_15 0x4E017C
+
+#define
mmDMA_IF_E_N_SOB_MIN_WPROT_0 0x4E0180 + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_1 0x4E0184 + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_2 0x4E0188 + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_3 0x4E018C + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_4 0x4E0190 + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_5 0x4E0194 + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_6 0x4E0198 + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_7 0x4E019C + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_8 0x4E01A0 + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_9 0x4E01A4 + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_10 0x4E01A8 + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_11 0x4E01AC + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_12 0x4E01B0 + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_13 0x4E01B4 + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_14 0x4E01B8 + +#define mmDMA_IF_E_N_SOB_MIN_WPROT_15 0x4E01BC + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_0 0x4E01C0 + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_1 0x4E01C4 + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_2 0x4E01C8 + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_3 0x4E01CC + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_4 0x4E01D0 + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_5 0x4E01D4 + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_6 0x4E01D8 + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_7 0x4E01DC + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_8 0x4E01E0 + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_9 0x4E01E4 + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_10 0x4E01E8 + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_11 0x4E01EC + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_12 0x4E01F0 + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_13 0x4E01F4 + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_14 0x4E01F8 + +#define mmDMA_IF_E_N_SOB_MAX_WPROT_15 0x4E01FC + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_0 0x4E0200 + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_1 0x4E0204 + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_2 0x4E0208 + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_3 0x4E020C + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_4 0x4E0210 + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_5 0x4E0214 + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_6 0x4E0218 + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_7 0x4E021C + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_8 0x4E0220 + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_9 0x4E0224 + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_10 0x4E0228 + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_11 0x4E022C + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_12 0x4E0230 + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_13 0x4E0234 + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_14 0x4E0238 + +#define mmDMA_IF_E_N_SOB_MIN_RPRIV_15 0x4E023C + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_0 0x4E0240 + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_1 0x4E0244 + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_2 0x4E0248 + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_3 0x4E024C + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_4 0x4E0250 + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_5 0x4E0254 + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_6 0x4E0258 + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_7 0x4E025C + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_8 0x4E0260 + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_9 0x4E0264 + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_10 0x4E0268 + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_11 0x4E026C + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_12 0x4E0270 + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_13 0x4E0274 + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_14 0x4E0278 + +#define mmDMA_IF_E_N_SOB_MAX_RPRIV_15 0x4E027C + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_0 0x4E0280 + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_1 0x4E0284 + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_2 0x4E0288 + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_3 0x4E028C + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_4 0x4E0290 + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_5 0x4E0294 + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_6 0x4E0298 + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_7 0x4E029C + +#define 
mmDMA_IF_E_N_SOB_MIN_WPRIV_8 0x4E02A0 + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_9 0x4E02A4 + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_10 0x4E02A8 + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_11 0x4E02AC + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_12 0x4E02B0 + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_13 0x4E02B4 + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_14 0x4E02B8 + +#define mmDMA_IF_E_N_SOB_MIN_WPRIV_15 0x4E02BC + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_0 0x4E02C0 + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_1 0x4E02C4 + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_2 0x4E02C8 + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_3 0x4E02CC + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_4 0x4E02D0 + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_5 0x4E02D4 + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_6 0x4E02D8 + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_7 0x4E02DC + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_8 0x4E02E0 + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_9 0x4E02E4 + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_10 0x4E02E8 + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_11 0x4E02EC + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_12 0x4E02F0 + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_13 0x4E02F4 + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_14 0x4E02F8 + +#define mmDMA_IF_E_N_SOB_MAX_WPRIV_15 0x4E02FC + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_0 0x4E0300 + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_1 0x4E0304 + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_2 0x4E0308 + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_3 0x4E030C + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_4 0x4E0310 + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_5 0x4E0314 + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_6 0x4E0318 + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_7 0x4E031C + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_8 0x4E0320 + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_9 0x4E0324 + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_10 0x4E0328 + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_11 0x4E032C + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_12 0x4E0330 + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_13 0x4E0334 + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_14 0x4E0338 + +#define mmDMA_IF_E_N_DMA0_MIN_RPROT_15 0x4E033C + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_0 0x4E0340 + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_1 0x4E0344 + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_2 0x4E0348 + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_3 0x4E034C + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_4 0x4E0350 + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_5 0x4E0354 + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_6 0x4E0358 + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_7 0x4E035C + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_8 0x4E0360 + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_9 0x4E0364 + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_10 0x4E0368 + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_11 0x4E036C + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_12 0x4E0370 + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_13 0x4E0374 + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_14 0x4E0378 + +#define mmDMA_IF_E_N_DMA0_MAX_RPROT_15 0x4E037C + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_0 0x4E0380 + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_1 0x4E0384 + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_2 0x4E0388 + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_3 0x4E038C + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_4 0x4E0390 + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_5 0x4E0394 + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_6 0x4E0398 + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_7 0x4E039C + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_8 0x4E03A0 + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_9 0x4E03A4 + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_10 0x4E03A8 + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_11 0x4E03AC + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_12 0x4E03B0 + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_13 0x4E03B4 + +#define mmDMA_IF_E_N_DMA0_MIN_WPROT_14 0x4E03B8 + +#define 
mmDMA_IF_E_N_DMA0_MIN_WPROT_15 0x4E03BC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_0 0x4E03C0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_1 0x4E03C4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_2 0x4E03C8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_3 0x4E03CC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_4 0x4E03D0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_5 0x4E03D4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_6 0x4E03D8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_7 0x4E03DC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_8 0x4E03E0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_9 0x4E03E4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_10 0x4E03E8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_11 0x4E03EC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_12 0x4E03F0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_13 0x4E03F4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_14 0x4E03F8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_15 0x4E03FC
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_0 0x4E0400
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_1 0x4E0404
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_2 0x4E0408
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_3 0x4E040C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_4 0x4E0410
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_5 0x4E0414
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_6 0x4E0418
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_7 0x4E041C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_8 0x4E0420
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_9 0x4E0424
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_10 0x4E0428
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_11 0x4E042C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_12 0x4E0430
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_13 0x4E0434
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_14 0x4E0438
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_15 0x4E043C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_0 0x4E0440
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_1 0x4E0444
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_2 0x4E0448
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_3 0x4E044C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_4 0x4E0450
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_5 0x4E0454
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_6 0x4E0458
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_7 0x4E045C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_8 0x4E0460
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_9 0x4E0464
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_10 0x4E0468
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_11 0x4E046C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_12 0x4E0470
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_13 0x4E0474
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_14 0x4E0478
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_15 0x4E047C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_0 0x4E0480
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_1 0x4E0484
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_2 0x4E0488
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_3 0x4E048C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_4 0x4E0490
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_5 0x4E0494
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_6 0x4E0498
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_7 0x4E049C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_8 0x4E04A0
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_9 0x4E04A4
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_10 0x4E04A8
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_11 0x4E04AC
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_12 0x4E04B0
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_13 0x4E04B4
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_14 0x4E04B8
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_15 0x4E04BC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_0 0x4E04C0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_1 0x4E04C4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_2 0x4E04C8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_3 0x4E04CC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_4 0x4E04D0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_5 0x4E04D4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_6 0x4E04D8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_7 0x4E04DC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_8 0x4E04E0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_9 0x4E04E4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_10 0x4E04E8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_11 0x4E04EC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_12 0x4E04F0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_13 0x4E04F4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_14 0x4E04F8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_15 0x4E04FC
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_0 0x4E0500
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_1 0x4E0504
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_2 0x4E0508
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_3 0x4E050C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_4 0x4E0510
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_5 0x4E0514
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_6 0x4E0518
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_7 0x4E051C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_8 0x4E0520
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_9 0x4E0524
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_10 0x4E0528
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_11 0x4E052C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_12 0x4E0530
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_13 0x4E0534
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_14 0x4E0538
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_15 0x4E053C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_0 0x4E0540
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_1 0x4E0544
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_2 0x4E0548
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_3 0x4E054C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_4 0x4E0550
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_5 0x4E0554
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_6 0x4E0558
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_7 0x4E055C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_8 0x4E0560
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_9 0x4E0564
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_10 0x4E0568
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_11 0x4E056C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_12 0x4E0570
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_13 0x4E0574
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_14 0x4E0578
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_15 0x4E057C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_0 0x4E0580
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_1 0x4E0584
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_2 0x4E0588
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_3 0x4E058C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_4 0x4E0590
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_5 0x4E0594
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_6 0x4E0598
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_7 0x4E059C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_8 0x4E05A0
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_9 0x4E05A4
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_10 0x4E05A8
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_11 0x4E05AC
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_12 0x4E05B0
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_13 0x4E05B4
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_14 0x4E05B8
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_15 0x4E05BC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_0 0x4E05C0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_1 0x4E05C4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_2 0x4E05C8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_3 0x4E05CC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_4 0x4E05D0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_5 0x4E05D4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_6 0x4E05D8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_7 0x4E05DC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_8 0x4E05E0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_9 0x4E05E4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_10 0x4E05E8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_11 0x4E05EC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_12 0x4E05F0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_13 0x4E05F4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_14 0x4E05F8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_15 0x4E05FC
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_0 0x4E0600
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_1 0x4E0604
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_2 0x4E0608
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_3 0x4E060C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_4 0x4E0610
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_5 0x4E0614
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_6 0x4E0618
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_7 0x4E061C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_8 0x4E0620
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_9 0x4E0624
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_10 0x4E0628
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_11 0x4E062C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_12 0x4E0630
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_13 0x4E0634
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_14 0x4E0638
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_15 0x4E063C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_0 0x4E0640
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_1 0x4E0644
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_2 0x4E0648
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_3 0x4E064C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_4 0x4E0650
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_5 0x4E0654
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_6 0x4E0658
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_7 0x4E065C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_8 0x4E0660
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_9 0x4E0664
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_10 0x4E0668
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_11 0x4E066C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_12 0x4E0670
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_13 0x4E0674
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_14 0x4E0678
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_15 0x4E067C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_0 0x4E0680
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_1 0x4E0684
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_2 0x4E0688
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_3 0x4E068C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_4 0x4E0690
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_5 0x4E0694
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_6 0x4E0698
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_7 0x4E069C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_8 0x4E06A0
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_9 0x4E06A4
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_10 0x4E06A8
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_11 0x4E06AC
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_12 0x4E06B0
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_13 0x4E06B4
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_14 0x4E06B8
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_15 0x4E06BC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_0 0x4E06C0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_1 0x4E06C4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_2 0x4E06C8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_3 0x4E06CC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_4 0x4E06D0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_5 0x4E06D4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_6 0x4E06D8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_7 0x4E06DC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_8 0x4E06E0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_9 0x4E06E4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_10 0x4E06E8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_11 0x4E06EC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_12 0x4E06F0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_13 0x4E06F4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_14 0x4E06F8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_15 0x4E06FC
+
+#define mmDMA_IF_E_N_SOB_HIT_RPROT 0x4E0700
+
+#define mmDMA_IF_E_N_SOB_HIT_WPROT 0x4E0704
+
+#define mmDMA_IF_E_N_SOB_HIT_RPRIV 0x4E070C
+
+#define mmDMA_IF_E_N_SOB_HIT_WPRIV 0x4E0710
+
+#define mmDMA_IF_E_N_DMA0_HIT_RPROT 0x4E071C
+
+#define mmDMA_IF_E_N_DMA0_HIT_WPROT 0x4E0720
+
+#define mmDMA_IF_E_N_DMA0_HIT_RPRIV 0x4E0724
+
+#define mmDMA_IF_E_N_DMA0_HIT_WPRIV 0x4E0728
+
+#define mmDMA_IF_E_N_DMA1_HIT_RPROT 0x4E0730
+
+#define mmDMA_IF_E_N_DMA1_HIT_WPROT 0x4E0734
+
+#define mmDMA_IF_E_N_DMA1_HIT_RPRIV 0x4E0738
+
+#define mmDMA_IF_E_N_DMA1_HIT_WPRIV 0x4E073C
+
+#define mmDMA_IF_E_N_HBM_BIN 0x4E0800
+
+#define mmDMA_IF_E_N_MME_BIN 0x4E0804
+
+#define mmDMA_IF_E_N_TPC_BIN 0x4E0808
+
+#define mmDMA_IF_E_N_DMA_BIN 0x4E080C
+
+#define mmDMA_IF_E_N_SOB_CG_EN 0x4E0810
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_0 0x4E0820
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_1 0x4E0824
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_2 0x4E0828
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_3 0x4E082C
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_4 0x4E0830
+
+#define mmDMA_IF_E_N_HBM_MISC 0x4E0834
+
+#endif /* ASIC_REG_DMA_IF_E_N_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h
new file mode 100644
index 000000000..cd61289a1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_E_S_DOWN_CH0_REGS_H_
+#define ASIC_REG_DMA_IF_E_S_DOWN_CH0_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_E_S_DOWN_CH0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_E_S_DOWN_CH0_PERM_SEL 0x4A1108
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_0 0x4A1114
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_1 0x4A1118
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_2 0x4A111C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_3 0x4A1120
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_4 0x4A1124
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_5 0x4A1128
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_6 0x4A112C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_7 0x4A1130
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_8 0x4A1134
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_9 0x4A1138
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_10 0x4A113C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_11 0x4A1140
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_12 0x4A1144
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_13 0x4A1148
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_14 0x4A114C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_15 0x4A1150
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_16 0x4A1154
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_17 0x4A1158
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_18 0x4A115C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_19 0x4A1160
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_20 0x4A1164
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_21 0x4A1168
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_22 0x4A116C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_23 0x4A1170
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_24 0x4A1174
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_25 0x4A1178
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_26 0x4A117C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_27 0x4A1180
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_0 0x4A1184
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_1 0x4A1188
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_2 0x4A118C
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_3 0x4A1190
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_4 0x4A1194
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_5 0x4A1198
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_6 0x4A119C
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_7 0x4A11A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_8 0x4A11A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_9 0x4A11A8
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_10 0x4A11AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_11 0x4A11B0
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_12 0x4A11B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_13 0x4A11B8
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_14 0x4A11BC
+
+#define mmDMA_IF_E_S_DOWN_CH0_SCRAM_SRAM_EN 0x4A126C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_HBM_EN 0x4A1274
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_HBM_SAT 0x4A1278
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_HBM_RST 0x4A127C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_HBM_TIMEOUT 0x4A1280
+
+#define mmDMA_IF_E_S_DOWN_CH0_SCRAM_HBM_EN 0x4A1284
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_PCI_EN 0x4A1288
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_PCI_SAT 0x4A128C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_PCI_RST 0x4A1290
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_PCI_TIMEOUT 0x4A1294
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_EN 0x4A129C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_SAT 0x4A12A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_RST 0x4A12A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_TIMEOUT 0x4A12AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_RED 0x4A12B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_EN 0x4A12EC
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_EN 0x4A12F0
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_WR_SIZE 0x4A12F4
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_WR_SIZE 0x4A12F8
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_PCI_CTR_SET_EN 0x4A1404
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_PCI_CTR_SET 0x4A1408
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_PCI_CTR_WRAP 0x4A140C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_PCI_CTR_CNT 0x4A1410
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM_CTR_SET_EN 0x4A1414
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM_CTR_SET 0x4A1418
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_RD_SIZE 0x4A141C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_RD_SIZE 0x4A1420
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_PCI_CTR_SET_EN 0x4A1424
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_PCI_CTR_SET 0x4A1428
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_PCI_CTR_WRAP 0x4A142C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_PCI_CTR_CNT 0x4A1430
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM_CTR_SET_EN 0x4A1434
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM_CTR_SET 0x4A1438
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_0 0x4A1450
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_1 0x4A1454
+
+#define mmDMA_IF_E_S_DOWN_CH0_NON_LIN_EN 0x4A1480
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_0 0x4A1500
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_1 0x4A1504
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_2 0x4A1508
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_3 0x4A150C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_4 0x4A1510
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_0 0x4A1514
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_1 0x4A1520
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_2 0x4A1524
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_3 0x4A1528
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_4 0x4A152C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_5 0x4A1530
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_6 0x4A1534
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_7 0x4A1538
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_8 0x4A153C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_9 0x4A1540
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_0 0x4A1550
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_1 0x4A1554
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_2 0x4A1558
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_3 0x4A155C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_4 0x4A1560
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_5 0x4A1564
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_6 0x4A1568
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_7 0x4A156C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_8 0x4A1570
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_9 0x4A1574
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_10 0x4A1578
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_11 0x4A157C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_12 0x4A1580
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_13 0x4A1584
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_14 0x4A1588
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_15 0x4A158C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_16 0x4A1590
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_17 0x4A1594
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_18 0x4A1598
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0 0x4A15E4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_1 0x4A15E8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_2 0x4A15EC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_3 0x4A15F0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_4 0x4A15F4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_5 0x4A15F8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_6 0x4A15FC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_7 0x4A1600
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_8 0x4A1604
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_9 0x4A1608
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_10 0x4A160C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_11 0x4A1610
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_12 0x4A1614
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_13 0x4A1618
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_14 0x4A161C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_15 0x4A1620
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0 0x4A1624
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_1 0x4A1628
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_2 0x4A162C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_3 0x4A1630
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_4 0x4A1634
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_5 0x4A1638
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_6 0x4A163C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_7 0x4A1640
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_8 0x4A1644
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_9 0x4A1648
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_10 0x4A164C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_11 0x4A1650
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_12 0x4A1654
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_13 0x4A1658
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_14 0x4A165C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_15 0x4A1660
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0 0x4A1664
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_1 0x4A1668
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_2 0x4A166C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_3 0x4A1670
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_4 0x4A1674
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_5 0x4A1678
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_6 0x4A167C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_7 0x4A1680
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_8 0x4A1684
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_9 0x4A1688
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_10 0x4A168C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_11 0x4A1690
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_12 0x4A1694
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_13 0x4A1698
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_14 0x4A169C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_15 0x4A16A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0 0x4A16A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_1 0x4A16A8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_2 0x4A16AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_3 0x4A16B0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_4 0x4A16B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_5 0x4A16B8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_6 0x4A16BC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_7 0x4A16C0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_8 0x4A16C4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_9 0x4A16C8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_10 0x4A16CC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_11 0x4A16D0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_12 0x4A16D4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_13 0x4A16D8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_14 0x4A16DC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_15 0x4A16E0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_0 0x4A16E4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_1 0x4A16E8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_2 0x4A16EC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_3 0x4A16F0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_4 0x4A16F4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_5 0x4A16F8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_6 0x4A16FC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_7 0x4A1700
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_8 0x4A1704
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_9 0x4A1708
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_10 0x4A170C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_11 0x4A1710
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_12 0x4A1714
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_13 0x4A1718
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_14 0x4A171C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_15 0x4A1720
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_0 0x4A1724
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_1 0x4A1728
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_2 0x4A172C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_3 0x4A1730
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_4 0x4A1734
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_5 0x4A1738
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_6 0x4A173C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_7 0x4A1740 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_8 0x4A1744 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_9 0x4A1748 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_10 0x4A174C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_11 0x4A1750 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_12 0x4A1754 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_13 0x4A1758 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_14 0x4A175C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_15 0x4A1760 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_0 0x4A1764 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_1 0x4A1768 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_2 0x4A176C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_3 0x4A1770 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_4 0x4A1774 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_5 0x4A1778 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_6 0x4A177C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_7 0x4A1780 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_8 0x4A1784 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_9 0x4A1788 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_10 0x4A178C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_11 0x4A1790 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_12 0x4A1794 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_13 0x4A1798 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_14 0x4A179C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_15 0x4A17A0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_0 0x4A17A4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_1 0x4A17A8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_2 0x4A17AC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_3 0x4A17B0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_4 0x4A17B4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_5 0x4A17B8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_6 0x4A17BC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_7 0x4A17C0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_8 0x4A17C4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_9 0x4A17C8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_10 0x4A17CC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_11 0x4A17D0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_12 0x4A17D4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_13 0x4A17D8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_14 0x4A17DC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_15 0x4A17E0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0 0x4A1824 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_1 0x4A1828 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_2 0x4A182C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_3 0x4A1830 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_4 0x4A1834 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_5 0x4A1838 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_6 0x4A183C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_7 0x4A1840 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_8 0x4A1844 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_9 0x4A1848 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_10 0x4A184C + +#define 
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_11 0x4A1850 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_12 0x4A1854 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_13 0x4A1858 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_14 0x4A185C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_15 0x4A1860 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0 0x4A1864 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_1 0x4A1868 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_2 0x4A186C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_3 0x4A1870 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_4 0x4A1874 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_5 0x4A1878 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_6 0x4A187C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_7 0x4A1880 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_8 0x4A1884 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_9 0x4A1888 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_10 0x4A188C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_11 0x4A1890 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_12 0x4A1894 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_13 0x4A1898 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_14 0x4A189C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_15 0x4A18A0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0 0x4A18A4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_1 0x4A18A8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_2 0x4A18AC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_3 0x4A18B0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_4 0x4A18B4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_5 0x4A18B8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_6 0x4A18BC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_7 0x4A18C0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_8 0x4A18C4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_9 0x4A18C8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_10 0x4A18CC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_11 0x4A18D0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_12 0x4A18D4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_13 0x4A18D8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_14 0x4A18DC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_15 0x4A18E0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0 0x4A18E4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_1 0x4A18E8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_2 0x4A18EC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_3 0x4A18F0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_4 0x4A18F4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_5 0x4A18F8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_6 0x4A18FC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_7 0x4A1900 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_8 0x4A1904 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_9 0x4A1908 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_10 0x4A190C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_11 0x4A1910 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_12 0x4A1914 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_13 0x4A1918 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_14 0x4A191C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_15 0x4A1920 + +#define 
mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_0 0x4A1924 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_1 0x4A1928 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_2 0x4A192C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_3 0x4A1930 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_4 0x4A1934 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_5 0x4A1938 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_6 0x4A193C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_7 0x4A1940 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_8 0x4A1944 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_9 0x4A1948 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_10 0x4A194C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_11 0x4A1950 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_12 0x4A1954 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_13 0x4A1958 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_14 0x4A195C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_15 0x4A1960 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_0 0x4A1964 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_1 0x4A1968 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_2 0x4A196C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_3 0x4A1970 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_4 0x4A1974 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_5 0x4A1978 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_6 0x4A197C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_7 0x4A1980 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_8 0x4A1984 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_9 0x4A1988 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_10 0x4A198C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_11 0x4A1990 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_12 0x4A1994 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_13 0x4A1998 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_14 0x4A199C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_15 0x4A19A0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_0 0x4A19A4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_1 0x4A19A8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_2 0x4A19AC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_3 0x4A19B0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_4 0x4A19B4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_5 0x4A19B8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_6 0x4A19BC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_7 0x4A19C0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_8 0x4A19C4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_9 0x4A19C8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_10 0x4A19CC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_11 0x4A19D0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_12 0x4A19D4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_13 0x4A19D8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_14 0x4A19DC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_15 0x4A19E0 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_0 0x4A19E4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_1 0x4A19E8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_2 0x4A19EC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_3 0x4A19F0 + +#define 
mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_4 0x4A19F4 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_5 0x4A19F8 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_6 0x4A19FC + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_7 0x4A1A00 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_8 0x4A1A04 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_9 0x4A1A08 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_10 0x4A1A0C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_11 0x4A1A10 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_12 0x4A1A14 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_13 0x4A1A18 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_14 0x4A1A1C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_15 0x4A1A20 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AW 0x4A1A64 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AR 0x4A1A68 + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_HIT_AW 0x4A1A6C + +#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_HIT_AR 0x4A1A70 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_CFG 0x4A1B64 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_SHIFT 0x4A1B68 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_0 0x4A1B6C + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_1 0x4A1B70 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_2 0x4A1B74 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_3 0x4A1B78 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_4 0x4A1B7C + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_5 0x4A1B80 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_6 0x4A1B84 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_7 0x4A1B88 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_0 0x4A1BAC + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_1 0x4A1BB0 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_2 0x4A1BB4 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_3 0x4A1BB8 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_4 0x4A1BBC + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_5 0x4A1BC0 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_6 0x4A1BC4 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_7 0x4A1BC8 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_0 0x4A1BEC + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_1 0x4A1BF0 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_2 0x4A1BF4 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_3 0x4A1BF8 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_4 0x4A1BFC + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_5 0x4A1C00 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_6 0x4A1C04 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_7 0x4A1C08 + +#define mmDMA_IF_E_S_DOWN_CH0_RGL_WDT 0x4A1C2C + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_WRAP 0x4A1C30 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_WRAP 0x4A1C34 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_WRAP 0x4A1C38 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_WRAP 0x4A1C3C + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_WRAP 0x4A1C40 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_WRAP 0x4A1C44 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_WRAP 0x4A1C48 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_WRAP 0x4A1C4C + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_CNT 0x4A1C50 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_CNT 0x4A1C54 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_CNT 0x4A1C58 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_CNT 0x4A1C5C + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_CNT 0x4A1C60 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_CNT 0x4A1C64 + +#define 
mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_CNT 0x4A1C68 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_CNT 0x4A1C6C + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_WRAP 0x4A1C70 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_WRAP 0x4A1C74 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_WRAP 0x4A1C78 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_WRAP 0x4A1C7C + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_WRAP 0x4A1C80 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_WRAP 0x4A1C84 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_WRAP 0x4A1C88 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_WRAP 0x4A1C8C + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_CNT 0x4A1C90 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_CNT 0x4A1C94 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_CNT 0x4A1C98 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_CNT 0x4A1C9C + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_CNT 0x4A1CA0 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_CNT 0x4A1CA4 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_CNT 0x4A1CA8 + +#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_CNT 0x4A1CAC + +#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_0 0x4A1CB0 + +#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_1 0x4A1CB4 + +#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_2 0x4A1CB8 + +#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_3 0x4A1CBC + +#endif /* ASIC_REG_DMA_IF_E_S_DOWN_CH0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h new file mode 100644 index 000000000..3f32370a1 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h @@ -0,0 +1,896 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_IF_E_S_DOWN_CH1_REGS_H_ +#define ASIC_REG_DMA_IF_E_S_DOWN_CH1_REGS_H_ + +/* + ***************************************** + * DMA_IF_E_S_DOWN_CH1 (Prototype: RTR_CTRL) + ***************************************** + */ + +#define mmDMA_IF_E_S_DOWN_CH1_PERM_SEL 0x4A2108 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_0 0x4A2114 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_1 0x4A2118 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_2 0x4A211C + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_3 0x4A2120 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_4 0x4A2124 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_5 0x4A2128 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_6 0x4A212C + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_7 0x4A2130 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_8 0x4A2134 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_9 0x4A2138 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_10 0x4A213C + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_11 0x4A2140 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_12 0x4A2144 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_13 0x4A2148 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_14 0x4A214C + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_15 0x4A2150 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_16 0x4A2154 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_17 0x4A2158 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_18 0x4A215C + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_19 0x4A2160 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_20 0x4A2164 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_21 0x4A2168 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_22 0x4A216C + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_23 0x4A2170 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_24 0x4A2174 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_25 0x4A2178 + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_26 0x4A217C + +#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_27 0x4A2180 + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_0 0x4A2184 + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_1 0x4A2188 + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_2 0x4A218C + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_3 0x4A2190 + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_4 0x4A2194 + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_5 0x4A2198 + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_6 0x4A219C + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_7 0x4A21A0 + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_8 0x4A21A4 + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_9 0x4A21A8 + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_10 0x4A21AC + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_11 0x4A21B0 + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_12 0x4A21B4 + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_13 0x4A21B8 + +#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_14 0x4A21BC + +#define mmDMA_IF_E_S_DOWN_CH1_SCRAM_SRAM_EN 0x4A226C + +#define mmDMA_IF_E_S_DOWN_CH1_RL_HBM_EN 0x4A2274 + +#define mmDMA_IF_E_S_DOWN_CH1_RL_HBM_SAT 0x4A2278 + +#define mmDMA_IF_E_S_DOWN_CH1_RL_HBM_RST 0x4A227C + +#define mmDMA_IF_E_S_DOWN_CH1_RL_HBM_TIMEOUT 0x4A2280 + +#define mmDMA_IF_E_S_DOWN_CH1_SCRAM_HBM_EN 0x4A2284 + +#define mmDMA_IF_E_S_DOWN_CH1_RL_PCI_EN 0x4A2288 + +#define mmDMA_IF_E_S_DOWN_CH1_RL_PCI_SAT 0x4A228C + +#define mmDMA_IF_E_S_DOWN_CH1_RL_PCI_RST 0x4A2290 + +#define mmDMA_IF_E_S_DOWN_CH1_RL_PCI_TIMEOUT 0x4A2294 + +#define mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_EN 0x4A229C + +#define mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_SAT 0x4A22A0 + +#define 
mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_RST 0x4A22A4 + +#define mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_TIMEOUT 0x4A22AC + +#define mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_RED 0x4A22B4 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_EN 0x4A22EC + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_EN 0x4A22F0 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_WR_SIZE 0x4A22F4 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_WR_SIZE 0x4A22F8 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_PCI_CTR_SET_EN 0x4A2404 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_PCI_CTR_SET 0x4A2408 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_PCI_CTR_WRAP 0x4A240C + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_PCI_CTR_CNT 0x4A2410 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM_CTR_SET_EN 0x4A2414 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM_CTR_SET 0x4A2418 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_RD_SIZE 0x4A241C + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_RD_SIZE 0x4A2420 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_PCI_CTR_SET_EN 0x4A2424 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_PCI_CTR_SET 0x4A2428 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_PCI_CTR_WRAP 0x4A242C + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_PCI_CTR_CNT 0x4A2430 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM_CTR_SET_EN 0x4A2434 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM_CTR_SET 0x4A2438 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_0 0x4A2450 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_1 0x4A2454 + +#define mmDMA_IF_E_S_DOWN_CH1_NON_LIN_EN 0x4A2480 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_0 0x4A2500 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_1 0x4A2504 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_2 0x4A2508 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_3 0x4A250C + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_4 0x4A2510 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_0 0x4A2514 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_1 0x4A2520 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_2 0x4A2524 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_3 0x4A2528 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_4 0x4A252C + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_5 0x4A2530 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_6 0x4A2534 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_7 0x4A2538 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_8 0x4A253C + +#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_9 0x4A2540 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_0 0x4A2550 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_1 0x4A2554 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_2 0x4A2558 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_3 0x4A255C + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_4 0x4A2560 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_5 0x4A2564 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_6 0x4A2568 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_7 0x4A256C + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_8 0x4A2570 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_9 0x4A2574 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_10 0x4A2578 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_11 0x4A257C + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_12 0x4A2580 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_13 0x4A2584 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_14 0x4A2588 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_15 0x4A258C + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_16 0x4A2590 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_17 0x4A2594 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_18 0x4A2598 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0 0x4A25E4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_1 0x4A25E8 + +#define 
mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_2 0x4A25EC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_3 0x4A25F0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_4 0x4A25F4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_5 0x4A25F8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_6 0x4A25FC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_7 0x4A2600 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_8 0x4A2604 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_9 0x4A2608 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_10 0x4A260C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_11 0x4A2610 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_12 0x4A2614 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_13 0x4A2618 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_14 0x4A261C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_15 0x4A2620 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0 0x4A2624 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_1 0x4A2628 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_2 0x4A262C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_3 0x4A2630 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_4 0x4A2634 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_5 0x4A2638 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_6 0x4A263C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_7 0x4A2640 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_8 0x4A2644 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_9 0x4A2648 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_10 0x4A264C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_11 0x4A2650 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_12 0x4A2654 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_13 0x4A2658 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_14 0x4A265C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_15 0x4A2660 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0 0x4A2664 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_1 0x4A2668 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_2 0x4A266C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_3 0x4A2670 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_4 0x4A2674 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_5 0x4A2678 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_6 0x4A267C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_7 0x4A2680 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_8 0x4A2684 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_9 0x4A2688 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_10 0x4A268C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_11 0x4A2690 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_12 0x4A2694 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_13 0x4A2698 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_14 0x4A269C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_15 0x4A26A0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0 0x4A26A4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_1 0x4A26A8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_2 0x4A26AC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_3 0x4A26B0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_4 0x4A26B4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_5 0x4A26B8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_6 0x4A26BC + +#define 
mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_7 0x4A26C0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_8 0x4A26C4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_9 0x4A26C8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_10 0x4A26CC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_11 0x4A26D0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_12 0x4A26D4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_13 0x4A26D8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_14 0x4A26DC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_15 0x4A26E0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_0 0x4A26E4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_1 0x4A26E8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_2 0x4A26EC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_3 0x4A26F0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_4 0x4A26F4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_5 0x4A26F8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_6 0x4A26FC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_7 0x4A2700 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_8 0x4A2704 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_9 0x4A2708 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_10 0x4A270C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_11 0x4A2710 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_12 0x4A2714 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_13 0x4A2718 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_14 0x4A271C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_15 0x4A2720 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_0 0x4A2724 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_1 0x4A2728 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_2 0x4A272C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_3 0x4A2730 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_4 0x4A2734 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_5 0x4A2738 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_6 0x4A273C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_7 0x4A2740 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_8 0x4A2744 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_9 0x4A2748 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_10 0x4A274C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_11 0x4A2750 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_12 0x4A2754 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_13 0x4A2758 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_14 0x4A275C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_15 0x4A2760 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_0 0x4A2764 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_1 0x4A2768 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_2 0x4A276C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_3 0x4A2770 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_4 0x4A2774 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_5 0x4A2778 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_6 0x4A277C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_7 0x4A2780 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_8 0x4A2784 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_9 0x4A2788 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_10 0x4A278C + +#define 
mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_11 0x4A2790 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_12 0x4A2794 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_13 0x4A2798 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_14 0x4A279C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_15 0x4A27A0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_0 0x4A27A4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_1 0x4A27A8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_2 0x4A27AC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_3 0x4A27B0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_4 0x4A27B4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_5 0x4A27B8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_6 0x4A27BC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_7 0x4A27C0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_8 0x4A27C4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_9 0x4A27C8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_10 0x4A27CC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_11 0x4A27D0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_12 0x4A27D4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_13 0x4A27D8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_14 0x4A27DC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_15 0x4A27E0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0 0x4A2824 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_1 0x4A2828 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_2 0x4A282C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_3 0x4A2830 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_4 0x4A2834 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_5 0x4A2838 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_6 0x4A283C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_7 0x4A2840 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_8 0x4A2844 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_9 0x4A2848 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_10 0x4A284C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_11 0x4A2850 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_12 0x4A2854 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_13 0x4A2858 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_14 0x4A285C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_15 0x4A2860 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0 0x4A2864 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_1 0x4A2868 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_2 0x4A286C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_3 0x4A2870 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_4 0x4A2874 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_5 0x4A2878 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_6 0x4A287C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_7 0x4A2880 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_8 0x4A2884 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_9 0x4A2888 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_10 0x4A288C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_11 0x4A2890 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_12 0x4A2894 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_13 0x4A2898 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_14 0x4A289C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_15 
0x4A28A0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0 0x4A28A4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_1 0x4A28A8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_2 0x4A28AC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_3 0x4A28B0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_4 0x4A28B4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_5 0x4A28B8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_6 0x4A28BC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_7 0x4A28C0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_8 0x4A28C4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_9 0x4A28C8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_10 0x4A28CC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_11 0x4A28D0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_12 0x4A28D4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_13 0x4A28D8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_14 0x4A28DC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_15 0x4A28E0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0 0x4A28E4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_1 0x4A28E8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_2 0x4A28EC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_3 0x4A28F0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_4 0x4A28F4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_5 0x4A28F8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_6 0x4A28FC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_7 0x4A2900 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_8 0x4A2904 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_9 0x4A2908 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_10 0x4A290C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_11 0x4A2910 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_12 0x4A2914 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_13 0x4A2918 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_14 0x4A291C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_15 0x4A2920 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_0 0x4A2924 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_1 0x4A2928 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_2 0x4A292C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_3 0x4A2930 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_4 0x4A2934 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_5 0x4A2938 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_6 0x4A293C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_7 0x4A2940 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_8 0x4A2944 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_9 0x4A2948 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_10 0x4A294C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_11 0x4A2950 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_12 0x4A2954 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_13 0x4A2958 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_14 0x4A295C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_15 0x4A2960 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_0 0x4A2964 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_1 0x4A2968 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_2 0x4A296C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_3 0x4A2970 + +#define 
mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_4 0x4A2974 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_5 0x4A2978 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_6 0x4A297C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_7 0x4A2980 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_8 0x4A2984 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_9 0x4A2988 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_10 0x4A298C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_11 0x4A2990 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_12 0x4A2994 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_13 0x4A2998 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_14 0x4A299C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_15 0x4A29A0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_0 0x4A29A4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_1 0x4A29A8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_2 0x4A29AC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_3 0x4A29B0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_4 0x4A29B4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_5 0x4A29B8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_6 0x4A29BC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_7 0x4A29C0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_8 0x4A29C4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_9 0x4A29C8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_10 0x4A29CC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_11 0x4A29D0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_12 0x4A29D4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_13 0x4A29D8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_14 0x4A29DC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_15 0x4A29E0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_0 0x4A29E4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_1 0x4A29E8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_2 0x4A29EC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_3 0x4A29F0 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_4 0x4A29F4 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_5 0x4A29F8 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_6 0x4A29FC + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_7 0x4A2A00 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_8 0x4A2A04 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_9 0x4A2A08 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_10 0x4A2A0C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_11 0x4A2A10 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_12 0x4A2A14 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_13 0x4A2A18 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_14 0x4A2A1C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_15 0x4A2A20 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_HIT_AW 0x4A2A64 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_HIT_AR 0x4A2A68 + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_HIT_AW 0x4A2A6C + +#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_HIT_AR 0x4A2A70 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_CFG 0x4A2B64 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_SHIFT 0x4A2B68 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_0 0x4A2B6C + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_1 0x4A2B70 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_2 0x4A2B74 + +#define 
mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_3 0x4A2B78 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_4 0x4A2B7C + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_5 0x4A2B80 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_6 0x4A2B84 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_7 0x4A2B88 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_0 0x4A2BAC + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_1 0x4A2BB0 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_2 0x4A2BB4 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_3 0x4A2BB8 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_4 0x4A2BBC + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_5 0x4A2BC0 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_6 0x4A2BC4 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_7 0x4A2BC8 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_0 0x4A2BEC + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_1 0x4A2BF0 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_2 0x4A2BF4 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_3 0x4A2BF8 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_4 0x4A2BFC + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_5 0x4A2C00 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_6 0x4A2C04 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_7 0x4A2C08 + +#define mmDMA_IF_E_S_DOWN_CH1_RGL_WDT 0x4A2C2C + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_WRAP 0x4A2C30 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_WRAP 0x4A2C34 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_WRAP 0x4A2C38 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_WRAP 0x4A2C3C + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_WRAP 0x4A2C40 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_WRAP 0x4A2C44 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_WRAP 0x4A2C48 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_WRAP 0x4A2C4C + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_CNT 0x4A2C50 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_CNT 0x4A2C54 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_CNT 0x4A2C58 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_CNT 0x4A2C5C + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_CNT 0x4A2C60 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_CNT 0x4A2C64 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_CNT 0x4A2C68 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_CNT 0x4A2C6C + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_WRAP 0x4A2C70 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_WRAP 0x4A2C74 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_WRAP 0x4A2C78 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_WRAP 0x4A2C7C + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_WRAP 0x4A2C80 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_WRAP 0x4A2C84 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_WRAP 0x4A2C88 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_WRAP 0x4A2C8C + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_CNT 0x4A2C90 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_CNT 0x4A2C94 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_CNT 0x4A2C98 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_CNT 0x4A2C9C + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_CNT 0x4A2CA0 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_CNT 0x4A2CA4 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_CNT 0x4A2CA8 + +#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_CNT 0x4A2CAC + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_0 0x4A2CB0 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_1 0x4A2CB4 + +#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_2 0x4A2CB8 + +#define 
mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_3 0x4A2CBC + +#endif /* ASIC_REG_DMA_IF_E_S_DOWN_CH1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h new file mode 100644 index 000000000..78c18da71 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h @@ -0,0 +1,860 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_IF_E_S_REGS_H_ +#define ASIC_REG_DMA_IF_E_S_REGS_H_ + +/* + ***************************************** + * DMA_IF_E_S (Prototype: DMA_IF) + ***************************************** + */ + +#define mmDMA_IF_E_S_HBM0_WR_CRED_CNT 0x4A0000 + +#define mmDMA_IF_E_S_HBM1_WR_CRED_CNT 0x4A0004 + +#define mmDMA_IF_E_S_HBM0_RD_CRED_CNT 0x4A0008 + +#define mmDMA_IF_E_S_HBM1_RD_CRED_CNT 0x4A000C + +#define mmDMA_IF_E_S_HBM_LIMITER_0 0x4A0030 + +#define mmDMA_IF_E_S_HBM_LIMITER_1 0x4A0034 + +#define mmDMA_IF_E_S_HBM_LIMITER_2 0x4A0038 + +#define mmDMA_IF_E_S_HBM_LIMITER_3 0x4A003C + +#define mmDMA_IF_E_S_HBM_ALMOST_EN_0 0x4A0040 + +#define mmDMA_IF_E_S_HBM_ALMOST_EN_1 0x4A0044 + +#define mmDMA_IF_E_S_HBM_CRED_EN_0 0x4A0050 + +#define mmDMA_IF_E_S_HBM_CRED_EN_1 0x4A0054 + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_0 0x4A0100 + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_1 0x4A0104 + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_2 0x4A0108 + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_3 0x4A010C + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_4 0x4A0110 + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_5 0x4A0114 + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_6 0x4A0118 + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_7 0x4A011C + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_8 0x4A0120 + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_9 0x4A0124 + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_10 0x4A0128 + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_11 0x4A012C + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_12 0x4A0130 + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_13 0x4A0134 + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_14 0x4A0138 + +#define mmDMA_IF_E_S_SOB_MIN_RPROT_15 0x4A013C + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_0 0x4A0140 + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_1 0x4A0144 + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_2 0x4A0148 + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_3 0x4A014C + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_4 0x4A0150 + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_5 0x4A0154 + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_6 0x4A0158 + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_7 0x4A015C + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_8 0x4A0160 + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_9 0x4A0164 + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_10 0x4A0168 + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_11 0x4A016C + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_12 0x4A0170 + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_13 0x4A0174 + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_14 0x4A0178 + +#define mmDMA_IF_E_S_SOB_MAX_RPROT_15 0x4A017C + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_0 0x4A0180 + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_1 0x4A0184 + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_2 0x4A0188 + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_3 0x4A018C + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_4 0x4A0190 + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_5 0x4A0194 + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_6 0x4A0198 + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_7 0x4A019C + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_8 0x4A01A0 + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_9 0x4A01A4 + +#define 
mmDMA_IF_E_S_SOB_MIN_WPROT_10 0x4A01A8 + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_11 0x4A01AC + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_12 0x4A01B0 + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_13 0x4A01B4 + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_14 0x4A01B8 + +#define mmDMA_IF_E_S_SOB_MIN_WPROT_15 0x4A01BC + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_0 0x4A01C0 + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_1 0x4A01C4 + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_2 0x4A01C8 + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_3 0x4A01CC + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_4 0x4A01D0 + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_5 0x4A01D4 + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_6 0x4A01D8 + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_7 0x4A01DC + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_8 0x4A01E0 + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_9 0x4A01E4 + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_10 0x4A01E8 + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_11 0x4A01EC + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_12 0x4A01F0 + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_13 0x4A01F4 + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_14 0x4A01F8 + +#define mmDMA_IF_E_S_SOB_MAX_WPROT_15 0x4A01FC + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_0 0x4A0200 + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_1 0x4A0204 + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_2 0x4A0208 + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_3 0x4A020C + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_4 0x4A0210 + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_5 0x4A0214 + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_6 0x4A0218 + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_7 0x4A021C + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_8 0x4A0220 + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_9 0x4A0224 + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_10 0x4A0228 + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_11 0x4A022C + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_12 0x4A0230 + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_13 0x4A0234 + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_14 0x4A0238 + +#define mmDMA_IF_E_S_SOB_MIN_RPRIV_15 0x4A023C + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_0 0x4A0240 + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_1 0x4A0244 + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_2 0x4A0248 + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_3 0x4A024C + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_4 0x4A0250 + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_5 0x4A0254 + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_6 0x4A0258 + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_7 0x4A025C + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_8 0x4A0260 + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_9 0x4A0264 + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_10 0x4A0268 + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_11 0x4A026C + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_12 0x4A0270 + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_13 0x4A0274 + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_14 0x4A0278 + +#define mmDMA_IF_E_S_SOB_MAX_RPRIV_15 0x4A027C + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_0 0x4A0280 + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_1 0x4A0284 + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_2 0x4A0288 + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_3 0x4A028C + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_4 0x4A0290 + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_5 0x4A0294 + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_6 0x4A0298 + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_7 0x4A029C + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_8 0x4A02A0 + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_9 0x4A02A4 + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_10 0x4A02A8 + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_11 0x4A02AC + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_12 0x4A02B0 + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_13 0x4A02B4 + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_14 0x4A02B8 + +#define mmDMA_IF_E_S_SOB_MIN_WPRIV_15 0x4A02BC + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_0 0x4A02C0 + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_1 0x4A02C4 + 
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_2 0x4A02C8 + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_3 0x4A02CC + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_4 0x4A02D0 + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_5 0x4A02D4 + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_6 0x4A02D8 + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_7 0x4A02DC + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_8 0x4A02E0 + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_9 0x4A02E4 + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_10 0x4A02E8 + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_11 0x4A02EC + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_12 0x4A02F0 + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_13 0x4A02F4 + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_14 0x4A02F8 + +#define mmDMA_IF_E_S_SOB_MAX_WPRIV_15 0x4A02FC + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_0 0x4A0300 + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_1 0x4A0304 + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_2 0x4A0308 + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_3 0x4A030C + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_4 0x4A0310 + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_5 0x4A0314 + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_6 0x4A0318 + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_7 0x4A031C + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_8 0x4A0320 + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_9 0x4A0324 + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_10 0x4A0328 + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_11 0x4A032C + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_12 0x4A0330 + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_13 0x4A0334 + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_14 0x4A0338 + +#define mmDMA_IF_E_S_DMA0_MIN_RPROT_15 0x4A033C + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_0 0x4A0340 + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_1 0x4A0344 + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_2 0x4A0348 + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_3 0x4A034C + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_4 0x4A0350 + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_5 0x4A0354 + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_6 0x4A0358 + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_7 0x4A035C + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_8 0x4A0360 + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_9 0x4A0364 + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_10 0x4A0368 + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_11 0x4A036C + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_12 0x4A0370 + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_13 0x4A0374 + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_14 0x4A0378 + +#define mmDMA_IF_E_S_DMA0_MAX_RPROT_15 0x4A037C + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_0 0x4A0380 + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_1 0x4A0384 + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_2 0x4A0388 + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_3 0x4A038C + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_4 0x4A0390 + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_5 0x4A0394 + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_6 0x4A0398 + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_7 0x4A039C + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_8 0x4A03A0 + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_9 0x4A03A4 + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_10 0x4A03A8 + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_11 0x4A03AC + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_12 0x4A03B0 + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_13 0x4A03B4 + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_14 0x4A03B8 + +#define mmDMA_IF_E_S_DMA0_MIN_WPROT_15 0x4A03BC + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_0 0x4A03C0 + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_1 0x4A03C4 + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_2 0x4A03C8 + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_3 0x4A03CC + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_4 0x4A03D0 + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_5 0x4A03D4 + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_6 0x4A03D8 + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_7 0x4A03DC + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_8 
0x4A03E0 + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_9 0x4A03E4 + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_10 0x4A03E8 + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_11 0x4A03EC + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_12 0x4A03F0 + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_13 0x4A03F4 + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_14 0x4A03F8 + +#define mmDMA_IF_E_S_DMA0_MAX_WPROT_15 0x4A03FC + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_0 0x4A0400 + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_1 0x4A0404 + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_2 0x4A0408 + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_3 0x4A040C + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_4 0x4A0410 + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_5 0x4A0414 + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_6 0x4A0418 + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_7 0x4A041C + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_8 0x4A0420 + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_9 0x4A0424 + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_10 0x4A0428 + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_11 0x4A042C + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_12 0x4A0430 + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_13 0x4A0434 + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_14 0x4A0438 + +#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_15 0x4A043C + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_0 0x4A0440 + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_1 0x4A0444 + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_2 0x4A0448 + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_3 0x4A044C + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_4 0x4A0450 + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_5 0x4A0454 + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_6 0x4A0458 + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_7 0x4A045C + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_8 0x4A0460 + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_9 0x4A0464 + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_10 0x4A0468 + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_11 0x4A046C + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_12 0x4A0470 + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_13 0x4A0474 + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_14 0x4A0478 + +#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_15 0x4A047C + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_0 0x4A0480 + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_1 0x4A0484 + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_2 0x4A0488 + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_3 0x4A048C + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_4 0x4A0490 + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_5 0x4A0494 + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_6 0x4A0498 + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_7 0x4A049C + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_8 0x4A04A0 + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_9 0x4A04A4 + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_10 0x4A04A8 + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_11 0x4A04AC + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_12 0x4A04B0 + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_13 0x4A04B4 + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_14 0x4A04B8 + +#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_15 0x4A04BC + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_0 0x4A04C0 + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_1 0x4A04C4 + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_2 0x4A04C8 + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_3 0x4A04CC + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_4 0x4A04D0 + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_5 0x4A04D4 + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_6 0x4A04D8 + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_7 0x4A04DC + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_8 0x4A04E0 + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_9 0x4A04E4 + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_10 0x4A04E8 + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_11 0x4A04EC + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_12 0x4A04F0 + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_13 0x4A04F4 + +#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_14 0x4A04F8 + +#define 
mmDMA_IF_E_S_DMA0_MAX_WPRIV_15 0x4A04FC + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_0 0x4A0500 + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_1 0x4A0504 + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_2 0x4A0508 + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_3 0x4A050C + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_4 0x4A0510 + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_5 0x4A0514 + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_6 0x4A0518 + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_7 0x4A051C + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_8 0x4A0520 + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_9 0x4A0524 + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_10 0x4A0528 + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_11 0x4A052C + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_12 0x4A0530 + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_13 0x4A0534 + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_14 0x4A0538 + +#define mmDMA_IF_E_S_DMA1_MIN_RPROT_15 0x4A053C + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_0 0x4A0540 + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_1 0x4A0544 + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_2 0x4A0548 + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_3 0x4A054C + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_4 0x4A0550 + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_5 0x4A0554 + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_6 0x4A0558 + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_7 0x4A055C + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_8 0x4A0560 + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_9 0x4A0564 + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_10 0x4A0568 + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_11 0x4A056C + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_12 0x4A0570 + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_13 0x4A0574 + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_14 0x4A0578 + +#define mmDMA_IF_E_S_DMA1_MAX_RPROT_15 0x4A057C + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_0 0x4A0580 + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_1 0x4A0584 + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_2 0x4A0588 + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_3 0x4A058C + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_4 0x4A0590 + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_5 0x4A0594 + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_6 0x4A0598 + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_7 0x4A059C + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_8 0x4A05A0 + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_9 0x4A05A4 + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_10 0x4A05A8 + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_11 0x4A05AC + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_12 0x4A05B0 + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_13 0x4A05B4 + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_14 0x4A05B8 + +#define mmDMA_IF_E_S_DMA1_MIN_WPROT_15 0x4A05BC + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_0 0x4A05C0 + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_1 0x4A05C4 + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_2 0x4A05C8 + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_3 0x4A05CC + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_4 0x4A05D0 + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_5 0x4A05D4 + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_6 0x4A05D8 + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_7 0x4A05DC + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_8 0x4A05E0 + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_9 0x4A05E4 + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_10 0x4A05E8 + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_11 0x4A05EC + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_12 0x4A05F0 + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_13 0x4A05F4 + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_14 0x4A05F8 + +#define mmDMA_IF_E_S_DMA1_MAX_WPROT_15 0x4A05FC + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_0 0x4A0600 + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_1 0x4A0604 + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_2 0x4A0608 + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_3 0x4A060C + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_4 0x4A0610 + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_5 
0x4A0614 + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_6 0x4A0618 + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_7 0x4A061C + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_8 0x4A0620 + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_9 0x4A0624 + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_10 0x4A0628 + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_11 0x4A062C + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_12 0x4A0630 + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_13 0x4A0634 + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_14 0x4A0638 + +#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_15 0x4A063C + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_0 0x4A0640 + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_1 0x4A0644 + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_2 0x4A0648 + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_3 0x4A064C + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_4 0x4A0650 + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_5 0x4A0654 + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_6 0x4A0658 + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_7 0x4A065C + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_8 0x4A0660 + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_9 0x4A0664 + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_10 0x4A0668 + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_11 0x4A066C + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_12 0x4A0670 + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_13 0x4A0674 + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_14 0x4A0678 + +#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_15 0x4A067C + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_0 0x4A0680 + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_1 0x4A0684 + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_2 0x4A0688 + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_3 0x4A068C + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_4 0x4A0690 + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_5 0x4A0694 + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_6 0x4A0698 + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_7 0x4A069C + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_8 0x4A06A0 + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_9 0x4A06A4 + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_10 0x4A06A8 + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_11 0x4A06AC + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_12 0x4A06B0 + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_13 0x4A06B4 + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_14 0x4A06B8 + +#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_15 0x4A06BC + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_0 0x4A06C0 + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_1 0x4A06C4 + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_2 0x4A06C8 + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_3 0x4A06CC + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_4 0x4A06D0 + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_5 0x4A06D4 + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_6 0x4A06D8 + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_7 0x4A06DC + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_8 0x4A06E0 + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_9 0x4A06E4 + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_10 0x4A06E8 + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_11 0x4A06EC + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_12 0x4A06F0 + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_13 0x4A06F4 + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_14 0x4A06F8 + +#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_15 0x4A06FC + +#define mmDMA_IF_E_S_SOB_HIT_RPROT 0x4A0700 + +#define mmDMA_IF_E_S_SOB_HIT_WPROT 0x4A0704 + +#define mmDMA_IF_E_S_SOB_HIT_RPRIV 0x4A070C + +#define mmDMA_IF_E_S_SOB_HIT_WPRIV 0x4A0710 + +#define mmDMA_IF_E_S_DMA0_HIT_RPROT 0x4A071C + +#define mmDMA_IF_E_S_DMA0_HIT_WPROT 0x4A0720 + +#define mmDMA_IF_E_S_DMA0_HIT_RPRIV 0x4A0724 + +#define mmDMA_IF_E_S_DMA0_HIT_WPRIV 0x4A0728 + +#define mmDMA_IF_E_S_DMA1_HIT_RPROT 0x4A0730 + +#define mmDMA_IF_E_S_DMA1_HIT_WPROT 0x4A0734 + +#define mmDMA_IF_E_S_DMA1_HIT_RPRIV 0x4A0738 + +#define mmDMA_IF_E_S_DMA1_HIT_WPRIV 0x4A073C + +#define mmDMA_IF_E_S_HBM_BIN 0x4A0800 + 
+#define mmDMA_IF_E_S_MME_BIN 0x4A0804
+#define mmDMA_IF_E_S_TPC_BIN 0x4A0808
+#define mmDMA_IF_E_S_DMA_BIN 0x4A080C
+#define mmDMA_IF_E_S_SOB_CG_EN 0x4A0810
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_0 0x4A0820
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_1 0x4A0824
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_2 0x4A0828
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_3 0x4A082C
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_4 0x4A0830
+#define mmDMA_IF_E_S_HBM_MISC 0x4A0834
+
+#endif /* ASIC_REG_DMA_IF_E_S_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h
new file mode 100644
index 000000000..4ccaf8712
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_N_DOWN_CH0_REGS_H_
+#define ASIC_REG_DMA_IF_W_N_DOWN_CH0_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_N_DOWN_CH0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_N_DOWN_CH0_PERM_SEL 0x4C1108
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_0 0x4C1114
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_1 0x4C1118
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_2 0x4C111C
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_3 0x4C1120
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_4 0x4C1124
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_5 0x4C1128
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_6 0x4C112C
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_7 0x4C1130
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_8 0x4C1134
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_9 0x4C1138
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_10 0x4C113C
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_11 0x4C1140
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_12 0x4C1144
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_13 0x4C1148
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_14 0x4C114C
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_15 0x4C1150
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_16 0x4C1154
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_17 0x4C1158
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_18 0x4C115C
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_19 0x4C1160
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_20 0x4C1164
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_21 0x4C1168
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_22 0x4C116C
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_23 0x4C1170
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_24 0x4C1174
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_25 0x4C1178
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_26 0x4C117C
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_27 0x4C1180
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_0 0x4C1184
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_1 0x4C1188
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_2 0x4C118C
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_3 0x4C1190
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_4 0x4C1194
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_5 0x4C1198
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_6 0x4C119C
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_7 0x4C11A0
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_8 0x4C11A4
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_9 0x4C11A8
+#define
mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_10 0x4C11AC + +#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_11 0x4C11B0 + +#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_12 0x4C11B4 + +#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_13 0x4C11B8 + +#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_14 0x4C11BC + +#define mmDMA_IF_W_N_DOWN_CH0_SCRAM_SRAM_EN 0x4C126C + +#define mmDMA_IF_W_N_DOWN_CH0_RL_HBM_EN 0x4C1274 + +#define mmDMA_IF_W_N_DOWN_CH0_RL_HBM_SAT 0x4C1278 + +#define mmDMA_IF_W_N_DOWN_CH0_RL_HBM_RST 0x4C127C + +#define mmDMA_IF_W_N_DOWN_CH0_RL_HBM_TIMEOUT 0x4C1280 + +#define mmDMA_IF_W_N_DOWN_CH0_SCRAM_HBM_EN 0x4C1284 + +#define mmDMA_IF_W_N_DOWN_CH0_RL_PCI_EN 0x4C1288 + +#define mmDMA_IF_W_N_DOWN_CH0_RL_PCI_SAT 0x4C128C + +#define mmDMA_IF_W_N_DOWN_CH0_RL_PCI_RST 0x4C1290 + +#define mmDMA_IF_W_N_DOWN_CH0_RL_PCI_TIMEOUT 0x4C1294 + +#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_EN 0x4C129C + +#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_SAT 0x4C12A0 + +#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_RST 0x4C12A4 + +#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_TIMEOUT 0x4C12AC + +#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_RED 0x4C12B4 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_EN 0x4C12EC + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_EN 0x4C12F0 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_WR_SIZE 0x4C12F4 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_WR_SIZE 0x4C12F8 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_PCI_CTR_SET_EN 0x4C1404 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_PCI_CTR_SET 0x4C1408 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_PCI_CTR_WRAP 0x4C140C + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_PCI_CTR_CNT 0x4C1410 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM_CTR_SET_EN 0x4C1414 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM_CTR_SET 0x4C1418 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_RD_SIZE 0x4C141C + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_RD_SIZE 0x4C1420 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_PCI_CTR_SET_EN 0x4C1424 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_PCI_CTR_SET 0x4C1428 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_PCI_CTR_WRAP 0x4C142C + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_PCI_CTR_CNT 0x4C1430 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM_CTR_SET_EN 0x4C1434 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM_CTR_SET 0x4C1438 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_0 0x4C1450 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_1 0x4C1454 + +#define mmDMA_IF_W_N_DOWN_CH0_NON_LIN_EN 0x4C1480 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_0 0x4C1500 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_1 0x4C1504 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_2 0x4C1508 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_3 0x4C150C + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_4 0x4C1510 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_0 0x4C1514 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_1 0x4C1520 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_2 0x4C1524 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_3 0x4C1528 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_4 0x4C152C + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_5 0x4C1530 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_6 0x4C1534 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_7 0x4C1538 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_8 0x4C153C + +#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_9 0x4C1540 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_0 0x4C1550 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_1 0x4C1554 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_2 0x4C1558 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_3 0x4C155C + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_4 0x4C1560 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_5 
0x4C1564 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_6 0x4C1568 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_7 0x4C156C + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_8 0x4C1570 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_9 0x4C1574 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_10 0x4C1578 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_11 0x4C157C + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_12 0x4C1580 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_13 0x4C1584 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_14 0x4C1588 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_15 0x4C158C + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_16 0x4C1590 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_17 0x4C1594 + +#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_18 0x4C1598 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0 0x4C15E4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_1 0x4C15E8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_2 0x4C15EC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_3 0x4C15F0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_4 0x4C15F4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_5 0x4C15F8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_6 0x4C15FC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_7 0x4C1600 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_8 0x4C1604 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_9 0x4C1608 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_10 0x4C160C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_11 0x4C1610 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_12 0x4C1614 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_13 0x4C1618 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_14 0x4C161C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_15 0x4C1620 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0 0x4C1624 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_1 0x4C1628 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_2 0x4C162C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_3 0x4C1630 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_4 0x4C1634 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_5 0x4C1638 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_6 0x4C163C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_7 0x4C1640 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_8 0x4C1644 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_9 0x4C1648 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_10 0x4C164C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_11 0x4C1650 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_12 0x4C1654 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_13 0x4C1658 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_14 0x4C165C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_15 0x4C1660 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0 0x4C1664 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_1 0x4C1668 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_2 0x4C166C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_3 0x4C1670 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_4 0x4C1674 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_5 0x4C1678 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_6 0x4C167C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_7 0x4C1680 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_8 0x4C1684 + +#define 
mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_9 0x4C1688 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_10 0x4C168C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_11 0x4C1690 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_12 0x4C1694 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_13 0x4C1698 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_14 0x4C169C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_15 0x4C16A0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0 0x4C16A4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_1 0x4C16A8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_2 0x4C16AC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_3 0x4C16B0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_4 0x4C16B4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_5 0x4C16B8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_6 0x4C16BC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_7 0x4C16C0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_8 0x4C16C4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_9 0x4C16C8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_10 0x4C16CC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_11 0x4C16D0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_12 0x4C16D4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_13 0x4C16D8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_14 0x4C16DC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_15 0x4C16E0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_0 0x4C16E4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_1 0x4C16E8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_2 0x4C16EC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_3 0x4C16F0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_4 0x4C16F4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_5 0x4C16F8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_6 0x4C16FC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_7 0x4C1700 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_8 0x4C1704 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_9 0x4C1708 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_10 0x4C170C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_11 0x4C1710 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_12 0x4C1714 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_13 0x4C1718 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_14 0x4C171C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_15 0x4C1720 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_0 0x4C1724 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_1 0x4C1728 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_2 0x4C172C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_3 0x4C1730 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_4 0x4C1734 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_5 0x4C1738 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_6 0x4C173C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_7 0x4C1740 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_8 0x4C1744 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_9 0x4C1748 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_10 0x4C174C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_11 0x4C1750 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_12 0x4C1754 + +#define 
mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_13 0x4C1758 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_14 0x4C175C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_15 0x4C1760 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_0 0x4C1764 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_1 0x4C1768 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_2 0x4C176C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_3 0x4C1770 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_4 0x4C1774 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_5 0x4C1778 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_6 0x4C177C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_7 0x4C1780 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_8 0x4C1784 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_9 0x4C1788 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_10 0x4C178C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_11 0x4C1790 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_12 0x4C1794 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_13 0x4C1798 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_14 0x4C179C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_15 0x4C17A0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_0 0x4C17A4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_1 0x4C17A8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_2 0x4C17AC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_3 0x4C17B0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_4 0x4C17B4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_5 0x4C17B8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_6 0x4C17BC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_7 0x4C17C0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_8 0x4C17C4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_9 0x4C17C8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_10 0x4C17CC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_11 0x4C17D0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_12 0x4C17D4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_13 0x4C17D8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_14 0x4C17DC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_15 0x4C17E0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0 0x4C1824 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_1 0x4C1828 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_2 0x4C182C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_3 0x4C1830 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_4 0x4C1834 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_5 0x4C1838 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_6 0x4C183C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_7 0x4C1840 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_8 0x4C1844 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_9 0x4C1848 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_10 0x4C184C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_11 0x4C1850 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_12 0x4C1854 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_13 0x4C1858 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_14 0x4C185C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_15 0x4C1860 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0 0x4C1864 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_1 
0x4C1868 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_2 0x4C186C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_3 0x4C1870 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_4 0x4C1874 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_5 0x4C1878 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_6 0x4C187C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_7 0x4C1880 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_8 0x4C1884 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_9 0x4C1888 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_10 0x4C188C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_11 0x4C1890 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_12 0x4C1894 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_13 0x4C1898 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_14 0x4C189C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_15 0x4C18A0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0 0x4C18A4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_1 0x4C18A8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_2 0x4C18AC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_3 0x4C18B0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_4 0x4C18B4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_5 0x4C18B8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_6 0x4C18BC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_7 0x4C18C0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_8 0x4C18C4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_9 0x4C18C8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_10 0x4C18CC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_11 0x4C18D0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_12 0x4C18D4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_13 0x4C18D8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_14 0x4C18DC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_15 0x4C18E0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0 0x4C18E4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_1 0x4C18E8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_2 0x4C18EC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_3 0x4C18F0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_4 0x4C18F4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_5 0x4C18F8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_6 0x4C18FC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_7 0x4C1900 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_8 0x4C1904 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_9 0x4C1908 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_10 0x4C190C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_11 0x4C1910 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_12 0x4C1914 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_13 0x4C1918 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_14 0x4C191C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_15 0x4C1920 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_0 0x4C1924 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_1 0x4C1928 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_2 0x4C192C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_3 0x4C1930 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_4 0x4C1934 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_5 0x4C1938 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_6 
0x4C193C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_7 0x4C1940 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_8 0x4C1944 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_9 0x4C1948 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_10 0x4C194C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_11 0x4C1950 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_12 0x4C1954 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_13 0x4C1958 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_14 0x4C195C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_15 0x4C1960 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_0 0x4C1964 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_1 0x4C1968 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_2 0x4C196C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_3 0x4C1970 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_4 0x4C1974 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_5 0x4C1978 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_6 0x4C197C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_7 0x4C1980 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_8 0x4C1984 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_9 0x4C1988 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_10 0x4C198C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_11 0x4C1990 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_12 0x4C1994 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_13 0x4C1998 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_14 0x4C199C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_15 0x4C19A0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_0 0x4C19A4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_1 0x4C19A8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_2 0x4C19AC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_3 0x4C19B0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_4 0x4C19B4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_5 0x4C19B8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_6 0x4C19BC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_7 0x4C19C0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_8 0x4C19C4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_9 0x4C19C8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_10 0x4C19CC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_11 0x4C19D0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_12 0x4C19D4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_13 0x4C19D8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_14 0x4C19DC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_15 0x4C19E0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_0 0x4C19E4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_1 0x4C19E8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_2 0x4C19EC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_3 0x4C19F0 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_4 0x4C19F4 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_5 0x4C19F8 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_6 0x4C19FC + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_7 0x4C1A00 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_8 0x4C1A04 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_9 0x4C1A08 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_10 0x4C1A0C + +#define 
mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_11 0x4C1A10 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_12 0x4C1A14 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_13 0x4C1A18 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_14 0x4C1A1C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_15 0x4C1A20 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_HIT_AW 0x4C1A64 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_HIT_AR 0x4C1A68 + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_HIT_AW 0x4C1A6C + +#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_HIT_AR 0x4C1A70 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_CFG 0x4C1B64 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_SHIFT 0x4C1B68 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_0 0x4C1B6C + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_1 0x4C1B70 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_2 0x4C1B74 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_3 0x4C1B78 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_4 0x4C1B7C + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_5 0x4C1B80 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_6 0x4C1B84 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_7 0x4C1B88 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_0 0x4C1BAC + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_1 0x4C1BB0 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_2 0x4C1BB4 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_3 0x4C1BB8 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_4 0x4C1BBC + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_5 0x4C1BC0 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_6 0x4C1BC4 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_7 0x4C1BC8 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_0 0x4C1BEC + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_1 0x4C1BF0 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_2 0x4C1BF4 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_3 0x4C1BF8 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_4 0x4C1BFC + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_5 0x4C1C00 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_6 0x4C1C04 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_7 0x4C1C08 + +#define mmDMA_IF_W_N_DOWN_CH0_RGL_WDT 0x4C1C2C + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_WRAP 0x4C1C30 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_WRAP 0x4C1C34 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_WRAP 0x4C1C38 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_WRAP 0x4C1C3C + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_WRAP 0x4C1C40 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_WRAP 0x4C1C44 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_WRAP 0x4C1C48 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_WRAP 0x4C1C4C + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_CNT 0x4C1C50 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_CNT 0x4C1C54 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_CNT 0x4C1C58 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_CNT 0x4C1C5C + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_CNT 0x4C1C60 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_CNT 0x4C1C64 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_CNT 0x4C1C68 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_CNT 0x4C1C6C + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_WRAP 0x4C1C70 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_WRAP 0x4C1C74 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_WRAP 0x4C1C78 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_WRAP 0x4C1C7C + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_WRAP 0x4C1C80 + +#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_WRAP 
0x4C1C84
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_WRAP 0x4C1C88
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_WRAP 0x4C1C8C
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_CNT 0x4C1C90
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_CNT 0x4C1C94
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_CNT 0x4C1C98
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_CNT 0x4C1C9C
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_CNT 0x4C1CA0
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_CNT 0x4C1CA4
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_CNT 0x4C1CA8
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_CNT 0x4C1CAC
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_0 0x4C1CB0
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_1 0x4C1CB4
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_2 0x4C1CB8
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_3 0x4C1CBC
+
+#endif /* ASIC_REG_DMA_IF_W_N_DOWN_CH0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h
new file mode 100644
index 000000000..9236f4183
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_N_DOWN_CH1_REGS_H_
+#define ASIC_REG_DMA_IF_W_N_DOWN_CH1_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_N_DOWN_CH1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_N_DOWN_CH1_PERM_SEL 0x4C2108
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_0 0x4C2114
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_1 0x4C2118
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_2 0x4C211C
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_3 0x4C2120
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_4 0x4C2124
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_5 0x4C2128
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_6 0x4C212C
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_7 0x4C2130
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_8 0x4C2134
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_9 0x4C2138
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_10 0x4C213C
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_11 0x4C2140
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_12 0x4C2144
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_13 0x4C2148
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_14 0x4C214C
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_15 0x4C2150
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_16 0x4C2154
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_17 0x4C2158
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_18 0x4C215C
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_19 0x4C2160
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_20 0x4C2164
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_21 0x4C2168
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_22 0x4C216C
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_23 0x4C2170
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_24 0x4C2174
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_25 0x4C2178
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_26 0x4C217C
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_27 0x4C2180
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_0 0x4C2184
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_1 0x4C2188
+#define
mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_2 0x4C218C + +#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_3 0x4C2190 + +#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_4 0x4C2194 + +#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_5 0x4C2198 + +#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_6 0x4C219C + +#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_7 0x4C21A0 + +#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_8 0x4C21A4 + +#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_9 0x4C21A8 + +#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_10 0x4C21AC + +#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_11 0x4C21B0 + +#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_12 0x4C21B4 + +#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_13 0x4C21B8 + +#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_14 0x4C21BC + +#define mmDMA_IF_W_N_DOWN_CH1_SCRAM_SRAM_EN 0x4C226C + +#define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_EN 0x4C2274 + +#define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_SAT 0x4C2278 + +#define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_RST 0x4C227C + +#define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_TIMEOUT 0x4C2280 + +#define mmDMA_IF_W_N_DOWN_CH1_SCRAM_HBM_EN 0x4C2284 + +#define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_EN 0x4C2288 + +#define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_SAT 0x4C228C + +#define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_RST 0x4C2290 + +#define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_TIMEOUT 0x4C2294 + +#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_EN 0x4C229C + +#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_SAT 0x4C22A0 + +#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_RST 0x4C22A4 + +#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_TIMEOUT 0x4C22AC + +#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_RED 0x4C22B4 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_EN 0x4C22EC + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_EN 0x4C22F0 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_WR_SIZE 0x4C22F4 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_WR_SIZE 0x4C22F8 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_SET_EN 0x4C2404 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_SET 0x4C2408 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_WRAP 0x4C240C + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_CNT 0x4C2410 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM_CTR_SET_EN 0x4C2414 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM_CTR_SET 0x4C2418 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_RD_SIZE 0x4C241C + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_RD_SIZE 0x4C2420 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_SET_EN 0x4C2424 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_SET 0x4C2428 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_WRAP 0x4C242C + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_CNT 0x4C2430 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM_CTR_SET_EN 0x4C2434 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM_CTR_SET 0x4C2438 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_0 0x4C2450 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_1 0x4C2454 + +#define mmDMA_IF_W_N_DOWN_CH1_NON_LIN_EN 0x4C2480 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_0 0x4C2500 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_1 0x4C2504 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_2 0x4C2508 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_3 0x4C250C + +#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_4 0x4C2510 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_0 0x4C2514 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_1 0x4C2520 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_2 0x4C2524 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_3 0x4C2528 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_4 0x4C252C + +#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_5 0x4C2530 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_6 0x4C2534 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_7 0x4C2538 + 
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_8 0x4C253C + +#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_9 0x4C2540 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_0 0x4C2550 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_1 0x4C2554 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_2 0x4C2558 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_3 0x4C255C + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_4 0x4C2560 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_5 0x4C2564 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_6 0x4C2568 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_7 0x4C256C + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_8 0x4C2570 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_9 0x4C2574 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_10 0x4C2578 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_11 0x4C257C + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_12 0x4C2580 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_13 0x4C2584 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_14 0x4C2588 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_15 0x4C258C + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_16 0x4C2590 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_17 0x4C2594 + +#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_18 0x4C2598 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0 0x4C25E4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_1 0x4C25E8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_2 0x4C25EC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_3 0x4C25F0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_4 0x4C25F4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_5 0x4C25F8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_6 0x4C25FC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_7 0x4C2600 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_8 0x4C2604 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_9 0x4C2608 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_10 0x4C260C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_11 0x4C2610 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_12 0x4C2614 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_13 0x4C2618 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_14 0x4C261C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_15 0x4C2620 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0 0x4C2624 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_1 0x4C2628 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_2 0x4C262C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_3 0x4C2630 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_4 0x4C2634 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_5 0x4C2638 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_6 0x4C263C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_7 0x4C2640 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_8 0x4C2644 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_9 0x4C2648 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_10 0x4C264C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_11 0x4C2650 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_12 0x4C2654 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_13 0x4C2658 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_14 0x4C265C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_15 0x4C2660 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0 0x4C2664 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_1 0x4C2668 + +#define 
mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_2 0x4C266C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_3 0x4C2670 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_4 0x4C2674 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_5 0x4C2678 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_6 0x4C267C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_7 0x4C2680 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_8 0x4C2684 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_9 0x4C2688 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_10 0x4C268C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_11 0x4C2690 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_12 0x4C2694 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_13 0x4C2698 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_14 0x4C269C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_15 0x4C26A0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0 0x4C26A4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_1 0x4C26A8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_2 0x4C26AC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_3 0x4C26B0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_4 0x4C26B4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_5 0x4C26B8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_6 0x4C26BC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_7 0x4C26C0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_8 0x4C26C4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_9 0x4C26C8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_10 0x4C26CC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_11 0x4C26D0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_12 0x4C26D4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_13 0x4C26D8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_14 0x4C26DC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_15 0x4C26E0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_0 0x4C26E4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_1 0x4C26E8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_2 0x4C26EC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_3 0x4C26F0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_4 0x4C26F4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_5 0x4C26F8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_6 0x4C26FC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_7 0x4C2700 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_8 0x4C2704 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_9 0x4C2708 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_10 0x4C270C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_11 0x4C2710 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_12 0x4C2714 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_13 0x4C2718 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_14 0x4C271C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_15 0x4C2720 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_0 0x4C2724 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_1 0x4C2728 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_2 0x4C272C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_3 0x4C2730 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_4 0x4C2734 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_5 0x4C2738 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_6 0x4C273C + 
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_7 0x4C2740 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_8 0x4C2744 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_9 0x4C2748 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_10 0x4C274C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_11 0x4C2750 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_12 0x4C2754 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_13 0x4C2758 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_14 0x4C275C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_15 0x4C2760 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_0 0x4C2764 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_1 0x4C2768 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_2 0x4C276C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_3 0x4C2770 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_4 0x4C2774 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_5 0x4C2778 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_6 0x4C277C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_7 0x4C2780 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_8 0x4C2784 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_9 0x4C2788 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_10 0x4C278C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_11 0x4C2790 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_12 0x4C2794 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_13 0x4C2798 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_14 0x4C279C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_15 0x4C27A0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_0 0x4C27A4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_1 0x4C27A8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_2 0x4C27AC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_3 0x4C27B0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_4 0x4C27B4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_5 0x4C27B8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_6 0x4C27BC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_7 0x4C27C0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_8 0x4C27C4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_9 0x4C27C8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_10 0x4C27CC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_11 0x4C27D0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_12 0x4C27D4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_13 0x4C27D8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_14 0x4C27DC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_15 0x4C27E0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0 0x4C2824 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_1 0x4C2828 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_2 0x4C282C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_3 0x4C2830 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_4 0x4C2834 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_5 0x4C2838 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_6 0x4C283C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_7 0x4C2840 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_8 0x4C2844 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_9 0x4C2848 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_10 0x4C284C + +#define 
mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_11 0x4C2850 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_12 0x4C2854 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_13 0x4C2858 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_14 0x4C285C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_15 0x4C2860 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0 0x4C2864 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_1 0x4C2868 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_2 0x4C286C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_3 0x4C2870 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_4 0x4C2874 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_5 0x4C2878 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_6 0x4C287C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_7 0x4C2880 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_8 0x4C2884 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_9 0x4C2888 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_10 0x4C288C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_11 0x4C2890 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_12 0x4C2894 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_13 0x4C2898 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_14 0x4C289C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_15 0x4C28A0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0 0x4C28A4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_1 0x4C28A8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_2 0x4C28AC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_3 0x4C28B0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_4 0x4C28B4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_5 0x4C28B8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_6 0x4C28BC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_7 0x4C28C0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_8 0x4C28C4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_9 0x4C28C8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_10 0x4C28CC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_11 0x4C28D0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_12 0x4C28D4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_13 0x4C28D8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_14 0x4C28DC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_15 0x4C28E0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0 0x4C28E4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_1 0x4C28E8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_2 0x4C28EC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_3 0x4C28F0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_4 0x4C28F4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_5 0x4C28F8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_6 0x4C28FC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_7 0x4C2900 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_8 0x4C2904 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_9 0x4C2908 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_10 0x4C290C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_11 0x4C2910 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_12 0x4C2914 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_13 0x4C2918 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_14 0x4C291C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_15 0x4C2920 + +#define 
mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_0 0x4C2924 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_1 0x4C2928 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_2 0x4C292C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_3 0x4C2930 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_4 0x4C2934 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_5 0x4C2938 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_6 0x4C293C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_7 0x4C2940 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_8 0x4C2944 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_9 0x4C2948 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_10 0x4C294C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_11 0x4C2950 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_12 0x4C2954 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_13 0x4C2958 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_14 0x4C295C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_15 0x4C2960 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_0 0x4C2964 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_1 0x4C2968 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_2 0x4C296C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_3 0x4C2970 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_4 0x4C2974 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_5 0x4C2978 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_6 0x4C297C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_7 0x4C2980 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_8 0x4C2984 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_9 0x4C2988 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_10 0x4C298C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_11 0x4C2990 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_12 0x4C2994 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_13 0x4C2998 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_14 0x4C299C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_15 0x4C29A0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_0 0x4C29A4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_1 0x4C29A8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_2 0x4C29AC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_3 0x4C29B0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_4 0x4C29B4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_5 0x4C29B8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_6 0x4C29BC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_7 0x4C29C0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_8 0x4C29C4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_9 0x4C29C8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_10 0x4C29CC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_11 0x4C29D0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_12 0x4C29D4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_13 0x4C29D8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_14 0x4C29DC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_15 0x4C29E0 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_0 0x4C29E4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_1 0x4C29E8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_2 0x4C29EC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_3 0x4C29F0 + +#define 
mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_4 0x4C29F4 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_5 0x4C29F8 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_6 0x4C29FC + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_7 0x4C2A00 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_8 0x4C2A04 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_9 0x4C2A08 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_10 0x4C2A0C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_11 0x4C2A10 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_12 0x4C2A14 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_13 0x4C2A18 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_14 0x4C2A1C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_15 0x4C2A20 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AW 0x4C2A64 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AR 0x4C2A68 + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_HIT_AW 0x4C2A6C + +#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_HIT_AR 0x4C2A70 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_CFG 0x4C2B64 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_SHIFT 0x4C2B68 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_0 0x4C2B6C + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_1 0x4C2B70 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_2 0x4C2B74 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_3 0x4C2B78 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_4 0x4C2B7C + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_5 0x4C2B80 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_6 0x4C2B84 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_7 0x4C2B88 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_0 0x4C2BAC + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_1 0x4C2BB0 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_2 0x4C2BB4 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_3 0x4C2BB8 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_4 0x4C2BBC + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_5 0x4C2BC0 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_6 0x4C2BC4 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_7 0x4C2BC8 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_0 0x4C2BEC + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_1 0x4C2BF0 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_2 0x4C2BF4 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_3 0x4C2BF8 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_4 0x4C2BFC + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_5 0x4C2C00 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_6 0x4C2C04 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_7 0x4C2C08 + +#define mmDMA_IF_W_N_DOWN_CH1_RGL_WDT 0x4C2C2C + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_WRAP 0x4C2C30 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_WRAP 0x4C2C34 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_WRAP 0x4C2C38 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_WRAP 0x4C2C3C + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_WRAP 0x4C2C40 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_WRAP 0x4C2C44 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_WRAP 0x4C2C48 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_WRAP 0x4C2C4C + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_CNT 0x4C2C50 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_CNT 0x4C2C54 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_CNT 0x4C2C58 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_CNT 0x4C2C5C + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_CNT 0x4C2C60 + +#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_CNT 0x4C2C64 + +#define 
mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_CNT 0x4C2C68
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_CNT 0x4C2C6C
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_WRAP 0x4C2C70
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_WRAP 0x4C2C74
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_WRAP 0x4C2C78
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_WRAP 0x4C2C7C
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_WRAP 0x4C2C80
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_WRAP 0x4C2C84
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_WRAP 0x4C2C88
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_WRAP 0x4C2C8C
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_CNT 0x4C2C90
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_CNT 0x4C2C94
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_CNT 0x4C2C98
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_CNT 0x4C2C9C
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_CNT 0x4C2CA0
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_CNT 0x4C2CA4
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_CNT 0x4C2CA8
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_CNT 0x4C2CAC
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_0 0x4C2CB0
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_1 0x4C2CB4
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_2 0x4C2CB8
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_3 0x4C2CBC
+
+#endif /* ASIC_REG_DMA_IF_W_N_DOWN_CH1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h
new file mode 100644
index 000000000..da60893a5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h
@@ -0,0 +1,860 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_N_REGS_H_
+#define ASIC_REG_DMA_IF_W_N_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_N (Prototype: DMA_IF)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_N_HBM0_WR_CRED_CNT 0x4C0000
+#define mmDMA_IF_W_N_HBM1_WR_CRED_CNT 0x4C0004
+#define mmDMA_IF_W_N_HBM0_RD_CRED_CNT 0x4C0008
+#define mmDMA_IF_W_N_HBM1_RD_CRED_CNT 0x4C000C
+#define mmDMA_IF_W_N_HBM_LIMITER_0 0x4C0030
+#define mmDMA_IF_W_N_HBM_LIMITER_1 0x4C0034
+#define mmDMA_IF_W_N_HBM_LIMITER_2 0x4C0038
+#define mmDMA_IF_W_N_HBM_LIMITER_3 0x4C003C
+#define mmDMA_IF_W_N_HBM_ALMOST_EN_0 0x4C0040
+#define mmDMA_IF_W_N_HBM_ALMOST_EN_1 0x4C0044
+#define mmDMA_IF_W_N_HBM_CRED_EN_0 0x4C0050
+#define mmDMA_IF_W_N_HBM_CRED_EN_1 0x4C0054
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_0 0x4C0100
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_1 0x4C0104
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_2 0x4C0108
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_3 0x4C010C
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_4 0x4C0110
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_5 0x4C0114
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_6 0x4C0118
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_7 0x4C011C
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_8 0x4C0120
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_9 0x4C0124
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_10 0x4C0128
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_11 0x4C012C
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_12 0x4C0130
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_13 0x4C0134
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_14 0x4C0138
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_15 0x4C013C
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_0 0x4C0140
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_1 0x4C0144
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_2 0x4C0148
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_3 0x4C014C
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_4 0x4C0150
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_5 0x4C0154
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_6 0x4C0158
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_7 0x4C015C
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_8 0x4C0160
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_9 0x4C0164
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_10 0x4C0168
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_11 0x4C016C
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_12 0x4C0170
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_13 0x4C0174
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_14 0x4C0178
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_15 0x4C017C
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_0 0x4C0180
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_1 0x4C0184
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_2 0x4C0188
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_3 0x4C018C
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_4 0x4C0190
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_5 0x4C0194
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_6 0x4C0198
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_7 0x4C019C
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_8 0x4C01A0
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_9 0x4C01A4
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_10 0x4C01A8
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_11 0x4C01AC
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_12 0x4C01B0
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_13 0x4C01B4
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_14 0x4C01B8
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_15 0x4C01BC
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_0 0x4C01C0
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_1 0x4C01C4
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_2 0x4C01C8
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_3 0x4C01CC
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_4
0x4C01D0 + +#define mmDMA_IF_W_N_SOB_MAX_WPROT_5 0x4C01D4 + +#define mmDMA_IF_W_N_SOB_MAX_WPROT_6 0x4C01D8 + +#define mmDMA_IF_W_N_SOB_MAX_WPROT_7 0x4C01DC + +#define mmDMA_IF_W_N_SOB_MAX_WPROT_8 0x4C01E0 + +#define mmDMA_IF_W_N_SOB_MAX_WPROT_9 0x4C01E4 + +#define mmDMA_IF_W_N_SOB_MAX_WPROT_10 0x4C01E8 + +#define mmDMA_IF_W_N_SOB_MAX_WPROT_11 0x4C01EC + +#define mmDMA_IF_W_N_SOB_MAX_WPROT_12 0x4C01F0 + +#define mmDMA_IF_W_N_SOB_MAX_WPROT_13 0x4C01F4 + +#define mmDMA_IF_W_N_SOB_MAX_WPROT_14 0x4C01F8 + +#define mmDMA_IF_W_N_SOB_MAX_WPROT_15 0x4C01FC + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_0 0x4C0200 + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_1 0x4C0204 + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_2 0x4C0208 + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_3 0x4C020C + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_4 0x4C0210 + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_5 0x4C0214 + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_6 0x4C0218 + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_7 0x4C021C + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_8 0x4C0220 + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_9 0x4C0224 + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_10 0x4C0228 + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_11 0x4C022C + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_12 0x4C0230 + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_13 0x4C0234 + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_14 0x4C0238 + +#define mmDMA_IF_W_N_SOB_MIN_RPRIV_15 0x4C023C + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_0 0x4C0240 + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_1 0x4C0244 + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_2 0x4C0248 + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_3 0x4C024C + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_4 0x4C0250 + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_5 0x4C0254 + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_6 0x4C0258 + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_7 0x4C025C + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_8 0x4C0260 + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_9 0x4C0264 + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_10 0x4C0268 + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_11 0x4C026C + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_12 0x4C0270 + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_13 0x4C0274 + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_14 0x4C0278 + +#define mmDMA_IF_W_N_SOB_MAX_RPRIV_15 0x4C027C + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_0 0x4C0280 + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_1 0x4C0284 + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_2 0x4C0288 + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_3 0x4C028C + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_4 0x4C0290 + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_5 0x4C0294 + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_6 0x4C0298 + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_7 0x4C029C + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_8 0x4C02A0 + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_9 0x4C02A4 + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_10 0x4C02A8 + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_11 0x4C02AC + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_12 0x4C02B0 + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_13 0x4C02B4 + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_14 0x4C02B8 + +#define mmDMA_IF_W_N_SOB_MIN_WPRIV_15 0x4C02BC + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_0 0x4C02C0 + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_1 0x4C02C4 + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_2 0x4C02C8 + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_3 0x4C02CC + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_4 0x4C02D0 + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_5 0x4C02D4 + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_6 0x4C02D8 + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_7 0x4C02DC + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_8 0x4C02E0 + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_9 0x4C02E4 + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_10 0x4C02E8 + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_11 0x4C02EC + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_12 
0x4C02F0 + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_13 0x4C02F4 + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_14 0x4C02F8 + +#define mmDMA_IF_W_N_SOB_MAX_WPRIV_15 0x4C02FC + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_0 0x4C0300 + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_1 0x4C0304 + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_2 0x4C0308 + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_3 0x4C030C + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_4 0x4C0310 + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_5 0x4C0314 + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_6 0x4C0318 + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_7 0x4C031C + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_8 0x4C0320 + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_9 0x4C0324 + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_10 0x4C0328 + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_11 0x4C032C + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_12 0x4C0330 + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_13 0x4C0334 + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_14 0x4C0338 + +#define mmDMA_IF_W_N_DMA0_MIN_RPROT_15 0x4C033C + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_0 0x4C0340 + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_1 0x4C0344 + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_2 0x4C0348 + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_3 0x4C034C + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_4 0x4C0350 + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_5 0x4C0354 + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_6 0x4C0358 + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_7 0x4C035C + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_8 0x4C0360 + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_9 0x4C0364 + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_10 0x4C0368 + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_11 0x4C036C + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_12 0x4C0370 + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_13 0x4C0374 + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_14 0x4C0378 + +#define mmDMA_IF_W_N_DMA0_MAX_RPROT_15 0x4C037C + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_0 0x4C0380 + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_1 0x4C0384 + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_2 0x4C0388 + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_3 0x4C038C + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_4 0x4C0390 + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_5 0x4C0394 + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_6 0x4C0398 + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_7 0x4C039C + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_8 0x4C03A0 + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_9 0x4C03A4 + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_10 0x4C03A8 + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_11 0x4C03AC + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_12 0x4C03B0 + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_13 0x4C03B4 + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_14 0x4C03B8 + +#define mmDMA_IF_W_N_DMA0_MIN_WPROT_15 0x4C03BC + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_0 0x4C03C0 + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_1 0x4C03C4 + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_2 0x4C03C8 + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_3 0x4C03CC + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_4 0x4C03D0 + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_5 0x4C03D4 + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_6 0x4C03D8 + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_7 0x4C03DC + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_8 0x4C03E0 + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_9 0x4C03E4 + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_10 0x4C03E8 + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_11 0x4C03EC + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_12 0x4C03F0 + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_13 0x4C03F4 + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_14 0x4C03F8 + +#define mmDMA_IF_W_N_DMA0_MAX_WPROT_15 0x4C03FC + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_0 0x4C0400 + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_1 0x4C0404 + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_2 0x4C0408 + +#define 
mmDMA_IF_W_N_DMA0_MIN_RPRIV_3 0x4C040C + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_4 0x4C0410 + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_5 0x4C0414 + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_6 0x4C0418 + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_7 0x4C041C + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_8 0x4C0420 + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_9 0x4C0424 + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_10 0x4C0428 + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_11 0x4C042C + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_12 0x4C0430 + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_13 0x4C0434 + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_14 0x4C0438 + +#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_15 0x4C043C + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_0 0x4C0440 + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_1 0x4C0444 + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_2 0x4C0448 + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_3 0x4C044C + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_4 0x4C0450 + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_5 0x4C0454 + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_6 0x4C0458 + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_7 0x4C045C + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_8 0x4C0460 + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_9 0x4C0464 + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_10 0x4C0468 + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_11 0x4C046C + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_12 0x4C0470 + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_13 0x4C0474 + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_14 0x4C0478 + +#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_15 0x4C047C + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_0 0x4C0480 + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_1 0x4C0484 + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_2 0x4C0488 + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_3 0x4C048C + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_4 0x4C0490 + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_5 0x4C0494 + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_6 0x4C0498 + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_7 0x4C049C + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_8 0x4C04A0 + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_9 0x4C04A4 + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_10 0x4C04A8 + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_11 0x4C04AC + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_12 0x4C04B0 + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_13 0x4C04B4 + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_14 0x4C04B8 + +#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_15 0x4C04BC + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_0 0x4C04C0 + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_1 0x4C04C4 + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_2 0x4C04C8 + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_3 0x4C04CC + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_4 0x4C04D0 + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_5 0x4C04D4 + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_6 0x4C04D8 + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_7 0x4C04DC + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_8 0x4C04E0 + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_9 0x4C04E4 + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_10 0x4C04E8 + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_11 0x4C04EC + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_12 0x4C04F0 + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_13 0x4C04F4 + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_14 0x4C04F8 + +#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_15 0x4C04FC + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_0 0x4C0500 + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_1 0x4C0504 + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_2 0x4C0508 + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_3 0x4C050C + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_4 0x4C0510 + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_5 0x4C0514 + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_6 0x4C0518 + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_7 0x4C051C + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_8 0x4C0520 + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_9 
0x4C0524 + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_10 0x4C0528 + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_11 0x4C052C + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_12 0x4C0530 + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_13 0x4C0534 + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_14 0x4C0538 + +#define mmDMA_IF_W_N_DMA1_MIN_RPROT_15 0x4C053C + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_0 0x4C0540 + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_1 0x4C0544 + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_2 0x4C0548 + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_3 0x4C054C + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_4 0x4C0550 + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_5 0x4C0554 + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_6 0x4C0558 + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_7 0x4C055C + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_8 0x4C0560 + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_9 0x4C0564 + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_10 0x4C0568 + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_11 0x4C056C + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_12 0x4C0570 + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_13 0x4C0574 + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_14 0x4C0578 + +#define mmDMA_IF_W_N_DMA1_MAX_RPROT_15 0x4C057C + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_0 0x4C0580 + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_1 0x4C0584 + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_2 0x4C0588 + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_3 0x4C058C + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_4 0x4C0590 + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_5 0x4C0594 + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_6 0x4C0598 + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_7 0x4C059C + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_8 0x4C05A0 + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_9 0x4C05A4 + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_10 0x4C05A8 + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_11 0x4C05AC + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_12 0x4C05B0 + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_13 0x4C05B4 + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_14 0x4C05B8 + +#define mmDMA_IF_W_N_DMA1_MIN_WPROT_15 0x4C05BC + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_0 0x4C05C0 + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_1 0x4C05C4 + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_2 0x4C05C8 + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_3 0x4C05CC + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_4 0x4C05D0 + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_5 0x4C05D4 + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_6 0x4C05D8 + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_7 0x4C05DC + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_8 0x4C05E0 + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_9 0x4C05E4 + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_10 0x4C05E8 + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_11 0x4C05EC + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_12 0x4C05F0 + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_13 0x4C05F4 + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_14 0x4C05F8 + +#define mmDMA_IF_W_N_DMA1_MAX_WPROT_15 0x4C05FC + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_0 0x4C0600 + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_1 0x4C0604 + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_2 0x4C0608 + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_3 0x4C060C + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_4 0x4C0610 + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_5 0x4C0614 + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_6 0x4C0618 + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_7 0x4C061C + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_8 0x4C0620 + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_9 0x4C0624 + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_10 0x4C0628 + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_11 0x4C062C + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_12 0x4C0630 + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_13 0x4C0634 + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_14 0x4C0638 + +#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_15 0x4C063C + +#define 
mmDMA_IF_W_N_DMA1_MAX_RPRIV_0 0x4C0640 + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_1 0x4C0644 + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_2 0x4C0648 + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_3 0x4C064C + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_4 0x4C0650 + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_5 0x4C0654 + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_6 0x4C0658 + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_7 0x4C065C + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_8 0x4C0660 + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_9 0x4C0664 + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_10 0x4C0668 + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_11 0x4C066C + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_12 0x4C0670 + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_13 0x4C0674 + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_14 0x4C0678 + +#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_15 0x4C067C + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_0 0x4C0680 + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_1 0x4C0684 + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_2 0x4C0688 + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_3 0x4C068C + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_4 0x4C0690 + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_5 0x4C0694 + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_6 0x4C0698 + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_7 0x4C069C + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_8 0x4C06A0 + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_9 0x4C06A4 + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_10 0x4C06A8 + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_11 0x4C06AC + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_12 0x4C06B0 + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_13 0x4C06B4 + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_14 0x4C06B8 + +#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_15 0x4C06BC + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_0 0x4C06C0 + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_1 0x4C06C4 + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_2 0x4C06C8 + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_3 0x4C06CC + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_4 0x4C06D0 + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_5 0x4C06D4 + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_6 0x4C06D8 + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_7 0x4C06DC + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_8 0x4C06E0 + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_9 0x4C06E4 + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_10 0x4C06E8 + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_11 0x4C06EC + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_12 0x4C06F0 + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_13 0x4C06F4 + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_14 0x4C06F8 + +#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_15 0x4C06FC + +#define mmDMA_IF_W_N_SOB_HIT_RPROT 0x4C0700 + +#define mmDMA_IF_W_N_SOB_HIT_WPROT 0x4C0704 + +#define mmDMA_IF_W_N_SOB_HIT_RPRIV 0x4C070C + +#define mmDMA_IF_W_N_SOB_HIT_WPRIV 0x4C0710 + +#define mmDMA_IF_W_N_DMA0_HIT_RPROT 0x4C071C + +#define mmDMA_IF_W_N_DMA0_HIT_WPROT 0x4C0720 + +#define mmDMA_IF_W_N_DMA0_HIT_RPRIV 0x4C0724 + +#define mmDMA_IF_W_N_DMA0_HIT_WPRIV 0x4C0728 + +#define mmDMA_IF_W_N_DMA1_HIT_RPROT 0x4C0730 + +#define mmDMA_IF_W_N_DMA1_HIT_WPROT 0x4C0734 + +#define mmDMA_IF_W_N_DMA1_HIT_RPRIV 0x4C0738 + +#define mmDMA_IF_W_N_DMA1_HIT_WPRIV 0x4C073C + +#define mmDMA_IF_W_N_HBM_BIN 0x4C0800 + +#define mmDMA_IF_W_N_MME_BIN 0x4C0804 + +#define mmDMA_IF_W_N_TPC_BIN 0x4C0808 + +#define mmDMA_IF_W_N_DMA_BIN 0x4C080C + +#define mmDMA_IF_W_N_SOB_CG_EN 0x4C0810 + +#define mmDMA_IF_W_N_HBM_I2C_ADDR_0 0x4C0820 + +#define mmDMA_IF_W_N_HBM_I2C_ADDR_1 0x4C0824 + +#define mmDMA_IF_W_N_HBM_I2C_ADDR_2 0x4C0828 + +#define mmDMA_IF_W_N_HBM_I2C_ADDR_3 0x4C082C + +#define mmDMA_IF_W_N_HBM_I2C_ADDR_4 0x4C0830 + +#define mmDMA_IF_W_N_HBM_MISC 0x4C0834 + +#endif /* ASIC_REG_DMA_IF_W_N_REGS_H_ */ diff --git 
a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h new file mode 100644 index 000000000..56ffc920d --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h @@ -0,0 +1,896 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_IF_W_S_DOWN_CH0_REGS_H_ +#define ASIC_REG_DMA_IF_W_S_DOWN_CH0_REGS_H_ + +/* + ***************************************** + * DMA_IF_W_S_DOWN_CH0 (Prototype: RTR_CTRL) + ***************************************** + */ + +#define mmDMA_IF_W_S_DOWN_CH0_PERM_SEL 0x481108 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_0 0x481114 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_1 0x481118 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_2 0x48111C + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_3 0x481120 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_4 0x481124 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_5 0x481128 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_6 0x48112C + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_7 0x481130 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_8 0x481134 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_9 0x481138 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_10 0x48113C + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_11 0x481140 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_12 0x481144 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_13 0x481148 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_14 0x48114C + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_15 0x481150 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_16 0x481154 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_17 0x481158 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_18 0x48115C + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_19 0x481160 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_20 0x481164 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_21 0x481168 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_22 0x48116C + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_23 0x481170 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_24 0x481174 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_25 0x481178 + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_26 0x48117C + +#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_27 0x481180 + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_0 0x481184 + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_1 0x481188 + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_2 0x48118C + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_3 0x481190 + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_4 0x481194 + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_5 0x481198 + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_6 0x48119C + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_7 0x4811A0 + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_8 0x4811A4 + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_9 0x4811A8 + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_10 0x4811AC + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_11 0x4811B0 + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_12 0x4811B4 + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_13 0x4811B8 + +#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_14 0x4811BC + +#define mmDMA_IF_W_S_DOWN_CH0_SCRAM_SRAM_EN 0x48126C + +#define mmDMA_IF_W_S_DOWN_CH0_RL_HBM_EN 0x481274 + +#define mmDMA_IF_W_S_DOWN_CH0_RL_HBM_SAT 0x481278 + +#define mmDMA_IF_W_S_DOWN_CH0_RL_HBM_RST 0x48127C + +#define 
mmDMA_IF_W_S_DOWN_CH0_RL_HBM_TIMEOUT 0x481280 + +#define mmDMA_IF_W_S_DOWN_CH0_SCRAM_HBM_EN 0x481284 + +#define mmDMA_IF_W_S_DOWN_CH0_RL_PCI_EN 0x481288 + +#define mmDMA_IF_W_S_DOWN_CH0_RL_PCI_SAT 0x48128C + +#define mmDMA_IF_W_S_DOWN_CH0_RL_PCI_RST 0x481290 + +#define mmDMA_IF_W_S_DOWN_CH0_RL_PCI_TIMEOUT 0x481294 + +#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_EN 0x48129C + +#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_SAT 0x4812A0 + +#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_RST 0x4812A4 + +#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_TIMEOUT 0x4812AC + +#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_RED 0x4812B4 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_EN 0x4812EC + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_EN 0x4812F0 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_WR_SIZE 0x4812F4 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_WR_SIZE 0x4812F8 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_PCI_CTR_SET_EN 0x481404 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_PCI_CTR_SET 0x481408 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_PCI_CTR_WRAP 0x48140C + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_PCI_CTR_CNT 0x481410 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM_CTR_SET_EN 0x481414 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM_CTR_SET 0x481418 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_RD_SIZE 0x48141C + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_RD_SIZE 0x481420 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_PCI_CTR_SET_EN 0x481424 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_PCI_CTR_SET 0x481428 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_PCI_CTR_WRAP 0x48142C + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_PCI_CTR_CNT 0x481430 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM_CTR_SET_EN 0x481434 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM_CTR_SET 0x481438 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_0 0x481450 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_1 0x481454 + +#define mmDMA_IF_W_S_DOWN_CH0_NON_LIN_EN 0x481480 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_0 0x481500 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_1 0x481504 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_2 0x481508 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_3 0x48150C + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_4 0x481510 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_0 0x481514 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_1 0x481520 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_2 0x481524 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_3 0x481528 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_4 0x48152C + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_5 0x481530 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_6 0x481534 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_7 0x481538 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_8 0x48153C + +#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_9 0x481540 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_0 0x481550 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_1 0x481554 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_2 0x481558 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_3 0x48155C + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_4 0x481560 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_5 0x481564 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_6 0x481568 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_7 0x48156C + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_8 0x481570 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_9 0x481574 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_10 0x481578 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_11 0x48157C + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_12 0x481580 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_13 0x481584 + +#define 
mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_14 0x481588 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_15 0x48158C + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_16 0x481590 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_17 0x481594 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_18 0x481598 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0 0x4815E4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_1 0x4815E8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_2 0x4815EC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_3 0x4815F0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_4 0x4815F4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_5 0x4815F8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_6 0x4815FC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_7 0x481600 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_8 0x481604 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_9 0x481608 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_10 0x48160C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_11 0x481610 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_12 0x481614 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_13 0x481618 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_14 0x48161C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_15 0x481620 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0 0x481624 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_1 0x481628 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_2 0x48162C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_3 0x481630 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_4 0x481634 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_5 0x481638 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_6 0x48163C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_7 0x481640 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_8 0x481644 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_9 0x481648 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_10 0x48164C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_11 0x481650 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_12 0x481654 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_13 0x481658 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_14 0x48165C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_15 0x481660 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0 0x481664 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_1 0x481668 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_2 0x48166C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_3 0x481670 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_4 0x481674 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_5 0x481678 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_6 0x48167C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_7 0x481680 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_8 0x481684 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_9 0x481688 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_10 0x48168C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_11 0x481690 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_12 0x481694 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_13 0x481698 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_14 0x48169C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_15 0x4816A0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0 0x4816A4 + 
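/*
 * The RANGE_{SEC,PRIV}_{BASE,MASK}_{LOW,HIGH}_{AW,AR} registers in this block
 * are 16-entry arrays of 32-bit registers laid out at a 4-byte stride, so
 * entry n is reachable as the corresponding _0 address plus n * 4.  A minimal
 * sketch of programming one secure write (AW) range that way, assuming the
 * driver's WREG32() register-write helper is available; the function name and
 * the base/mask arguments are illustrative and not part of this header:
 *
 *	static void gaudi_set_sec_aw_range(int n, u32 base_lo, u32 base_hi,
 *						u32 mask_lo, u32 mask_hi)
 *	{
 *		u32 off = n * 4;	/* 16 ranges, 4 bytes apart */
 *
 *		WREG32(mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0 + off,
 *			base_lo);
 *		WREG32(mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0 + off,
 *			base_hi);
 *		WREG32(mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0 + off,
 *			mask_lo);
 *		WREG32(mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0 + off,
 *			mask_hi);
 *	}
 */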
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_1 0x4816A8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_2 0x4816AC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_3 0x4816B0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_4 0x4816B4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_5 0x4816B8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_6 0x4816BC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_7 0x4816C0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_8 0x4816C4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_9 0x4816C8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_10 0x4816CC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_11 0x4816D0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_12 0x4816D4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_13 0x4816D8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_14 0x4816DC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_15 0x4816E0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_0 0x4816E4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_1 0x4816E8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_2 0x4816EC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_3 0x4816F0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_4 0x4816F4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_5 0x4816F8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_6 0x4816FC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_7 0x481700 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_8 0x481704 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_9 0x481708 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_10 0x48170C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_11 0x481710 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_12 0x481714 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_13 0x481718 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_14 0x48171C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_15 0x481720 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_0 0x481724 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_1 0x481728 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_2 0x48172C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_3 0x481730 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_4 0x481734 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_5 0x481738 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_6 0x48173C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_7 0x481740 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_8 0x481744 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_9 0x481748 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_10 0x48174C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_11 0x481750 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_12 0x481754 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_13 0x481758 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_14 0x48175C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_15 0x481760 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_0 0x481764 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_1 0x481768 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_2 0x48176C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_3 0x481770 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_4 0x481774 + +#define 
mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_5 0x481778 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_6 0x48177C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_7 0x481780 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_8 0x481784 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_9 0x481788 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_10 0x48178C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_11 0x481790 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_12 0x481794 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_13 0x481798 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_14 0x48179C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_15 0x4817A0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_0 0x4817A4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_1 0x4817A8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_2 0x4817AC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_3 0x4817B0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_4 0x4817B4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_5 0x4817B8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_6 0x4817BC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_7 0x4817C0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_8 0x4817C4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_9 0x4817C8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_10 0x4817CC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_11 0x4817D0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_12 0x4817D4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_13 0x4817D8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_14 0x4817DC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_15 0x4817E0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0 0x481824 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_1 0x481828 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_2 0x48182C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_3 0x481830 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_4 0x481834 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_5 0x481838 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_6 0x48183C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_7 0x481840 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_8 0x481844 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_9 0x481848 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_10 0x48184C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_11 0x481850 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_12 0x481854 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_13 0x481858 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_14 0x48185C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_15 0x481860 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0 0x481864 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_1 0x481868 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_2 0x48186C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_3 0x481870 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_4 0x481874 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_5 0x481878 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_6 0x48187C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_7 0x481880 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_8 0x481884 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_9 
0x481888 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_10 0x48188C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_11 0x481890 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_12 0x481894 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_13 0x481898 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_14 0x48189C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_15 0x4818A0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0 0x4818A4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_1 0x4818A8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_2 0x4818AC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_3 0x4818B0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_4 0x4818B4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_5 0x4818B8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_6 0x4818BC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_7 0x4818C0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_8 0x4818C4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_9 0x4818C8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_10 0x4818CC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_11 0x4818D0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_12 0x4818D4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_13 0x4818D8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_14 0x4818DC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_15 0x4818E0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0 0x4818E4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_1 0x4818E8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_2 0x4818EC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_3 0x4818F0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_4 0x4818F4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_5 0x4818F8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_6 0x4818FC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_7 0x481900 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_8 0x481904 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_9 0x481908 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_10 0x48190C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_11 0x481910 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_12 0x481914 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_13 0x481918 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_14 0x48191C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_15 0x481920 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_0 0x481924 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_1 0x481928 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_2 0x48192C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_3 0x481930 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_4 0x481934 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_5 0x481938 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_6 0x48193C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_7 0x481940 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_8 0x481944 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_9 0x481948 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_10 0x48194C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_11 0x481950 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_12 0x481954 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_13 0x481958 + +#define 
mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_14 0x48195C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_15 0x481960 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_0 0x481964 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_1 0x481968 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_2 0x48196C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_3 0x481970 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_4 0x481974 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_5 0x481978 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_6 0x48197C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_7 0x481980 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_8 0x481984 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_9 0x481988 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_10 0x48198C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_11 0x481990 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_12 0x481994 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_13 0x481998 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_14 0x48199C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_15 0x4819A0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_0 0x4819A4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_1 0x4819A8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_2 0x4819AC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_3 0x4819B0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_4 0x4819B4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_5 0x4819B8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_6 0x4819BC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_7 0x4819C0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_8 0x4819C4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_9 0x4819C8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_10 0x4819CC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_11 0x4819D0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_12 0x4819D4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_13 0x4819D8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_14 0x4819DC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_15 0x4819E0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_0 0x4819E4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_1 0x4819E8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_2 0x4819EC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_3 0x4819F0 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_4 0x4819F4 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_5 0x4819F8 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_6 0x4819FC + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_7 0x481A00 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_8 0x481A04 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_9 0x481A08 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_10 0x481A0C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_11 0x481A10 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_12 0x481A14 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_13 0x481A18 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_14 0x481A1C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_15 0x481A20 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AW 0x481A64 + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AR 0x481A68 + +#define 
mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_HIT_AW 0x481A6C + +#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_HIT_AR 0x481A70 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_CFG 0x481B64 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_SHIFT 0x481B68 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_0 0x481B6C + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_1 0x481B70 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_2 0x481B74 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_3 0x481B78 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_4 0x481B7C + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_5 0x481B80 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_6 0x481B84 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_7 0x481B88 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_0 0x481BAC + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_1 0x481BB0 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_2 0x481BB4 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_3 0x481BB8 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_4 0x481BBC + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_5 0x481BC0 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_6 0x481BC4 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_7 0x481BC8 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_0 0x481BEC + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_1 0x481BF0 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_2 0x481BF4 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_3 0x481BF8 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_4 0x481BFC + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_5 0x481C00 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_6 0x481C04 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_7 0x481C08 + +#define mmDMA_IF_W_S_DOWN_CH0_RGL_WDT 0x481C2C + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_WRAP 0x481C30 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_WRAP 0x481C34 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_WRAP 0x481C38 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_WRAP 0x481C3C + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_WRAP 0x481C40 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_WRAP 0x481C44 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_WRAP 0x481C48 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_WRAP 0x481C4C + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_CNT 0x481C50 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_CNT 0x481C54 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_CNT 0x481C58 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_CNT 0x481C5C + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_CNT 0x481C60 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_CNT 0x481C64 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_CNT 0x481C68 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_CNT 0x481C6C + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_WRAP 0x481C70 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_WRAP 0x481C74 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_WRAP 0x481C78 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_WRAP 0x481C7C + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_WRAP 0x481C80 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_WRAP 0x481C84 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_WRAP 0x481C88 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_WRAP 0x481C8C + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_CNT 0x481C90 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_CNT 0x481C94 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_CNT 0x481C98 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_CNT 0x481C9C + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_CNT 
0x481CA0 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_CNT 0x481CA4 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_CNT 0x481CA8 + +#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_CNT 0x481CAC + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_0 0x481CB0 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_1 0x481CB4 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_2 0x481CB8 + +#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_3 0x481CBC + +#endif /* ASIC_REG_DMA_IF_W_S_DOWN_CH0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h new file mode 100644 index 000000000..cbc642918 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h @@ -0,0 +1,896 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_IF_W_S_DOWN_CH1_REGS_H_ +#define ASIC_REG_DMA_IF_W_S_DOWN_CH1_REGS_H_ + +/* + ***************************************** + * DMA_IF_W_S_DOWN_CH1 (Prototype: RTR_CTRL) + ***************************************** + */ + +#define mmDMA_IF_W_S_DOWN_CH1_PERM_SEL 0x482108 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_0 0x482114 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_1 0x482118 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_2 0x48211C + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_3 0x482120 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_4 0x482124 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_5 0x482128 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_6 0x48212C + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_7 0x482130 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_8 0x482134 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_9 0x482138 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_10 0x48213C + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_11 0x482140 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_12 0x482144 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_13 0x482148 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_14 0x48214C + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_15 0x482150 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_16 0x482154 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_17 0x482158 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_18 0x48215C + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_19 0x482160 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_20 0x482164 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_21 0x482168 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_22 0x48216C + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_23 0x482170 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_24 0x482174 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_25 0x482178 + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_26 0x48217C + +#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_27 0x482180 + +#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_0 0x482184 + +#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_1 0x482188 + +#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_2 0x48218C + +#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_3 0x482190 + +#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_4 0x482194 + +#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_5 0x482198 + +#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_6 0x48219C + +#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_7 0x4821A0 + +#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_8 0x4821A4 + +#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_9 0x4821A8 + +#define 
mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_10 0x4821AC + +#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_11 0x4821B0 + +#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_12 0x4821B4 + +#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_13 0x4821B8 + +#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_14 0x4821BC + +#define mmDMA_IF_W_S_DOWN_CH1_SCRAM_SRAM_EN 0x48226C + +#define mmDMA_IF_W_S_DOWN_CH1_RL_HBM_EN 0x482274 + +#define mmDMA_IF_W_S_DOWN_CH1_RL_HBM_SAT 0x482278 + +#define mmDMA_IF_W_S_DOWN_CH1_RL_HBM_RST 0x48227C + +#define mmDMA_IF_W_S_DOWN_CH1_RL_HBM_TIMEOUT 0x482280 + +#define mmDMA_IF_W_S_DOWN_CH1_SCRAM_HBM_EN 0x482284 + +#define mmDMA_IF_W_S_DOWN_CH1_RL_PCI_EN 0x482288 + +#define mmDMA_IF_W_S_DOWN_CH1_RL_PCI_SAT 0x48228C + +#define mmDMA_IF_W_S_DOWN_CH1_RL_PCI_RST 0x482290 + +#define mmDMA_IF_W_S_DOWN_CH1_RL_PCI_TIMEOUT 0x482294 + +#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_EN 0x48229C + +#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_SAT 0x4822A0 + +#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_RST 0x4822A4 + +#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_TIMEOUT 0x4822AC + +#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_RED 0x4822B4 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_EN 0x4822EC + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_EN 0x4822F0 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_WR_SIZE 0x4822F4 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_WR_SIZE 0x4822F8 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_PCI_CTR_SET_EN 0x482404 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_PCI_CTR_SET 0x482408 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_PCI_CTR_WRAP 0x48240C + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_PCI_CTR_CNT 0x482410 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM_CTR_SET_EN 0x482414 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM_CTR_SET 0x482418 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_RD_SIZE 0x48241C + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_RD_SIZE 0x482420 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_PCI_CTR_SET_EN 0x482424 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_PCI_CTR_SET 0x482428 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_PCI_CTR_WRAP 0x48242C + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_PCI_CTR_CNT 0x482430 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM_CTR_SET_EN 0x482434 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM_CTR_SET 0x482438 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_0 0x482450 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_1 0x482454 + +#define mmDMA_IF_W_S_DOWN_CH1_NON_LIN_EN 0x482480 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_0 0x482500 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_1 0x482504 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_2 0x482508 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_3 0x48250C + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_4 0x482510 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_0 0x482514 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_1 0x482520 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_2 0x482524 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_3 0x482528 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_4 0x48252C + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_5 0x482530 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_6 0x482534 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_7 0x482538 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_8 0x48253C + +#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_9 0x482540 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_0 0x482550 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_1 0x482554 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_2 0x482558 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_3 0x48255C + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_4 0x482560 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_5 
0x482564 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_6 0x482568 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_7 0x48256C + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_8 0x482570 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_9 0x482574 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_10 0x482578 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_11 0x48257C + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_12 0x482580 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_13 0x482584 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_14 0x482588 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_15 0x48258C + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_16 0x482590 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_17 0x482594 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_18 0x482598 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0 0x4825E4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_1 0x4825E8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_2 0x4825EC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_3 0x4825F0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_4 0x4825F4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_5 0x4825F8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_6 0x4825FC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_7 0x482600 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_8 0x482604 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_9 0x482608 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_10 0x48260C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_11 0x482610 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_12 0x482614 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_13 0x482618 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_14 0x48261C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_15 0x482620 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0 0x482624 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_1 0x482628 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_2 0x48262C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_3 0x482630 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_4 0x482634 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_5 0x482638 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_6 0x48263C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_7 0x482640 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_8 0x482644 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_9 0x482648 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_10 0x48264C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_11 0x482650 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_12 0x482654 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_13 0x482658 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_14 0x48265C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_15 0x482660 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0 0x482664 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_1 0x482668 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_2 0x48266C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_3 0x482670 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_4 0x482674 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_5 0x482678 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_6 0x48267C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_7 0x482680 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_8 0x482684 + +#define 
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_9 0x482688 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_10 0x48268C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_11 0x482690 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_12 0x482694 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_13 0x482698 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_14 0x48269C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_15 0x4826A0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0 0x4826A4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_1 0x4826A8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_2 0x4826AC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_3 0x4826B0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_4 0x4826B4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_5 0x4826B8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_6 0x4826BC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_7 0x4826C0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_8 0x4826C4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_9 0x4826C8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_10 0x4826CC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_11 0x4826D0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_12 0x4826D4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_13 0x4826D8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_14 0x4826DC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_15 0x4826E0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_0 0x4826E4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_1 0x4826E8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_2 0x4826EC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_3 0x4826F0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_4 0x4826F4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_5 0x4826F8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_6 0x4826FC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_7 0x482700 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_8 0x482704 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_9 0x482708 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_10 0x48270C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_11 0x482710 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_12 0x482714 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_13 0x482718 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_14 0x48271C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_15 0x482720 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_0 0x482724 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_1 0x482728 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_2 0x48272C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_3 0x482730 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_4 0x482734 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_5 0x482738 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_6 0x48273C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_7 0x482740 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_8 0x482744 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_9 0x482748 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_10 0x48274C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_11 0x482750 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_12 0x482754 + +#define 
mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_13 0x482758 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_14 0x48275C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_15 0x482760 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_0 0x482764 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_1 0x482768 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_2 0x48276C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_3 0x482770 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_4 0x482774 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_5 0x482778 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_6 0x48277C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_7 0x482780 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_8 0x482784 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_9 0x482788 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_10 0x48278C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_11 0x482790 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_12 0x482794 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_13 0x482798 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_14 0x48279C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_15 0x4827A0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_0 0x4827A4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_1 0x4827A8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_2 0x4827AC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_3 0x4827B0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_4 0x4827B4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_5 0x4827B8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_6 0x4827BC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_7 0x4827C0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_8 0x4827C4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_9 0x4827C8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_10 0x4827CC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_11 0x4827D0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_12 0x4827D4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_13 0x4827D8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_14 0x4827DC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_15 0x4827E0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0 0x482824 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_1 0x482828 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_2 0x48282C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_3 0x482830 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_4 0x482834 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_5 0x482838 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_6 0x48283C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_7 0x482840 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_8 0x482844 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_9 0x482848 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_10 0x48284C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_11 0x482850 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_12 0x482854 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_13 0x482858 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_14 0x48285C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_15 0x482860 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0 0x482864 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_1 
0x482868 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_2 0x48286C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_3 0x482870 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_4 0x482874 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_5 0x482878 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_6 0x48287C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_7 0x482880 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_8 0x482884 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_9 0x482888 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_10 0x48288C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_11 0x482890 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_12 0x482894 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_13 0x482898 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_14 0x48289C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_15 0x4828A0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0 0x4828A4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_1 0x4828A8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_2 0x4828AC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_3 0x4828B0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_4 0x4828B4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_5 0x4828B8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_6 0x4828BC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_7 0x4828C0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_8 0x4828C4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_9 0x4828C8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_10 0x4828CC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_11 0x4828D0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_12 0x4828D4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_13 0x4828D8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_14 0x4828DC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_15 0x4828E0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0 0x4828E4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_1 0x4828E8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_2 0x4828EC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_3 0x4828F0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_4 0x4828F4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_5 0x4828F8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_6 0x4828FC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_7 0x482900 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_8 0x482904 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_9 0x482908 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_10 0x48290C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_11 0x482910 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_12 0x482914 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_13 0x482918 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_14 0x48291C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_15 0x482920 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_0 0x482924 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_1 0x482928 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_2 0x48292C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_3 0x482930 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_4 0x482934 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_5 0x482938 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_6 
0x48293C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_7 0x482940 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_8 0x482944 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_9 0x482948 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_10 0x48294C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_11 0x482950 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_12 0x482954 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_13 0x482958 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_14 0x48295C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_15 0x482960 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_0 0x482964 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_1 0x482968 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_2 0x48296C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_3 0x482970 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_4 0x482974 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_5 0x482978 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_6 0x48297C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_7 0x482980 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_8 0x482984 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_9 0x482988 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_10 0x48298C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_11 0x482990 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_12 0x482994 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_13 0x482998 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_14 0x48299C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_15 0x4829A0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_0 0x4829A4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_1 0x4829A8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_2 0x4829AC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_3 0x4829B0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_4 0x4829B4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_5 0x4829B8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_6 0x4829BC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_7 0x4829C0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_8 0x4829C4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_9 0x4829C8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_10 0x4829CC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_11 0x4829D0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_12 0x4829D4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_13 0x4829D8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_14 0x4829DC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_15 0x4829E0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_0 0x4829E4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_1 0x4829E8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_2 0x4829EC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_3 0x4829F0 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_4 0x4829F4 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_5 0x4829F8 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_6 0x4829FC + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_7 0x482A00 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_8 0x482A04 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_9 0x482A08 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_10 0x482A0C + +#define 
mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_11 0x482A10 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_12 0x482A14 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_13 0x482A18 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_14 0x482A1C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_15 0x482A20 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AW 0x482A64 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AR 0x482A68 + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_HIT_AW 0x482A6C + +#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_HIT_AR 0x482A70 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_CFG 0x482B64 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_SHIFT 0x482B68 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_0 0x482B6C + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_1 0x482B70 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_2 0x482B74 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_3 0x482B78 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_4 0x482B7C + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_5 0x482B80 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_6 0x482B84 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_7 0x482B88 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_0 0x482BAC + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_1 0x482BB0 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_2 0x482BB4 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_3 0x482BB8 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_4 0x482BBC + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_5 0x482BC0 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_6 0x482BC4 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_7 0x482BC8 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_0 0x482BEC + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_1 0x482BF0 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_2 0x482BF4 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_3 0x482BF8 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_4 0x482BFC + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_5 0x482C00 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_6 0x482C04 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_7 0x482C08 + +#define mmDMA_IF_W_S_DOWN_CH1_RGL_WDT 0x482C2C + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_WRAP 0x482C30 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_WRAP 0x482C34 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_WRAP 0x482C38 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_WRAP 0x482C3C + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_WRAP 0x482C40 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_WRAP 0x482C44 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_WRAP 0x482C48 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_WRAP 0x482C4C + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_CNT 0x482C50 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_CNT 0x482C54 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_CNT 0x482C58 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_CNT 0x482C5C + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_CNT 0x482C60 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_CNT 0x482C64 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_CNT 0x482C68 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_CNT 0x482C6C + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_WRAP 0x482C70 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_WRAP 0x482C74 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_WRAP 0x482C78 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_WRAP 0x482C7C + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_WRAP 0x482C80 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_WRAP 
0x482C84 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_WRAP 0x482C88 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_WRAP 0x482C8C + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_CNT 0x482C90 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_CNT 0x482C94 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_CNT 0x482C98 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_CNT 0x482C9C + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_CNT 0x482CA0 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_CNT 0x482CA4 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_CNT 0x482CA8 + +#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_CNT 0x482CAC + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_0 0x482CB0 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_1 0x482CB4 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_2 0x482CB8 + +#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_3 0x482CBC + +#endif /* ASIC_REG_DMA_IF_W_S_DOWN_CH1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h new file mode 100644 index 000000000..2382bc41b --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h @@ -0,0 +1,860 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_IF_W_S_REGS_H_ +#define ASIC_REG_DMA_IF_W_S_REGS_H_ + +/* + ***************************************** + * DMA_IF_W_S (Prototype: DMA_IF) + ***************************************** + */ + +#define mmDMA_IF_W_S_HBM0_WR_CRED_CNT 0x480000 + +#define mmDMA_IF_W_S_HBM1_WR_CRED_CNT 0x480004 + +#define mmDMA_IF_W_S_HBM0_RD_CRED_CNT 0x480008 + +#define mmDMA_IF_W_S_HBM1_RD_CRED_CNT 0x48000C + +#define mmDMA_IF_W_S_HBM_LIMITER_0 0x480030 + +#define mmDMA_IF_W_S_HBM_LIMITER_1 0x480034 + +#define mmDMA_IF_W_S_HBM_LIMITER_2 0x480038 + +#define mmDMA_IF_W_S_HBM_LIMITER_3 0x48003C + +#define mmDMA_IF_W_S_HBM_ALMOST_EN_0 0x480040 + +#define mmDMA_IF_W_S_HBM_ALMOST_EN_1 0x480044 + +#define mmDMA_IF_W_S_HBM_CRED_EN_0 0x480050 + +#define mmDMA_IF_W_S_HBM_CRED_EN_1 0x480054 + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_0 0x480100 + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_1 0x480104 + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_2 0x480108 + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_3 0x48010C + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_4 0x480110 + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_5 0x480114 + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_6 0x480118 + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_7 0x48011C + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_8 0x480120 + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_9 0x480124 + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_10 0x480128 + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_11 0x48012C + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_12 0x480130 + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_13 0x480134 + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_14 0x480138 + +#define mmDMA_IF_W_S_SOB_MIN_RPROT_15 0x48013C + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_0 0x480140 + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_1 0x480144 + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_2 0x480148 + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_3 0x48014C + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_4 0x480150 + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_5 0x480154 + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_6 0x480158 + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_7 0x48015C + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_8 0x480160 + +#define 
mmDMA_IF_W_S_SOB_MAX_RPROT_9 0x480164 + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_10 0x480168 + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_11 0x48016C + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_12 0x480170 + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_13 0x480174 + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_14 0x480178 + +#define mmDMA_IF_W_S_SOB_MAX_RPROT_15 0x48017C + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_0 0x480180 + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_1 0x480184 + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_2 0x480188 + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_3 0x48018C + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_4 0x480190 + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_5 0x480194 + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_6 0x480198 + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_7 0x48019C + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_8 0x4801A0 + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_9 0x4801A4 + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_10 0x4801A8 + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_11 0x4801AC + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_12 0x4801B0 + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_13 0x4801B4 + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_14 0x4801B8 + +#define mmDMA_IF_W_S_SOB_MIN_WPROT_15 0x4801BC + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_0 0x4801C0 + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_1 0x4801C4 + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_2 0x4801C8 + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_3 0x4801CC + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_4 0x4801D0 + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_5 0x4801D4 + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_6 0x4801D8 + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_7 0x4801DC + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_8 0x4801E0 + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_9 0x4801E4 + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_10 0x4801E8 + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_11 0x4801EC + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_12 0x4801F0 + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_13 0x4801F4 + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_14 0x4801F8 + +#define mmDMA_IF_W_S_SOB_MAX_WPROT_15 0x4801FC + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_0 0x480200 + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_1 0x480204 + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_2 0x480208 + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_3 0x48020C + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_4 0x480210 + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_5 0x480214 + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_6 0x480218 + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_7 0x48021C + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_8 0x480220 + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_9 0x480224 + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_10 0x480228 + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_11 0x48022C + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_12 0x480230 + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_13 0x480234 + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_14 0x480238 + +#define mmDMA_IF_W_S_SOB_MIN_RPRIV_15 0x48023C + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_0 0x480240 + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_1 0x480244 + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_2 0x480248 + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_3 0x48024C + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_4 0x480250 + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_5 0x480254 + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_6 0x480258 + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_7 0x48025C + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_8 0x480260 + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_9 0x480264 + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_10 0x480268 + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_11 0x48026C + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_12 0x480270 + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_13 0x480274 + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_14 0x480278 + +#define mmDMA_IF_W_S_SOB_MAX_RPRIV_15 0x48027C + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_0 0x480280 + 
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_1 0x480284 + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_2 0x480288 + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_3 0x48028C + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_4 0x480290 + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_5 0x480294 + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_6 0x480298 + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_7 0x48029C + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_8 0x4802A0 + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_9 0x4802A4 + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_10 0x4802A8 + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_11 0x4802AC + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_12 0x4802B0 + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_13 0x4802B4 + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_14 0x4802B8 + +#define mmDMA_IF_W_S_SOB_MIN_WPRIV_15 0x4802BC + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_0 0x4802C0 + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_1 0x4802C4 + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_2 0x4802C8 + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_3 0x4802CC + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_4 0x4802D0 + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_5 0x4802D4 + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_6 0x4802D8 + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_7 0x4802DC + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_8 0x4802E0 + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_9 0x4802E4 + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_10 0x4802E8 + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_11 0x4802EC + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_12 0x4802F0 + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_13 0x4802F4 + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_14 0x4802F8 + +#define mmDMA_IF_W_S_SOB_MAX_WPRIV_15 0x4802FC + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_0 0x480300 + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_1 0x480304 + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_2 0x480308 + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_3 0x48030C + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_4 0x480310 + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_5 0x480314 + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_6 0x480318 + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_7 0x48031C + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_8 0x480320 + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_9 0x480324 + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_10 0x480328 + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_11 0x48032C + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_12 0x480330 + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_13 0x480334 + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_14 0x480338 + +#define mmDMA_IF_W_S_DMA0_MIN_RPROT_15 0x48033C + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_0 0x480340 + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_1 0x480344 + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_2 0x480348 + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_3 0x48034C + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_4 0x480350 + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_5 0x480354 + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_6 0x480358 + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_7 0x48035C + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_8 0x480360 + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_9 0x480364 + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_10 0x480368 + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_11 0x48036C + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_12 0x480370 + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_13 0x480374 + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_14 0x480378 + +#define mmDMA_IF_W_S_DMA0_MAX_RPROT_15 0x48037C + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_0 0x480380 + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_1 0x480384 + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_2 0x480388 + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_3 0x48038C + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_4 0x480390 + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_5 0x480394 + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_6 0x480398 + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_7 0x48039C + +#define 
mmDMA_IF_W_S_DMA0_MIN_WPROT_8 0x4803A0 + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_9 0x4803A4 + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_10 0x4803A8 + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_11 0x4803AC + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_12 0x4803B0 + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_13 0x4803B4 + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_14 0x4803B8 + +#define mmDMA_IF_W_S_DMA0_MIN_WPROT_15 0x4803BC + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_0 0x4803C0 + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_1 0x4803C4 + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_2 0x4803C8 + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_3 0x4803CC + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_4 0x4803D0 + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_5 0x4803D4 + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_6 0x4803D8 + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_7 0x4803DC + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_8 0x4803E0 + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_9 0x4803E4 + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_10 0x4803E8 + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_11 0x4803EC + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_12 0x4803F0 + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_13 0x4803F4 + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_14 0x4803F8 + +#define mmDMA_IF_W_S_DMA0_MAX_WPROT_15 0x4803FC + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_0 0x480400 + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_1 0x480404 + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_2 0x480408 + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_3 0x48040C + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_4 0x480410 + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_5 0x480414 + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_6 0x480418 + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_7 0x48041C + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_8 0x480420 + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_9 0x480424 + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_10 0x480428 + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_11 0x48042C + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_12 0x480430 + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_13 0x480434 + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_14 0x480438 + +#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_15 0x48043C + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_0 0x480440 + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_1 0x480444 + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_2 0x480448 + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_3 0x48044C + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_4 0x480450 + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_5 0x480454 + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_6 0x480458 + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_7 0x48045C + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_8 0x480460 + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_9 0x480464 + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_10 0x480468 + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_11 0x48046C + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_12 0x480470 + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_13 0x480474 + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_14 0x480478 + +#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_15 0x48047C + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_0 0x480480 + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_1 0x480484 + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_2 0x480488 + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_3 0x48048C + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_4 0x480490 + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_5 0x480494 + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_6 0x480498 + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_7 0x48049C + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_8 0x4804A0 + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_9 0x4804A4 + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_10 0x4804A8 + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_11 0x4804AC + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_12 0x4804B0 + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_13 0x4804B4 + +#define 
mmDMA_IF_W_S_DMA0_MIN_WPRIV_14 0x4804B8 + +#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_15 0x4804BC + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_0 0x4804C0 + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_1 0x4804C4 + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_2 0x4804C8 + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_3 0x4804CC + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_4 0x4804D0 + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_5 0x4804D4 + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_6 0x4804D8 + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_7 0x4804DC + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_8 0x4804E0 + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_9 0x4804E4 + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_10 0x4804E8 + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_11 0x4804EC + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_12 0x4804F0 + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_13 0x4804F4 + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_14 0x4804F8 + +#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_15 0x4804FC + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_0 0x480500 + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_1 0x480504 + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_2 0x480508 + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_3 0x48050C + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_4 0x480510 + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_5 0x480514 + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_6 0x480518 + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_7 0x48051C + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_8 0x480520 + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_9 0x480524 + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_10 0x480528 + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_11 0x48052C + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_12 0x480530 + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_13 0x480534 + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_14 0x480538 + +#define mmDMA_IF_W_S_DMA1_MIN_RPROT_15 0x48053C + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_0 0x480540 + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_1 0x480544 + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_2 0x480548 + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_3 0x48054C + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_4 0x480550 + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_5 0x480554 + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_6 0x480558 + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_7 0x48055C + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_8 0x480560 + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_9 0x480564 + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_10 0x480568 + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_11 0x48056C + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_12 0x480570 + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_13 0x480574 + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_14 0x480578 + +#define mmDMA_IF_W_S_DMA1_MAX_RPROT_15 0x48057C + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_0 0x480580 + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_1 0x480584 + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_2 0x480588 + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_3 0x48058C + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_4 0x480590 + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_5 0x480594 + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_6 0x480598 + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_7 0x48059C + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_8 0x4805A0 + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_9 0x4805A4 + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_10 0x4805A8 + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_11 0x4805AC + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_12 0x4805B0 + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_13 0x4805B4 + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_14 0x4805B8 + +#define mmDMA_IF_W_S_DMA1_MIN_WPROT_15 0x4805BC + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_0 0x4805C0 + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_1 0x4805C4 + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_2 0x4805C8 + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_3 0x4805CC + +#define 
mmDMA_IF_W_S_DMA1_MAX_WPROT_4 0x4805D0 + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_5 0x4805D4 + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_6 0x4805D8 + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_7 0x4805DC + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_8 0x4805E0 + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_9 0x4805E4 + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_10 0x4805E8 + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_11 0x4805EC + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_12 0x4805F0 + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_13 0x4805F4 + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_14 0x4805F8 + +#define mmDMA_IF_W_S_DMA1_MAX_WPROT_15 0x4805FC + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_0 0x480600 + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_1 0x480604 + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_2 0x480608 + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_3 0x48060C + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_4 0x480610 + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_5 0x480614 + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_6 0x480618 + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_7 0x48061C + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_8 0x480620 + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_9 0x480624 + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_10 0x480628 + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_11 0x48062C + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_12 0x480630 + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_13 0x480634 + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_14 0x480638 + +#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_15 0x48063C + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_0 0x480640 + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_1 0x480644 + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_2 0x480648 + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_3 0x48064C + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_4 0x480650 + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_5 0x480654 + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_6 0x480658 + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_7 0x48065C + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_8 0x480660 + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_9 0x480664 + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_10 0x480668 + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_11 0x48066C + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_12 0x480670 + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_13 0x480674 + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_14 0x480678 + +#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_15 0x48067C + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_0 0x480680 + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_1 0x480684 + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_2 0x480688 + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_3 0x48068C + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_4 0x480690 + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_5 0x480694 + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_6 0x480698 + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_7 0x48069C + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_8 0x4806A0 + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_9 0x4806A4 + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_10 0x4806A8 + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_11 0x4806AC + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_12 0x4806B0 + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_13 0x4806B4 + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_14 0x4806B8 + +#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_15 0x4806BC + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_0 0x4806C0 + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_1 0x4806C4 + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_2 0x4806C8 + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_3 0x4806CC + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_4 0x4806D0 + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_5 0x4806D4 + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_6 0x4806D8 + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_7 0x4806DC + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_8 0x4806E0 + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_9 0x4806E4 + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_10 
0x4806E8 + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_11 0x4806EC + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_12 0x4806F0 + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_13 0x4806F4 + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_14 0x4806F8 + +#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_15 0x4806FC + +#define mmDMA_IF_W_S_SOB_HIT_RPROT 0x480700 + +#define mmDMA_IF_W_S_SOB_HIT_WPROT 0x480704 + +#define mmDMA_IF_W_S_SOB_HIT_RPRIV 0x48070C + +#define mmDMA_IF_W_S_SOB_HIT_WPRIV 0x480710 + +#define mmDMA_IF_W_S_DMA0_HIT_RPROT 0x48071C + +#define mmDMA_IF_W_S_DMA0_HIT_WPROT 0x480720 + +#define mmDMA_IF_W_S_DMA0_HIT_RPRIV 0x480724 + +#define mmDMA_IF_W_S_DMA0_HIT_WPRIV 0x480728 + +#define mmDMA_IF_W_S_DMA1_HIT_RPROT 0x480730 + +#define mmDMA_IF_W_S_DMA1_HIT_WPROT 0x480734 + +#define mmDMA_IF_W_S_DMA1_HIT_RPRIV 0x480738 + +#define mmDMA_IF_W_S_DMA1_HIT_WPRIV 0x48073C + +#define mmDMA_IF_W_S_HBM_BIN 0x480800 + +#define mmDMA_IF_W_S_MME_BIN 0x480804 + +#define mmDMA_IF_W_S_TPC_BIN 0x480808 + +#define mmDMA_IF_W_S_DMA_BIN 0x48080C + +#define mmDMA_IF_W_S_SOB_CG_EN 0x480810 + +#define mmDMA_IF_W_S_HBM_I2C_ADDR_0 0x480820 + +#define mmDMA_IF_W_S_HBM_I2C_ADDR_1 0x480824 + +#define mmDMA_IF_W_S_HBM_I2C_ADDR_2 0x480828 + +#define mmDMA_IF_W_S_HBM_I2C_ADDR_3 0x48082C + +#define mmDMA_IF_W_S_HBM_I2C_ADDR_4 0x480830 + +#define mmDMA_IF_W_S_HBM_MISC 0x480834 + +#endif /* ASIC_REG_DMA_IF_W_S_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h new file mode 100644 index 000000000..c7596aac7 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h @@ -0,0 +1,4974 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef GAUDI_BLOCKS_H_ +#define GAUDI_BLOCKS_H_ + +#define mmNIC0_PHY0_BASE 0x0ull +#define NIC0_PHY0_MAX_OFFSET 0x9F13 +#define mmMME0_ACC_BASE 0x7FFC020000ull +#define MME0_ACC_MAX_OFFSET 0x5C00 +#define MME0_ACC_SECTION 0x20000 +#define mmMME0_SBAB_BASE 0x7FFC040000ull +#define MME0_SBAB_MAX_OFFSET 0x5800 +#define MME0_SBAB_SECTION 0x1000 +#define mmMME0_PRTN_BASE 0x7FFC041000ull +#define MME0_PRTN_MAX_OFFSET 0x5000 +#define MME0_PRTN_SECTION 0x1F000 +#define mmMME0_CTRL_BASE 0x7FFC060000ull +#define MME0_CTRL_MAX_OFFSET 0xDA80 +#define MME0_CTRL_SECTION 0x8000 +#define mmARCH_MME0_CTRL_BASE 0x7FFC060008ull +#define ARCH_MME0_CTRL_MAX_OFFSET 0x3400 +#define ARCH_MME0_CTRL_SECTION 0x3400 +#define mmARCH_TENSOR_S_MME0_CTRL_BASE 0x7FFC06003Cull +#define ARCH_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00 +#define ARCH_TENSOR_S_MME0_CTRL_SECTION 0x4C00 +#define mmARCH_AGU_S_MME0_CTRL_BASE 0x7FFC060088ull +#define ARCH_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400 +#define ARCH_AGU_S_MME0_CTRL_SECTION 0x2400 +#define mmARCH_TENSOR_L_MME0_CTRL_BASE 0x7FFC0600ACull +#define ARCH_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00 +#define ARCH_TENSOR_L_MME0_CTRL_SECTION 0x4C00 +#define mmARCH_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC0600F8ull +#define ARCH_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400 +#define ARCH_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400 +#define mmARCH_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC06011Cull +#define ARCH_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400 +#define ARCH_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400 +#define mmARCH_TENSOR_O_MME0_CTRL_BASE 0x7FFC060140ull +#define ARCH_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00 +#define 
ARCH_TENSOR_O_MME0_CTRL_SECTION 0x4C00 +#define mmARCH_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC06018Cull +#define ARCH_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400 +#define ARCH_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400 +#define mmARCH_AGU_O_REMOTE_MME0_CTRL_BASE 0x7FFC0601B0ull +#define ARCH_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400 +#define ARCH_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400 +#define mmARCH_DESC_MME0_CTRL_BASE 0x7FFC0601D4ull +#define ARCH_DESC_MME0_CTRL_MAX_OFFSET 0x5400 +#define ARCH_DESC_MME0_CTRL_SECTION 0x2340 +#define mmSHADOW_0_MME0_CTRL_BASE 0x7FFC060408ull +#define SHADOW_0_MME0_CTRL_MAX_OFFSET 0x3400 +#define SHADOW_0_MME0_CTRL_SECTION 0x3400 +#define mmSHADOW_0_TENSOR_S_MME0_CTRL_BASE 0x7FFC06043Cull +#define SHADOW_0_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_0_TENSOR_S_MME0_CTRL_SECTION 0x4C00 +#define mmSHADOW_0_AGU_S_MME0_CTRL_BASE 0x7FFC060488ull +#define SHADOW_0_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_0_AGU_S_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_0_TENSOR_L_MME0_CTRL_BASE 0x7FFC0604ACull +#define SHADOW_0_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_0_TENSOR_L_MME0_CTRL_SECTION 0x4C00 +#define mmSHADOW_0_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC0604F8ull +#define SHADOW_0_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_0_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_0_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC06051Cull +#define SHADOW_0_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_0_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_0_TENSOR_O_MME0_CTRL_BASE 0x7FFC060540ull +#define SHADOW_0_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_0_TENSOR_O_MME0_CTRL_SECTION 0x4C00 +#define mmSHADOW_0_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC06058Cull +#define SHADOW_0_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_0_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_0_AGU_O_REMOTE_MME0_CTRL_BASE 0x7FFC0605B0ull +#define SHADOW_0_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_0_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_0_DESC_MME0_CTRL_BASE 0x7FFC0605D4ull +#define SHADOW_0_DESC_MME0_CTRL_MAX_OFFSET 0x5400 +#define SHADOW_0_DESC_MME0_CTRL_SECTION 0xB400 +#define mmSHADOW_1_MME0_CTRL_BASE 0x7FFC060688ull +#define SHADOW_1_MME0_CTRL_MAX_OFFSET 0x3400 +#define SHADOW_1_MME0_CTRL_SECTION 0x3400 +#define mmSHADOW_1_TENSOR_S_MME0_CTRL_BASE 0x7FFC0606BCull +#define SHADOW_1_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_1_TENSOR_S_MME0_CTRL_SECTION 0x4C00 +#define mmSHADOW_1_AGU_S_MME0_CTRL_BASE 0x7FFC060708ull +#define SHADOW_1_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_1_AGU_S_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_1_TENSOR_L_MME0_CTRL_BASE 0x7FFC06072Cull +#define SHADOW_1_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_1_TENSOR_L_MME0_CTRL_SECTION 0x4C00 +#define mmSHADOW_1_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC060778ull +#define SHADOW_1_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_1_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_1_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC06079Cull +#define SHADOW_1_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_1_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_1_TENSOR_O_MME0_CTRL_BASE 0x7FFC0607C0ull +#define SHADOW_1_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_1_TENSOR_O_MME0_CTRL_SECTION 0x4C00 +#define mmSHADOW_1_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC06080Cull +#define SHADOW_1_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_1_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_1_AGU_O_REMOTE_MME0_CTRL_BASE 
0x7FFC060830ull +#define SHADOW_1_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_1_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_1_DESC_MME0_CTRL_BASE 0x7FFC060854ull +#define SHADOW_1_DESC_MME0_CTRL_MAX_OFFSET 0x5400 +#define SHADOW_1_DESC_MME0_CTRL_SECTION 0xB400 +#define mmSHADOW_2_MME0_CTRL_BASE 0x7FFC060908ull +#define SHADOW_2_MME0_CTRL_MAX_OFFSET 0x3400 +#define SHADOW_2_MME0_CTRL_SECTION 0x3400 +#define mmSHADOW_2_TENSOR_S_MME0_CTRL_BASE 0x7FFC06093Cull +#define SHADOW_2_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_2_TENSOR_S_MME0_CTRL_SECTION 0x4C00 +#define mmSHADOW_2_AGU_S_MME0_CTRL_BASE 0x7FFC060988ull +#define SHADOW_2_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_2_AGU_S_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_2_TENSOR_L_MME0_CTRL_BASE 0x7FFC0609ACull +#define SHADOW_2_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_2_TENSOR_L_MME0_CTRL_SECTION 0x4C00 +#define mmSHADOW_2_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC0609F8ull +#define SHADOW_2_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_2_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_2_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC060A1Cull +#define SHADOW_2_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_2_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_2_TENSOR_O_MME0_CTRL_BASE 0x7FFC060A40ull +#define SHADOW_2_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_2_TENSOR_O_MME0_CTRL_SECTION 0x4C00 +#define mmSHADOW_2_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC060A8Cull +#define SHADOW_2_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_2_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_2_AGU_O_REMOTE_MME0_CTRL_BASE 0x7FFC060AB0ull +#define SHADOW_2_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_2_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_2_DESC_MME0_CTRL_BASE 0x7FFC060AD4ull +#define SHADOW_2_DESC_MME0_CTRL_MAX_OFFSET 0x5400 +#define SHADOW_2_DESC_MME0_CTRL_SECTION 0xB400 +#define mmSHADOW_3_MME0_CTRL_BASE 0x7FFC060B88ull +#define SHADOW_3_MME0_CTRL_MAX_OFFSET 0x3400 +#define SHADOW_3_MME0_CTRL_SECTION 0x3400 +#define mmSHADOW_3_TENSOR_S_MME0_CTRL_BASE 0x7FFC060BBCull +#define SHADOW_3_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_3_TENSOR_S_MME0_CTRL_SECTION 0x4C00 +#define mmSHADOW_3_AGU_S_MME0_CTRL_BASE 0x7FFC060C08ull +#define SHADOW_3_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_3_AGU_S_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_3_TENSOR_L_MME0_CTRL_BASE 0x7FFC060C2Cull +#define SHADOW_3_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_3_TENSOR_L_MME0_CTRL_SECTION 0x4C00 +#define mmSHADOW_3_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC060C78ull +#define SHADOW_3_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_3_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_3_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC060C9Cull +#define SHADOW_3_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_3_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_3_TENSOR_O_MME0_CTRL_BASE 0x7FFC060CC0ull +#define SHADOW_3_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_3_TENSOR_O_MME0_CTRL_SECTION 0x4C00 +#define mmSHADOW_3_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC060D0Cull +#define SHADOW_3_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_3_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_3_AGU_O_REMOTE_MME0_CTRL_BASE 0x7FFC060D30ull +#define SHADOW_3_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_3_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400 +#define mmSHADOW_3_DESC_MME0_CTRL_BASE 0x7FFC060D54ull +#define 
SHADOW_3_DESC_MME0_CTRL_MAX_OFFSET 0x5400 +#define SHADOW_3_DESC_MME0_CTRL_SECTION 0x72AC +#define mmMME0_QM_BASE 0x7FFC068000ull +#define MME0_QM_MAX_OFFSET 0xD040 +#define MME0_QM_SECTION 0x38000 +#define mmMME1_ACC_BASE 0x7FFC0A0000ull +#define MME1_ACC_MAX_OFFSET 0x5C00 +#define MME1_ACC_SECTION 0x20000 +#define mmMME1_SBAB_BASE 0x7FFC0C0000ull +#define MME1_SBAB_MAX_OFFSET 0x5800 +#define MME1_SBAB_SECTION 0x1000 +#define mmMME1_PRTN_BASE 0x7FFC0C1000ull +#define MME1_PRTN_MAX_OFFSET 0x5000 +#define MME1_PRTN_SECTION 0x1F000 +#define mmMME1_CTRL_BASE 0x7FFC0E0000ull +#define MME1_CTRL_MAX_OFFSET 0xDA80 +#define MME1_CTRL_SECTION 0x8000 +#define mmARCH_MME1_CTRL_BASE 0x7FFC0E0008ull +#define ARCH_MME1_CTRL_MAX_OFFSET 0x3400 +#define ARCH_MME1_CTRL_SECTION 0x3400 +#define mmARCH_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E003Cull +#define ARCH_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00 +#define ARCH_TENSOR_S_MME1_CTRL_SECTION 0x4C00 +#define mmARCH_AGU_S_MME1_CTRL_BASE 0x7FFC0E0088ull +#define ARCH_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400 +#define ARCH_AGU_S_MME1_CTRL_SECTION 0x2400 +#define mmARCH_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E00ACull +#define ARCH_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00 +#define ARCH_TENSOR_L_MME1_CTRL_SECTION 0x4C00 +#define mmARCH_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E00F8ull +#define ARCH_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400 +#define ARCH_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400 +#define mmARCH_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E011Cull +#define ARCH_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400 +#define ARCH_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400 +#define mmARCH_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E0140ull +#define ARCH_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00 +#define ARCH_TENSOR_O_MME1_CTRL_SECTION 0x4C00 +#define mmARCH_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E018Cull +#define ARCH_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400 +#define ARCH_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400 +#define mmARCH_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E01B0ull +#define ARCH_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400 +#define ARCH_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400 +#define mmARCH_DESC_MME1_CTRL_BASE 0x7FFC0E01D4ull +#define ARCH_DESC_MME1_CTRL_MAX_OFFSET 0x5400 +#define ARCH_DESC_MME1_CTRL_SECTION 0x2340 +#define mmSHADOW_0_MME1_CTRL_BASE 0x7FFC0E0408ull +#define SHADOW_0_MME1_CTRL_MAX_OFFSET 0x3400 +#define SHADOW_0_MME1_CTRL_SECTION 0x3400 +#define mmSHADOW_0_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E043Cull +#define SHADOW_0_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_0_TENSOR_S_MME1_CTRL_SECTION 0x4C00 +#define mmSHADOW_0_AGU_S_MME1_CTRL_BASE 0x7FFC0E0488ull +#define SHADOW_0_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_0_AGU_S_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_0_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E04ACull +#define SHADOW_0_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_0_TENSOR_L_MME1_CTRL_SECTION 0x4C00 +#define mmSHADOW_0_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E04F8ull +#define SHADOW_0_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_0_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_0_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E051Cull +#define SHADOW_0_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_0_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_0_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E0540ull +#define SHADOW_0_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_0_TENSOR_O_MME1_CTRL_SECTION 0x4C00 +#define mmSHADOW_0_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E058Cull +#define SHADOW_0_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_0_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400 +#define 
mmSHADOW_0_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E05B0ull +#define SHADOW_0_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_0_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_0_DESC_MME1_CTRL_BASE 0x7FFC0E05D4ull +#define SHADOW_0_DESC_MME1_CTRL_MAX_OFFSET 0x5400 +#define SHADOW_0_DESC_MME1_CTRL_SECTION 0xB400 +#define mmSHADOW_1_MME1_CTRL_BASE 0x7FFC0E0688ull +#define SHADOW_1_MME1_CTRL_MAX_OFFSET 0x3400 +#define SHADOW_1_MME1_CTRL_SECTION 0x3400 +#define mmSHADOW_1_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E06BCull +#define SHADOW_1_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_1_TENSOR_S_MME1_CTRL_SECTION 0x4C00 +#define mmSHADOW_1_AGU_S_MME1_CTRL_BASE 0x7FFC0E0708ull +#define SHADOW_1_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_1_AGU_S_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_1_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E072Cull +#define SHADOW_1_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_1_TENSOR_L_MME1_CTRL_SECTION 0x4C00 +#define mmSHADOW_1_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E0778ull +#define SHADOW_1_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_1_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_1_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E079Cull +#define SHADOW_1_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_1_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_1_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E07C0ull +#define SHADOW_1_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_1_TENSOR_O_MME1_CTRL_SECTION 0x4C00 +#define mmSHADOW_1_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E080Cull +#define SHADOW_1_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_1_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_1_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E0830ull +#define SHADOW_1_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_1_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_1_DESC_MME1_CTRL_BASE 0x7FFC0E0854ull +#define SHADOW_1_DESC_MME1_CTRL_MAX_OFFSET 0x5400 +#define SHADOW_1_DESC_MME1_CTRL_SECTION 0xB400 +#define mmSHADOW_2_MME1_CTRL_BASE 0x7FFC0E0908ull +#define SHADOW_2_MME1_CTRL_MAX_OFFSET 0x3400 +#define SHADOW_2_MME1_CTRL_SECTION 0x3400 +#define mmSHADOW_2_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E093Cull +#define SHADOW_2_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_2_TENSOR_S_MME1_CTRL_SECTION 0x4C00 +#define mmSHADOW_2_AGU_S_MME1_CTRL_BASE 0x7FFC0E0988ull +#define SHADOW_2_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_2_AGU_S_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_2_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E09ACull +#define SHADOW_2_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_2_TENSOR_L_MME1_CTRL_SECTION 0x4C00 +#define mmSHADOW_2_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E09F8ull +#define SHADOW_2_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_2_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_2_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E0A1Cull +#define SHADOW_2_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_2_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_2_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E0A40ull +#define SHADOW_2_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_2_TENSOR_O_MME1_CTRL_SECTION 0x4C00 +#define mmSHADOW_2_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E0A8Cull +#define SHADOW_2_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_2_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_2_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E0AB0ull +#define SHADOW_2_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_2_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_2_DESC_MME1_CTRL_BASE 0x7FFC0E0AD4ull 
+#define SHADOW_2_DESC_MME1_CTRL_MAX_OFFSET 0x5400 +#define SHADOW_2_DESC_MME1_CTRL_SECTION 0xB400 +#define mmSHADOW_3_MME1_CTRL_BASE 0x7FFC0E0B88ull +#define SHADOW_3_MME1_CTRL_MAX_OFFSET 0x3400 +#define SHADOW_3_MME1_CTRL_SECTION 0x3400 +#define mmSHADOW_3_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E0BBCull +#define SHADOW_3_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_3_TENSOR_S_MME1_CTRL_SECTION 0x4C00 +#define mmSHADOW_3_AGU_S_MME1_CTRL_BASE 0x7FFC0E0C08ull +#define SHADOW_3_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_3_AGU_S_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_3_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E0C2Cull +#define SHADOW_3_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_3_TENSOR_L_MME1_CTRL_SECTION 0x4C00 +#define mmSHADOW_3_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E0C78ull +#define SHADOW_3_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_3_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_3_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E0C9Cull +#define SHADOW_3_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_3_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_3_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E0CC0ull +#define SHADOW_3_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00 +#define SHADOW_3_TENSOR_O_MME1_CTRL_SECTION 0x4C00 +#define mmSHADOW_3_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E0D0Cull +#define SHADOW_3_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_3_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_3_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E0D30ull +#define SHADOW_3_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400 +#define SHADOW_3_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400 +#define mmSHADOW_3_DESC_MME1_CTRL_BASE 0x7FFC0E0D54ull +#define SHADOW_3_DESC_MME1_CTRL_MAX_OFFSET 0x5400 +#define SHADOW_3_DESC_MME1_CTRL_SECTION 0x72AC +#define mmMME1_QM_BASE 0x7FFC0E8000ull +#define MME1_QM_MAX_OFFSET 0xD040 +#define MME1_QM_SECTION 0x38000 +#define mmMME2_ACC_BASE 0x7FFC120000ull +#define MME2_ACC_MAX_OFFSET 0x5C00 +#define MME2_ACC_SECTION 0x20000 +#define mmMME2_SBAB_BASE 0x7FFC140000ull +#define MME2_SBAB_MAX_OFFSET 0x5800 +#define MME2_SBAB_SECTION 0x1000 +#define mmMME2_PRTN_BASE 0x7FFC141000ull +#define MME2_PRTN_MAX_OFFSET 0x5000 +#define MME2_PRTN_SECTION 0x1F000 +#define mmMME2_CTRL_BASE 0x7FFC160000ull +#define MME2_CTRL_MAX_OFFSET 0xDA80 +#define MME2_CTRL_SECTION 0x8000 +#define mmARCH_MME2_CTRL_BASE 0x7FFC160008ull +#define ARCH_MME2_CTRL_MAX_OFFSET 0x3400 +#define ARCH_MME2_CTRL_SECTION 0x3400 +#define mmARCH_TENSOR_S_MME2_CTRL_BASE 0x7FFC16003Cull +#define ARCH_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00 +#define ARCH_TENSOR_S_MME2_CTRL_SECTION 0x4C00 +#define mmARCH_AGU_S_MME2_CTRL_BASE 0x7FFC160088ull +#define ARCH_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400 +#define ARCH_AGU_S_MME2_CTRL_SECTION 0x2400 +#define mmARCH_TENSOR_L_MME2_CTRL_BASE 0x7FFC1600ACull +#define ARCH_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00 +#define ARCH_TENSOR_L_MME2_CTRL_SECTION 0x4C00 +#define mmARCH_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC1600F8ull +#define ARCH_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400 +#define ARCH_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400 +#define mmARCH_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC16011Cull +#define ARCH_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400 +#define ARCH_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400 +#define mmARCH_TENSOR_O_MME2_CTRL_BASE 0x7FFC160140ull +#define ARCH_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00 +#define ARCH_TENSOR_O_MME2_CTRL_SECTION 0x4C00 +#define mmARCH_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC16018Cull +#define ARCH_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400 +#define ARCH_AGU_O_LOCAL_MME2_CTRL_SECTION 
+#define mmARCH_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC1601B0ull
+#define ARCH_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmARCH_DESC_MME2_CTRL_BASE 0x7FFC1601D4ull
+#define ARCH_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define ARCH_DESC_MME2_CTRL_SECTION 0x2340
+#define mmSHADOW_0_MME2_CTRL_BASE 0x7FFC160408ull
+#define SHADOW_0_MME2_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_0_MME2_CTRL_SECTION 0x3400
+#define mmSHADOW_0_TENSOR_S_MME2_CTRL_BASE 0x7FFC16043Cull
+#define SHADOW_0_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_S_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_S_MME2_CTRL_BASE 0x7FFC160488ull
+#define SHADOW_0_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_S_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_L_MME2_CTRL_BASE 0x7FFC1604ACull
+#define SHADOW_0_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_L_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC1604F8ull
+#define SHADOW_0_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC16051Cull
+#define SHADOW_0_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_O_MME2_CTRL_BASE 0x7FFC160540ull
+#define SHADOW_0_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_O_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC16058Cull
+#define SHADOW_0_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC1605B0ull
+#define SHADOW_0_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_DESC_MME2_CTRL_BASE 0x7FFC1605D4ull
+#define SHADOW_0_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_0_DESC_MME2_CTRL_SECTION 0xB400
+#define mmSHADOW_1_MME2_CTRL_BASE 0x7FFC160688ull
+#define SHADOW_1_MME2_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_1_MME2_CTRL_SECTION 0x3400
+#define mmSHADOW_1_TENSOR_S_MME2_CTRL_BASE 0x7FFC1606BCull
+#define SHADOW_1_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_S_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_S_MME2_CTRL_BASE 0x7FFC160708ull
+#define SHADOW_1_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_S_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_L_MME2_CTRL_BASE 0x7FFC16072Cull
+#define SHADOW_1_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_L_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC160778ull
+#define SHADOW_1_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC16079Cull
+#define SHADOW_1_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_O_MME2_CTRL_BASE 0x7FFC1607C0ull
+#define SHADOW_1_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_O_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC16080Cull
+#define SHADOW_1_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC160830ull
+#define SHADOW_1_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_DESC_MME2_CTRL_BASE 0x7FFC160854ull
+#define SHADOW_1_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_1_DESC_MME2_CTRL_SECTION 0xB400
+#define mmSHADOW_2_MME2_CTRL_BASE 0x7FFC160908ull
+#define SHADOW_2_MME2_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_2_MME2_CTRL_SECTION 0x3400
+#define mmSHADOW_2_TENSOR_S_MME2_CTRL_BASE 0x7FFC16093Cull
+#define SHADOW_2_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_S_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_S_MME2_CTRL_BASE 0x7FFC160988ull
+#define SHADOW_2_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_S_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_L_MME2_CTRL_BASE 0x7FFC1609ACull
+#define SHADOW_2_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_L_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC1609F8ull
+#define SHADOW_2_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC160A1Cull
+#define SHADOW_2_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_O_MME2_CTRL_BASE 0x7FFC160A40ull
+#define SHADOW_2_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_O_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC160A8Cull
+#define SHADOW_2_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC160AB0ull
+#define SHADOW_2_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_DESC_MME2_CTRL_BASE 0x7FFC160AD4ull
+#define SHADOW_2_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_2_DESC_MME2_CTRL_SECTION 0xB400
+#define mmSHADOW_3_MME2_CTRL_BASE 0x7FFC160B88ull
+#define SHADOW_3_MME2_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_3_MME2_CTRL_SECTION 0x3400
+#define mmSHADOW_3_TENSOR_S_MME2_CTRL_BASE 0x7FFC160BBCull
+#define SHADOW_3_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_S_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_S_MME2_CTRL_BASE 0x7FFC160C08ull
+#define SHADOW_3_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_S_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_L_MME2_CTRL_BASE 0x7FFC160C2Cull
+#define SHADOW_3_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_L_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC160C78ull
+#define SHADOW_3_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC160C9Cull
+#define SHADOW_3_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_O_MME2_CTRL_BASE 0x7FFC160CC0ull
+#define SHADOW_3_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_O_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC160D0Cull
+#define SHADOW_3_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC160D30ull
+#define SHADOW_3_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_DESC_MME2_CTRL_BASE 0x7FFC160D54ull
+#define SHADOW_3_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_3_DESC_MME2_CTRL_SECTION 0x72AC
+#define mmMME2_QM_BASE 0x7FFC168000ull
+#define MME2_QM_MAX_OFFSET 0xD040
+#define MME2_QM_SECTION 0x38000
+#define mmMME3_ACC_BASE 0x7FFC1A0000ull
+#define MME3_ACC_MAX_OFFSET 0x5C00
+#define MME3_ACC_SECTION 0x20000
+#define mmMME3_SBAB_BASE 0x7FFC1C0000ull
+#define MME3_SBAB_MAX_OFFSET 0x5800
+#define MME3_SBAB_SECTION 0x1000
+#define mmMME3_PRTN_BASE 0x7FFC1C1000ull
+#define MME3_PRTN_MAX_OFFSET 0x5000
+#define MME3_PRTN_SECTION 0x1F000
+#define mmMME3_CTRL_BASE 0x7FFC1E0000ull
+#define MME3_CTRL_MAX_OFFSET 0xDA80
+#define MME3_CTRL_SECTION 0x8000
+#define mmARCH_MME3_CTRL_BASE 0x7FFC1E0008ull
+#define ARCH_MME3_CTRL_MAX_OFFSET 0x3400
+#define ARCH_MME3_CTRL_SECTION 0x3400
+#define mmARCH_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E003Cull
+#define ARCH_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_S_MME3_CTRL_BASE 0x7FFC1E0088ull
+#define ARCH_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E00ACull
+#define ARCH_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E00F8ull
+#define ARCH_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmARCH_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E011Cull
+#define ARCH_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E0140ull
+#define ARCH_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E018Cull
+#define ARCH_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmARCH_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E01B0ull
+#define ARCH_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmARCH_DESC_MME3_CTRL_BASE 0x7FFC1E01D4ull
+#define ARCH_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define ARCH_DESC_MME3_CTRL_SECTION 0x2340
+#define mmSHADOW_0_MME3_CTRL_BASE 0x7FFC1E0408ull
+#define SHADOW_0_MME3_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_0_MME3_CTRL_SECTION 0x3400
+#define mmSHADOW_0_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E043Cull
+#define SHADOW_0_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_S_MME3_CTRL_BASE 0x7FFC1E0488ull
+#define SHADOW_0_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E04ACull
+#define SHADOW_0_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E04F8ull
+#define SHADOW_0_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E051Cull
+#define SHADOW_0_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E0540ull
+#define SHADOW_0_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E058Cull
+#define SHADOW_0_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E05B0ull
+#define SHADOW_0_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_DESC_MME3_CTRL_BASE 0x7FFC1E05D4ull
+#define SHADOW_0_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_0_DESC_MME3_CTRL_SECTION 0xB400
+#define mmSHADOW_1_MME3_CTRL_BASE 0x7FFC1E0688ull
+#define SHADOW_1_MME3_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_1_MME3_CTRL_SECTION 0x3400
+#define mmSHADOW_1_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E06BCull
+#define SHADOW_1_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_S_MME3_CTRL_BASE 0x7FFC1E0708ull
+#define SHADOW_1_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E072Cull
+#define SHADOW_1_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E0778ull
+#define SHADOW_1_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E079Cull
+#define SHADOW_1_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E07C0ull
+#define SHADOW_1_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E080Cull
+#define SHADOW_1_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E0830ull
+#define SHADOW_1_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_DESC_MME3_CTRL_BASE 0x7FFC1E0854ull
+#define SHADOW_1_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_1_DESC_MME3_CTRL_SECTION 0xB400
+#define mmSHADOW_2_MME3_CTRL_BASE 0x7FFC1E0908ull
+#define SHADOW_2_MME3_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_2_MME3_CTRL_SECTION 0x3400
+#define mmSHADOW_2_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E093Cull
+#define SHADOW_2_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_S_MME3_CTRL_BASE 0x7FFC1E0988ull
+#define SHADOW_2_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E09ACull
+#define SHADOW_2_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E09F8ull
+#define SHADOW_2_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E0A1Cull
+#define SHADOW_2_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E0A40ull
+#define SHADOW_2_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E0A8Cull
+#define SHADOW_2_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E0AB0ull
+#define SHADOW_2_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_DESC_MME3_CTRL_BASE 0x7FFC1E0AD4ull
+#define SHADOW_2_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_2_DESC_MME3_CTRL_SECTION 0xB400
+#define mmSHADOW_3_MME3_CTRL_BASE 0x7FFC1E0B88ull
+#define SHADOW_3_MME3_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_3_MME3_CTRL_SECTION 0x3400
+#define mmSHADOW_3_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E0BBCull
+#define SHADOW_3_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_S_MME3_CTRL_BASE 0x7FFC1E0C08ull
+#define SHADOW_3_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E0C2Cull
+#define SHADOW_3_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E0C78ull
+#define SHADOW_3_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E0C9Cull
+#define SHADOW_3_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E0CC0ull
+#define SHADOW_3_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E0D0Cull
+#define SHADOW_3_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E0D30ull
+#define SHADOW_3_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_DESC_MME3_CTRL_BASE 0x7FFC1E0D54ull
+#define SHADOW_3_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_3_DESC_MME3_CTRL_SECTION 0x72AC
+#define mmMME3_QM_BASE 0x7FFC1E8000ull
+#define MME3_QM_MAX_OFFSET 0xD040
+#define MME3_QM_SECTION 0x18000
+#define mmSRAM_Y0_X0_BANK_BASE 0x7FFC200000ull
+#define SRAM_Y0_X0_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X0_RTR_BASE 0x7FFC201000ull
+#define SRAM_Y0_X0_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X0_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X1_BANK_BASE 0x7FFC208000ull
+#define SRAM_Y0_X1_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X1_RTR_BASE 0x7FFC209000ull
+#define SRAM_Y0_X1_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X1_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X2_BANK_BASE 0x7FFC210000ull
+#define SRAM_Y0_X2_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X2_RTR_BASE 0x7FFC211000ull
+#define SRAM_Y0_X2_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X2_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X3_BANK_BASE 0x7FFC218000ull
+#define SRAM_Y0_X3_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X3_RTR_BASE 0x7FFC219000ull
+#define SRAM_Y0_X3_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X3_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X4_BANK_BASE 0x7FFC220000ull
+#define SRAM_Y0_X4_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X4_RTR_BASE 0x7FFC221000ull
+#define SRAM_Y0_X4_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X4_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X5_BANK_BASE 0x7FFC228000ull
+#define SRAM_Y0_X5_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X5_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X5_RTR_BASE 0x7FFC229000ull
+#define SRAM_Y0_X5_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X5_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X6_BANK_BASE 0x7FFC230000ull
+#define SRAM_Y0_X6_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X6_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X6_RTR_BASE 0x7FFC231000ull
+#define SRAM_Y0_X6_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X6_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X7_BANK_BASE 0x7FFC238000ull
+#define SRAM_Y0_X7_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X7_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X7_RTR_BASE 0x7FFC239000ull
+#define SRAM_Y0_X7_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X7_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X0_BANK_BASE 0x7FFC240000ull
+#define SRAM_Y1_X0_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X0_RTR_BASE 0x7FFC241000ull
+#define SRAM_Y1_X0_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X0_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X1_BANK_BASE 0x7FFC248000ull
+#define SRAM_Y1_X1_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X1_RTR_BASE 0x7FFC249000ull
+#define SRAM_Y1_X1_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X1_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X2_BANK_BASE 0x7FFC250000ull
+#define SRAM_Y1_X2_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X2_RTR_BASE 0x7FFC251000ull
+#define SRAM_Y1_X2_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X2_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X3_BANK_BASE 0x7FFC258000ull
+#define SRAM_Y1_X3_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X3_RTR_BASE 0x7FFC259000ull
+#define SRAM_Y1_X3_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X3_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X4_BANK_BASE 0x7FFC260000ull
+#define SRAM_Y1_X4_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X4_RTR_BASE 0x7FFC261000ull
+#define SRAM_Y1_X4_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X4_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X5_BANK_BASE 0x7FFC268000ull
+#define SRAM_Y1_X5_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X5_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X5_RTR_BASE 0x7FFC269000ull
+#define SRAM_Y1_X5_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X5_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X6_BANK_BASE 0x7FFC270000ull
+#define SRAM_Y1_X6_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X6_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X6_RTR_BASE 0x7FFC271000ull
+#define SRAM_Y1_X6_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X6_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X7_BANK_BASE 0x7FFC278000ull
+#define SRAM_Y1_X7_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X7_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X7_RTR_BASE 0x7FFC279000ull
+#define SRAM_Y1_X7_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X7_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X0_BANK_BASE 0x7FFC280000ull
+#define SRAM_Y2_X0_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X0_RTR_BASE 0x7FFC281000ull
+#define SRAM_Y2_X0_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X0_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X1_BANK_BASE 0x7FFC288000ull
+#define SRAM_Y2_X1_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X1_RTR_BASE 0x7FFC289000ull
+#define SRAM_Y2_X1_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X1_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X2_BANK_BASE 0x7FFC290000ull
+#define SRAM_Y2_X2_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X2_RTR_BASE 0x7FFC291000ull
+#define SRAM_Y2_X2_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X2_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X3_BANK_BASE 0x7FFC298000ull
+#define SRAM_Y2_X3_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X3_RTR_BASE 0x7FFC299000ull
+#define SRAM_Y2_X3_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X3_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X4_BANK_BASE 0x7FFC2A0000ull
+#define SRAM_Y2_X4_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X4_RTR_BASE 0x7FFC2A1000ull
+#define SRAM_Y2_X4_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X4_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X5_BANK_BASE 0x7FFC2A8000ull
+#define SRAM_Y2_X5_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X5_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X5_RTR_BASE 0x7FFC2A9000ull
+#define SRAM_Y2_X5_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X5_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X6_BANK_BASE 0x7FFC2B0000ull
+#define SRAM_Y2_X6_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X6_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X6_RTR_BASE 0x7FFC2B1000ull
+#define SRAM_Y2_X6_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X6_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X7_BANK_BASE 0x7FFC2B8000ull
+#define SRAM_Y2_X7_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X7_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X7_RTR_BASE 0x7FFC2B9000ull
+#define SRAM_Y2_X7_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X7_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X0_BANK_BASE 0x7FFC2C0000ull
+#define SRAM_Y3_X0_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X0_RTR_BASE 0x7FFC2C1000ull
+#define SRAM_Y3_X0_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X0_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X1_BANK_BASE 0x7FFC2C8000ull
+#define SRAM_Y3_X1_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X1_RTR_BASE 0x7FFC2C9000ull
+#define SRAM_Y3_X1_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X1_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X2_BANK_BASE 0x7FFC2D0000ull
+#define SRAM_Y3_X2_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X2_RTR_BASE 0x7FFC2D1000ull
+#define SRAM_Y3_X2_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X2_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X3_BANK_BASE 0x7FFC2D8000ull
+#define SRAM_Y3_X3_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X3_RTR_BASE 0x7FFC2D9000ull
+#define SRAM_Y3_X3_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X3_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X4_BANK_BASE 0x7FFC2E0000ull
+#define SRAM_Y3_X4_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X4_RTR_BASE 0x7FFC2E1000ull
+#define SRAM_Y3_X4_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X4_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X5_BANK_BASE 0x7FFC2E8000ull
+#define SRAM_Y3_X5_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X5_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X5_RTR_BASE 0x7FFC2E9000ull
+#define SRAM_Y3_X5_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X5_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X6_BANK_BASE 0x7FFC2F0000ull
+#define SRAM_Y3_X6_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X6_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X6_RTR_BASE 0x7FFC2F1000ull
+#define SRAM_Y3_X6_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X6_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X7_BANK_BASE 0x7FFC2F8000ull
+#define SRAM_Y3_X7_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X7_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X7_RTR_BASE 0x7FFC2F9000ull
+#define SRAM_Y3_X7_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X7_RTR_SECTION 0x7000
+#define mmSIF_RTR_0_BASE 0x7FFC300000ull
+#define SIF_RTR_0_MAX_OFFSET 0x6500
+#define SIF_RTR_0_SECTION 0x6000
+#define mmSIF_RTR_CTRL_0_BASE 0x7FFC306000ull
+#define SIF_RTR_CTRL_0_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_0_SECTION 0xA000
+#define mmSIF_RTR_1_BASE 0x7FFC310000ull
+#define SIF_RTR_1_MAX_OFFSET 0x6500
+#define SIF_RTR_1_SECTION 0x6000
+#define mmSIF_RTR_CTRL_1_BASE 0x7FFC316000ull
+#define SIF_RTR_CTRL_1_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_1_SECTION 0xA000
+#define mmSIF_RTR_2_BASE 0x7FFC320000ull
+#define SIF_RTR_2_MAX_OFFSET 0x6500
+#define SIF_RTR_2_SECTION 0x6000
+#define mmSIF_RTR_CTRL_2_BASE 0x7FFC326000ull
+#define SIF_RTR_CTRL_2_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_2_SECTION 0xA000
+#define mmSIF_RTR_3_BASE 0x7FFC330000ull
+#define SIF_RTR_3_MAX_OFFSET 0x6500
+#define SIF_RTR_3_SECTION 0x6000
+#define mmSIF_RTR_CTRL_3_BASE 0x7FFC336000ull
+#define SIF_RTR_CTRL_3_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_3_SECTION 0xA000
+#define mmSIF_RTR_4_BASE 0x7FFC340000ull
+#define SIF_RTR_4_MAX_OFFSET 0x6500
+#define SIF_RTR_4_SECTION 0x6000
+#define mmSIF_RTR_CTRL_4_BASE 0x7FFC346000ull
+#define SIF_RTR_CTRL_4_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_4_SECTION 0xA000
+#define mmSIF_RTR_5_BASE 0x7FFC350000ull
+#define SIF_RTR_5_MAX_OFFSET 0x6500
+#define SIF_RTR_5_SECTION 0x6000
+#define mmSIF_RTR_CTRL_5_BASE 0x7FFC356000ull
+#define SIF_RTR_CTRL_5_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_5_SECTION 0xA000
+#define mmSIF_RTR_6_BASE 0x7FFC360000ull
+#define SIF_RTR_6_MAX_OFFSET 0x6500
+#define SIF_RTR_6_SECTION 0x6000
+#define mmSIF_RTR_CTRL_6_BASE 0x7FFC366000ull
+#define SIF_RTR_CTRL_6_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_6_SECTION 0xA000
+#define mmSIF_RTR_7_BASE 0x7FFC370000ull
+#define SIF_RTR_7_MAX_OFFSET 0x6500
+#define SIF_RTR_7_SECTION 0x6000
+#define mmSIF_RTR_CTRL_7_BASE 0x7FFC376000ull
+#define SIF_RTR_CTRL_7_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_7_SECTION 0xA000
+#define mmNIF_RTR_0_BASE 0x7FFC380000ull
+#define NIF_RTR_0_MAX_OFFSET 0x6500
+#define NIF_RTR_0_SECTION 0x6000
+#define mmNIF_RTR_CTRL_0_BASE 0x7FFC386000ull
+#define NIF_RTR_CTRL_0_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_0_SECTION 0xA000
+#define mmNIF_RTR_1_BASE 0x7FFC390000ull
+#define NIF_RTR_1_MAX_OFFSET 0x6500
+#define NIF_RTR_1_SECTION 0x6000
+#define mmNIF_RTR_CTRL_1_BASE 0x7FFC396000ull
+#define NIF_RTR_CTRL_1_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_1_SECTION 0xA000
+#define mmNIF_RTR_2_BASE 0x7FFC3A0000ull
+#define NIF_RTR_2_MAX_OFFSET 0x6500
+#define NIF_RTR_2_SECTION 0x6000
+#define mmNIF_RTR_CTRL_2_BASE 0x7FFC3A6000ull
+#define NIF_RTR_CTRL_2_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_2_SECTION 0xA000
+#define mmNIF_RTR_3_BASE 0x7FFC3B0000ull
+#define NIF_RTR_3_MAX_OFFSET 0x6500
+#define NIF_RTR_3_SECTION 0x6000
+#define mmNIF_RTR_CTRL_3_BASE 0x7FFC3B6000ull
+#define NIF_RTR_CTRL_3_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_3_SECTION 0xA000
+#define mmNIF_RTR_4_BASE 0x7FFC3C0000ull
+#define NIF_RTR_4_MAX_OFFSET 0x6500
+#define NIF_RTR_4_SECTION 0x6000
+#define mmNIF_RTR_CTRL_4_BASE 0x7FFC3C6000ull
+#define NIF_RTR_CTRL_4_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_4_SECTION 0xA000
+#define mmNIF_RTR_5_BASE 0x7FFC3D0000ull
+#define NIF_RTR_5_MAX_OFFSET 0x6500
+#define NIF_RTR_5_SECTION 0x6000
+#define mmNIF_RTR_CTRL_5_BASE 0x7FFC3D6000ull
+#define NIF_RTR_CTRL_5_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_5_SECTION 0xA000
+#define mmNIF_RTR_6_BASE 0x7FFC3E0000ull
+#define NIF_RTR_6_MAX_OFFSET 0x6500
+#define NIF_RTR_6_SECTION 0x6000
+#define mmNIF_RTR_CTRL_6_BASE 0x7FFC3E6000ull
+#define NIF_RTR_CTRL_6_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_6_SECTION 0xA000
+#define mmNIF_RTR_7_BASE 0x7FFC3F0000ull
+#define NIF_RTR_7_MAX_OFFSET 0x6500
+#define NIF_RTR_7_SECTION 0x6000
+#define mmNIF_RTR_CTRL_7_BASE 0x7FFC3F6000ull
+#define NIF_RTR_CTRL_7_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_7_SECTION 0x4B000
+#define mmCPU_CA53_CFG_BASE 0x7FFC441000ull
+#define CPU_CA53_CFG_MAX_OFFSET 0x2180
+#define CPU_CA53_CFG_SECTION 0x1000
+#define mmCPU_IF_BASE 0x7FFC442000ull
+#define CPU_IF_MAX_OFFSET 0x43C0
+#define CPU_IF_SECTION 0x2000
+#define mmCPU_TIMESTAMP_BASE 0x7FFC444000ull
+#define CPU_TIMESTAMP_MAX_OFFSET 0x1000
+#define CPU_TIMESTAMP_SECTION 0x3C000
+#define mmDMA_IF_W_S_BASE 0x7FFC480000ull
+#define DMA_IF_W_S_MAX_OFFSET 0x8380
+#define DMA_IF_W_S_SECTION 0x1000
+#define mmDMA_IF_W_S_DOWN_CH0_BASE 0x7FFC481000ull
+#define DMA_IF_W_S_DOWN_CH0_MAX_OFFSET 0xCC00
+#define DMA_IF_W_S_DOWN_CH0_SECTION 0x1000
+#define mmDMA_IF_W_S_DOWN_CH1_BASE 0x7FFC482000ull
+#define DMA_IF_W_S_DOWN_CH1_MAX_OFFSET 0xCC00
+#define DMA_IF_W_S_DOWN_CH1_SECTION 0x5000
+#define mmDMA_W_PLL_BASE 0x7FFC487000ull
+#define DMA_W_PLL_MAX_OFFSET 0x5200
+#define DMA_W_PLL_SECTION 0x1000
+#define mmIF_W_PLL_BASE 0x7FFC488000ull
+#define IF_W_PLL_MAX_OFFSET 0x5200
+#define IF_W_PLL_SECTION 0x1000
+#define mmDMA_IF_W_S_DOWN_BASE 0x7FFC489000ull
+#define DMA_IF_W_S_DOWN_MAX_OFFSET 0x1500
+#define DMA_IF_W_S_DOWN_SECTION 0x7000
+#define mmSYNC_MNGR_GLBL_W_S_BASE 0x7FFC490000ull
+#define SYNC_MNGR_GLBL_W_S_MAX_OFFSET 0x6C00
+#define SYNC_MNGR_GLBL_W_S_SECTION 0x1000
+#define mmSYNC_MNGR_OBJS_W_S_BASE 0x7FFC491000ull
+#define SYNC_MNGR_OBJS_W_S_MAX_OFFSET 0x5C00
+#define SYNC_MNGR_OBJS_W_S_SECTION 0xF000
+#define mmDMA_IF_E_S_BASE 0x7FFC4A0000ull
+#define DMA_IF_E_S_MAX_OFFSET 0x8380
+#define DMA_IF_E_S_SECTION 0x1000
+#define mmDMA_IF_E_S_DOWN_CH0_BASE 0x7FFC4A1000ull
+#define DMA_IF_E_S_DOWN_CH0_MAX_OFFSET 0xCC00
+#define DMA_IF_E_S_DOWN_CH0_SECTION 0x1000
+#define mmDMA_IF_E_S_DOWN_CH1_BASE 0x7FFC4A2000ull
+#define DMA_IF_E_S_DOWN_CH1_MAX_OFFSET 0xCC00
+#define DMA_IF_E_S_DOWN_CH1_SECTION 0x5000
+#define mmIF_E_PLL_BASE 0x7FFC4A7000ull
+#define IF_E_PLL_MAX_OFFSET 0x5200
+#define IF_E_PLL_SECTION 0x1000
+#define mmDMA_E_PLL_BASE 0x7FFC4A8000ull
+#define DMA_E_PLL_MAX_OFFSET 0x5200
+#define DMA_E_PLL_SECTION 0x1000
+#define mmDMA_IF_E_S_DOWN_BASE 0x7FFC4A9000ull
+#define DMA_IF_E_S_DOWN_MAX_OFFSET 0x1500
+#define DMA_IF_E_S_DOWN_SECTION 0x7000
+#define mmSYNC_MNGR_GLBL_E_S_BASE 0x7FFC4B0000ull
+#define SYNC_MNGR_GLBL_E_S_MAX_OFFSET 0x6C00
+#define SYNC_MNGR_GLBL_E_S_SECTION 0x1000
+#define mmSYNC_MNGR_OBJS_E_S_BASE 0x7FFC4B1000ull
+#define SYNC_MNGR_OBJS_E_S_MAX_OFFSET 0x5C00
+#define SYNC_MNGR_OBJS_E_S_SECTION 0xF000
+#define mmDMA_IF_W_N_BASE 0x7FFC4C0000ull
+#define DMA_IF_W_N_MAX_OFFSET 0x8380
+#define DMA_IF_W_N_SECTION 0x1000
+#define mmDMA_IF_W_N_DOWN_CH0_BASE 0x7FFC4C1000ull
+#define DMA_IF_W_N_DOWN_CH0_MAX_OFFSET 0xCC00
+#define DMA_IF_W_N_DOWN_CH0_SECTION 0x1000
+#define mmDMA_IF_W_N_DOWN_CH1_BASE 0x7FFC4C2000ull
+#define DMA_IF_W_N_DOWN_CH1_MAX_OFFSET 0xCC00
+#define DMA_IF_W_N_DOWN_CH1_SECTION 0x5000
+#define mmMESH_W_PLL_BASE 0x7FFC4C7000ull
+#define MESH_W_PLL_MAX_OFFSET 0x5200
+#define MESH_W_PLL_SECTION 0x1000
+#define mmSRAM_W_PLL_BASE 0x7FFC4C8000ull
+#define SRAM_W_PLL_MAX_OFFSET 0x5200
+#define SRAM_W_PLL_SECTION 0x1000
+#define mmDMA_IF_W_N_DOWN_BASE 0x7FFC4C9000ull
+#define DMA_IF_W_N_DOWN_MAX_OFFSET 0x1500
+#define DMA_IF_W_N_DOWN_SECTION 0x7000
+#define mmSYNC_MNGR_GLBL_W_N_BASE 0x7FFC4D0000ull
+#define SYNC_MNGR_GLBL_W_N_MAX_OFFSET 0x6C00
+#define SYNC_MNGR_GLBL_W_N_SECTION 0x1000
+#define mmSYNC_MNGR_OBJS_W_N_BASE 0x7FFC4D1000ull
+#define SYNC_MNGR_OBJS_W_N_MAX_OFFSET 0x5C00
+#define SYNC_MNGR_OBJS_W_N_SECTION 0xF000
+#define mmDMA_IF_E_N_BASE 0x7FFC4E0000ull
+#define DMA_IF_E_N_MAX_OFFSET 0x8380
+#define DMA_IF_E_N_SECTION 0x1000
+#define mmDMA_IF_E_N_DOWN_CH0_BASE 0x7FFC4E1000ull
+#define DMA_IF_E_N_DOWN_CH0_MAX_OFFSET 0xCC00
+#define DMA_IF_E_N_DOWN_CH0_SECTION 0x1000
+#define mmDMA_IF_E_N_DOWN_CH1_BASE 0x7FFC4E2000ull
+#define DMA_IF_E_N_DOWN_CH1_MAX_OFFSET 0xCC00
+#define DMA_IF_E_N_DOWN_CH1_SECTION 0x5000
+#define mmMESH_E_PLL_BASE 0x7FFC4E7000ull
+#define MESH_E_PLL_MAX_OFFSET 0x5200
+#define MESH_E_PLL_SECTION 0x1000
+#define mmSRAM_E_PLL_BASE 0x7FFC4E8000ull
+#define SRAM_E_PLL_MAX_OFFSET 0x5200
+#define SRAM_E_PLL_SECTION 0x1000
+#define mmDMA_IF_E_N_DOWN_BASE 0x7FFC4E9000ull
+#define DMA_IF_E_N_DOWN_MAX_OFFSET 0x1500
+#define DMA_IF_E_N_DOWN_SECTION 0x7000
+#define mmSYNC_MNGR_GLBL_E_N_BASE 0x7FFC4F0000ull
+#define SYNC_MNGR_GLBL_E_N_MAX_OFFSET 0x6C00
+#define SYNC_MNGR_GLBL_E_N_SECTION 0x1000
+#define mmSYNC_MNGR_OBJS_E_N_BASE 0x7FFC4F1000ull
+#define SYNC_MNGR_OBJS_E_N_MAX_OFFSET 0x5C00
+#define SYNC_MNGR_OBJS_E_N_SECTION 0xF000
+#define mmDMA0_CORE_BASE 0x7FFC500000ull
+#define DMA0_CORE_MAX_OFFSET 0x23C0
+#define DMA0_CORE_SECTION 0x8000
+#define mmDMA0_QM_BASE 0x7FFC508000ull
+#define DMA0_QM_MAX_OFFSET 0xD040
+#define DMA0_QM_SECTION 0x18000
+#define mmDMA1_CORE_BASE 0x7FFC520000ull
+#define DMA1_CORE_MAX_OFFSET 0x23C0
+#define DMA1_CORE_SECTION 0x8000
+#define mmDMA1_QM_BASE 0x7FFC528000ull
+#define DMA1_QM_MAX_OFFSET 0xD040
+#define DMA1_QM_SECTION 0x18000
+#define mmDMA2_CORE_BASE 0x7FFC540000ull
+#define DMA2_CORE_MAX_OFFSET 0x23C0
+#define DMA2_CORE_SECTION 0x8000
+#define mmDMA2_QM_BASE 0x7FFC548000ull
+#define DMA2_QM_MAX_OFFSET 0xD040
+#define DMA2_QM_SECTION 0x18000
+#define mmDMA3_CORE_BASE 0x7FFC560000ull
+#define DMA3_CORE_MAX_OFFSET 0x23C0
+#define DMA3_CORE_SECTION 0x8000
+#define mmDMA3_QM_BASE 0x7FFC568000ull
+#define DMA3_QM_MAX_OFFSET 0xD040
+#define DMA3_QM_SECTION 0x18000
+#define mmDMA4_CORE_BASE 0x7FFC580000ull
+#define DMA4_CORE_MAX_OFFSET 0x23C0
+#define DMA4_CORE_SECTION 0x8000
+#define mmDMA4_QM_BASE 0x7FFC588000ull
+#define DMA4_QM_MAX_OFFSET 0xD040
+#define DMA4_QM_SECTION 0x18000
+#define mmDMA5_CORE_BASE 0x7FFC5A0000ull
+#define DMA5_CORE_MAX_OFFSET 0x23C0
+#define DMA5_CORE_SECTION 0x8000
+#define mmDMA5_QM_BASE 0x7FFC5A8000ull
+#define DMA5_QM_MAX_OFFSET 0xD040
+#define DMA5_QM_SECTION 0x18000
+#define mmDMA6_CORE_BASE 0x7FFC5C0000ull
+#define DMA6_CORE_MAX_OFFSET 0x23C0
+#define DMA6_CORE_SECTION 0x8000
+#define mmDMA6_QM_BASE 0x7FFC5C8000ull
+#define DMA6_QM_MAX_OFFSET 0xD040
+#define DMA6_QM_SECTION 0x18000
+#define mmDMA7_CORE_BASE 0x7FFC5E0000ull
+#define DMA7_CORE_MAX_OFFSET 0x23C0
+#define DMA7_CORE_SECTION 0x8000
+#define mmDMA7_QM_BASE 0x7FFC5E8000ull
+#define DMA7_QM_MAX_OFFSET 0xD040
+#define DMA7_QM_SECTION 0x18000
+#define mmHBM0_BASE 0x7FFC600000ull
+#define HBM0_MAX_OFFSET 0x8F58
+#define HBM0_SECTION 0x80000
+#define mmHBM1_BASE 0x7FFC680000ull
+#define HBM1_MAX_OFFSET 0x8F58
+#define HBM1_SECTION 0x80000
+#define mmHBM2_BASE 0x7FFC700000ull
+#define HBM2_MAX_OFFSET 0x8F58
+#define HBM2_SECTION 0x80000
+#define mmHBM3_BASE 0x7FFC780000ull
+#define HBM3_MAX_OFFSET 0x8F58
+#define HBM3_SECTION 0x80000
+#define mmGIC_BASE 0x7FFC800000ull
+#define GIC_MAX_OFFSET 0x10000
+#define GIC_SECTION 0x401000
+#define mmPCIE_WRAP_BASE 0x7FFCC01000ull
+#define PCIE_WRAP_MAX_OFFSET 0xDF00
+#define PCIE_WRAP_SECTION 0x1000
+#define mmPCIE_DBI_BASE 0x7FFCC02000ull
+#define PCIE_DBI_MAX_OFFSET 0xC040
+#define PCIE_DBI_SECTION 0x2000
+#define mmPCIE_CORE_BASE 0x7FFCC04000ull
+#define PCIE_CORE_MAX_OFFSET 0x9BC0
+#define PCIE_CORE_SECTION 0x3000
+#define mmPCIE_AUX_BASE 0x7FFCC07000ull
+#define PCIE_AUX_MAX_OFFSET 0x9C40
+#define PCIE_AUX_SECTION 0x9000
+#define mmPCIE_PHY_BASE 0x7FFCC10000ull
+#define PCIE_PHY_MAX_OFFSET 0x9640
+#define PCIE_PHY_SECTION 0x1000
+#define mmMMU_UP_BASE 0x7FFCC11000ull
+#define MMU_UP_MAX_OFFSET 0x7000
+#define MMU_UP_SECTION 0x1000
+#define mmSTLB_BASE 0x7FFCC12000ull
+#define STLB_MAX_OFFSET 0x8800
+#define STLB_SECTION 0x1000
+#define mmPCIE_MSI_BASE 0x7FFCC13000ull
+#define PCIE_MSI_MAX_OFFSET 0x8000
+#define PCIE_MSI_SECTION 0x2D000
+#define mmPSOC_I2C_M0_BASE 0x7FFCC40000ull
+#define PSOC_I2C_M0_MAX_OFFSET 0x1000
+#define PSOC_I2C_M0_SECTION 0x1000
+#define mmPSOC_I2C_M1_BASE 0x7FFCC41000ull
+#define PSOC_I2C_M1_MAX_OFFSET 0x1000
+#define PSOC_I2C_M1_SECTION 0x1000
+#define mmPSOC_I2C_S_BASE 0x7FFCC42000ull
+#define PSOC_I2C_S_MAX_OFFSET 0x1000
+#define PSOC_I2C_S_SECTION 0x1000
+#define mmPSOC_SPI_BASE 0x7FFCC43000ull
+#define PSOC_SPI_MAX_OFFSET 0x1000
+#define PSOC_SPI_SECTION 0x2000
+#define mmPSOC_UART_0_BASE 0x7FFCC45000ull
+#define PSOC_UART_0_MAX_OFFSET 0x1000
+#define PSOC_UART_0_SECTION 0x1000
+#define mmPSOC_UART_1_BASE 0x7FFCC46000ull
+#define PSOC_UART_1_MAX_OFFSET 0x1000
+#define PSOC_UART_1_SECTION 0x1000
+#define mmPSOC_TIMER_BASE 0x7FFCC47000ull
+#define PSOC_TIMER_MAX_OFFSET 0x1000
+#define PSOC_TIMER_SECTION 0x1000
+#define mmPSOC_WDOG_BASE 0x7FFCC48000ull
+#define PSOC_WDOG_MAX_OFFSET 0x1000
+#define PSOC_WDOG_SECTION 0x1000
+#define mmPSOC_TIMESTAMP_BASE 0x7FFCC49000ull
+#define PSOC_TIMESTAMP_MAX_OFFSET 0x1000
+#define PSOC_TIMESTAMP_SECTION 0x1000
+#define mmPSOC_EFUSE_BASE 0x7FFCC4A000ull
+#define PSOC_EFUSE_MAX_OFFSET 0x3040
+#define PSOC_EFUSE_SECTION 0x1000
+#define mmPSOC_GLOBAL_CONF_BASE 0x7FFCC4B000ull
+#define PSOC_GLOBAL_CONF_MAX_OFFSET 0xCD80
+#define PSOC_GLOBAL_CONF_SECTION 0x1000
+#define mmPSOC_GPIO0_BASE 0x7FFCC4C000ull
+#define PSOC_GPIO0_MAX_OFFSET 0x1000
+#define PSOC_GPIO0_SECTION 0x1000
+#define mmPSOC_GPIO1_BASE 0x7FFCC4D000ull
+#define PSOC_GPIO1_MAX_OFFSET 0x1000
+#define PSOC_GPIO1_SECTION 0x1000
+#define mmPSOC_BTL_BASE 0x7FFCC4E000ull
+#define PSOC_BTL_MAX_OFFSET 0x1480
+#define PSOC_BTL_SECTION 0x1000
+#define mmPSOC_CS_TRACE_BASE 0x7FFCC4F000ull
+#define PSOC_CS_TRACE_MAX_OFFSET 0x1680
+#define PSOC_CS_TRACE_SECTION 0x1000
+#define mmPSOC_GPIO2_BASE 0x7FFCC50000ull
+#define PSOC_GPIO2_MAX_OFFSET 0x1000
+#define PSOC_GPIO2_SECTION 0x1000
+#define mmPSOC_GPIO3_BASE 0x7FFCC51000ull
+#define PSOC_GPIO3_MAX_OFFSET 0x1000
+#define PSOC_GPIO3_SECTION 0x1000
+#define mmPSOC_GPIO4_BASE 0x7FFCC52000ull
+#define PSOC_GPIO4_MAX_OFFSET 0x1000
+#define PSOC_GPIO4_SECTION 0x1000
+#define mmPSOC_DFT_EFUSE_BASE 0x7FFCC53000ull
+#define PSOC_DFT_EFUSE_MAX_OFFSET 0x3040
+#define PSOC_DFT_EFUSE_SECTION 0x1000
+#define mmPSOC_RPM_0_BASE 0x7FFCC54000ull
+#define PSOC_RPM_0_MAX_OFFSET 0x8800
+#define PSOC_RPM_0_SECTION 0x1000
+#define mmPSOC_RPM_1_BASE 0x7FFCC55000ull
+#define PSOC_RPM_1_MAX_OFFSET 0x8800
+#define PSOC_RPM_1_SECTION 0x1000
+#define mmPSOC_RPM_2_BASE 0x7FFCC56000ull
+#define PSOC_RPM_2_MAX_OFFSET 0x8800
+#define PSOC_RPM_2_SECTION 0x1000
+#define mmPSOC_RPM_3_BASE 0x7FFCC57000ull
+#define PSOC_RPM_3_MAX_OFFSET 0x8800
+#define PSOC_RPM_3_SECTION 0x19000
+#define mmPSOC_CPU_PLL_BASE 0x7FFCC70000ull
+#define PSOC_CPU_PLL_MAX_OFFSET 0x5200
+#define PSOC_CPU_PLL_SECTION 0x1000
+#define mmPSOC_MME_PLL_BASE 0x7FFCC71000ull
+#define PSOC_MME_PLL_MAX_OFFSET 0x5200
+#define PSOC_MME_PLL_SECTION 0x1000
+#define mmPSOC_PCI_PLL_BASE 0x7FFCC72000ull
+#define PSOC_PCI_PLL_MAX_OFFSET 0x5200
+#define PSOC_PCI_PLL_SECTION 0x1000
+#define mmPSOC_TPC_PLL_BASE 0x7FFCC73000ull
+#define PSOC_TPC_PLL_MAX_OFFSET 0x5200
+#define PSOC_TPC_PLL_SECTION 0x1000
+#define mmPSOC_HBM_PLL_BASE 0x7FFCC74000ull
+#define PSOC_HBM_PLL_MAX_OFFSET 0x5200
+#define PSOC_HBM_PLL_SECTION 0x1000
+#define mmPSOC_PM_BASE 0x7FFCC75000ull
+#define PSOC_PM_MAX_OFFSET 0x1F00
+#define PSOC_PM_SECTION 0x1000
+#define mmPSOC_TS_BASE 0x7FFCC76000ull
+#define PSOC_TS_MAX_OFFSET 0xE640
+#define PSOC_TS_SECTION 0x2000
+#define mmPSOC_PWM0_BASE 0x7FFCC78000ull
+#define PSOC_PWM0_MAX_OFFSET 0x5800
+#define PSOC_PWM0_SECTION 0x1000
+#define mmPSOC_PWM1_BASE 0x7FFCC79000ull
+#define PSOC_PWM1_MAX_OFFSET 0x5800
+#define PSOC_PWM1_SECTION 0x1000
+#define mmPSOC_PWM2_BASE 0x7FFCC7A000ull
+#define PSOC_PWM2_MAX_OFFSET 0x5800
+#define PSOC_PWM2_SECTION 0x1000
+#define mmPSOC_PWM3_BASE 0x7FFCC7B000ull
+#define PSOC_PWM3_MAX_OFFSET 0x5800
+#define PSOC_PWM3_SECTION 0x1000
+#define mmPSOC_GPIO5_BASE 0x7FFCC7C000ull
+#define PSOC_GPIO5_MAX_OFFSET 0x1000
+#define PSOC_GPIO5_SECTION 0x1000
+#define mmPSOC_GPIO6_BASE 0x7FFCC7D000ull
+#define PSOC_GPIO6_MAX_OFFSET 0x1000
+#define PSOC_GPIO6_SECTION 0x3000
+#define mmPCIE_PMA_0_BASE 0x7FFCC80000ull
+#define PCIE_PMA_0_MAX_OFFSET 0x10003
+#define PCIE_PMA_0_SECTION 0x10000
+#define mmPCIE_PMA_1_BASE 0x7FFCC90000ull
+#define PCIE_PMA_1_MAX_OFFSET 0x10003
+#define PCIE_PMA_1_SECTION 0x10000
+#define mmPCIE_PMA_2_BASE 0x7FFCCA0000ull
+#define PCIE_PMA_2_MAX_OFFSET 0x10003
+#define PCIE_PMA_2_SECTION 0x10000
+#define mmPCIE_PMA_3_BASE 0x7FFCCB0000ull
+#define PCIE_PMA_3_MAX_OFFSET 0x10003
+#define PCIE_PMA_3_SECTION 0x10000
+#define mmNIC0_MAC_CH0_BASE 0x7FFCCC0000ull
+#define NIC0_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC0_MAC_CH0_SECTION 0x1000
+#define mmNIC0_MAC_CH1_BASE 0x7FFCCC1000ull
+#define NIC0_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC0_MAC_CH1_SECTION 0x1000
+#define mmNIC0_MAC_CH2_BASE 0x7FFCCC2000ull
+#define NIC0_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC0_MAC_CH2_SECTION 0x1000
+#define mmNIC0_MAC_CH3_BASE 0x7FFCCC3000ull
+#define NIC0_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC0_MAC_CH3_SECTION 0x1000
+#define mmNIC0_STAT_BASE 0x7FFCCC4000ull
+#define NIC0_STAT_MAX_OFFSET 0x4D00
+#define NIC0_STAT_SECTION 0x1000
+#define mmNIC0_MAC_XPCS91_BASE 0x7FFCCC5000ull
+#define NIC0_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC0_MAC_XPCS91_SECTION 0x3000
+#define mmNIC0_MAC_CORE_BASE 0x7FFCCC8000ull
+#define NIC0_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC0_MAC_CORE_SECTION 0x1000
+#define mmNIC0_MAC_AUX_BASE 0x7FFCCC9000ull
+#define NIC0_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC0_MAC_AUX_SECTION 0xF000
+#define mmNIC0_PHY_BASE 0x7FFCCD8000ull
+#define NIC0_PHY_MAX_OFFSET 0x3400
+#define NIC0_PHY_SECTION 0x8000
+#define mmNIC0_QM0_BASE 0x7FFCCE0000ull
+#define NIC0_QM0_MAX_OFFSET 0xD040
+#define NIC0_QM0_SECTION 0x2000
+#define mmNIC0_QM1_BASE 0x7FFCCE2000ull
+#define NIC0_QM1_MAX_OFFSET 0xD040
+#define NIC0_QM1_SECTION 0x2000
+#define mmNIC0_QPC0_BASE 0x7FFCCE4000ull
+#define NIC0_QPC0_MAX_OFFSET 0x7140
+#define NIC0_QPC0_SECTION 0x1000
+#define mmNIC0_QPC1_BASE 0x7FFCCE5000ull
+#define NIC0_QPC1_MAX_OFFSET 0x7140
+#define NIC0_QPC1_SECTION 0x3000
+#define mmNIC0_RXB_BASE 0x7FFCCE8000ull
+#define NIC0_RXB_MAX_OFFSET 0x6040
+#define NIC0_RXB_SECTION 0x1000
+#define mmNIC0_RXE0_BASE 0x7FFCCE9000ull
+#define NIC0_RXE0_MAX_OFFSET 0x2FC0
+#define NIC0_RXE0_SECTION 0x1000
+#define mmNIC0_RXE1_BASE 0x7FFCCEA000ull
+#define NIC0_RXE1_MAX_OFFSET 0x2FC0
+#define NIC0_RXE1_SECTION 0x1000
+#define mmNIC0_RX_GW_BASE 0x7FFCCEB000ull
+#define NIC0_RX_GW_MAX_OFFSET 0x4540
+#define NIC0_RX_GW_SECTION 0x5000
+#define mmNIC0_TXS0_BASE 0x7FFCCF0000ull
+#define NIC0_TXS0_MAX_OFFSET 0x19C0
+#define NIC0_TXS0_SECTION 0x1000
+#define mmNIC0_TXS1_BASE 0x7FFCCF1000ull
+#define NIC0_TXS1_MAX_OFFSET 0x19C0
+#define NIC0_TXS1_SECTION 0x1000
+#define mmNIC0_TXE0_BASE 0x7FFCCF2000ull
+#define NIC0_TXE0_MAX_OFFSET 0x2040
+#define NIC0_TXE0_SECTION 0x1000
+#define mmNIC0_TXE1_BASE 0x7FFCCF3000ull
+#define NIC0_TXE1_MAX_OFFSET 0x2040
+#define NIC0_TXE1_SECTION 0x1000
+#define mmNIC0_TXB_BASE 0x7FFCCF4000ull
+#define NIC0_TXB_MAX_OFFSET 0xD400
+#define NIC0_TXB_SECTION 0x1000
+#define mmNIC0_TMR_BASE 0x7FFCCF5000ull
+#define NIC0_TMR_MAX_OFFSET 0x1600
+#define NIC0_TMR_SECTION 0x1000
+#define mmNIC0_TX_GW_BASE 0x7FFCCF6000ull
+#define NIC0_TX_GW_MAX_OFFSET 0x1400
+#define NIC0_TX_GW_SECTION 0x2000
+#define mmNIC0_TS_BASE 0x7FFCCF8000ull
+#define NIC0_TS_MAX_OFFSET 0xE640
+#define NIC0_TS_SECTION 0x1000
+#define mmNIC0_PLL_BASE 0x7FFCCF9000ull
+#define NIC0_PLL_MAX_OFFSET 0x5200
+#define NIC0_PLL_SECTION 0x1000
+#define mmNIC0_PM_BASE 0x7FFCCFA000ull
+#define NIC0_PM_MAX_OFFSET 0x1F00
+#define NIC0_PM_SECTION 0x6000
+#define mmNIC1_MAC_CH0_BASE 0x7FFCD00000ull
+#define NIC1_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC1_MAC_CH0_SECTION 0x1000
+#define mmNIC1_MAC_CH1_BASE 0x7FFCD01000ull
+#define NIC1_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC1_MAC_CH1_SECTION 0x1000
+#define mmNIC1_MAC_CH2_BASE 0x7FFCD02000ull
+#define NIC1_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC1_MAC_CH2_SECTION 0x1000
+#define mmNIC1_MAC_CH3_BASE 0x7FFCD03000ull
+#define NIC1_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC1_MAC_CH3_SECTION 0x1000
+#define mmNIC1_STAT_BASE 0x7FFCD04000ull
+#define NIC1_STAT_MAX_OFFSET 0x4D00
+#define NIC1_STAT_SECTION 0x1000
+#define mmNIC1_MAC_XPCS91_BASE 0x7FFCD05000ull
+#define NIC1_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC1_MAC_XPCS91_SECTION 0x3000
+#define mmNIC1_MAC_CORE_BASE 0x7FFCD08000ull
+#define NIC1_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC1_MAC_CORE_SECTION 0x1000
+#define mmNIC1_MAC_AUX_BASE 0x7FFCD09000ull
+#define NIC1_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC1_MAC_AUX_SECTION 0xF000
+#define mmNIC1_PHY_BASE 0x7FFCD18000ull
+#define NIC1_PHY_MAX_OFFSET 0x3400
+#define NIC1_PHY_SECTION 0x8000
+#define mmNIC1_QM0_BASE 0x7FFCD20000ull
+#define NIC1_QM0_MAX_OFFSET 0xD040
+#define NIC1_QM0_SECTION 0x2000
+#define mmNIC1_QM1_BASE 0x7FFCD22000ull
+#define NIC1_QM1_MAX_OFFSET 0xD040
+#define NIC1_QM1_SECTION 0x2000
+#define mmNIC1_QPC0_BASE 0x7FFCD24000ull
+#define NIC1_QPC0_MAX_OFFSET 0x7140
+#define NIC1_QPC0_SECTION 0x1000
+#define mmNIC1_QPC1_BASE 0x7FFCD25000ull
+#define NIC1_QPC1_MAX_OFFSET 0x7140
+#define NIC1_QPC1_SECTION 0x3000
+#define mmNIC1_RXB_BASE 0x7FFCD28000ull
+#define NIC1_RXB_MAX_OFFSET 0x6040
+#define NIC1_RXB_SECTION 0x1000
+#define mmNIC1_RXE0_BASE 0x7FFCD29000ull
+#define NIC1_RXE0_MAX_OFFSET 0x2FC0
+#define NIC1_RXE0_SECTION 0x1000
+#define mmNIC1_RXE1_BASE 0x7FFCD2A000ull
+#define NIC1_RXE1_MAX_OFFSET 0x2FC0
+#define NIC1_RXE1_SECTION 0x1000
+#define mmNIC1_RX_GW_BASE 0x7FFCD2B000ull
+#define NIC1_RX_GW_MAX_OFFSET 0x4540
+#define NIC1_RX_GW_SECTION 0x5000
+#define mmNIC1_TXS0_BASE 0x7FFCD30000ull
+#define NIC1_TXS0_MAX_OFFSET 0x19C0
+#define NIC1_TXS0_SECTION 0x1000
+#define mmNIC1_TXS1_BASE 0x7FFCD31000ull
+#define NIC1_TXS1_MAX_OFFSET 0x19C0
+#define NIC1_TXS1_SECTION 0x1000
+#define mmNIC1_TXE0_BASE 0x7FFCD32000ull
+#define NIC1_TXE0_MAX_OFFSET 0x2040
+#define NIC1_TXE0_SECTION 0x1000
+#define mmNIC1_TXE1_BASE 0x7FFCD33000ull
+#define NIC1_TXE1_MAX_OFFSET 0x2040
+#define NIC1_TXE1_SECTION 0x1000
+#define mmNIC1_TXB_BASE 0x7FFCD34000ull
+#define NIC1_TXB_MAX_OFFSET 0xD400
+#define NIC1_TXB_SECTION 0x1000
+#define mmNIC1_TMR_BASE 0x7FFCD35000ull
+#define NIC1_TMR_MAX_OFFSET 0x1600
+#define NIC1_TMR_SECTION 0x1000
+#define mmNIC1_TX_GW_BASE 0x7FFCD36000ull
+#define NIC1_TX_GW_MAX_OFFSET 0x1400
+#define NIC1_TX_GW_SECTION 0x2000
+#define mmNIC1_TS_BASE 0x7FFCD38000ull
+#define NIC1_TS_MAX_OFFSET 0xE640
+#define NIC1_TS_SECTION 0x1000
+#define mmNIC1_PLL_BASE 0x7FFCD39000ull
+#define NIC1_PLL_MAX_OFFSET 0x5200
+#define NIC1_PLL_SECTION 0x1000
+#define mmNIC1_PM_BASE 0x7FFCD3A000ull
+#define NIC1_PM_MAX_OFFSET 0x1F00
+#define NIC1_PM_SECTION 0x6000
+#define mmNIC2_MAC_CH0_BASE 0x7FFCD40000ull
+#define NIC2_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC2_MAC_CH0_SECTION 0x1000
+#define mmNIC2_MAC_CH1_BASE 0x7FFCD41000ull
+#define NIC2_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC2_MAC_CH1_SECTION 0x1000
+#define mmNIC2_MAC_CH2_BASE 0x7FFCD42000ull
+#define NIC2_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC2_MAC_CH2_SECTION 0x1000
+#define mmNIC2_MAC_CH3_BASE 0x7FFCD43000ull
+#define NIC2_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC2_MAC_CH3_SECTION 0x1000
+#define mmNIC2_STAT_BASE 0x7FFCD44000ull
+#define NIC2_STAT_MAX_OFFSET 0x4D00
+#define NIC2_STAT_SECTION 0x1000
+#define mmNIC2_MAC_XPCS91_BASE 0x7FFCD45000ull
+#define NIC2_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC2_MAC_XPCS91_SECTION 0x3000
+#define mmNIC2_MAC_CORE_BASE 0x7FFCD48000ull
+#define NIC2_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC2_MAC_CORE_SECTION 0x1000
+#define mmNIC2_MAC_AUX_BASE 0x7FFCD49000ull
+#define NIC2_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC2_MAC_AUX_SECTION 0xF000
+#define mmNIC2_PHY_BASE 0x7FFCD58000ull
+#define NIC2_PHY_MAX_OFFSET 0x3400
+#define NIC2_PHY_SECTION 0x8000
+#define mmNIC2_QM0_BASE 0x7FFCD60000ull
+#define NIC2_QM0_MAX_OFFSET 0xD040
+#define NIC2_QM0_SECTION 0x2000
+#define mmNIC2_QM1_BASE 0x7FFCD62000ull
+#define NIC2_QM1_MAX_OFFSET 0xD040
+#define NIC2_QM1_SECTION 0x2000
+#define mmNIC2_QPC0_BASE 0x7FFCD64000ull
+#define NIC2_QPC0_MAX_OFFSET 0x7140
+#define NIC2_QPC0_SECTION 0x1000
+#define mmNIC2_QPC1_BASE 0x7FFCD65000ull
+#define NIC2_QPC1_MAX_OFFSET 0x7140
+#define NIC2_QPC1_SECTION 0x3000
+#define mmNIC2_RXB_BASE 0x7FFCD68000ull
+#define NIC2_RXB_MAX_OFFSET 0x6040
+#define NIC2_RXB_SECTION 0x1000
+#define mmNIC2_RXE0_BASE 0x7FFCD69000ull
+#define NIC2_RXE0_MAX_OFFSET 0x2FC0
+#define NIC2_RXE0_SECTION 0x1000
+#define mmNIC2_RXE1_BASE 0x7FFCD6A000ull
+#define NIC2_RXE1_MAX_OFFSET 0x2FC0
+#define NIC2_RXE1_SECTION 0x1000
+#define mmNIC2_RX_GW_BASE 0x7FFCD6B000ull
+#define NIC2_RX_GW_MAX_OFFSET 0x4540
+#define NIC2_RX_GW_SECTION 0x5000
+#define mmNIC2_TXS0_BASE 0x7FFCD70000ull
+#define NIC2_TXS0_MAX_OFFSET 0x19C0
+#define NIC2_TXS0_SECTION 0x1000
+#define mmNIC2_TXS1_BASE 0x7FFCD71000ull
+#define NIC2_TXS1_MAX_OFFSET 0x19C0
+#define NIC2_TXS1_SECTION 0x1000
+#define mmNIC2_TXE0_BASE 0x7FFCD72000ull
+#define NIC2_TXE0_MAX_OFFSET 0x2040
+#define NIC2_TXE0_SECTION 0x1000
+#define mmNIC2_TXE1_BASE 0x7FFCD73000ull
+#define NIC2_TXE1_MAX_OFFSET 0x2040
+#define NIC2_TXE1_SECTION 0x1000
+#define mmNIC2_TXB_BASE 0x7FFCD74000ull
+#define NIC2_TXB_MAX_OFFSET 0xD400
+#define NIC2_TXB_SECTION 0x1000
+#define mmNIC2_TMR_BASE 0x7FFCD75000ull
+#define NIC2_TMR_MAX_OFFSET 0x1600
+#define NIC2_TMR_SECTION 0x1000
+#define mmNIC2_TX_GW_BASE 0x7FFCD76000ull
+#define NIC2_TX_GW_MAX_OFFSET 0x1400
+#define NIC2_TX_GW_SECTION 0x2000
+#define mmNIC2_HBM_PLL_BASE 0x7FFCD78000ull
+#define NIC2_HBM_PLL_MAX_OFFSET 0x5200
+#define NIC2_HBM_PLL_SECTION 0x1000
+#define mmNIC2_MME_PLL_BASE 0x7FFCD79000ull
+#define NIC2_MME_PLL_MAX_OFFSET 0x5200
+#define NIC2_MME_PLL_SECTION 0x1000
+#define mmNIC2_TPC_PLL_BASE 0x7FFCD7A000ull
+#define NIC2_TPC_PLL_MAX_OFFSET 0x5200
+#define NIC2_TPC_PLL_SECTION 0x6000
+#define mmNIC3_MAC_CH0_BASE 0x7FFCD80000ull
+#define NIC3_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC3_MAC_CH0_SECTION 0x1000
+#define mmNIC3_MAC_CH1_BASE 0x7FFCD81000ull
+#define NIC3_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC3_MAC_CH1_SECTION 0x1000
+#define mmNIC3_MAC_CH2_BASE 0x7FFCD82000ull
+#define NIC3_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC3_MAC_CH2_SECTION 0x1000
+#define mmNIC3_MAC_CH3_BASE 0x7FFCD83000ull
+#define NIC3_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC3_MAC_CH3_SECTION 0x1000
+#define mmNIC3_STAT_BASE 0x7FFCD84000ull
+#define NIC3_STAT_MAX_OFFSET 0x4D00
+#define NIC3_STAT_SECTION 0x1000
+#define mmNIC3_MAC_XPCS91_BASE 0x7FFCD85000ull
+#define NIC3_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC3_MAC_XPCS91_SECTION 0x3000
+#define mmNIC3_MAC_CORE_BASE 0x7FFCD88000ull
+#define NIC3_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC3_MAC_CORE_SECTION 0x1000
+#define mmNIC3_MAC_AUX_BASE 0x7FFCD89000ull
+#define NIC3_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC3_MAC_AUX_SECTION 0xF000
+#define mmNIC3_PHY_BASE 0x7FFCD98000ull
+#define NIC3_PHY_MAX_OFFSET 0x3400
+#define NIC3_PHY_SECTION 0x8000
+#define mmNIC3_QM0_BASE 0x7FFCDA0000ull
+#define NIC3_QM0_MAX_OFFSET 0xD040
+#define NIC3_QM0_SECTION 0x2000
+#define mmNIC3_QM1_BASE 0x7FFCDA2000ull
+#define NIC3_QM1_MAX_OFFSET 0xD040
+#define NIC3_QM1_SECTION 0x2000
+#define mmNIC3_QPC0_BASE 0x7FFCDA4000ull
+#define NIC3_QPC0_MAX_OFFSET 0x7140
+#define NIC3_QPC0_SECTION 0x1000
+#define mmNIC3_QPC1_BASE 0x7FFCDA5000ull
+#define NIC3_QPC1_MAX_OFFSET 0x7140
+#define NIC3_QPC1_SECTION 0x3000
+#define mmNIC3_RXB_BASE 0x7FFCDA8000ull
+#define NIC3_RXB_MAX_OFFSET 0x6040
+#define NIC3_RXB_SECTION 0x1000
+#define mmNIC3_RXE0_BASE 0x7FFCDA9000ull
+#define NIC3_RXE0_MAX_OFFSET 0x2FC0
+#define NIC3_RXE0_SECTION 0x1000
+#define mmNIC3_RXE1_BASE 0x7FFCDAA000ull
+#define NIC3_RXE1_MAX_OFFSET 0x2FC0
+#define NIC3_RXE1_SECTION 0x1000
+#define mmNIC3_RX_GW_BASE 0x7FFCDAB000ull
+#define NIC3_RX_GW_MAX_OFFSET 0x4540
+#define NIC3_RX_GW_SECTION 0x5000
+#define mmNIC3_TXS0_BASE 0x7FFCDB0000ull
+#define NIC3_TXS0_MAX_OFFSET 0x19C0
+#define NIC3_TXS0_SECTION 0x1000
+#define mmNIC3_TXS1_BASE 0x7FFCDB1000ull
+#define NIC3_TXS1_MAX_OFFSET 0x19C0
+#define NIC3_TXS1_SECTION 0x1000
+#define mmNIC3_TXE0_BASE 0x7FFCDB2000ull
+#define NIC3_TXE0_MAX_OFFSET 0x2040
+#define NIC3_TXE0_SECTION 0x1000
+#define mmNIC3_TXE1_BASE 0x7FFCDB3000ull
+#define NIC3_TXE1_MAX_OFFSET 0x2040
+#define NIC3_TXE1_SECTION 0x1000
+#define mmNIC3_TXB_BASE 0x7FFCDB4000ull
+#define NIC3_TXB_MAX_OFFSET 0xD400
+#define NIC3_TXB_SECTION 0x1000
+#define mmNIC3_TMR_BASE 0x7FFCDB5000ull
+#define NIC3_TMR_MAX_OFFSET 0x1600
+#define NIC3_TMR_SECTION 0x1000
+#define mmNIC3_TX_GW_BASE 0x7FFCDB6000ull
+#define NIC3_TX_GW_MAX_OFFSET 0x1400
+#define NIC3_TX_GW_SECTION 0x2000
+#define mmNIC3_TS_BASE 0x7FFCDB8000ull
+#define NIC3_TS_MAX_OFFSET 0xE640
+#define NIC3_TS_SECTION 0x2000
+#define mmNIC3_PM_BASE 0x7FFCDBA000ull
+#define NIC3_PM_MAX_OFFSET 0x1F00
+#define NIC3_PM_SECTION 0x6000
+#define mmNIC4_MAC_CH0_BASE 0x7FFCDC0000ull
+#define NIC4_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC4_MAC_CH0_SECTION 0x1000
+#define mmNIC4_MAC_CH1_BASE 0x7FFCDC1000ull
+#define NIC4_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC4_MAC_CH1_SECTION 0x1000
+#define mmNIC4_MAC_CH2_BASE 0x7FFCDC2000ull
+#define NIC4_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC4_MAC_CH2_SECTION 0x1000
+#define mmNIC4_MAC_CH3_BASE 0x7FFCDC3000ull
+#define NIC4_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC4_MAC_CH3_SECTION 0x1000
+#define mmNIC4_STAT_BASE 0x7FFCDC4000ull
+#define NIC4_STAT_MAX_OFFSET 0x4D00
+#define NIC4_STAT_SECTION 0x1000
+#define mmNIC4_MAC_XPCS91_BASE 0x7FFCDC5000ull
+#define NIC4_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC4_MAC_XPCS91_SECTION 0x3000
+#define mmNIC4_MAC_CORE_BASE 0x7FFCDC8000ull
+#define NIC4_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC4_MAC_CORE_SECTION 0x1000
+#define mmNIC4_MAC_AUX_BASE 0x7FFCDC9000ull
+#define NIC4_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC4_MAC_AUX_SECTION 0xF000
+#define mmNIC4_PHY_BASE 0x7FFCDD8000ull
+#define NIC4_PHY_MAX_OFFSET 0x3400
+#define NIC4_PHY_SECTION 0x8000
+#define mmNIC4_QM0_BASE 0x7FFCDE0000ull
+#define NIC4_QM0_MAX_OFFSET 0xD040
+#define NIC4_QM0_SECTION 0x2000
+#define mmNIC4_QM1_BASE 0x7FFCDE2000ull
+#define NIC4_QM1_MAX_OFFSET 0xD040
+#define NIC4_QM1_SECTION 0x2000
+#define mmNIC4_QPC0_BASE 0x7FFCDE4000ull
+#define NIC4_QPC0_MAX_OFFSET 0x7140
+#define NIC4_QPC0_SECTION 0x1000
+#define mmNIC4_QPC1_BASE 0x7FFCDE5000ull
+#define NIC4_QPC1_MAX_OFFSET 0x7140
+#define NIC4_QPC1_SECTION 0x3000
+#define mmNIC4_RXB_BASE 0x7FFCDE8000ull
+#define NIC4_RXB_MAX_OFFSET 0x6040
+#define NIC4_RXB_SECTION 0x1000
+#define mmNIC4_RXE0_BASE 0x7FFCDE9000ull
+#define NIC4_RXE0_MAX_OFFSET 0x2FC0
+#define NIC4_RXE0_SECTION 0x1000
+#define mmNIC4_RXE1_BASE 0x7FFCDEA000ull
+#define NIC4_RXE1_MAX_OFFSET 0x2FC0
+#define NIC4_RXE1_SECTION 0x1000
+#define mmNIC4_RX_GW_BASE 0x7FFCDEB000ull
+#define NIC4_RX_GW_MAX_OFFSET 0x4540
+#define NIC4_RX_GW_SECTION 0x5000
+#define mmNIC4_TXS0_BASE 0x7FFCDF0000ull
+#define NIC4_TXS0_MAX_OFFSET 0x19C0
+#define NIC4_TXS0_SECTION 0x1000
+#define mmNIC4_TXS1_BASE 0x7FFCDF1000ull
+#define NIC4_TXS1_MAX_OFFSET 0x19C0
+#define NIC4_TXS1_SECTION 0x1000
+#define mmNIC4_TXE0_BASE 0x7FFCDF2000ull
+#define NIC4_TXE0_MAX_OFFSET 0x2040
+#define NIC4_TXE0_SECTION 0x1000
+#define mmNIC4_TXE1_BASE 0x7FFCDF3000ull
+#define NIC4_TXE1_MAX_OFFSET 0x2040
+#define NIC4_TXE1_SECTION 0x1000
+#define mmNIC4_TXB_BASE 0x7FFCDF4000ull
+#define NIC4_TXB_MAX_OFFSET 0xD400
+#define NIC4_TXB_SECTION 0x1000
+#define mmNIC4_TMR_BASE 0x7FFCDF5000ull
+#define NIC4_TMR_MAX_OFFSET 0x1600
+#define NIC4_TMR_SECTION 0x1000
+#define mmNIC4_TX_GW_BASE 0x7FFCDF6000ull
+#define NIC4_TX_GW_MAX_OFFSET 0x1400
+#define NIC4_TX_GW_SECTION 0x10000
+#define mmTPC0_CFG_BASE 0x7FFCE06000ull
+#define TPC0_CFG_MAX_OFFSET 0xE400
+#define TPC0_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC0_CFG_BASE 0x7FFCE06400ull
+#define KERNEL_TENSOR_0_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC0_CFG_BASE 0x7FFCE06438ull
+#define KERNEL_TENSOR_1_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC0_CFG_BASE 0x7FFCE06470ull
+#define KERNEL_TENSOR_2_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC0_CFG_BASE 0x7FFCE064A8ull
+#define KERNEL_TENSOR_3_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC0_CFG_BASE 0x7FFCE064E0ull
+#define KERNEL_TENSOR_4_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC0_CFG_BASE 0x7FFCE06518ull
+#define KERNEL_TENSOR_5_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC0_CFG_BASE 0x7FFCE06550ull
+#define KERNEL_TENSOR_6_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC0_CFG_BASE 0x7FFCE06588ull
+#define KERNEL_TENSOR_7_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC0_CFG_BASE 0x7FFCE065C0ull
+#define KERNEL_TENSOR_8_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC0_CFG_BASE 0x7FFCE065F8ull
+#define KERNEL_TENSOR_9_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC0_CFG_BASE 0x7FFCE06630ull
+#define KERNEL_TENSOR_10_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC0_CFG_BASE 0x7FFCE06668ull
+#define KERNEL_TENSOR_11_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC0_CFG_BASE 0x7FFCE066A0ull
+#define KERNEL_TENSOR_12_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC0_CFG_BASE 0x7FFCE066D8ull
+#define KERNEL_TENSOR_13_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC0_CFG_BASE 0x7FFCE06710ull
+#define KERNEL_TENSOR_14_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC0_CFG_BASE 0x7FFCE06748ull
+#define KERNEL_TENSOR_15_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC0_CFG_BASE 0x7FFCE06780ull
+#define KERNEL_SYNC_OBJECT_TPC0_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC0_CFG_SECTION 0x8000
+#define mmKERNEL_TPC0_CFG_BASE 0x7FFCE06788ull
+#define KERNEL_TPC0_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC0_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC0_CFG_BASE 0x7FFCE06A00ull
+#define QM_TENSOR_0_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC0_CFG_BASE 0x7FFCE06A38ull
+#define QM_TENSOR_1_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC0_CFG_BASE 0x7FFCE06A70ull
+#define QM_TENSOR_2_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC0_CFG_BASE 0x7FFCE06AA8ull
+#define QM_TENSOR_3_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC0_CFG_BASE 0x7FFCE06AE0ull
+#define QM_TENSOR_4_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC0_CFG_BASE 0x7FFCE06B18ull
+#define QM_TENSOR_5_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC0_CFG_BASE 0x7FFCE06B50ull
+#define QM_TENSOR_6_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC0_CFG_BASE 0x7FFCE06B88ull
+#define QM_TENSOR_7_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC0_CFG_BASE 0x7FFCE06BC0ull
+#define QM_TENSOR_8_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC0_CFG_BASE 0x7FFCE06BF8ull
+#define QM_TENSOR_9_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC0_CFG_BASE 0x7FFCE06C30ull
+#define QM_TENSOR_10_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC0_CFG_BASE 0x7FFCE06C68ull
+#define QM_TENSOR_11_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC0_CFG_BASE 0x7FFCE06CA0ull
+#define QM_TENSOR_12_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC0_CFG_SECTION 0x3800
QM_TENSOR_12_TPC0_CFG_SECTION 0x3800 +#define mmQM_TENSOR_13_TPC0_CFG_BASE 0x7FFCE06CD8ull +#define QM_TENSOR_13_TPC0_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_13_TPC0_CFG_SECTION 0x3800 +#define mmQM_TENSOR_14_TPC0_CFG_BASE 0x7FFCE06D10ull +#define QM_TENSOR_14_TPC0_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_14_TPC0_CFG_SECTION 0x3800 +#define mmQM_TENSOR_15_TPC0_CFG_BASE 0x7FFCE06D48ull +#define QM_TENSOR_15_TPC0_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_15_TPC0_CFG_SECTION 0x3800 +#define mmQM_SYNC_OBJECT_TPC0_CFG_BASE 0x7FFCE06D80ull +#define QM_SYNC_OBJECT_TPC0_CFG_MAX_OFFSET 0x8000 +#define QM_SYNC_OBJECT_TPC0_CFG_SECTION 0x8000 +#define mmQM_TPC0_CFG_BASE 0x7FFCE06D88ull +#define QM_TPC0_CFG_MAX_OFFSET 0xB800 +#define QM_TPC0_CFG_SECTION 0x2780 +#define mmTPC0_E2E_CRED_BASE 0x7FFCE07000ull +#define TPC0_E2E_CRED_MAX_OFFSET 0x1680 +#define TPC0_E2E_CRED_SECTION 0x1000 +#define mmTPC0_QM_BASE 0x7FFCE08000ull +#define TPC0_QM_MAX_OFFSET 0xD040 +#define TPC0_QM_SECTION 0x3E000 +#define mmTPC1_CFG_BASE 0x7FFCE46000ull +#define TPC1_CFG_MAX_OFFSET 0xE400 +#define TPC1_CFG_SECTION 0x4000 +#define mmKERNEL_TENSOR_0_TPC1_CFG_BASE 0x7FFCE46400ull +#define KERNEL_TENSOR_0_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_0_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_1_TPC1_CFG_BASE 0x7FFCE46438ull +#define KERNEL_TENSOR_1_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_1_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_2_TPC1_CFG_BASE 0x7FFCE46470ull +#define KERNEL_TENSOR_2_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_2_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_3_TPC1_CFG_BASE 0x7FFCE464A8ull +#define KERNEL_TENSOR_3_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_3_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_4_TPC1_CFG_BASE 0x7FFCE464E0ull +#define KERNEL_TENSOR_4_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_4_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_5_TPC1_CFG_BASE 0x7FFCE46518ull +#define KERNEL_TENSOR_5_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_5_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_6_TPC1_CFG_BASE 0x7FFCE46550ull +#define KERNEL_TENSOR_6_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_6_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_7_TPC1_CFG_BASE 0x7FFCE46588ull +#define KERNEL_TENSOR_7_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_7_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_8_TPC1_CFG_BASE 0x7FFCE465C0ull +#define KERNEL_TENSOR_8_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_8_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_9_TPC1_CFG_BASE 0x7FFCE465F8ull +#define KERNEL_TENSOR_9_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_9_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_10_TPC1_CFG_BASE 0x7FFCE46630ull +#define KERNEL_TENSOR_10_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_10_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_11_TPC1_CFG_BASE 0x7FFCE46668ull +#define KERNEL_TENSOR_11_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_11_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_12_TPC1_CFG_BASE 0x7FFCE466A0ull +#define KERNEL_TENSOR_12_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_12_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_13_TPC1_CFG_BASE 0x7FFCE466D8ull +#define KERNEL_TENSOR_13_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_13_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_14_TPC1_CFG_BASE 0x7FFCE46710ull +#define KERNEL_TENSOR_14_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_14_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_15_TPC1_CFG_BASE 0x7FFCE46748ull +#define 
KERNEL_TENSOR_15_TPC1_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_15_TPC1_CFG_SECTION 0x3800 +#define mmKERNEL_SYNC_OBJECT_TPC1_CFG_BASE 0x7FFCE46780ull +#define KERNEL_SYNC_OBJECT_TPC1_CFG_MAX_OFFSET 0x8000 +#define KERNEL_SYNC_OBJECT_TPC1_CFG_SECTION 0x8000 +#define mmKERNEL_TPC1_CFG_BASE 0x7FFCE46788ull +#define KERNEL_TPC1_CFG_MAX_OFFSET 0xB800 +#define KERNEL_TPC1_CFG_SECTION 0x2780 +#define mmQM_TENSOR_0_TPC1_CFG_BASE 0x7FFCE46A00ull +#define QM_TENSOR_0_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_0_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_1_TPC1_CFG_BASE 0x7FFCE46A38ull +#define QM_TENSOR_1_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_1_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_2_TPC1_CFG_BASE 0x7FFCE46A70ull +#define QM_TENSOR_2_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_2_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_3_TPC1_CFG_BASE 0x7FFCE46AA8ull +#define QM_TENSOR_3_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_3_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_4_TPC1_CFG_BASE 0x7FFCE46AE0ull +#define QM_TENSOR_4_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_4_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_5_TPC1_CFG_BASE 0x7FFCE46B18ull +#define QM_TENSOR_5_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_5_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_6_TPC1_CFG_BASE 0x7FFCE46B50ull +#define QM_TENSOR_6_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_6_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_7_TPC1_CFG_BASE 0x7FFCE46B88ull +#define QM_TENSOR_7_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_7_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_8_TPC1_CFG_BASE 0x7FFCE46BC0ull +#define QM_TENSOR_8_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_8_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_9_TPC1_CFG_BASE 0x7FFCE46BF8ull +#define QM_TENSOR_9_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_9_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_10_TPC1_CFG_BASE 0x7FFCE46C30ull +#define QM_TENSOR_10_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_10_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_11_TPC1_CFG_BASE 0x7FFCE46C68ull +#define QM_TENSOR_11_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_11_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_12_TPC1_CFG_BASE 0x7FFCE46CA0ull +#define QM_TENSOR_12_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_12_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_13_TPC1_CFG_BASE 0x7FFCE46CD8ull +#define QM_TENSOR_13_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_13_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_14_TPC1_CFG_BASE 0x7FFCE46D10ull +#define QM_TENSOR_14_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_14_TPC1_CFG_SECTION 0x3800 +#define mmQM_TENSOR_15_TPC1_CFG_BASE 0x7FFCE46D48ull +#define QM_TENSOR_15_TPC1_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_15_TPC1_CFG_SECTION 0x3800 +#define mmQM_SYNC_OBJECT_TPC1_CFG_BASE 0x7FFCE46D80ull +#define QM_SYNC_OBJECT_TPC1_CFG_MAX_OFFSET 0x8000 +#define QM_SYNC_OBJECT_TPC1_CFG_SECTION 0x8000 +#define mmQM_TPC1_CFG_BASE 0x7FFCE46D88ull +#define QM_TPC1_CFG_MAX_OFFSET 0xB800 +#define QM_TPC1_CFG_SECTION 0x2780 +#define mmTPC1_E2E_CRED_BASE 0x7FFCE47000ull +#define TPC1_E2E_CRED_MAX_OFFSET 0x1680 +#define TPC1_E2E_CRED_SECTION 0x1000 +#define mmTPC1_QM_BASE 0x7FFCE48000ull +#define TPC1_QM_MAX_OFFSET 0xD040 +#define TPC1_QM_SECTION 0x3E000 +#define mmTPC2_CFG_BASE 0x7FFCE86000ull +#define TPC2_CFG_MAX_OFFSET 0xE400 +#define TPC2_CFG_SECTION 0x4000 +#define mmKERNEL_TENSOR_0_TPC2_CFG_BASE 0x7FFCE86400ull +#define KERNEL_TENSOR_0_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_0_TPC2_CFG_SECTION 0x3800 +#define 
mmKERNEL_TENSOR_1_TPC2_CFG_BASE 0x7FFCE86438ull +#define KERNEL_TENSOR_1_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_1_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_2_TPC2_CFG_BASE 0x7FFCE86470ull +#define KERNEL_TENSOR_2_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_2_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_3_TPC2_CFG_BASE 0x7FFCE864A8ull +#define KERNEL_TENSOR_3_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_3_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_4_TPC2_CFG_BASE 0x7FFCE864E0ull +#define KERNEL_TENSOR_4_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_4_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_5_TPC2_CFG_BASE 0x7FFCE86518ull +#define KERNEL_TENSOR_5_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_5_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_6_TPC2_CFG_BASE 0x7FFCE86550ull +#define KERNEL_TENSOR_6_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_6_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_7_TPC2_CFG_BASE 0x7FFCE86588ull +#define KERNEL_TENSOR_7_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_7_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_8_TPC2_CFG_BASE 0x7FFCE865C0ull +#define KERNEL_TENSOR_8_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_8_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_9_TPC2_CFG_BASE 0x7FFCE865F8ull +#define KERNEL_TENSOR_9_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_9_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_10_TPC2_CFG_BASE 0x7FFCE86630ull +#define KERNEL_TENSOR_10_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_10_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_11_TPC2_CFG_BASE 0x7FFCE86668ull +#define KERNEL_TENSOR_11_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_11_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_12_TPC2_CFG_BASE 0x7FFCE866A0ull +#define KERNEL_TENSOR_12_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_12_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_13_TPC2_CFG_BASE 0x7FFCE866D8ull +#define KERNEL_TENSOR_13_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_13_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_14_TPC2_CFG_BASE 0x7FFCE86710ull +#define KERNEL_TENSOR_14_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_14_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_15_TPC2_CFG_BASE 0x7FFCE86748ull +#define KERNEL_TENSOR_15_TPC2_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_15_TPC2_CFG_SECTION 0x3800 +#define mmKERNEL_SYNC_OBJECT_TPC2_CFG_BASE 0x7FFCE86780ull +#define KERNEL_SYNC_OBJECT_TPC2_CFG_MAX_OFFSET 0x8000 +#define KERNEL_SYNC_OBJECT_TPC2_CFG_SECTION 0x8000 +#define mmKERNEL_TPC2_CFG_BASE 0x7FFCE86788ull +#define KERNEL_TPC2_CFG_MAX_OFFSET 0xB800 +#define KERNEL_TPC2_CFG_SECTION 0x2780 +#define mmQM_TENSOR_0_TPC2_CFG_BASE 0x7FFCE86A00ull +#define QM_TENSOR_0_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_0_TPC2_CFG_SECTION 0x3800 +#define mmQM_TENSOR_1_TPC2_CFG_BASE 0x7FFCE86A38ull +#define QM_TENSOR_1_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_1_TPC2_CFG_SECTION 0x3800 +#define mmQM_TENSOR_2_TPC2_CFG_BASE 0x7FFCE86A70ull +#define QM_TENSOR_2_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_2_TPC2_CFG_SECTION 0x3800 +#define mmQM_TENSOR_3_TPC2_CFG_BASE 0x7FFCE86AA8ull +#define QM_TENSOR_3_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_3_TPC2_CFG_SECTION 0x3800 +#define mmQM_TENSOR_4_TPC2_CFG_BASE 0x7FFCE86AE0ull +#define QM_TENSOR_4_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_4_TPC2_CFG_SECTION 0x3800 +#define mmQM_TENSOR_5_TPC2_CFG_BASE 0x7FFCE86B18ull +#define QM_TENSOR_5_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_5_TPC2_CFG_SECTION 0x3800 
+#define mmQM_TENSOR_6_TPC2_CFG_BASE 0x7FFCE86B50ull +#define QM_TENSOR_6_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_6_TPC2_CFG_SECTION 0x3800 +#define mmQM_TENSOR_7_TPC2_CFG_BASE 0x7FFCE86B88ull +#define QM_TENSOR_7_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_7_TPC2_CFG_SECTION 0x3800 +#define mmQM_TENSOR_8_TPC2_CFG_BASE 0x7FFCE86BC0ull +#define QM_TENSOR_8_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_8_TPC2_CFG_SECTION 0x3800 +#define mmQM_TENSOR_9_TPC2_CFG_BASE 0x7FFCE86BF8ull +#define QM_TENSOR_9_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_9_TPC2_CFG_SECTION 0x3800 +#define mmQM_TENSOR_10_TPC2_CFG_BASE 0x7FFCE86C30ull +#define QM_TENSOR_10_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_10_TPC2_CFG_SECTION 0x3800 +#define mmQM_TENSOR_11_TPC2_CFG_BASE 0x7FFCE86C68ull +#define QM_TENSOR_11_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_11_TPC2_CFG_SECTION 0x3800 +#define mmQM_TENSOR_12_TPC2_CFG_BASE 0x7FFCE86CA0ull +#define QM_TENSOR_12_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_12_TPC2_CFG_SECTION 0x3800 +#define mmQM_TENSOR_13_TPC2_CFG_BASE 0x7FFCE86CD8ull +#define QM_TENSOR_13_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_13_TPC2_CFG_SECTION 0x3800 +#define mmQM_TENSOR_14_TPC2_CFG_BASE 0x7FFCE86D10ull +#define QM_TENSOR_14_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_14_TPC2_CFG_SECTION 0x3800 +#define mmQM_TENSOR_15_TPC2_CFG_BASE 0x7FFCE86D48ull +#define QM_TENSOR_15_TPC2_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_15_TPC2_CFG_SECTION 0x3800 +#define mmQM_SYNC_OBJECT_TPC2_CFG_BASE 0x7FFCE86D80ull +#define QM_SYNC_OBJECT_TPC2_CFG_MAX_OFFSET 0x8000 +#define QM_SYNC_OBJECT_TPC2_CFG_SECTION 0x8000 +#define mmQM_TPC2_CFG_BASE 0x7FFCE86D88ull +#define QM_TPC2_CFG_MAX_OFFSET 0xB800 +#define QM_TPC2_CFG_SECTION 0x2780 +#define mmTPC2_E2E_CRED_BASE 0x7FFCE87000ull +#define TPC2_E2E_CRED_MAX_OFFSET 0x1680 +#define TPC2_E2E_CRED_SECTION 0x1000 +#define mmTPC2_QM_BASE 0x7FFCE88000ull +#define TPC2_QM_MAX_OFFSET 0xD040 +#define TPC2_QM_SECTION 0x3E000 +#define mmTPC3_CFG_BASE 0x7FFCEC6000ull +#define TPC3_CFG_MAX_OFFSET 0xE400 +#define TPC3_CFG_SECTION 0x4000 +#define mmKERNEL_TENSOR_0_TPC3_CFG_BASE 0x7FFCEC6400ull +#define KERNEL_TENSOR_0_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_0_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_1_TPC3_CFG_BASE 0x7FFCEC6438ull +#define KERNEL_TENSOR_1_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_1_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_2_TPC3_CFG_BASE 0x7FFCEC6470ull +#define KERNEL_TENSOR_2_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_2_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_3_TPC3_CFG_BASE 0x7FFCEC64A8ull +#define KERNEL_TENSOR_3_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_3_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_4_TPC3_CFG_BASE 0x7FFCEC64E0ull +#define KERNEL_TENSOR_4_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_4_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_5_TPC3_CFG_BASE 0x7FFCEC6518ull +#define KERNEL_TENSOR_5_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_5_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_6_TPC3_CFG_BASE 0x7FFCEC6550ull +#define KERNEL_TENSOR_6_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_6_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_7_TPC3_CFG_BASE 0x7FFCEC6588ull +#define KERNEL_TENSOR_7_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_7_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_8_TPC3_CFG_BASE 0x7FFCEC65C0ull +#define KERNEL_TENSOR_8_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_8_TPC3_CFG_SECTION 0x3800 +#define 
mmKERNEL_TENSOR_9_TPC3_CFG_BASE 0x7FFCEC65F8ull +#define KERNEL_TENSOR_9_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_9_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_10_TPC3_CFG_BASE 0x7FFCEC6630ull +#define KERNEL_TENSOR_10_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_10_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_11_TPC3_CFG_BASE 0x7FFCEC6668ull +#define KERNEL_TENSOR_11_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_11_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_12_TPC3_CFG_BASE 0x7FFCEC66A0ull +#define KERNEL_TENSOR_12_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_12_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_13_TPC3_CFG_BASE 0x7FFCEC66D8ull +#define KERNEL_TENSOR_13_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_13_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_14_TPC3_CFG_BASE 0x7FFCEC6710ull +#define KERNEL_TENSOR_14_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_14_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_15_TPC3_CFG_BASE 0x7FFCEC6748ull +#define KERNEL_TENSOR_15_TPC3_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_15_TPC3_CFG_SECTION 0x3800 +#define mmKERNEL_SYNC_OBJECT_TPC3_CFG_BASE 0x7FFCEC6780ull +#define KERNEL_SYNC_OBJECT_TPC3_CFG_MAX_OFFSET 0x8000 +#define KERNEL_SYNC_OBJECT_TPC3_CFG_SECTION 0x8000 +#define mmKERNEL_TPC3_CFG_BASE 0x7FFCEC6788ull +#define KERNEL_TPC3_CFG_MAX_OFFSET 0xB800 +#define KERNEL_TPC3_CFG_SECTION 0x2780 +#define mmQM_TENSOR_0_TPC3_CFG_BASE 0x7FFCEC6A00ull +#define QM_TENSOR_0_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_0_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_1_TPC3_CFG_BASE 0x7FFCEC6A38ull +#define QM_TENSOR_1_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_1_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_2_TPC3_CFG_BASE 0x7FFCEC6A70ull +#define QM_TENSOR_2_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_2_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_3_TPC3_CFG_BASE 0x7FFCEC6AA8ull +#define QM_TENSOR_3_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_3_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_4_TPC3_CFG_BASE 0x7FFCEC6AE0ull +#define QM_TENSOR_4_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_4_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_5_TPC3_CFG_BASE 0x7FFCEC6B18ull +#define QM_TENSOR_5_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_5_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_6_TPC3_CFG_BASE 0x7FFCEC6B50ull +#define QM_TENSOR_6_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_6_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_7_TPC3_CFG_BASE 0x7FFCEC6B88ull +#define QM_TENSOR_7_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_7_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_8_TPC3_CFG_BASE 0x7FFCEC6BC0ull +#define QM_TENSOR_8_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_8_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_9_TPC3_CFG_BASE 0x7FFCEC6BF8ull +#define QM_TENSOR_9_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_9_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_10_TPC3_CFG_BASE 0x7FFCEC6C30ull +#define QM_TENSOR_10_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_10_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_11_TPC3_CFG_BASE 0x7FFCEC6C68ull +#define QM_TENSOR_11_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_11_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_12_TPC3_CFG_BASE 0x7FFCEC6CA0ull +#define QM_TENSOR_12_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_12_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_13_TPC3_CFG_BASE 0x7FFCEC6CD8ull +#define QM_TENSOR_13_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_13_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_14_TPC3_CFG_BASE 0x7FFCEC6D10ull +#define 
QM_TENSOR_14_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_14_TPC3_CFG_SECTION 0x3800 +#define mmQM_TENSOR_15_TPC3_CFG_BASE 0x7FFCEC6D48ull +#define QM_TENSOR_15_TPC3_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_15_TPC3_CFG_SECTION 0x3800 +#define mmQM_SYNC_OBJECT_TPC3_CFG_BASE 0x7FFCEC6D80ull +#define QM_SYNC_OBJECT_TPC3_CFG_MAX_OFFSET 0x8000 +#define QM_SYNC_OBJECT_TPC3_CFG_SECTION 0x8000 +#define mmQM_TPC3_CFG_BASE 0x7FFCEC6D88ull +#define QM_TPC3_CFG_MAX_OFFSET 0xB800 +#define QM_TPC3_CFG_SECTION 0x2780 +#define mmTPC3_E2E_CRED_BASE 0x7FFCEC7000ull +#define TPC3_E2E_CRED_MAX_OFFSET 0x1680 +#define TPC3_E2E_CRED_SECTION 0x1000 +#define mmTPC3_QM_BASE 0x7FFCEC8000ull +#define TPC3_QM_MAX_OFFSET 0xD040 +#define TPC3_QM_SECTION 0x3E000 +#define mmTPC4_CFG_BASE 0x7FFCF06000ull +#define TPC4_CFG_MAX_OFFSET 0xE400 +#define TPC4_CFG_SECTION 0x4000 +#define mmKERNEL_TENSOR_0_TPC4_CFG_BASE 0x7FFCF06400ull +#define KERNEL_TENSOR_0_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_0_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_1_TPC4_CFG_BASE 0x7FFCF06438ull +#define KERNEL_TENSOR_1_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_1_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_2_TPC4_CFG_BASE 0x7FFCF06470ull +#define KERNEL_TENSOR_2_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_2_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_3_TPC4_CFG_BASE 0x7FFCF064A8ull +#define KERNEL_TENSOR_3_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_3_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_4_TPC4_CFG_BASE 0x7FFCF064E0ull +#define KERNEL_TENSOR_4_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_4_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_5_TPC4_CFG_BASE 0x7FFCF06518ull +#define KERNEL_TENSOR_5_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_5_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_6_TPC4_CFG_BASE 0x7FFCF06550ull +#define KERNEL_TENSOR_6_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_6_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_7_TPC4_CFG_BASE 0x7FFCF06588ull +#define KERNEL_TENSOR_7_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_7_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_8_TPC4_CFG_BASE 0x7FFCF065C0ull +#define KERNEL_TENSOR_8_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_8_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_9_TPC4_CFG_BASE 0x7FFCF065F8ull +#define KERNEL_TENSOR_9_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_9_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_10_TPC4_CFG_BASE 0x7FFCF06630ull +#define KERNEL_TENSOR_10_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_10_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_11_TPC4_CFG_BASE 0x7FFCF06668ull +#define KERNEL_TENSOR_11_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_11_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_12_TPC4_CFG_BASE 0x7FFCF066A0ull +#define KERNEL_TENSOR_12_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_12_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_13_TPC4_CFG_BASE 0x7FFCF066D8ull +#define KERNEL_TENSOR_13_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_13_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_14_TPC4_CFG_BASE 0x7FFCF06710ull +#define KERNEL_TENSOR_14_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_14_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_15_TPC4_CFG_BASE 0x7FFCF06748ull +#define KERNEL_TENSOR_15_TPC4_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_15_TPC4_CFG_SECTION 0x3800 +#define mmKERNEL_SYNC_OBJECT_TPC4_CFG_BASE 0x7FFCF06780ull +#define KERNEL_SYNC_OBJECT_TPC4_CFG_MAX_OFFSET 0x8000 +#define 
KERNEL_SYNC_OBJECT_TPC4_CFG_SECTION 0x8000 +#define mmKERNEL_TPC4_CFG_BASE 0x7FFCF06788ull +#define KERNEL_TPC4_CFG_MAX_OFFSET 0xB800 +#define KERNEL_TPC4_CFG_SECTION 0x2780 +#define mmQM_TENSOR_0_TPC4_CFG_BASE 0x7FFCF06A00ull +#define QM_TENSOR_0_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_0_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_1_TPC4_CFG_BASE 0x7FFCF06A38ull +#define QM_TENSOR_1_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_1_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_2_TPC4_CFG_BASE 0x7FFCF06A70ull +#define QM_TENSOR_2_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_2_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_3_TPC4_CFG_BASE 0x7FFCF06AA8ull +#define QM_TENSOR_3_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_3_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_4_TPC4_CFG_BASE 0x7FFCF06AE0ull +#define QM_TENSOR_4_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_4_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_5_TPC4_CFG_BASE 0x7FFCF06B18ull +#define QM_TENSOR_5_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_5_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_6_TPC4_CFG_BASE 0x7FFCF06B50ull +#define QM_TENSOR_6_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_6_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_7_TPC4_CFG_BASE 0x7FFCF06B88ull +#define QM_TENSOR_7_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_7_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_8_TPC4_CFG_BASE 0x7FFCF06BC0ull +#define QM_TENSOR_8_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_8_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_9_TPC4_CFG_BASE 0x7FFCF06BF8ull +#define QM_TENSOR_9_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_9_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_10_TPC4_CFG_BASE 0x7FFCF06C30ull +#define QM_TENSOR_10_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_10_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_11_TPC4_CFG_BASE 0x7FFCF06C68ull +#define QM_TENSOR_11_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_11_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_12_TPC4_CFG_BASE 0x7FFCF06CA0ull +#define QM_TENSOR_12_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_12_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_13_TPC4_CFG_BASE 0x7FFCF06CD8ull +#define QM_TENSOR_13_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_13_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_14_TPC4_CFG_BASE 0x7FFCF06D10ull +#define QM_TENSOR_14_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_14_TPC4_CFG_SECTION 0x3800 +#define mmQM_TENSOR_15_TPC4_CFG_BASE 0x7FFCF06D48ull +#define QM_TENSOR_15_TPC4_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_15_TPC4_CFG_SECTION 0x3800 +#define mmQM_SYNC_OBJECT_TPC4_CFG_BASE 0x7FFCF06D80ull +#define QM_SYNC_OBJECT_TPC4_CFG_MAX_OFFSET 0x8000 +#define QM_SYNC_OBJECT_TPC4_CFG_SECTION 0x8000 +#define mmQM_TPC4_CFG_BASE 0x7FFCF06D88ull +#define QM_TPC4_CFG_MAX_OFFSET 0xB800 +#define QM_TPC4_CFG_SECTION 0x2780 +#define mmTPC4_E2E_CRED_BASE 0x7FFCF07000ull +#define TPC4_E2E_CRED_MAX_OFFSET 0x1680 +#define TPC4_E2E_CRED_SECTION 0x1000 +#define mmTPC4_QM_BASE 0x7FFCF08000ull +#define TPC4_QM_MAX_OFFSET 0xD040 +#define TPC4_QM_SECTION 0x3E000 +#define mmTPC5_CFG_BASE 0x7FFCF46000ull +#define TPC5_CFG_MAX_OFFSET 0xE400 +#define TPC5_CFG_SECTION 0x4000 +#define mmKERNEL_TENSOR_0_TPC5_CFG_BASE 0x7FFCF46400ull +#define KERNEL_TENSOR_0_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_0_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_1_TPC5_CFG_BASE 0x7FFCF46438ull +#define KERNEL_TENSOR_1_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_1_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_2_TPC5_CFG_BASE 0x7FFCF46470ull +#define 
KERNEL_TENSOR_2_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_2_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_3_TPC5_CFG_BASE 0x7FFCF464A8ull +#define KERNEL_TENSOR_3_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_3_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_4_TPC5_CFG_BASE 0x7FFCF464E0ull +#define KERNEL_TENSOR_4_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_4_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_5_TPC5_CFG_BASE 0x7FFCF46518ull +#define KERNEL_TENSOR_5_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_5_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_6_TPC5_CFG_BASE 0x7FFCF46550ull +#define KERNEL_TENSOR_6_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_6_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_7_TPC5_CFG_BASE 0x7FFCF46588ull +#define KERNEL_TENSOR_7_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_7_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_8_TPC5_CFG_BASE 0x7FFCF465C0ull +#define KERNEL_TENSOR_8_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_8_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_9_TPC5_CFG_BASE 0x7FFCF465F8ull +#define KERNEL_TENSOR_9_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_9_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_10_TPC5_CFG_BASE 0x7FFCF46630ull +#define KERNEL_TENSOR_10_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_10_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_11_TPC5_CFG_BASE 0x7FFCF46668ull +#define KERNEL_TENSOR_11_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_11_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_12_TPC5_CFG_BASE 0x7FFCF466A0ull +#define KERNEL_TENSOR_12_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_12_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_13_TPC5_CFG_BASE 0x7FFCF466D8ull +#define KERNEL_TENSOR_13_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_13_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_14_TPC5_CFG_BASE 0x7FFCF46710ull +#define KERNEL_TENSOR_14_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_14_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_15_TPC5_CFG_BASE 0x7FFCF46748ull +#define KERNEL_TENSOR_15_TPC5_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_15_TPC5_CFG_SECTION 0x3800 +#define mmKERNEL_SYNC_OBJECT_TPC5_CFG_BASE 0x7FFCF46780ull +#define KERNEL_SYNC_OBJECT_TPC5_CFG_MAX_OFFSET 0x8000 +#define KERNEL_SYNC_OBJECT_TPC5_CFG_SECTION 0x8000 +#define mmKERNEL_TPC5_CFG_BASE 0x7FFCF46788ull +#define KERNEL_TPC5_CFG_MAX_OFFSET 0xB800 +#define KERNEL_TPC5_CFG_SECTION 0x2780 +#define mmQM_TENSOR_0_TPC5_CFG_BASE 0x7FFCF46A00ull +#define QM_TENSOR_0_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_0_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_1_TPC5_CFG_BASE 0x7FFCF46A38ull +#define QM_TENSOR_1_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_1_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_2_TPC5_CFG_BASE 0x7FFCF46A70ull +#define QM_TENSOR_2_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_2_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_3_TPC5_CFG_BASE 0x7FFCF46AA8ull +#define QM_TENSOR_3_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_3_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_4_TPC5_CFG_BASE 0x7FFCF46AE0ull +#define QM_TENSOR_4_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_4_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_5_TPC5_CFG_BASE 0x7FFCF46B18ull +#define QM_TENSOR_5_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_5_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_6_TPC5_CFG_BASE 0x7FFCF46B50ull +#define QM_TENSOR_6_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_6_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_7_TPC5_CFG_BASE 0x7FFCF46B88ull +#define 
QM_TENSOR_7_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_7_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_8_TPC5_CFG_BASE 0x7FFCF46BC0ull +#define QM_TENSOR_8_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_8_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_9_TPC5_CFG_BASE 0x7FFCF46BF8ull +#define QM_TENSOR_9_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_9_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_10_TPC5_CFG_BASE 0x7FFCF46C30ull +#define QM_TENSOR_10_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_10_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_11_TPC5_CFG_BASE 0x7FFCF46C68ull +#define QM_TENSOR_11_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_11_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_12_TPC5_CFG_BASE 0x7FFCF46CA0ull +#define QM_TENSOR_12_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_12_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_13_TPC5_CFG_BASE 0x7FFCF46CD8ull +#define QM_TENSOR_13_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_13_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_14_TPC5_CFG_BASE 0x7FFCF46D10ull +#define QM_TENSOR_14_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_14_TPC5_CFG_SECTION 0x3800 +#define mmQM_TENSOR_15_TPC5_CFG_BASE 0x7FFCF46D48ull +#define QM_TENSOR_15_TPC5_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_15_TPC5_CFG_SECTION 0x3800 +#define mmQM_SYNC_OBJECT_TPC5_CFG_BASE 0x7FFCF46D80ull +#define QM_SYNC_OBJECT_TPC5_CFG_MAX_OFFSET 0x8000 +#define QM_SYNC_OBJECT_TPC5_CFG_SECTION 0x8000 +#define mmQM_TPC5_CFG_BASE 0x7FFCF46D88ull +#define QM_TPC5_CFG_MAX_OFFSET 0xB800 +#define QM_TPC5_CFG_SECTION 0x2780 +#define mmTPC5_E2E_CRED_BASE 0x7FFCF47000ull +#define TPC5_E2E_CRED_MAX_OFFSET 0x1680 +#define TPC5_E2E_CRED_SECTION 0x1000 +#define mmTPC5_QM_BASE 0x7FFCF48000ull +#define TPC5_QM_MAX_OFFSET 0xD040 +#define TPC5_QM_SECTION 0x3E000 +#define mmTPC6_CFG_BASE 0x7FFCF86000ull +#define TPC6_CFG_MAX_OFFSET 0xE400 +#define TPC6_CFG_SECTION 0x4000 +#define mmKERNEL_TENSOR_0_TPC6_CFG_BASE 0x7FFCF86400ull +#define KERNEL_TENSOR_0_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_0_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_1_TPC6_CFG_BASE 0x7FFCF86438ull +#define KERNEL_TENSOR_1_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_1_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_2_TPC6_CFG_BASE 0x7FFCF86470ull +#define KERNEL_TENSOR_2_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_2_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_3_TPC6_CFG_BASE 0x7FFCF864A8ull +#define KERNEL_TENSOR_3_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_3_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_4_TPC6_CFG_BASE 0x7FFCF864E0ull +#define KERNEL_TENSOR_4_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_4_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_5_TPC6_CFG_BASE 0x7FFCF86518ull +#define KERNEL_TENSOR_5_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_5_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_6_TPC6_CFG_BASE 0x7FFCF86550ull +#define KERNEL_TENSOR_6_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_6_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_7_TPC6_CFG_BASE 0x7FFCF86588ull +#define KERNEL_TENSOR_7_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_7_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_8_TPC6_CFG_BASE 0x7FFCF865C0ull +#define KERNEL_TENSOR_8_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_8_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_9_TPC6_CFG_BASE 0x7FFCF865F8ull +#define KERNEL_TENSOR_9_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_9_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_10_TPC6_CFG_BASE 0x7FFCF86630ull +#define 
KERNEL_TENSOR_10_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_10_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_11_TPC6_CFG_BASE 0x7FFCF86668ull +#define KERNEL_TENSOR_11_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_11_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_12_TPC6_CFG_BASE 0x7FFCF866A0ull +#define KERNEL_TENSOR_12_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_12_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_13_TPC6_CFG_BASE 0x7FFCF866D8ull +#define KERNEL_TENSOR_13_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_13_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_14_TPC6_CFG_BASE 0x7FFCF86710ull +#define KERNEL_TENSOR_14_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_14_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_15_TPC6_CFG_BASE 0x7FFCF86748ull +#define KERNEL_TENSOR_15_TPC6_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_15_TPC6_CFG_SECTION 0x3800 +#define mmKERNEL_SYNC_OBJECT_TPC6_CFG_BASE 0x7FFCF86780ull +#define KERNEL_SYNC_OBJECT_TPC6_CFG_MAX_OFFSET 0x8000 +#define KERNEL_SYNC_OBJECT_TPC6_CFG_SECTION 0x8000 +#define mmKERNEL_TPC6_CFG_BASE 0x7FFCF86788ull +#define KERNEL_TPC6_CFG_MAX_OFFSET 0xB800 +#define KERNEL_TPC6_CFG_SECTION 0x2780 +#define mmQM_TENSOR_0_TPC6_CFG_BASE 0x7FFCF86A00ull +#define QM_TENSOR_0_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_0_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_1_TPC6_CFG_BASE 0x7FFCF86A38ull +#define QM_TENSOR_1_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_1_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_2_TPC6_CFG_BASE 0x7FFCF86A70ull +#define QM_TENSOR_2_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_2_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_3_TPC6_CFG_BASE 0x7FFCF86AA8ull +#define QM_TENSOR_3_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_3_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_4_TPC6_CFG_BASE 0x7FFCF86AE0ull +#define QM_TENSOR_4_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_4_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_5_TPC6_CFG_BASE 0x7FFCF86B18ull +#define QM_TENSOR_5_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_5_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_6_TPC6_CFG_BASE 0x7FFCF86B50ull +#define QM_TENSOR_6_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_6_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_7_TPC6_CFG_BASE 0x7FFCF86B88ull +#define QM_TENSOR_7_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_7_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_8_TPC6_CFG_BASE 0x7FFCF86BC0ull +#define QM_TENSOR_8_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_8_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_9_TPC6_CFG_BASE 0x7FFCF86BF8ull +#define QM_TENSOR_9_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_9_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_10_TPC6_CFG_BASE 0x7FFCF86C30ull +#define QM_TENSOR_10_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_10_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_11_TPC6_CFG_BASE 0x7FFCF86C68ull +#define QM_TENSOR_11_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_11_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_12_TPC6_CFG_BASE 0x7FFCF86CA0ull +#define QM_TENSOR_12_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_12_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_13_TPC6_CFG_BASE 0x7FFCF86CD8ull +#define QM_TENSOR_13_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_13_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_14_TPC6_CFG_BASE 0x7FFCF86D10ull +#define QM_TENSOR_14_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_14_TPC6_CFG_SECTION 0x3800 +#define mmQM_TENSOR_15_TPC6_CFG_BASE 0x7FFCF86D48ull +#define QM_TENSOR_15_TPC6_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_15_TPC6_CFG_SECTION 0x3800 
+#define mmQM_SYNC_OBJECT_TPC6_CFG_BASE 0x7FFCF86D80ull +#define QM_SYNC_OBJECT_TPC6_CFG_MAX_OFFSET 0x8000 +#define QM_SYNC_OBJECT_TPC6_CFG_SECTION 0x8000 +#define mmQM_TPC6_CFG_BASE 0x7FFCF86D88ull +#define QM_TPC6_CFG_MAX_OFFSET 0xB800 +#define QM_TPC6_CFG_SECTION 0x2780 +#define mmTPC6_E2E_CRED_BASE 0x7FFCF87000ull +#define TPC6_E2E_CRED_MAX_OFFSET 0x1680 +#define TPC6_E2E_CRED_SECTION 0x1000 +#define mmTPC6_QM_BASE 0x7FFCF88000ull +#define TPC6_QM_MAX_OFFSET 0xD040 +#define TPC6_QM_SECTION 0x3E000 +#define mmTPC7_CFG_BASE 0x7FFCFC6000ull +#define TPC7_CFG_MAX_OFFSET 0xE400 +#define TPC7_CFG_SECTION 0x4000 +#define mmKERNEL_TENSOR_0_TPC7_CFG_BASE 0x7FFCFC6400ull +#define KERNEL_TENSOR_0_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_0_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_1_TPC7_CFG_BASE 0x7FFCFC6438ull +#define KERNEL_TENSOR_1_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_1_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_2_TPC7_CFG_BASE 0x7FFCFC6470ull +#define KERNEL_TENSOR_2_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_2_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_3_TPC7_CFG_BASE 0x7FFCFC64A8ull +#define KERNEL_TENSOR_3_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_3_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_4_TPC7_CFG_BASE 0x7FFCFC64E0ull +#define KERNEL_TENSOR_4_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_4_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_5_TPC7_CFG_BASE 0x7FFCFC6518ull +#define KERNEL_TENSOR_5_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_5_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_6_TPC7_CFG_BASE 0x7FFCFC6550ull +#define KERNEL_TENSOR_6_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_6_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_7_TPC7_CFG_BASE 0x7FFCFC6588ull +#define KERNEL_TENSOR_7_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_7_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_8_TPC7_CFG_BASE 0x7FFCFC65C0ull +#define KERNEL_TENSOR_8_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_8_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_9_TPC7_CFG_BASE 0x7FFCFC65F8ull +#define KERNEL_TENSOR_9_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_9_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_10_TPC7_CFG_BASE 0x7FFCFC6630ull +#define KERNEL_TENSOR_10_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_10_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_11_TPC7_CFG_BASE 0x7FFCFC6668ull +#define KERNEL_TENSOR_11_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_11_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_12_TPC7_CFG_BASE 0x7FFCFC66A0ull +#define KERNEL_TENSOR_12_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_12_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_13_TPC7_CFG_BASE 0x7FFCFC66D8ull +#define KERNEL_TENSOR_13_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_13_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_14_TPC7_CFG_BASE 0x7FFCFC6710ull +#define KERNEL_TENSOR_14_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_14_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_15_TPC7_CFG_BASE 0x7FFCFC6748ull +#define KERNEL_TENSOR_15_TPC7_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_15_TPC7_CFG_SECTION 0x3800 +#define mmKERNEL_SYNC_OBJECT_TPC7_CFG_BASE 0x7FFCFC6780ull +#define KERNEL_SYNC_OBJECT_TPC7_CFG_MAX_OFFSET 0x8000 +#define KERNEL_SYNC_OBJECT_TPC7_CFG_SECTION 0x8000 +#define mmKERNEL_TPC7_CFG_BASE 0x7FFCFC6788ull +#define KERNEL_TPC7_CFG_MAX_OFFSET 0xB800 +#define KERNEL_TPC7_CFG_SECTION 0x2780 +#define mmQM_TENSOR_0_TPC7_CFG_BASE 0x7FFCFC6A00ull +#define QM_TENSOR_0_TPC7_CFG_MAX_OFFSET 
0x3800 +#define QM_TENSOR_0_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_1_TPC7_CFG_BASE 0x7FFCFC6A38ull +#define QM_TENSOR_1_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_1_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_2_TPC7_CFG_BASE 0x7FFCFC6A70ull +#define QM_TENSOR_2_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_2_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_3_TPC7_CFG_BASE 0x7FFCFC6AA8ull +#define QM_TENSOR_3_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_3_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_4_TPC7_CFG_BASE 0x7FFCFC6AE0ull +#define QM_TENSOR_4_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_4_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_5_TPC7_CFG_BASE 0x7FFCFC6B18ull +#define QM_TENSOR_5_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_5_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_6_TPC7_CFG_BASE 0x7FFCFC6B50ull +#define QM_TENSOR_6_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_6_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_7_TPC7_CFG_BASE 0x7FFCFC6B88ull +#define QM_TENSOR_7_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_7_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_8_TPC7_CFG_BASE 0x7FFCFC6BC0ull +#define QM_TENSOR_8_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_8_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_9_TPC7_CFG_BASE 0x7FFCFC6BF8ull +#define QM_TENSOR_9_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_9_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_10_TPC7_CFG_BASE 0x7FFCFC6C30ull +#define QM_TENSOR_10_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_10_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_11_TPC7_CFG_BASE 0x7FFCFC6C68ull +#define QM_TENSOR_11_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_11_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_12_TPC7_CFG_BASE 0x7FFCFC6CA0ull +#define QM_TENSOR_12_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_12_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_13_TPC7_CFG_BASE 0x7FFCFC6CD8ull +#define QM_TENSOR_13_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_13_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_14_TPC7_CFG_BASE 0x7FFCFC6D10ull +#define QM_TENSOR_14_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_14_TPC7_CFG_SECTION 0x3800 +#define mmQM_TENSOR_15_TPC7_CFG_BASE 0x7FFCFC6D48ull +#define QM_TENSOR_15_TPC7_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_15_TPC7_CFG_SECTION 0x3800 +#define mmQM_SYNC_OBJECT_TPC7_CFG_BASE 0x7FFCFC6D80ull +#define QM_SYNC_OBJECT_TPC7_CFG_MAX_OFFSET 0x8000 +#define QM_SYNC_OBJECT_TPC7_CFG_SECTION 0x8000 +#define mmQM_TPC7_CFG_BASE 0x7FFCFC6D88ull +#define QM_TPC7_CFG_MAX_OFFSET 0xB800 +#define QM_TPC7_CFG_SECTION 0x2780 +#define mmTPC7_E2E_CRED_BASE 0x7FFCFC7000ull +#define TPC7_E2E_CRED_MAX_OFFSET 0x1680 +#define TPC7_E2E_CRED_SECTION 0x1000 +#define mmTPC7_QM_BASE 0x7FFCFC8000ull +#define TPC7_QM_MAX_OFFSET 0xD040 +#define TPC7_QM_SECTION 0x1038000 +#define mmMME_S_ROM_TABLE_BASE 0x7FFE000000ull +#define MME_S_ROM_TABLE_MAX_OFFSET 0x1000 +#define MME_S_ROM_TABLE_SECTION 0x21000 +#define mmMME0_ACC_STM_BASE 0x7FFE021000ull +#define MME0_ACC_STM_MAX_OFFSET 0x1000 +#define MME0_ACC_STM_SECTION 0x1000 +#define mmMME0_ACC_CTI_BASE 0x7FFE022000ull +#define MME0_ACC_CTI_MAX_OFFSET 0x1000 +#define MME0_ACC_CTI_SECTION 0x1000 +#define mmMME0_ACC_ETF_BASE 0x7FFE023000ull +#define MME0_ACC_ETF_MAX_OFFSET 0x1000 +#define MME0_ACC_ETF_SECTION 0x1000 +#define mmMME0_ACC_SPMU_BASE 0x7FFE024000ull +#define MME0_ACC_SPMU_MAX_OFFSET 0x1000 +#define MME0_ACC_SPMU_SECTION 0x1000 +#define mmMME0_ACC_CTI0_BASE 0x7FFE025000ull +#define MME0_ACC_CTI0_MAX_OFFSET 0x1000 +#define MME0_ACC_CTI0_SECTION 0x1000 +#define mmMME0_ACC_CTI1_BASE 
0x7FFE026000ull +#define MME0_ACC_CTI1_MAX_OFFSET 0x1000 +#define MME0_ACC_CTI1_SECTION 0x1000 +#define mmMME0_ACC_BMON0_BASE 0x7FFE027000ull +#define MME0_ACC_BMON0_MAX_OFFSET 0x1000 +#define MME0_ACC_BMON0_SECTION 0x9000 +#define mmMME0_ACC_FUNNEL_BASE 0x7FFE030000ull +#define MME0_ACC_FUNNEL_MAX_OFFSET 0x1000 +#define MME0_ACC_FUNNEL_SECTION 0x11000 +#define mmMME0_SBAB_STM_BASE 0x7FFE041000ull +#define MME0_SBAB_STM_MAX_OFFSET 0x1000 +#define MME0_SBAB_STM_SECTION 0x1000 +#define mmMME0_SBAB_CTI_BASE 0x7FFE042000ull +#define MME0_SBAB_CTI_MAX_OFFSET 0x1000 +#define MME0_SBAB_CTI_SECTION 0x1000 +#define mmMME0_SBAB_ETF_BASE 0x7FFE043000ull +#define MME0_SBAB_ETF_MAX_OFFSET 0x1000 +#define MME0_SBAB_ETF_SECTION 0x1000 +#define mmMME0_SBAB_SPMU_BASE 0x7FFE044000ull +#define MME0_SBAB_SPMU_MAX_OFFSET 0x1000 +#define MME0_SBAB_SPMU_SECTION 0x1000 +#define mmMME0_SBAB_CTI0_BASE 0x7FFE045000ull +#define MME0_SBAB_CTI0_MAX_OFFSET 0x1000 +#define MME0_SBAB_CTI0_SECTION 0x1000 +#define mmMME0_SBAB_CTI1_BASE 0x7FFE046000ull +#define MME0_SBAB_CTI1_MAX_OFFSET 0x1000 +#define MME0_SBAB_CTI1_SECTION 0x1000 +#define mmMME0_SBAB_BMON0_BASE 0x7FFE047000ull +#define MME0_SBAB_BMON0_MAX_OFFSET 0x1000 +#define MME0_SBAB_BMON0_SECTION 0x1000 +#define mmMME0_SBAB_BMON1_BASE 0x7FFE048000ull +#define MME0_SBAB_BMON1_MAX_OFFSET 0x1000 +#define MME0_SBAB_BMON1_SECTION 0x19000 +#define mmMME0_CTRL_STM_BASE 0x7FFE061000ull +#define MME0_CTRL_STM_MAX_OFFSET 0x1000 +#define MME0_CTRL_STM_SECTION 0x1000 +#define mmMME0_CTRL_CTI_BASE 0x7FFE062000ull +#define MME0_CTRL_CTI_MAX_OFFSET 0x1000 +#define MME0_CTRL_CTI_SECTION 0x1000 +#define mmMME0_CTRL_ETF_BASE 0x7FFE063000ull +#define MME0_CTRL_ETF_MAX_OFFSET 0x1000 +#define MME0_CTRL_ETF_SECTION 0x1000 +#define mmMME0_CTRL_SPMU_BASE 0x7FFE064000ull +#define MME0_CTRL_SPMU_MAX_OFFSET 0x1000 +#define MME0_CTRL_SPMU_SECTION 0x1000 +#define mmMME0_CTRL_CTI0_BASE 0x7FFE065000ull +#define MME0_CTRL_CTI0_MAX_OFFSET 0x1000 +#define MME0_CTRL_CTI0_SECTION 0x1000 +#define mmMME0_CTRL_CTI1_BASE 0x7FFE066000ull +#define MME0_CTRL_CTI1_MAX_OFFSET 0x1000 +#define MME0_CTRL_CTI1_SECTION 0x1000 +#define mmMME0_CTRL_BMON0_BASE 0x7FFE067000ull +#define MME0_CTRL_BMON0_MAX_OFFSET 0x1000 +#define MME0_CTRL_BMON0_SECTION 0x1000 +#define mmMME0_CTRL_BMON1_BASE 0x7FFE068000ull +#define MME0_CTRL_BMON1_MAX_OFFSET 0x1000 +#define MME0_CTRL_BMON1_SECTION 0x39000 +#define mmMME1_ACC_STM_BASE 0x7FFE0A1000ull +#define MME1_ACC_STM_MAX_OFFSET 0x1000 +#define MME1_ACC_STM_SECTION 0x1000 +#define mmMME1_ACC_CTI_BASE 0x7FFE0A2000ull +#define MME1_ACC_CTI_MAX_OFFSET 0x1000 +#define MME1_ACC_CTI_SECTION 0x1000 +#define mmMME1_ACC_ETF_BASE 0x7FFE0A3000ull +#define MME1_ACC_ETF_MAX_OFFSET 0x1000 +#define MME1_ACC_ETF_SECTION 0x1000 +#define mmMME1_ACC_SPMU_BASE 0x7FFE0A4000ull +#define MME1_ACC_SPMU_MAX_OFFSET 0x1000 +#define MME1_ACC_SPMU_SECTION 0x1000 +#define mmMME1_ACC_CTI0_BASE 0x7FFE0A5000ull +#define MME1_ACC_CTI0_MAX_OFFSET 0x1000 +#define MME1_ACC_CTI0_SECTION 0x1000 +#define mmMME1_ACC_CTI1_BASE 0x7FFE0A6000ull +#define MME1_ACC_CTI1_MAX_OFFSET 0x1000 +#define MME1_ACC_CTI1_SECTION 0x1000 +#define mmMME1_ACC_BMON0_BASE 0x7FFE0A7000ull +#define MME1_ACC_BMON0_MAX_OFFSET 0x1000 +#define MME1_ACC_BMON0_SECTION 0x9000 +#define mmMME1_ACC_FUNNEL_BASE 0x7FFE0B0000ull +#define MME1_ACC_FUNNEL_MAX_OFFSET 0x1000 +#define MME1_ACC_FUNNEL_SECTION 0x11000 +#define mmMME1_SBAB_STM_BASE 0x7FFE0C1000ull +#define MME1_SBAB_STM_MAX_OFFSET 0x1000 +#define MME1_SBAB_STM_SECTION 0x1000 +#define mmMME1_SBAB_CTI_BASE 
0x7FFE0C2000ull +#define MME1_SBAB_CTI_MAX_OFFSET 0x1000 +#define MME1_SBAB_CTI_SECTION 0x1000 +#define mmMME1_SBAB_ETF_BASE 0x7FFE0C3000ull +#define MME1_SBAB_ETF_MAX_OFFSET 0x1000 +#define MME1_SBAB_ETF_SECTION 0x1000 +#define mmMME1_SBAB_SPMU_BASE 0x7FFE0C4000ull +#define MME1_SBAB_SPMU_MAX_OFFSET 0x1000 +#define MME1_SBAB_SPMU_SECTION 0x1000 +#define mmMME1_SBAB_CTI0_BASE 0x7FFE0C5000ull +#define MME1_SBAB_CTI0_MAX_OFFSET 0x1000 +#define MME1_SBAB_CTI0_SECTION 0x1000 +#define mmMME1_SBAB_CTI1_BASE 0x7FFE0C6000ull +#define MME1_SBAB_CTI1_MAX_OFFSET 0x1000 +#define MME1_SBAB_CTI1_SECTION 0x1000 +#define mmMME1_SBAB_BMON0_BASE 0x7FFE0C7000ull +#define MME1_SBAB_BMON0_MAX_OFFSET 0x1000 +#define MME1_SBAB_BMON0_SECTION 0x1000 +#define mmMME1_SBAB_BMON1_BASE 0x7FFE0C8000ull +#define MME1_SBAB_BMON1_MAX_OFFSET 0x1000 +#define MME1_SBAB_BMON1_SECTION 0x19000 +#define mmMME1_CTRL_STM_BASE 0x7FFE0E1000ull +#define MME1_CTRL_STM_MAX_OFFSET 0x1000 +#define MME1_CTRL_STM_SECTION 0x1000 +#define mmMME1_CTRL_CTI_BASE 0x7FFE0E2000ull +#define MME1_CTRL_CTI_MAX_OFFSET 0x1000 +#define MME1_CTRL_CTI_SECTION 0x1000 +#define mmMME1_CTRL_ETF_BASE 0x7FFE0E3000ull +#define MME1_CTRL_ETF_MAX_OFFSET 0x1000 +#define MME1_CTRL_ETF_SECTION 0x1000 +#define mmMME1_CTRL_SPMU_BASE 0x7FFE0E4000ull +#define MME1_CTRL_SPMU_MAX_OFFSET 0x1000 +#define MME1_CTRL_SPMU_SECTION 0x1000 +#define mmMME1_CTRL_CTI0_BASE 0x7FFE0E5000ull +#define MME1_CTRL_CTI0_MAX_OFFSET 0x1000 +#define MME1_CTRL_CTI0_SECTION 0x1000 +#define mmMME1_CTRL_CTI1_BASE 0x7FFE0E6000ull +#define MME1_CTRL_CTI1_MAX_OFFSET 0x1000 +#define MME1_CTRL_CTI1_SECTION 0x1000 +#define mmMME1_CTRL_BMON0_BASE 0x7FFE0E7000ull +#define MME1_CTRL_BMON0_MAX_OFFSET 0x1000 +#define MME1_CTRL_BMON0_SECTION 0x1000 +#define mmMME1_CTRL_BMON1_BASE 0x7FFE0E8000ull +#define MME1_CTRL_BMON1_MAX_OFFSET 0x1000 +#define MME1_CTRL_BMON1_SECTION 0x18000 +#define mmMME_N_ROM_TABLE_BASE 0x7FFE100000ull +#define MME_N_ROM_TABLE_MAX_OFFSET 0x1000 +#define MME_N_ROM_TABLE_SECTION 0x21000 +#define mmMME2_ACC_STM_BASE 0x7FFE121000ull +#define MME2_ACC_STM_MAX_OFFSET 0x1000 +#define MME2_ACC_STM_SECTION 0x1000 +#define mmMME2_ACC_CTI_BASE 0x7FFE122000ull +#define MME2_ACC_CTI_MAX_OFFSET 0x1000 +#define MME2_ACC_CTI_SECTION 0x1000 +#define mmMME2_MME2_ACC_ETF_BASE 0x7FFE123000ull +#define MME2_MME2_ACC_ETF_MAX_OFFSET 0x1000 +#define MME2_MME2_ACC_ETF_SECTION 0x1000 +#define mmMME2_ACC_SPMU_BASE 0x7FFE124000ull +#define MME2_ACC_SPMU_MAX_OFFSET 0x1000 +#define MME2_ACC_SPMU_SECTION 0x1000 +#define mmMME2_ACC_CTI0_BASE 0x7FFE125000ull +#define MME2_ACC_CTI0_MAX_OFFSET 0x1000 +#define MME2_ACC_CTI0_SECTION 0x1000 +#define mmMME2_ACC_CTI1_BASE 0x7FFE126000ull +#define MME2_ACC_CTI1_MAX_OFFSET 0x1000 +#define MME2_ACC_CTI1_SECTION 0x1000 +#define mmMME2_ACC_BMON0_BASE 0x7FFE127000ull +#define MME2_ACC_BMON0_MAX_OFFSET 0x1000 +#define MME2_ACC_BMON0_SECTION 0x9000 +#define mmMME2_ACC_FUNNEL_BASE 0x7FFE130000ull +#define MME2_ACC_FUNNEL_MAX_OFFSET 0x1000 +#define MME2_ACC_FUNNEL_SECTION 0x11000 +#define mmMME2_SBAB_STM_BASE 0x7FFE141000ull +#define MME2_SBAB_STM_MAX_OFFSET 0x1000 +#define MME2_SBAB_STM_SECTION 0x1000 +#define mmMME2_SBAB_CTI_BASE 0x7FFE142000ull +#define MME2_SBAB_CTI_MAX_OFFSET 0x1000 +#define MME2_SBAB_CTI_SECTION 0x1000 +#define mmMME2_SBAB_ETF_BASE 0x7FFE143000ull +#define MME2_SBAB_ETF_MAX_OFFSET 0x1000 +#define MME2_SBAB_ETF_SECTION 0x1000 +#define mmMME2_SBAB_SPMU_BASE 0x7FFE144000ull +#define MME2_SBAB_SPMU_MAX_OFFSET 0x1000 +#define MME2_SBAB_SPMU_SECTION 0x1000 +#define 
mmMME2_SBAB_CTI0_BASE 0x7FFE145000ull +#define MME2_SBAB_CTI0_MAX_OFFSET 0x1000 +#define MME2_SBAB_CTI0_SECTION 0x1000 +#define mmMME2_SBAB_CTI1_BASE 0x7FFE146000ull +#define MME2_SBAB_CTI1_MAX_OFFSET 0x1000 +#define MME2_SBAB_CTI1_SECTION 0x1000 +#define mmMME2_SBAB_BMON0_BASE 0x7FFE147000ull +#define MME2_SBAB_BMON0_MAX_OFFSET 0x1000 +#define MME2_SBAB_BMON0_SECTION 0x1000 +#define mmMME2_SBAB_BMON1_BASE 0x7FFE148000ull +#define MME2_SBAB_BMON1_MAX_OFFSET 0x1000 +#define MME2_SBAB_BMON1_SECTION 0x19000 +#define mmMME2_CTRL_STM_BASE 0x7FFE161000ull +#define MME2_CTRL_STM_MAX_OFFSET 0x1000 +#define MME2_CTRL_STM_SECTION 0x1000 +#define mmMME2_CTRL_CTI_BASE 0x7FFE162000ull +#define MME2_CTRL_CTI_MAX_OFFSET 0x1000 +#define MME2_CTRL_CTI_SECTION 0x1000 +#define mmMME2_CTRL_ETF_BASE 0x7FFE163000ull +#define MME2_CTRL_ETF_MAX_OFFSET 0x1000 +#define MME2_CTRL_ETF_SECTION 0x1000 +#define mmMME2_CTRL_SPMU_BASE 0x7FFE164000ull +#define MME2_CTRL_SPMU_MAX_OFFSET 0x1000 +#define MME2_CTRL_SPMU_SECTION 0x1000 +#define mmMME2_CTRL_CTI0_BASE 0x7FFE165000ull +#define MME2_CTRL_CTI0_MAX_OFFSET 0x1000 +#define MME2_CTRL_CTI0_SECTION 0x1000 +#define mmMME2_CTRL_CTI1_BASE 0x7FFE166000ull +#define MME2_CTRL_CTI1_MAX_OFFSET 0x1000 +#define MME2_CTRL_CTI1_SECTION 0x1000 +#define mmMME2_CTRL_BMON0_BASE 0x7FFE167000ull +#define MME2_CTRL_BMON0_MAX_OFFSET 0x1000 +#define MME2_CTRL_BMON0_SECTION 0x1000 +#define mmMME2_CTRL_BMON1_BASE 0x7FFE168000ull +#define MME2_CTRL_BMON1_MAX_OFFSET 0x1000 +#define MME2_CTRL_BMON1_SECTION 0x39000 +#define mmMME3_ACC_STM_BASE 0x7FFE1A1000ull +#define MME3_ACC_STM_MAX_OFFSET 0x1000 +#define MME3_ACC_STM_SECTION 0x1000 +#define mmMME3_ACC_CTI_BASE 0x7FFE1A2000ull +#define MME3_ACC_CTI_MAX_OFFSET 0x1000 +#define MME3_ACC_CTI_SECTION 0x1000 +#define mmMME3_ACC_ETF_BASE 0x7FFE1A3000ull +#define MME3_ACC_ETF_MAX_OFFSET 0x1000 +#define MME3_ACC_ETF_SECTION 0x1000 +#define mmMME3_ACC_SPMU_BASE 0x7FFE1A4000ull +#define MME3_ACC_SPMU_MAX_OFFSET 0x1000 +#define MME3_ACC_SPMU_SECTION 0x1000 +#define mmMME3_ACC_CTI0_BASE 0x7FFE1A5000ull +#define MME3_ACC_CTI0_MAX_OFFSET 0x1000 +#define MME3_ACC_CTI0_SECTION 0x1000 +#define mmMME3_ACC_CTI1_BASE 0x7FFE1A6000ull +#define MME3_ACC_CTI1_MAX_OFFSET 0x1000 +#define MME3_ACC_CTI1_SECTION 0x1000 +#define mmMME3_ACC_BMON0_BASE 0x7FFE1A7000ull +#define MME3_ACC_BMON0_MAX_OFFSET 0x1000 +#define MME3_ACC_BMON0_SECTION 0x9000 +#define mmMME3_ACC_FUNNEL_BASE 0x7FFE1B0000ull +#define MME3_ACC_FUNNEL_MAX_OFFSET 0x1000 +#define MME3_ACC_FUNNEL_SECTION 0x11000 +#define mmMME3_SBAB_STM_BASE 0x7FFE1C1000ull +#define MME3_SBAB_STM_MAX_OFFSET 0x1000 +#define MME3_SBAB_STM_SECTION 0x1000 +#define mmMME3_SBAB_CTI_BASE 0x7FFE1C2000ull +#define MME3_SBAB_CTI_MAX_OFFSET 0x1000 +#define MME3_SBAB_CTI_SECTION 0x1000 +#define mmMME3_SBAB_ETF_BASE 0x7FFE1C3000ull +#define MME3_SBAB_ETF_MAX_OFFSET 0x1000 +#define MME3_SBAB_ETF_SECTION 0x1000 +#define mmMME3_SBAB_SPMU_BASE 0x7FFE1C4000ull +#define MME3_SBAB_SPMU_MAX_OFFSET 0x1000 +#define MME3_SBAB_SPMU_SECTION 0x1000 +#define mmMME3_SBAB_CTI0_BASE 0x7FFE1C5000ull +#define MME3_SBAB_CTI0_MAX_OFFSET 0x1000 +#define MME3_SBAB_CTI0_SECTION 0x1000 +#define mmMME3_SBAB_CTI1_BASE 0x7FFE1C6000ull +#define MME3_SBAB_CTI1_MAX_OFFSET 0x1000 +#define MME3_SBAB_CTI1_SECTION 0x1000 +#define mmMME3_SBAB_BMON0_BASE 0x7FFE1C7000ull +#define MME3_SBAB_BMON0_MAX_OFFSET 0x1000 +#define MME3_SBAB_BMON0_SECTION 0x1000 +#define mmMME3_SBAB_BMON1_BASE 0x7FFE1C8000ull +#define MME3_SBAB_BMON1_MAX_OFFSET 0x1000 +#define MME3_SBAB_BMON1_SECTION 0x19000 
+#define mmMME3_CTRL_STM_BASE 0x7FFE1E1000ull +#define MME3_CTRL_STM_MAX_OFFSET 0x1000 +#define MME3_CTRL_STM_SECTION 0x1000 +#define mmMME3_CTRL_CTI_BASE 0x7FFE1E2000ull +#define MME3_CTRL_CTI_MAX_OFFSET 0x1000 +#define MME3_CTRL_CTI_SECTION 0x1000 +#define mmMME3_CTRL_ETF_BASE 0x7FFE1E3000ull +#define MME3_CTRL_ETF_MAX_OFFSET 0x1000 +#define MME3_CTRL_ETF_SECTION 0x1000 +#define mmMME3_CTRL_SPMU_BASE 0x7FFE1E4000ull +#define MME3_CTRL_SPMU_MAX_OFFSET 0x1000 +#define MME3_CTRL_SPMU_SECTION 0x1000 +#define mmMME3_CTRL_CTI0_BASE 0x7FFE1E5000ull +#define MME3_CTRL_CTI0_MAX_OFFSET 0x1000 +#define MME3_CTRL_CTI0_SECTION 0x1000 +#define mmMME3_CTRL_CTI1_BASE 0x7FFE1E6000ull +#define MME3_CTRL_CTI1_MAX_OFFSET 0x1000 +#define MME3_CTRL_CTI1_SECTION 0x1000 +#define mmMME3_CTRL_BMON0_BASE 0x7FFE1E7000ull +#define MME3_CTRL_BMON0_MAX_OFFSET 0x1000 +#define MME3_CTRL_BMON0_SECTION 0x1000 +#define mmMME3_CTRL_BMON1_BASE 0x7FFE1E8000ull +#define MME3_CTRL_BMON1_MAX_OFFSET 0x1000 +#define MME3_CTRL_BMON1_SECTION 0x18000 +#define mmIC_ROM_TABLE_BASE 0x7FFE200000ull +#define IC_ROM_TABLE_MAX_OFFSET 0x1000 +#define IC_ROM_TABLE_SECTION 0x1000 +#define mmSRAM_Y0_X0_FUNNEL_BASE 0x7FFE201000ull +#define SRAM_Y0_X0_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y0_X0_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y0_X1_FUNNEL_BASE 0x7FFE209000ull +#define SRAM_Y0_X1_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y0_X1_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y0_X2_FUNNEL_BASE 0x7FFE211000ull +#define SRAM_Y0_X2_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y0_X2_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y0_X3_FUNNEL_BASE 0x7FFE219000ull +#define SRAM_Y0_X3_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y0_X3_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y0_X4_FUNNEL_BASE 0x7FFE221000ull +#define SRAM_Y0_X4_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y0_X4_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y0_X5_FUNNEL_BASE 0x7FFE229000ull +#define SRAM_Y0_X5_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y0_X5_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y0_X6_FUNNEL_BASE 0x7FFE231000ull +#define SRAM_Y0_X6_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y0_X6_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y0_X7_FUNNEL_BASE 0x7FFE239000ull +#define SRAM_Y0_X7_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y0_X7_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y1_X0_FUNNEL_BASE 0x7FFE241000ull +#define SRAM_Y1_X0_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y1_X0_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y1_X1_FUNNEL_BASE 0x7FFE249000ull +#define SRAM_Y1_X1_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y1_X1_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y1_X2_FUNNEL_BASE 0x7FFE251000ull +#define SRAM_Y1_X2_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y1_X2_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y1_X3_FUNNEL_BASE 0x7FFE259000ull +#define SRAM_Y1_X3_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y1_X3_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y1_X4_FUNNEL_BASE 0x7FFE261000ull +#define SRAM_Y1_X4_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y1_X4_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y1_X5_FUNNEL_BASE 0x7FFE269000ull +#define SRAM_Y1_X5_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y1_X5_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y1_X6_FUNNEL_BASE 0x7FFE271000ull +#define SRAM_Y1_X6_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y1_X6_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y1_X7_FUNNEL_BASE 0x7FFE279000ull +#define SRAM_Y1_X7_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y1_X7_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y2_X0_FUNNEL_BASE 0x7FFE281000ull +#define SRAM_Y2_X0_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y2_X0_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y2_X1_FUNNEL_BASE 0x7FFE289000ull +#define 
SRAM_Y2_X1_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y2_X1_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y2_X2_FUNNEL_BASE 0x7FFE291000ull +#define SRAM_Y2_X2_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y2_X2_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y2_X3_FUNNEL_BASE 0x7FFE299000ull +#define SRAM_Y2_X3_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y2_X3_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y2_X4_FUNNEL_BASE 0x7FFE2A1000ull +#define SRAM_Y2_X4_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y2_X4_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y2_X5_FUNNEL_BASE 0x7FFE2A9000ull +#define SRAM_Y2_X5_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y2_X5_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y2_X6_FUNNEL_BASE 0x7FFE2B1000ull +#define SRAM_Y2_X6_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y2_X6_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y2_X7_FUNNEL_BASE 0x7FFE2B9000ull +#define SRAM_Y2_X7_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y2_X7_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y3_X0_FUNNEL_BASE 0x7FFE2C1000ull +#define SRAM_Y3_X0_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y3_X0_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y3_X1_FUNNEL_BASE 0x7FFE2C9000ull +#define SRAM_Y3_X1_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y3_X1_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y3_X2_FUNNEL_BASE 0x7FFE2D1000ull +#define SRAM_Y3_X2_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y3_X2_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y3_X4_FUNNEL_BASE 0x7FFE2D9000ull +#define SRAM_Y3_X4_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y3_X4_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y3_X3_FUNNEL_BASE 0x7FFE2E1000ull +#define SRAM_Y3_X3_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y3_X3_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y3_X5_FUNNEL_BASE 0x7FFE2E9000ull +#define SRAM_Y3_X5_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y3_X5_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y3_X6_FUNNEL_BASE 0x7FFE2F1000ull +#define SRAM_Y3_X6_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y3_X6_FUNNEL_SECTION 0x8000 +#define mmSRAM_Y3_X7_FUNNEL_BASE 0x7FFE2F9000ull +#define SRAM_Y3_X7_FUNNEL_MAX_OFFSET 0x1000 +#define SRAM_Y3_X7_FUNNEL_SECTION 0x7000 +#define mmIF_ROM_TABLE_BASE 0x7FFE300000ull +#define IF_ROM_TABLE_MAX_OFFSET 0x1000 +#define IF_ROM_TABLE_SECTION 0x1000 +#define mmSIF_FUNNEL_0_BASE 0x7FFE301000ull +#define SIF_FUNNEL_0_MAX_OFFSET 0x1000 +#define SIF_FUNNEL_0_SECTION 0x10000 +#define mmSIF_FUNNEL_1_BASE 0x7FFE311000ull +#define SIF_FUNNEL_1_MAX_OFFSET 0x1000 +#define SIF_FUNNEL_1_SECTION 0x10000 +#define mmSIF_FUNNEL_2_BASE 0x7FFE321000ull +#define SIF_FUNNEL_2_MAX_OFFSET 0x1000 +#define SIF_FUNNEL_2_SECTION 0x10000 +#define mmSIF_FUNNEL_3_BASE 0x7FFE331000ull +#define SIF_FUNNEL_3_MAX_OFFSET 0x1000 +#define SIF_FUNNEL_3_SECTION 0x10000 +#define mmSIF_FUNNEL_4_BASE 0x7FFE341000ull +#define SIF_FUNNEL_4_MAX_OFFSET 0x1000 +#define SIF_FUNNEL_4_SECTION 0x10000 +#define mmSIF_FUNNEL_5_BASE 0x7FFE351000ull +#define SIF_FUNNEL_5_MAX_OFFSET 0x1000 +#define SIF_FUNNEL_5_SECTION 0x10000 +#define mmSIF_FUNNEL_6_BASE 0x7FFE361000ull +#define SIF_FUNNEL_6_MAX_OFFSET 0x1000 +#define SIF_FUNNEL_6_SECTION 0x10000 +#define mmSIF_FUNNEL_7_BASE 0x7FFE371000ull +#define SIF_FUNNEL_7_MAX_OFFSET 0x1000 +#define SIF_FUNNEL_7_SECTION 0x10000 +#define mmNIF_FUNNEL_0_BASE 0x7FFE381000ull +#define NIF_FUNNEL_0_MAX_OFFSET 0x1000 +#define NIF_FUNNEL_0_SECTION 0x10000 +#define mmNIF_FUNNEL_1_BASE 0x7FFE391000ull +#define NIF_FUNNEL_1_MAX_OFFSET 0x1000 +#define NIF_FUNNEL_1_SECTION 0x10000 +#define mmNIF_FUNNEL_2_BASE 0x7FFE3A1000ull +#define NIF_FUNNEL_2_MAX_OFFSET 0x1000 +#define NIF_FUNNEL_2_SECTION 0x10000 +#define mmNIF_FUNNEL_3_BASE 0x7FFE3B1000ull +#define NIF_FUNNEL_3_MAX_OFFSET 
0x1000 +#define NIF_FUNNEL_3_SECTION 0x10000 +#define mmNIF_FUNNEL_4_BASE 0x7FFE3C1000ull +#define NIF_FUNNEL_4_MAX_OFFSET 0x1000 +#define NIF_FUNNEL_4_SECTION 0x10000 +#define mmNIF_FUNNEL_5_BASE 0x7FFE3D1000ull +#define NIF_FUNNEL_5_MAX_OFFSET 0x1000 +#define NIF_FUNNEL_5_SECTION 0x10000 +#define mmNIF_FUNNEL_6_BASE 0x7FFE3E1000ull +#define NIF_FUNNEL_6_MAX_OFFSET 0x1000 +#define NIF_FUNNEL_6_SECTION 0x10000 +#define mmNIF_FUNNEL_7_BASE 0x7FFE3F1000ull +#define NIF_FUNNEL_7_MAX_OFFSET 0x1000 +#define NIF_FUNNEL_7_SECTION 0xF000 +#define mmDMA_IF_ROM_TABLE_BASE 0x7FFE400000ull +#define DMA_IF_ROM_TABLE_MAX_OFFSET 0x1000 +#define DMA_IF_ROM_TABLE_SECTION 0x1000 +#define mmDMA_IF_W_S_STM_BASE 0x7FFE401000ull +#define DMA_IF_W_S_STM_MAX_OFFSET 0x1000 +#define DMA_IF_W_S_STM_SECTION 0x1000 +#define mmDMA_IF_W_S_CTI_BASE 0x7FFE402000ull +#define DMA_IF_W_S_CTI_MAX_OFFSET 0x1000 +#define DMA_IF_W_S_CTI_SECTION 0x1000 +#define mmDMA_IF_W_S_ETF_BASE 0x7FFE403000ull +#define DMA_IF_W_S_ETF_MAX_OFFSET 0x1000 +#define DMA_IF_W_S_ETF_SECTION 0x2000 +#define mmDMA_IF_W_S_BMON0_CTI_BASE 0x7FFE405000ull +#define DMA_IF_W_S_BMON0_CTI_MAX_OFFSET 0x1000 +#define DMA_IF_W_S_BMON0_CTI_SECTION 0x1000 +#define mmDMA_IF_W_S_BMON1_CTI_BASE 0x7FFE406000ull +#define DMA_IF_W_S_BMON1_CTI_MAX_OFFSET 0x1000 +#define DMA_IF_W_S_BMON1_CTI_SECTION 0x1000 +#define mmDMA_IF_W_S_HBM0_WR_BMON_BASE 0x7FFE407000ull +#define DMA_IF_W_S_HBM0_WR_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_W_S_HBM0_WR_BMON_SECTION 0x1000 +#define mmDMA_IF_W_S_HBM0_RD_BMON_BASE 0x7FFE408000ull +#define DMA_IF_W_S_HBM0_RD_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_W_S_HBM0_RD_BMON_SECTION 0x1000 +#define mmDMA_IF_W_S_HBM1_WR_BMON_BASE 0x7FFE409000ull +#define DMA_IF_W_S_HBM1_WR_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_W_S_HBM1_WR_BMON_SECTION 0x1000 +#define mmDMA_IF_W_S_HBM1_RD_BMON_BASE 0x7FFE40A000ull +#define DMA_IF_W_S_HBM1_RD_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_W_S_HBM1_RD_BMON_SECTION 0x1000 +#define mmDMA_IF_W_S_SOB_WR_BMON_BASE 0x7FFE40B000ull +#define DMA_IF_W_S_SOB_WR_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_W_S_SOB_WR_BMON_SECTION 0x4000 +#define mmDMA_IF_W_S_FUNNEL_BASE 0x7FFE40F000ull +#define DMA_IF_W_S_FUNNEL_MAX_OFFSET 0x1000 +#define DMA_IF_W_S_FUNNEL_SECTION 0x12000 +#define mmDMA_IF_E_S_STM_BASE 0x7FFE421000ull +#define DMA_IF_E_S_STM_MAX_OFFSET 0x1000 +#define DMA_IF_E_S_STM_SECTION 0x1000 +#define mmDMA_IF_E_S_CTI_BASE 0x7FFE422000ull +#define DMA_IF_E_S_CTI_MAX_OFFSET 0x1000 +#define DMA_IF_E_S_CTI_SECTION 0x1000 +#define mmDMA_IF_E_S_ETF_BASE 0x7FFE423000ull +#define DMA_IF_E_S_ETF_MAX_OFFSET 0x1000 +#define DMA_IF_E_S_ETF_SECTION 0x2000 +#define mmDMA_IF_E_S_BMON0_CTI_BASE 0x7FFE425000ull +#define DMA_IF_E_S_BMON0_CTI_MAX_OFFSET 0x1000 +#define DMA_IF_E_S_BMON0_CTI_SECTION 0x1000 +#define mmDMA_IF_E_S_BMON1_CTI_BASE 0x7FFE426000ull +#define DMA_IF_E_S_BMON1_CTI_MAX_OFFSET 0x1000 +#define DMA_IF_E_S_BMON1_CTI_SECTION 0x1000 +#define mmDMA_IF_E_S_HBM0_WR_BMON_BASE 0x7FFE427000ull +#define DMA_IF_E_S_HBM0_WR_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_E_S_HBM0_WR_BMON_SECTION 0x1000 +#define mmDMA_IF_E_S_HBM0_RD_BMON_BASE 0x7FFE428000ull +#define DMA_IF_E_S_HBM0_RD_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_E_S_HBM0_RD_BMON_SECTION 0x1000 +#define mmDMA_IF_E_S_HBM1_WR_BMON_BASE 0x7FFE429000ull +#define DMA_IF_E_S_HBM1_WR_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_E_S_HBM1_WR_BMON_SECTION 0x1000 +#define mmDMA_IF_E_S_HBM1_RD_BMON_BASE 0x7FFE42A000ull +#define DMA_IF_E_S_HBM1_RD_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_E_S_HBM1_RD_BMON_SECTION 0x1000 
+#define mmDMA_IF_E_S_SOB_WR_BMON_BASE 0x7FFE42B000ull +#define DMA_IF_E_S_SOB_WR_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_E_S_SOB_WR_BMON_SECTION 0x4000 +#define mmDMA_IF_E_S_FUNNEL_BASE 0x7FFE42F000ull +#define DMA_IF_E_S_FUNNEL_MAX_OFFSET 0x1000 +#define DMA_IF_E_S_FUNNEL_SECTION 0x12000 +#define mmDMA_IF_W_N_STM_BASE 0x7FFE441000ull +#define DMA_IF_W_N_STM_MAX_OFFSET 0x1000 +#define DMA_IF_W_N_STM_SECTION 0x1000 +#define mmDMA_IF_W_N_CTI_BASE 0x7FFE442000ull +#define DMA_IF_W_N_CTI_MAX_OFFSET 0x1000 +#define DMA_IF_W_N_CTI_SECTION 0x1000 +#define mmDMA_IF_W_N_ETF_BASE 0x7FFE443000ull +#define DMA_IF_W_N_ETF_MAX_OFFSET 0x1000 +#define DMA_IF_W_N_ETF_SECTION 0x2000 +#define mmDMA_IF_W_N_BMON0_CTI_BASE 0x7FFE445000ull +#define DMA_IF_W_N_BMON0_CTI_MAX_OFFSET 0x1000 +#define DMA_IF_W_N_BMON0_CTI_SECTION 0x1000 +#define mmDMA_IF_W_N_BMON1_CTI_BASE 0x7FFE446000ull +#define DMA_IF_W_N_BMON1_CTI_MAX_OFFSET 0x1000 +#define DMA_IF_W_N_BMON1_CTI_SECTION 0x1000 +#define mmDMA_IF_W_N_HBM0_WR_BMON_BASE 0x7FFE447000ull +#define DMA_IF_W_N_HBM0_WR_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_W_N_HBM0_WR_BMON_SECTION 0x1000 +#define mmDMA_IF_W_N_HBM0_RD_BMON_BASE 0x7FFE448000ull +#define DMA_IF_W_N_HBM0_RD_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_W_N_HBM0_RD_BMON_SECTION 0x1000 +#define mmDMA_IF_W_N_HBM1_WR_BMON_BASE 0x7FFE449000ull +#define DMA_IF_W_N_HBM1_WR_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_W_N_HBM1_WR_BMON_SECTION 0x1000 +#define mmDMA_IF_W_N_HBM1_RD_BMON_BASE 0x7FFE44A000ull +#define DMA_IF_W_N_HBM1_RD_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_W_N_HBM1_RD_BMON_SECTION 0x1000 +#define mmDMA_IF_W_N_SOB_WR_BMON_BASE 0x7FFE44B000ull +#define DMA_IF_W_N_SOB_WR_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_W_N_SOB_WR_BMON_SECTION 0x4000 +#define mmDMA_IF_W_N_FUNNEL_BASE 0x7FFE44F000ull +#define DMA_IF_W_N_FUNNEL_MAX_OFFSET 0x1000 +#define DMA_IF_W_N_FUNNEL_SECTION 0x12000 +#define mmDMA_IF_E_N_STM_BASE 0x7FFE461000ull +#define DMA_IF_E_N_STM_MAX_OFFSET 0x1000 +#define DMA_IF_E_N_STM_SECTION 0x1000 +#define mmDMA_IF_E_N_CTI_BASE 0x7FFE462000ull +#define DMA_IF_E_N_CTI_MAX_OFFSET 0x1000 +#define DMA_IF_E_N_CTI_SECTION 0x1000 +#define mmDMA_IF_E_N_ETF_BASE 0x7FFE463000ull +#define DMA_IF_E_N_ETF_MAX_OFFSET 0x1000 +#define DMA_IF_E_N_ETF_SECTION 0x2000 +#define mmDMA_IF_E_N_BMON0_CTI_BASE 0x7FFE465000ull +#define DMA_IF_E_N_BMON0_CTI_MAX_OFFSET 0x1000 +#define DMA_IF_E_N_BMON0_CTI_SECTION 0x1000 +#define mmDMA_IF_E_N_BMON1_CTI_BASE 0x7FFE466000ull +#define DMA_IF_E_N_BMON1_CTI_MAX_OFFSET 0x1000 +#define DMA_IF_E_N_BMON1_CTI_SECTION 0x1000 +#define mmDMA_IF_E_N_HBM0_WR_BMON_BASE 0x7FFE467000ull +#define DMA_IF_E_N_HBM0_WR_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_E_N_HBM0_WR_BMON_SECTION 0x1000 +#define mmDMA_IF_E_N_HBM0_RD_BMON_BASE 0x7FFE468000ull +#define DMA_IF_E_N_HBM0_RD_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_E_N_HBM0_RD_BMON_SECTION 0x1000 +#define mmDMA_IF_E_N_HBM1_WR_BMON_BASE 0x7FFE469000ull +#define DMA_IF_E_N_HBM1_WR_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_E_N_HBM1_WR_BMON_SECTION 0x1000 +#define mmDMA_IF_E_N_HBM1_RD_BMON_BASE 0x7FFE46A000ull +#define DMA_IF_E_N_HBM1_RD_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_E_N_HBM1_RD_BMON_SECTION 0x1000 +#define mmDMA_IF_E_N_SOB_WR_BMON_BASE 0x7FFE46B000ull +#define DMA_IF_E_N_SOB_WR_BMON_MAX_OFFSET 0x1000 +#define DMA_IF_E_N_SOB_WR_BMON_SECTION 0x4000 +#define mmDMA_IF_E_N_FUNNEL_BASE 0x7FFE46F000ull +#define DMA_IF_E_N_FUNNEL_MAX_OFFSET 0x1000 +#define DMA_IF_E_N_FUNNEL_SECTION 0x11000 +#define mmCPU_ROM_TABLE_BASE 0x7FFE480000ull +#define CPU_ROM_TABLE_MAX_OFFSET 0x1000 
+#define CPU_ROM_TABLE_SECTION 0x1000 +#define mmCPU_ETF_0_BASE 0x7FFE481000ull +#define CPU_ETF_0_MAX_OFFSET 0x1000 +#define CPU_ETF_0_SECTION 0x1000 +#define mmCPU_ETF_1_BASE 0x7FFE482000ull +#define CPU_ETF_1_MAX_OFFSET 0x1000 +#define CPU_ETF_1_SECTION 0x2000 +#define mmCPU_CTI_BASE 0x7FFE484000ull +#define CPU_CTI_MAX_OFFSET 0x1000 +#define CPU_CTI_SECTION 0x1000 +#define mmCPU_FUNNEL_BASE 0x7FFE485000ull +#define CPU_FUNNEL_MAX_OFFSET 0x1000 +#define CPU_FUNNEL_SECTION 0x1000 +#define mmCPU_STM_BASE 0x7FFE486000ull +#define CPU_STM_MAX_OFFSET 0x1000 +#define CPU_STM_SECTION 0x1000 +#define mmCPU_CTI_TRACE_BASE 0x7FFE487000ull +#define CPU_CTI_TRACE_MAX_OFFSET 0x1000 +#define CPU_CTI_TRACE_SECTION 0x1000 +#define mmCPU_ETF_TRACE_BASE 0x7FFE488000ull +#define CPU_ETF_TRACE_MAX_OFFSET 0x1000 +#define CPU_ETF_TRACE_SECTION 0x1000 +#define mmCPU_WR_BMON_BASE 0x7FFE489000ull +#define CPU_WR_BMON_MAX_OFFSET 0x1000 +#define CPU_WR_BMON_SECTION 0x1000 +#define mmCPU_RD_BMON_BASE 0x7FFE48A000ull +#define CPU_RD_BMON_MAX_OFFSET 0x1000 +#define CPU_RD_BMON_SECTION 0x76000 +#define mmDMA_ROM_TABLE_BASE 0x7FFE500000ull +#define DMA_ROM_TABLE_MAX_OFFSET 0x1000 +#define DMA_ROM_TABLE_SECTION 0x1000 +#define mmDMA_CH_0_CS_STM_BASE 0x7FFE501000ull +#define DMA_CH_0_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_0_CS_STM_SECTION 0x1000 +#define mmDMA_CH_0_CS_CTI_BASE 0x7FFE502000ull +#define DMA_CH_0_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_0_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_0_CS_ETF_BASE 0x7FFE503000ull +#define DMA_CH_0_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_0_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_0_CS_SPMU_BASE 0x7FFE504000ull +#define DMA_CH_0_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_0_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_0_BMON_CTI_BASE 0x7FFE505000ull +#define DMA_CH_0_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_0_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_0_USER_CTI_BASE 0x7FFE506000ull +#define DMA_CH_0_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_0_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_0_BMON_0_BASE 0x7FFE507000ull +#define DMA_CH_0_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_0_BMON_0_SECTION 0x1000 +#define mmDMA_CH_0_BMON_1_BASE 0x7FFE508000ull +#define DMA_CH_0_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_0_BMON_1_SECTION 0x19000 +#define mmDMA_CH_1_CS_STM_BASE 0x7FFE521000ull +#define DMA_CH_1_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_1_CS_STM_SECTION 0x1000 +#define mmDMA_CH_1_CS_CTI_BASE 0x7FFE522000ull +#define DMA_CH_1_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_1_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_1_CS_ETF_BASE 0x7FFE523000ull +#define DMA_CH_1_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_1_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_1_CS_SPMU_BASE 0x7FFE524000ull +#define DMA_CH_1_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_1_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_1_BMON_CTI_BASE 0x7FFE525000ull +#define DMA_CH_1_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_1_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_1_USER_CTI_BASE 0x7FFE526000ull +#define DMA_CH_1_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_1_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_1_BMON_0_BASE 0x7FFE527000ull +#define DMA_CH_1_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_1_BMON_0_SECTION 0x1000 +#define mmDMA_CH_1_BMON_1_BASE 0x7FFE528000ull +#define DMA_CH_1_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_1_BMON_1_SECTION 0x19000 +#define mmDMA_CH_2_CS_STM_BASE 0x7FFE541000ull +#define DMA_CH_2_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_2_CS_STM_SECTION 0x1000 +#define mmDMA_CH_2_CS_CTI_BASE 0x7FFE542000ull +#define DMA_CH_2_CS_CTI_MAX_OFFSET 
0x1000 +#define DMA_CH_2_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_2_CS_ETF_BASE 0x7FFE543000ull +#define DMA_CH_2_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_2_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_2_CS_SPMU_BASE 0x7FFE544000ull +#define DMA_CH_2_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_2_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_2_BMON_CTI_BASE 0x7FFE545000ull +#define DMA_CH_2_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_2_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_2_USER_CTI_BASE 0x7FFE546000ull +#define DMA_CH_2_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_2_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_2_BMON_0_BASE 0x7FFE547000ull +#define DMA_CH_2_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_2_BMON_0_SECTION 0x1000 +#define mmDMA_CH_2_BMON_1_BASE 0x7FFE548000ull +#define DMA_CH_2_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_2_BMON_1_SECTION 0x19000 +#define mmDMA_CH_3_CS_STM_BASE 0x7FFE561000ull +#define DMA_CH_3_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_3_CS_STM_SECTION 0x1000 +#define mmDMA_CH_3_CS_CTI_BASE 0x7FFE562000ull +#define DMA_CH_3_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_3_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_3_CS_ETF_BASE 0x7FFE563000ull +#define DMA_CH_3_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_3_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_3_CS_SPMU_BASE 0x7FFE564000ull +#define DMA_CH_3_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_3_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_3_BMON_CTI_BASE 0x7FFE565000ull +#define DMA_CH_3_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_3_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_3_USER_CTI_BASE 0x7FFE566000ull +#define DMA_CH_3_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_3_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_3_BMON_0_BASE 0x7FFE567000ull +#define DMA_CH_3_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_3_BMON_0_SECTION 0x1000 +#define mmDMA_CH_3_BMON_1_BASE 0x7FFE568000ull +#define DMA_CH_3_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_3_BMON_1_SECTION 0x19000 +#define mmDMA_CH_4_CS_STM_BASE 0x7FFE581000ull +#define DMA_CH_4_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_4_CS_STM_SECTION 0x1000 +#define mmDMA_CH_4_CS_CTI_BASE 0x7FFE582000ull +#define DMA_CH_4_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_4_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_4_CS_ETF_BASE 0x7FFE583000ull +#define DMA_CH_4_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_4_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_4_CS_SPMU_BASE 0x7FFE584000ull +#define DMA_CH_4_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_4_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_4_BMON_CTI_BASE 0x7FFE585000ull +#define DMA_CH_4_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_4_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_4_USER_CTI_BASE 0x7FFE586000ull +#define DMA_CH_4_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_4_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_4_BMON_0_BASE 0x7FFE587000ull +#define DMA_CH_4_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_4_BMON_0_SECTION 0x1000 +#define mmDMA_CH_4_BMON_1_BASE 0x7FFE588000ull +#define DMA_CH_4_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_4_BMON_1_SECTION 0x19000 +#define mmDMA_CH_5_CS_STM_BASE 0x7FFE5A1000ull +#define DMA_CH_5_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_5_CS_STM_SECTION 0x1000 +#define mmDMA_CH_5_CS_CTI_BASE 0x7FFE5A2000ull +#define DMA_CH_5_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_5_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_5_CS_ETF_BASE 0x7FFE5A3000ull +#define DMA_CH_5_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_5_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_5_CS_SPMU_BASE 0x7FFE5A4000ull +#define DMA_CH_5_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_5_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_5_BMON_CTI_BASE 0x7FFE5A5000ull 
+#define DMA_CH_5_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_5_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_5_USER_CTI_BASE 0x7FFE5A6000ull +#define DMA_CH_5_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_5_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_5_BMON_0_BASE 0x7FFE5A7000ull +#define DMA_CH_5_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_5_BMON_0_SECTION 0x1000 +#define mmDMA_CH_5_BMON_1_BASE 0x7FFE5A8000ull +#define DMA_CH_5_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_5_BMON_1_SECTION 0x19000 +#define mmDMA_CH_6_CS_STM_BASE 0x7FFE5C1000ull +#define DMA_CH_6_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_6_CS_STM_SECTION 0x1000 +#define mmDMA_CH_6_CS_CTI_BASE 0x7FFE5C2000ull +#define DMA_CH_6_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_6_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_6_CS_ETF_BASE 0x7FFE5C3000ull +#define DMA_CH_6_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_6_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_6_CS_SPMU_BASE 0x7FFE5C4000ull +#define DMA_CH_6_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_6_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_6_BMON_CTI_BASE 0x7FFE5C5000ull +#define DMA_CH_6_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_6_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_6_USER_CTI_BASE 0x7FFE5C6000ull +#define DMA_CH_6_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_6_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_6_BMON_0_BASE 0x7FFE5C7000ull +#define DMA_CH_6_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_6_BMON_0_SECTION 0x1000 +#define mmDMA_CH_6_BMON_1_BASE 0x7FFE5C8000ull +#define DMA_CH_6_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_6_BMON_1_SECTION 0x19000 +#define mmDMA_CH_7_CS_STM_BASE 0x7FFE5E1000ull +#define DMA_CH_7_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_7_CS_STM_SECTION 0x1000 +#define mmDMA_CH_7_CS_CTI_BASE 0x7FFE5E2000ull +#define DMA_CH_7_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_7_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_7_CS_ETF_BASE 0x7FFE5E3000ull +#define DMA_CH_7_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_7_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_7_CS_SPMU_BASE 0x7FFE5E4000ull +#define DMA_CH_7_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_7_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_7_BMON_CTI_BASE 0x7FFE5E5000ull +#define DMA_CH_7_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_7_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_7_USER_CTI_BASE 0x7FFE5E6000ull +#define DMA_CH_7_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_7_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_7_BMON_0_BASE 0x7FFE5E7000ull +#define DMA_CH_7_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_7_BMON_0_SECTION 0x1000 +#define mmDMA_CH_7_BMON_1_BASE 0x7FFE5E8000ull +#define DMA_CH_7_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_7_BMON_1_SECTION 0x18000 +#define mmNIC_TPC_FUNNEL_W_S_BASE 0x7FFE600000ull +#define NIC_TPC_FUNNEL_W_S_MAX_OFFSET 0x1000 +#define NIC_TPC_FUNNEL_W_S_SECTION 0x80000 +#define mmNIC_TPC_FUNNEL_E_S_BASE 0x7FFE680000ull +#define NIC_TPC_FUNNEL_E_S_MAX_OFFSET 0x1000 +#define NIC_TPC_FUNNEL_E_S_SECTION 0x80000 +#define mmNIC_TPC_FUNNEL_W_N_BASE 0x7FFE700000ull +#define NIC_TPC_FUNNEL_W_N_MAX_OFFSET 0x1000 +#define NIC_TPC_FUNNEL_W_N_SECTION 0x80000 +#define mmNIC_TPC_FUNNEL_E_N_BASE 0x7FFE780000ull +#define NIC_TPC_FUNNEL_E_N_MAX_OFFSET 0x1000 +#define NIC_TPC_FUNNEL_E_N_SECTION 0x80000 +#define mmCA53_BASE 0x7FFE800000ull +#define CA53_MAX_OFFSET 0x141000 +#define CA53_SECTION 0x400000 +#define mmPCI_ROM_TABLE_BASE 0x7FFEC00000ull +#define PCI_ROM_TABLE_MAX_OFFSET 0x1000 +#define PCI_ROM_TABLE_SECTION 0x1000 +#define mmPCIE_STM_BASE 0x7FFEC01000ull +#define PCIE_STM_MAX_OFFSET 0x1000 +#define PCIE_STM_SECTION 0x1000 +#define mmPCIE_ETF_BASE 
0x7FFEC02000ull +#define PCIE_ETF_MAX_OFFSET 0x1000 +#define PCIE_ETF_SECTION 0x1000 +#define mmPCIE_CTI_0_BASE 0x7FFEC03000ull +#define PCIE_CTI_0_MAX_OFFSET 0x1000 +#define PCIE_CTI_0_SECTION 0x1000 +#define mmPCIE_SPMU_BASE 0x7FFEC04000ull +#define PCIE_SPMU_MAX_OFFSET 0x1000 +#define PCIE_SPMU_SECTION 0x1000 +#define mmPCIE_CTI_1_BASE 0x7FFEC05000ull +#define PCIE_CTI_1_MAX_OFFSET 0x1000 +#define PCIE_CTI_1_SECTION 0x1000 +#define mmPCIE_FUNNEL_BASE 0x7FFEC06000ull +#define PCIE_FUNNEL_MAX_OFFSET 0x1000 +#define PCIE_FUNNEL_SECTION 0x1000 +#define mmPCIE_BMON_MSTR_WR_BASE 0x7FFEC07000ull +#define PCIE_BMON_MSTR_WR_MAX_OFFSET 0x1000 +#define PCIE_BMON_MSTR_WR_SECTION 0x1000 +#define mmPCIE_BMON_MSTR_RD_BASE 0x7FFEC08000ull +#define PCIE_BMON_MSTR_RD_MAX_OFFSET 0x1000 +#define PCIE_BMON_MSTR_RD_SECTION 0x1000 +#define mmPCIE_BMON_SLV_WR_BASE 0x7FFEC09000ull +#define PCIE_BMON_SLV_WR_MAX_OFFSET 0x1000 +#define PCIE_BMON_SLV_WR_SECTION 0x1000 +#define mmPCIE_BMON_SLV_RD_BASE 0x7FFEC0A000ull +#define PCIE_BMON_SLV_RD_MAX_OFFSET 0x1000 +#define PCIE_BMON_SLV_RD_SECTION 0x7000 +#define mmMMU_CS_STM_BASE 0x7FFEC11000ull +#define MMU_CS_STM_MAX_OFFSET 0x1000 +#define MMU_CS_STM_SECTION 0x1000 +#define mmMMU_CS_CTI_BASE 0x7FFEC12000ull +#define MMU_CS_CTI_MAX_OFFSET 0x1000 +#define MMU_CS_CTI_SECTION 0x1000 +#define mmMMU_CS_ETF_BASE 0x7FFEC13000ull +#define MMU_CS_ETF_MAX_OFFSET 0x1000 +#define MMU_CS_ETF_SECTION 0x1000 +#define mmMMU_CS_SPMU_BASE 0x7FFEC14000ull +#define MMU_CS_SPMU_MAX_OFFSET 0x1000 +#define MMU_CS_SPMU_SECTION 0x1000 +#define mmMMU_BMON_CTI_BASE 0x7FFEC15000ull +#define MMU_BMON_CTI_MAX_OFFSET 0x1000 +#define MMU_BMON_CTI_SECTION 0x1000 +#define mmMMU_USER_CTI_BASE 0x7FFEC16000ull +#define MMU_USER_CTI_MAX_OFFSET 0x1000 +#define MMU_USER_CTI_SECTION 0x1000 +#define mmMMU_BMON_0_BASE 0x7FFEC17000ull +#define MMU_BMON_0_MAX_OFFSET 0x1000 +#define MMU_BMON_0_SECTION 0x1000 +#define mmMMU_BMON_1_BASE 0x7FFEC18000ull +#define MMU_BMON_1_MAX_OFFSET 0x1000 +#define MMU_BMON_1_SECTION 0x28000 +#define mmPSOC_CTI_BASE 0x7FFEC40000ull +#define PSOC_CTI_MAX_OFFSET 0x1000 +#define PSOC_CTI_SECTION 0x1000 +#define mmPSOC_STM_BASE 0x7FFEC41000ull +#define PSOC_STM_MAX_OFFSET 0x1000 +#define PSOC_STM_SECTION 0x1000 +#define mmPSOC_FUNNEL_BASE 0x7FFEC42000ull +#define PSOC_FUNNEL_MAX_OFFSET 0x1000 +#define PSOC_FUNNEL_SECTION 0x1000 +#define mmPSOC_ETR_BASE 0x7FFEC43000ull +#define PSOC_ETR_MAX_OFFSET 0x1000 +#define PSOC_ETR_SECTION 0x1000 +#define mmPSOC_ETF_BASE 0x7FFEC44000ull +#define PSOC_ETF_MAX_OFFSET 0x1000 +#define PSOC_ETF_SECTION 0x1000 +#define mmPSOC_TS_CTI_BASE 0x7FFEC45000ull +#define PSOC_TS_CTI_MAX_OFFSET 0x1000 +#define PSOC_TS_CTI_SECTION 0xB000 +#define mmTOP_ROM_TABLE_BASE 0x7FFEC50000ull +#define TOP_ROM_TABLE_MAX_OFFSET 0x1000 +#define TOP_ROM_TABLE_SECTION 0x70000 +#define mmNIC0_ROM_TABLE_BASE 0x7FFECC0000ull +#define NIC0_ROM_TABLE_MAX_OFFSET 0x1000 +#define NIC0_ROM_TABLE_SECTION 0x1000 +#define mmSTM_0_NIC0_DBG_BASE 0x7FFECC1000ull +#define STM_0_NIC0_DBG_MAX_OFFSET 0x21000 +#define STM_0_NIC0_DBG_SECTION 0x1000 +#define mmCTI_0_NIC0_DBG_BASE 0x7FFECC2000ull +#define CTI_0_NIC0_DBG_MAX_OFFSET 0x1000 +#define CTI_0_NIC0_DBG_SECTION 0x1000 +#define mmETF_0_NIC0_DBG_BASE 0x7FFECC3000ull +#define ETF_0_NIC0_DBG_MAX_OFFSET 0x1000 +#define ETF_0_NIC0_DBG_SECTION 0x1000 +#define mmSPMU_0_NIC0_DBG_BASE 0x7FFECC4000ull +#define SPMU_0_NIC0_DBG_MAX_OFFSET 0x1000 +#define SPMU_0_NIC0_DBG_SECTION 0x2000 +#define mmUSER_CTI_0_NIC0_DBG_BASE 0x7FFECC6000ull +#define 
USER_CTI_0_NIC0_DBG_MAX_OFFSET 0x1000 +#define USER_CTI_0_NIC0_DBG_SECTION 0xB000 +#define mmSTM_1_NIC0_DBG_BASE 0x7FFECD1000ull +#define STM_1_NIC0_DBG_MAX_OFFSET 0x1000 +#define STM_1_NIC0_DBG_SECTION 0x1000 +#define mmCTI_1_NIC0_DBG_BASE 0x7FFECD2000ull +#define CTI_1_NIC0_DBG_MAX_OFFSET 0x1000 +#define CTI_1_NIC0_DBG_SECTION 0x1000 +#define mmETF_1_NIC0_DBG_BASE 0x7FFECD3000ull +#define ETF_1_NIC0_DBG_MAX_OFFSET 0x1000 +#define ETF_1_NIC0_DBG_SECTION 0x1000 +#define mmSPMU_1_NIC0_DBG_BASE 0x7FFECD4000ull +#define SPMU_1_NIC0_DBG_MAX_OFFSET 0x1000 +#define SPMU_1_NIC0_DBG_SECTION 0x1000 +#define mmBMON_CTI_NIC0_DBG_BASE 0x7FFECD5000ull +#define BMON_CTI_NIC0_DBG_MAX_OFFSET 0x1000 +#define BMON_CTI_NIC0_DBG_SECTION 0x1000 +#define mmUSER_CTI_1_NIC0_DBG_BASE 0x7FFECD6000ull +#define USER_CTI_1_NIC0_DBG_MAX_OFFSET 0x1000 +#define USER_CTI_1_NIC0_DBG_SECTION 0x1000 +#define mmBMON0_NIC0_DBG_BASE 0x7FFECD7000ull +#define BMON0_NIC0_DBG_MAX_OFFSET 0x1000 +#define BMON0_NIC0_DBG_SECTION 0x1000 +#define mmBMON1_NIC0_DBG_BASE 0x7FFECD8000ull +#define BMON1_NIC0_DBG_MAX_OFFSET 0x1000 +#define BMON1_NIC0_DBG_SECTION 0x1000 +#define mmBMON2_NIC0_DBG_BASE 0x7FFECD9000ull +#define BMON2_NIC0_DBG_MAX_OFFSET 0x1000 +#define BMON2_NIC0_DBG_SECTION 0x1000 +#define mmBMON3_NIC0_DBG_BASE 0x7FFECDA000ull +#define BMON3_NIC0_DBG_MAX_OFFSET 0x1000 +#define BMON3_NIC0_DBG_SECTION 0x1000 +#define mmBMON4_NIC0_DBG_BASE 0x7FFECDB000ull +#define BMON4_NIC0_DBG_MAX_OFFSET 0x1000 +#define BMON4_NIC0_DBG_SECTION 0x6000 +#define mmFUNNEL_NIC0_DBG_BASE 0x7FFECE1000ull +#define FUNNEL_NIC0_DBG_MAX_OFFSET 0x1000 +#define FUNNEL_NIC0_DBG_SECTION 0x1F000 +#define mmNIC1_ROM_TABLE_BASE 0x7FFED00000ull +#define NIC1_ROM_TABLE_MAX_OFFSET 0x1000 +#define NIC1_ROM_TABLE_SECTION 0x1000 +#define mmSTM_0_NIC1_DBG_BASE 0x7FFED01000ull +#define STM_0_NIC1_DBG_MAX_OFFSET 0x21000 +#define STM_0_NIC1_DBG_SECTION 0x1000 +#define mmCTI_0_NIC1_DBG_BASE 0x7FFED02000ull +#define CTI_0_NIC1_DBG_MAX_OFFSET 0x1000 +#define CTI_0_NIC1_DBG_SECTION 0x1000 +#define mmETF_0_NIC1_DBG_BASE 0x7FFED03000ull +#define ETF_0_NIC1_DBG_MAX_OFFSET 0x1000 +#define ETF_0_NIC1_DBG_SECTION 0x1000 +#define mmSPMU_0_NIC1_DBG_BASE 0x7FFED04000ull +#define SPMU_0_NIC1_DBG_MAX_OFFSET 0x1000 +#define SPMU_0_NIC1_DBG_SECTION 0x2000 +#define mmUSER_CTI_0_NIC1_DBG_BASE 0x7FFED06000ull +#define USER_CTI_0_NIC1_DBG_MAX_OFFSET 0x1000 +#define USER_CTI_0_NIC1_DBG_SECTION 0xB000 +#define mmSTM_1_NIC1_DBG_BASE 0x7FFED11000ull +#define STM_1_NIC1_DBG_MAX_OFFSET 0x1000 +#define STM_1_NIC1_DBG_SECTION 0x1000 +#define mmCTI_1_NIC1_DBG_BASE 0x7FFED12000ull +#define CTI_1_NIC1_DBG_MAX_OFFSET 0x1000 +#define CTI_1_NIC1_DBG_SECTION 0x1000 +#define mmETF_1_NIC1_DBG_BASE 0x7FFED13000ull +#define ETF_1_NIC1_DBG_MAX_OFFSET 0x1000 +#define ETF_1_NIC1_DBG_SECTION 0x1000 +#define mmSPMU_1_NIC1_DBG_BASE 0x7FFED14000ull +#define SPMU_1_NIC1_DBG_MAX_OFFSET 0x1000 +#define SPMU_1_NIC1_DBG_SECTION 0x1000 +#define mmBMON_CTI_NIC1_DBG_BASE 0x7FFED15000ull +#define BMON_CTI_NIC1_DBG_MAX_OFFSET 0x1000 +#define BMON_CTI_NIC1_DBG_SECTION 0x1000 +#define mmUSER_CTI_1_NIC1_DBG_BASE 0x7FFED16000ull +#define USER_CTI_1_NIC1_DBG_MAX_OFFSET 0x1000 +#define USER_CTI_1_NIC1_DBG_SECTION 0x1000 +#define mmBMON0_NIC1_DBG_BASE 0x7FFED17000ull +#define BMON0_NIC1_DBG_MAX_OFFSET 0x1000 +#define BMON0_NIC1_DBG_SECTION 0x1000 +#define mmBMON1_NIC1_DBG_BASE 0x7FFED18000ull +#define BMON1_NIC1_DBG_MAX_OFFSET 0x1000 +#define BMON1_NIC1_DBG_SECTION 0x1000 +#define mmBMON2_NIC1_DBG_BASE 0x7FFED19000ull +#define 
BMON2_NIC1_DBG_MAX_OFFSET 0x1000 +#define BMON2_NIC1_DBG_SECTION 0x1000 +#define mmBMON3_NIC1_DBG_BASE 0x7FFED1A000ull +#define BMON3_NIC1_DBG_MAX_OFFSET 0x1000 +#define BMON3_NIC1_DBG_SECTION 0x1000 +#define mmBMON4_NIC1_DBG_BASE 0x7FFED1B000ull +#define BMON4_NIC1_DBG_MAX_OFFSET 0x1000 +#define BMON4_NIC1_DBG_SECTION 0x6000 +#define mmFUNNEL_NIC1_DBG_BASE 0x7FFED21000ull +#define FUNNEL_NIC1_DBG_MAX_OFFSET 0x1000 +#define FUNNEL_NIC1_DBG_SECTION 0x1F000 +#define mmNIC2_ROM_TABLE_BASE 0x7FFED40000ull +#define NIC2_ROM_TABLE_MAX_OFFSET 0x1000 +#define NIC2_ROM_TABLE_SECTION 0x1000 +#define mmSTM_0_NIC2_DBG_BASE 0x7FFED41000ull +#define STM_0_NIC2_DBG_MAX_OFFSET 0x21000 +#define STM_0_NIC2_DBG_SECTION 0x1000 +#define mmCTI_0_NIC2_DBG_BASE 0x7FFED42000ull +#define CTI_0_NIC2_DBG_MAX_OFFSET 0x1000 +#define CTI_0_NIC2_DBG_SECTION 0x1000 +#define mmETF_0_NIC2_DBG_BASE 0x7FFED43000ull +#define ETF_0_NIC2_DBG_MAX_OFFSET 0x1000 +#define ETF_0_NIC2_DBG_SECTION 0x1000 +#define mmSPMU_0_NIC2_DBG_BASE 0x7FFED44000ull +#define SPMU_0_NIC2_DBG_MAX_OFFSET 0x1000 +#define SPMU_0_NIC2_DBG_SECTION 0x2000 +#define mmUSER_CTI_0_NIC2_DBG_BASE 0x7FFED46000ull +#define USER_CTI_0_NIC2_DBG_MAX_OFFSET 0x1000 +#define USER_CTI_0_NIC2_DBG_SECTION 0xB000 +#define mmSTM_1_NIC2_DBG_BASE 0x7FFED51000ull +#define STM_1_NIC2_DBG_MAX_OFFSET 0x1000 +#define STM_1_NIC2_DBG_SECTION 0x1000 +#define mmCTI_1_NIC2_DBG_BASE 0x7FFED52000ull +#define CTI_1_NIC2_DBG_MAX_OFFSET 0x1000 +#define CTI_1_NIC2_DBG_SECTION 0x1000 +#define mmETF_1_NIC2_DBG_BASE 0x7FFED53000ull +#define ETF_1_NIC2_DBG_MAX_OFFSET 0x1000 +#define ETF_1_NIC2_DBG_SECTION 0x1000 +#define mmSPMU_1_NIC2_DBG_BASE 0x7FFED54000ull +#define SPMU_1_NIC2_DBG_MAX_OFFSET 0x1000 +#define SPMU_1_NIC2_DBG_SECTION 0x1000 +#define mmBMON_CTI_NIC2_DBG_BASE 0x7FFED55000ull +#define BMON_CTI_NIC2_DBG_MAX_OFFSET 0x1000 +#define BMON_CTI_NIC2_DBG_SECTION 0x1000 +#define mmUSER_CTI_1_NIC2_DBG_BASE 0x7FFED56000ull +#define USER_CTI_1_NIC2_DBG_MAX_OFFSET 0x1000 +#define USER_CTI_1_NIC2_DBG_SECTION 0x1000 +#define mmBMON0_NIC2_DBG_BASE 0x7FFED57000ull +#define BMON0_NIC2_DBG_MAX_OFFSET 0x1000 +#define BMON0_NIC2_DBG_SECTION 0x1000 +#define mmBMON1_NIC2_DBG_BASE 0x7FFED58000ull +#define BMON1_NIC2_DBG_MAX_OFFSET 0x1000 +#define BMON1_NIC2_DBG_SECTION 0x1000 +#define mmBMON2_NIC2_DBG_BASE 0x7FFED59000ull +#define BMON2_NIC2_DBG_MAX_OFFSET 0x1000 +#define BMON2_NIC2_DBG_SECTION 0x1000 +#define mmBMON3_NIC2_DBG_BASE 0x7FFED5A000ull +#define BMON3_NIC2_DBG_MAX_OFFSET 0x1000 +#define BMON3_NIC2_DBG_SECTION 0x1000 +#define mmBMON4_NIC2_DBG_BASE 0x7FFED5B000ull +#define BMON4_NIC2_DBG_MAX_OFFSET 0x1000 +#define BMON4_NIC2_DBG_SECTION 0x6000 +#define mmFUNNEL_NIC2_DBG_BASE 0x7FFED61000ull +#define FUNNEL_NIC2_DBG_MAX_OFFSET 0x1000 +#define FUNNEL_NIC2_DBG_SECTION 0x1F000 +#define mmNIC3_ROM_TABLE_BASE 0x7FFED80000ull +#define NIC3_ROM_TABLE_MAX_OFFSET 0x1000 +#define NIC3_ROM_TABLE_SECTION 0x1000 +#define mmSTM_0_NIC3_DBG_BASE 0x7FFED81000ull +#define STM_0_NIC3_DBG_MAX_OFFSET 0x21000 +#define STM_0_NIC3_DBG_SECTION 0x1000 +#define mmCTI_0_NIC3_DBG_BASE 0x7FFED82000ull +#define CTI_0_NIC3_DBG_MAX_OFFSET 0x1000 +#define CTI_0_NIC3_DBG_SECTION 0x1000 +#define mmETF_0_NIC3_DBG_BASE 0x7FFED83000ull +#define ETF_0_NIC3_DBG_MAX_OFFSET 0x1000 +#define ETF_0_NIC3_DBG_SECTION 0x1000 +#define mmSPMU_0_NIC3_DBG_BASE 0x7FFED84000ull +#define SPMU_0_NIC3_DBG_MAX_OFFSET 0x1000 +#define SPMU_0_NIC3_DBG_SECTION 0x2000 +#define mmUSER_CTI_0_NIC3_DBG_BASE 0x7FFED86000ull +#define USER_CTI_0_NIC3_DBG_MAX_OFFSET 
0x1000 +#define USER_CTI_0_NIC3_DBG_SECTION 0xB000 +#define mmSTM_1_NIC3_DBG_BASE 0x7FFED91000ull +#define STM_1_NIC3_DBG_MAX_OFFSET 0x1000 +#define STM_1_NIC3_DBG_SECTION 0x1000 +#define mmCTI_1_NIC3_DBG_BASE 0x7FFED92000ull +#define CTI_1_NIC3_DBG_MAX_OFFSET 0x1000 +#define CTI_1_NIC3_DBG_SECTION 0x1000 +#define mmETF_1_NIC3_DBG_BASE 0x7FFED93000ull +#define ETF_1_NIC3_DBG_MAX_OFFSET 0x1000 +#define ETF_1_NIC3_DBG_SECTION 0x1000 +#define mmSPMU_1_NIC3_DBG_BASE 0x7FFED94000ull +#define SPMU_1_NIC3_DBG_MAX_OFFSET 0x1000 +#define SPMU_1_NIC3_DBG_SECTION 0x1000 +#define mmBMON_CTI_NIC3_DBG_BASE 0x7FFED95000ull +#define BMON_CTI_NIC3_DBG_MAX_OFFSET 0x1000 +#define BMON_CTI_NIC3_DBG_SECTION 0x1000 +#define mmUSER_CTI_1_NIC3_DBG_BASE 0x7FFED96000ull +#define USER_CTI_1_NIC3_DBG_MAX_OFFSET 0x1000 +#define USER_CTI_1_NIC3_DBG_SECTION 0x1000 +#define mmBMON0_NIC3_DBG_BASE 0x7FFED97000ull +#define BMON0_NIC3_DBG_MAX_OFFSET 0x1000 +#define BMON0_NIC3_DBG_SECTION 0x1000 +#define mmBMON1_NIC3_DBG_BASE 0x7FFED98000ull +#define BMON1_NIC3_DBG_MAX_OFFSET 0x1000 +#define BMON1_NIC3_DBG_SECTION 0x1000 +#define mmBMON2_NIC3_DBG_BASE 0x7FFED99000ull +#define BMON2_NIC3_DBG_MAX_OFFSET 0x1000 +#define BMON2_NIC3_DBG_SECTION 0x1000 +#define mmBMON3_NIC3_DBG_BASE 0x7FFED9A000ull +#define BMON3_NIC3_DBG_MAX_OFFSET 0x1000 +#define BMON3_NIC3_DBG_SECTION 0x1000 +#define mmBMON4_NIC3_DBG_BASE 0x7FFED9B000ull +#define BMON4_NIC3_DBG_MAX_OFFSET 0x1000 +#define BMON4_NIC3_DBG_SECTION 0x6000 +#define mmFUNNEL_NIC3_DBG_BASE 0x7FFEDA1000ull +#define FUNNEL_NIC3_DBG_MAX_OFFSET 0x1000 +#define FUNNEL_NIC3_DBG_SECTION 0x1F000 +#define mmNIC4_ROM_TABLE_BASE 0x7FFEDC0000ull +#define NIC4_ROM_TABLE_MAX_OFFSET 0x1000 +#define NIC4_ROM_TABLE_SECTION 0x1000 +#define mmSTM_0_NIC4_DBG_BASE 0x7FFEDC1000ull +#define STM_0_NIC4_DBG_MAX_OFFSET 0x21000 +#define STM_0_NIC4_DBG_SECTION 0x1000 +#define mmCTI_0_NIC4_DBG_BASE 0x7FFEDC2000ull +#define CTI_0_NIC4_DBG_MAX_OFFSET 0x1000 +#define CTI_0_NIC4_DBG_SECTION 0x1000 +#define mmETF_0_NIC4_DBG_BASE 0x7FFEDC3000ull +#define ETF_0_NIC4_DBG_MAX_OFFSET 0x1000 +#define ETF_0_NIC4_DBG_SECTION 0x1000 +#define mmSPMU_0_NIC4_DBG_BASE 0x7FFEDC4000ull +#define SPMU_0_NIC4_DBG_MAX_OFFSET 0x1000 +#define SPMU_0_NIC4_DBG_SECTION 0x2000 +#define mmUSER_CTI_0_NIC4_DBG_BASE 0x7FFEDC6000ull +#define USER_CTI_0_NIC4_DBG_MAX_OFFSET 0x1000 +#define USER_CTI_0_NIC4_DBG_SECTION 0xB000 +#define mmSTM_1_NIC4_DBG_BASE 0x7FFEDD1000ull +#define STM_1_NIC4_DBG_MAX_OFFSET 0x1000 +#define STM_1_NIC4_DBG_SECTION 0x1000 +#define mmCTI_1_NIC4_DBG_BASE 0x7FFEDD2000ull +#define CTI_1_NIC4_DBG_MAX_OFFSET 0x1000 +#define CTI_1_NIC4_DBG_SECTION 0x1000 +#define mmETF_1_NIC4_DBG_BASE 0x7FFEDD3000ull +#define ETF_1_NIC4_DBG_MAX_OFFSET 0x1000 +#define ETF_1_NIC4_DBG_SECTION 0x1000 +#define mmSPMU_1_NIC4_DBG_BASE 0x7FFEDD4000ull +#define SPMU_1_NIC4_DBG_MAX_OFFSET 0x1000 +#define SPMU_1_NIC4_DBG_SECTION 0x1000 +#define mmBMON_CTI_NIC4_DBG_BASE 0x7FFEDD5000ull +#define BMON_CTI_NIC4_DBG_MAX_OFFSET 0x1000 +#define BMON_CTI_NIC4_DBG_SECTION 0x1000 +#define mmUSER_CTI_1_NIC4_DBG_BASE 0x7FFEDD6000ull +#define USER_CTI_1_NIC4_DBG_MAX_OFFSET 0x1000 +#define USER_CTI_1_NIC4_DBG_SECTION 0x1000 +#define mmBMON0_NIC4_DBG_BASE 0x7FFEDD7000ull +#define BMON0_NIC4_DBG_MAX_OFFSET 0x1000 +#define BMON0_NIC4_DBG_SECTION 0x1000 +#define mmBMON1_NIC4_DBG_BASE 0x7FFEDD8000ull +#define BMON1_NIC4_DBG_MAX_OFFSET 0x1000 +#define BMON1_NIC4_DBG_SECTION 0x1000 +#define mmBMON2_NIC4_DBG_BASE 0x7FFEDD9000ull +#define BMON2_NIC4_DBG_MAX_OFFSET 0x1000 +#define 
BMON2_NIC4_DBG_SECTION 0x1000 +#define mmBMON3_NIC4_DBG_BASE 0x7FFEDDA000ull +#define BMON3_NIC4_DBG_MAX_OFFSET 0x1000 +#define BMON3_NIC4_DBG_SECTION 0x1000 +#define mmBMON4_NIC4_DBG_BASE 0x7FFEDDB000ull +#define BMON4_NIC4_DBG_MAX_OFFSET 0x1000 +#define BMON4_NIC4_DBG_SECTION 0x6000 +#define mmFUNNEL_NIC4_DBG_BASE 0x7FFEDE1000ull +#define FUNNEL_NIC4_DBG_MAX_OFFSET 0x1000 +#define FUNNEL_NIC4_DBG_SECTION 0x21F000 +#define mmTPC0_ROM_TABLE_BASE 0x7FFF000000ull +#define TPC0_ROM_TABLE_MAX_OFFSET 0x1000 +#define TPC0_ROM_TABLE_SECTION 0x1000 +#define mmTPC0_EML_SPMU_BASE 0x7FFF001000ull +#define TPC0_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC0_EML_SPMU_SECTION 0x1000 +#define mmTPC0_EML_ETF_BASE 0x7FFF002000ull +#define TPC0_EML_ETF_MAX_OFFSET 0x1000 +#define TPC0_EML_ETF_SECTION 0x1000 +#define mmTPC0_EML_STM_BASE 0x7FFF003000ull +#define TPC0_EML_STM_MAX_OFFSET 0x1000 +#define TPC0_EML_STM_SECTION 0x2000 +#define mmTPC0_EML_CTI_BASE 0x7FFF005000ull +#define TPC0_EML_CTI_MAX_OFFSET 0x1000 +#define TPC0_EML_CTI_SECTION 0x1000 +#define mmTPC0_EML_FUNNEL_BASE 0x7FFF006000ull +#define TPC0_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC0_EML_FUNNEL_SECTION 0x1000 +#define mmTPC0_EML_BUSMON_0_BASE 0x7FFF007000ull +#define TPC0_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC0_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC0_EML_BUSMON_1_BASE 0x7FFF008000ull +#define TPC0_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC0_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC0_EML_BUSMON_2_BASE 0x7FFF009000ull +#define TPC0_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC0_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC0_EML_BUSMON_3_BASE 0x7FFF00A000ull +#define TPC0_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC0_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC0_EML_CFG_BASE 0x7FFF040000ull +#define TPC0_EML_CFG_MAX_OFFSET 0x3380 +#define TPC0_EML_CFG_SECTION 0x1000 +#define mmTPC0_EML_TPC_CFG_BASE 0x7FFF041000ull +#define TPC0_EML_TPC_CFG_MAX_OFFSET 0xE400 +#define TPC0_EML_TPC_CFG_SECTION 0x4000 +#define mmKERNEL_TENSOR_0_TPC0_EML_TPC_CFG_BASE 0x7FFF041400ull +#define KERNEL_TENSOR_0_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_0_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_1_TPC0_EML_TPC_CFG_BASE 0x7FFF041438ull +#define KERNEL_TENSOR_1_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_1_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_2_TPC0_EML_TPC_CFG_BASE 0x7FFF041470ull +#define KERNEL_TENSOR_2_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_2_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_3_TPC0_EML_TPC_CFG_BASE 0x7FFF0414A8ull +#define KERNEL_TENSOR_3_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_3_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_4_TPC0_EML_TPC_CFG_BASE 0x7FFF0414E0ull +#define KERNEL_TENSOR_4_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_4_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_5_TPC0_EML_TPC_CFG_BASE 0x7FFF041518ull +#define KERNEL_TENSOR_5_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_5_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_6_TPC0_EML_TPC_CFG_BASE 0x7FFF041550ull +#define KERNEL_TENSOR_6_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_6_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_7_TPC0_EML_TPC_CFG_BASE 0x7FFF041588ull +#define KERNEL_TENSOR_7_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_7_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_8_TPC0_EML_TPC_CFG_BASE 0x7FFF0415C0ull +#define KERNEL_TENSOR_8_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 
+#define KERNEL_TENSOR_8_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_9_TPC0_EML_TPC_CFG_BASE 0x7FFF0415F8ull +#define KERNEL_TENSOR_9_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_9_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_10_TPC0_EML_TPC_CFG_BASE 0x7FFF041630ull +#define KERNEL_TENSOR_10_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_10_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_11_TPC0_EML_TPC_CFG_BASE 0x7FFF041668ull +#define KERNEL_TENSOR_11_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_11_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_12_TPC0_EML_TPC_CFG_BASE 0x7FFF0416A0ull +#define KERNEL_TENSOR_12_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_12_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_13_TPC0_EML_TPC_CFG_BASE 0x7FFF0416D8ull +#define KERNEL_TENSOR_13_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_13_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_14_TPC0_EML_TPC_CFG_BASE 0x7FFF041710ull +#define KERNEL_TENSOR_14_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_14_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_15_TPC0_EML_TPC_CFG_BASE 0x7FFF041748ull +#define KERNEL_TENSOR_15_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_15_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_SYNC_OBJECT_TPC0_EML_TPC_CFG_BASE 0x7FFF041780ull +#define KERNEL_SYNC_OBJECT_TPC0_EML_TPC_CFG_MAX_OFFSET 0x8000 +#define KERNEL_SYNC_OBJECT_TPC0_EML_TPC_CFG_SECTION 0x8000 +#define mmKERNEL_TPC0_EML_TPC_CFG_BASE 0x7FFF041788ull +#define KERNEL_TPC0_EML_TPC_CFG_MAX_OFFSET 0xB800 +#define KERNEL_TPC0_EML_TPC_CFG_SECTION 0x2780 +#define mmQM_TENSOR_0_TPC0_EML_TPC_CFG_BASE 0x7FFF041A00ull +#define QM_TENSOR_0_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_0_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_1_TPC0_EML_TPC_CFG_BASE 0x7FFF041A38ull +#define QM_TENSOR_1_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_1_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_2_TPC0_EML_TPC_CFG_BASE 0x7FFF041A70ull +#define QM_TENSOR_2_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_2_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_3_TPC0_EML_TPC_CFG_BASE 0x7FFF041AA8ull +#define QM_TENSOR_3_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_3_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_4_TPC0_EML_TPC_CFG_BASE 0x7FFF041AE0ull +#define QM_TENSOR_4_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_4_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_5_TPC0_EML_TPC_CFG_BASE 0x7FFF041B18ull +#define QM_TENSOR_5_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_5_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_6_TPC0_EML_TPC_CFG_BASE 0x7FFF041B50ull +#define QM_TENSOR_6_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_6_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_7_TPC0_EML_TPC_CFG_BASE 0x7FFF041B88ull +#define QM_TENSOR_7_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_7_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_8_TPC0_EML_TPC_CFG_BASE 0x7FFF041BC0ull +#define QM_TENSOR_8_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_8_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_9_TPC0_EML_TPC_CFG_BASE 0x7FFF041BF8ull +#define QM_TENSOR_9_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_9_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_10_TPC0_EML_TPC_CFG_BASE 0x7FFF041C30ull +#define QM_TENSOR_10_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define 
QM_TENSOR_10_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_11_TPC0_EML_TPC_CFG_BASE 0x7FFF041C68ull +#define QM_TENSOR_11_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_11_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_12_TPC0_EML_TPC_CFG_BASE 0x7FFF041CA0ull +#define QM_TENSOR_12_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_12_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_13_TPC0_EML_TPC_CFG_BASE 0x7FFF041CD8ull +#define QM_TENSOR_13_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_13_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_14_TPC0_EML_TPC_CFG_BASE 0x7FFF041D10ull +#define QM_TENSOR_14_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_14_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_15_TPC0_EML_TPC_CFG_BASE 0x7FFF041D48ull +#define QM_TENSOR_15_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_15_TPC0_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_SYNC_OBJECT_TPC0_EML_TPC_CFG_BASE 0x7FFF041D80ull +#define QM_SYNC_OBJECT_TPC0_EML_TPC_CFG_MAX_OFFSET 0x8000 +#define QM_SYNC_OBJECT_TPC0_EML_TPC_CFG_SECTION 0x8000 +#define mmQM_TPC0_EML_TPC_CFG_BASE 0x7FFF041D88ull +#define QM_TPC0_EML_TPC_CFG_MAX_OFFSET 0xB800 +#define QM_TPC0_EML_TPC_CFG_SECTION 0x2780 +#define mmTPC0_EML_TPC_QM_BASE 0x7FFF042000ull +#define TPC0_EML_TPC_QM_MAX_OFFSET 0xD040 +#define TPC0_EML_TPC_QM_SECTION 0x1BD000 +#define mmTPC0_EML_CS_BASE 0x7FFF1FF000ull +#define TPC0_EML_CS_MAX_OFFSET 0x1000 +#define TPC0_EML_CS_SECTION 0x1000 +#define mmTPC1_ROM_TABLE_BASE 0x7FFF200000ull +#define TPC1_ROM_TABLE_MAX_OFFSET 0x1000 +#define TPC1_ROM_TABLE_SECTION 0x1000 +#define mmTPC1_EML_SPMU_BASE 0x7FFF201000ull +#define TPC1_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC1_EML_SPMU_SECTION 0x1000 +#define mmTPC1_EML_ETF_BASE 0x7FFF202000ull +#define TPC1_EML_ETF_MAX_OFFSET 0x1000 +#define TPC1_EML_ETF_SECTION 0x1000 +#define mmTPC1_EML_STM_BASE 0x7FFF203000ull +#define TPC1_EML_STM_MAX_OFFSET 0x1000 +#define TPC1_EML_STM_SECTION 0x2000 +#define mmTPC1_EML_CTI_BASE 0x7FFF205000ull +#define TPC1_EML_CTI_MAX_OFFSET 0x1000 +#define TPC1_EML_CTI_SECTION 0x1000 +#define mmTPC1_EML_FUNNEL_BASE 0x7FFF206000ull +#define TPC1_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC1_EML_FUNNEL_SECTION 0x1000 +#define mmTPC1_EML_BUSMON_0_BASE 0x7FFF207000ull +#define TPC1_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC1_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC1_EML_BUSMON_1_BASE 0x7FFF208000ull +#define TPC1_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC1_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC1_EML_BUSMON_2_BASE 0x7FFF209000ull +#define TPC1_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC1_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC1_EML_BUSMON_3_BASE 0x7FFF20A000ull +#define TPC1_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC1_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC1_EML_CFG_BASE 0x7FFF240000ull +#define TPC1_EML_CFG_MAX_OFFSET 0x3380 +#define TPC1_EML_CFG_SECTION 0x1000 +#define mmTPC1_EML_TPC_CFG_BASE 0x7FFF241000ull +#define TPC1_EML_TPC_CFG_MAX_OFFSET 0xE400 +#define TPC1_EML_TPC_CFG_SECTION 0x4000 +#define mmKERNEL_TENSOR_0_TPC1_EML_TPC_CFG_BASE 0x7FFF241400ull +#define KERNEL_TENSOR_0_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_0_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_1_TPC1_EML_TPC_CFG_BASE 0x7FFF241438ull +#define KERNEL_TENSOR_1_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_1_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_2_TPC1_EML_TPC_CFG_BASE 0x7FFF241470ull +#define KERNEL_TENSOR_2_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define 
KERNEL_TENSOR_2_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_3_TPC1_EML_TPC_CFG_BASE 0x7FFF2414A8ull +#define KERNEL_TENSOR_3_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_3_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_4_TPC1_EML_TPC_CFG_BASE 0x7FFF2414E0ull +#define KERNEL_TENSOR_4_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_4_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_5_TPC1_EML_TPC_CFG_BASE 0x7FFF241518ull +#define KERNEL_TENSOR_5_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_5_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_6_TPC1_EML_TPC_CFG_BASE 0x7FFF241550ull +#define KERNEL_TENSOR_6_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_6_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_7_TPC1_EML_TPC_CFG_BASE 0x7FFF241588ull +#define KERNEL_TENSOR_7_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_7_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_8_TPC1_EML_TPC_CFG_BASE 0x7FFF2415C0ull +#define KERNEL_TENSOR_8_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_8_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_9_TPC1_EML_TPC_CFG_BASE 0x7FFF2415F8ull +#define KERNEL_TENSOR_9_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_9_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_10_TPC1_EML_TPC_CFG_BASE 0x7FFF241630ull +#define KERNEL_TENSOR_10_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_10_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_11_TPC1_EML_TPC_CFG_BASE 0x7FFF241668ull +#define KERNEL_TENSOR_11_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_11_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_12_TPC1_EML_TPC_CFG_BASE 0x7FFF2416A0ull +#define KERNEL_TENSOR_12_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_12_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_13_TPC1_EML_TPC_CFG_BASE 0x7FFF2416D8ull +#define KERNEL_TENSOR_13_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_13_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_14_TPC1_EML_TPC_CFG_BASE 0x7FFF241710ull +#define KERNEL_TENSOR_14_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_14_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_15_TPC1_EML_TPC_CFG_BASE 0x7FFF241748ull +#define KERNEL_TENSOR_15_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_15_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_SYNC_OBJECT_TPC1_EML_TPC_CFG_BASE 0x7FFF241780ull +#define KERNEL_SYNC_OBJECT_TPC1_EML_TPC_CFG_MAX_OFFSET 0x8000 +#define KERNEL_SYNC_OBJECT_TPC1_EML_TPC_CFG_SECTION 0x8000 +#define mmKERNEL_TPC1_EML_TPC_CFG_BASE 0x7FFF241788ull +#define KERNEL_TPC1_EML_TPC_CFG_MAX_OFFSET 0xB800 +#define KERNEL_TPC1_EML_TPC_CFG_SECTION 0x2780 +#define mmQM_TENSOR_0_TPC1_EML_TPC_CFG_BASE 0x7FFF241A00ull +#define QM_TENSOR_0_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_0_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_1_TPC1_EML_TPC_CFG_BASE 0x7FFF241A38ull +#define QM_TENSOR_1_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_1_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_2_TPC1_EML_TPC_CFG_BASE 0x7FFF241A70ull +#define QM_TENSOR_2_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_2_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_3_TPC1_EML_TPC_CFG_BASE 0x7FFF241AA8ull +#define QM_TENSOR_3_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_3_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_4_TPC1_EML_TPC_CFG_BASE 0x7FFF241AE0ull +#define 
QM_TENSOR_4_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_4_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_5_TPC1_EML_TPC_CFG_BASE 0x7FFF241B18ull +#define QM_TENSOR_5_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_5_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_6_TPC1_EML_TPC_CFG_BASE 0x7FFF241B50ull +#define QM_TENSOR_6_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_6_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_7_TPC1_EML_TPC_CFG_BASE 0x7FFF241B88ull +#define QM_TENSOR_7_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_7_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_8_TPC1_EML_TPC_CFG_BASE 0x7FFF241BC0ull +#define QM_TENSOR_8_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_8_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_9_TPC1_EML_TPC_CFG_BASE 0x7FFF241BF8ull +#define QM_TENSOR_9_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_9_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_10_TPC1_EML_TPC_CFG_BASE 0x7FFF241C30ull +#define QM_TENSOR_10_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_10_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_11_TPC1_EML_TPC_CFG_BASE 0x7FFF241C68ull +#define QM_TENSOR_11_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_11_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_12_TPC1_EML_TPC_CFG_BASE 0x7FFF241CA0ull +#define QM_TENSOR_12_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_12_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_13_TPC1_EML_TPC_CFG_BASE 0x7FFF241CD8ull +#define QM_TENSOR_13_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_13_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_14_TPC1_EML_TPC_CFG_BASE 0x7FFF241D10ull +#define QM_TENSOR_14_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_14_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_15_TPC1_EML_TPC_CFG_BASE 0x7FFF241D48ull +#define QM_TENSOR_15_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_15_TPC1_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_SYNC_OBJECT_TPC1_EML_TPC_CFG_BASE 0x7FFF241D80ull +#define QM_SYNC_OBJECT_TPC1_EML_TPC_CFG_MAX_OFFSET 0x8000 +#define QM_SYNC_OBJECT_TPC1_EML_TPC_CFG_SECTION 0x8000 +#define mmQM_TPC1_EML_TPC_CFG_BASE 0x7FFF241D88ull +#define QM_TPC1_EML_TPC_CFG_MAX_OFFSET 0xB800 +#define QM_TPC1_EML_TPC_CFG_SECTION 0x2780 +#define mmTPC1_EML_TPC_QM_BASE 0x7FFF242000ull +#define TPC1_EML_TPC_QM_MAX_OFFSET 0xD040 +#define TPC1_EML_TPC_QM_SECTION 0x1BD000 +#define mmTPC1_EML_CS_BASE 0x7FFF3FF000ull +#define TPC1_EML_CS_MAX_OFFSET 0x1000 +#define TPC1_EML_CS_SECTION 0x1000 +#define mmTPC2_ROM_TABLE_BASE 0x7FFF400000ull +#define TPC2_ROM_TABLE_MAX_OFFSET 0x1000 +#define TPC2_ROM_TABLE_SECTION 0x1000 +#define mmTPC2_EML_SPMU_BASE 0x7FFF401000ull +#define TPC2_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC2_EML_SPMU_SECTION 0x1000 +#define mmTPC2_EML_ETF_BASE 0x7FFF402000ull +#define TPC2_EML_ETF_MAX_OFFSET 0x1000 +#define TPC2_EML_ETF_SECTION 0x1000 +#define mmTPC2_EML_STM_BASE 0x7FFF403000ull +#define TPC2_EML_STM_MAX_OFFSET 0x1000 +#define TPC2_EML_STM_SECTION 0x2000 +#define mmTPC2_EML_CTI_BASE 0x7FFF405000ull +#define TPC2_EML_CTI_MAX_OFFSET 0x1000 +#define TPC2_EML_CTI_SECTION 0x1000 +#define mmTPC2_EML_FUNNEL_BASE 0x7FFF406000ull +#define TPC2_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC2_EML_FUNNEL_SECTION 0x1000 +#define mmTPC2_EML_BUSMON_0_BASE 0x7FFF407000ull +#define TPC2_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC2_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC2_EML_BUSMON_1_BASE 0x7FFF408000ull +#define TPC2_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define 
TPC2_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC2_EML_BUSMON_2_BASE 0x7FFF409000ull +#define TPC2_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC2_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC2_EML_BUSMON_3_BASE 0x7FFF40A000ull +#define TPC2_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC2_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC2_EML_CFG_BASE 0x7FFF440000ull +#define TPC2_EML_CFG_MAX_OFFSET 0x3380 +#define TPC2_EML_CFG_SECTION 0x1000 +#define mmTPC2_EML_TPC_CFG_BASE 0x7FFF441000ull +#define TPC2_EML_TPC_CFG_MAX_OFFSET 0xE400 +#define TPC2_EML_TPC_CFG_SECTION 0x4000 +#define mmKERNEL_TENSOR_0_TPC2_EML_TPC_CFG_BASE 0x7FFF441400ull +#define KERNEL_TENSOR_0_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_0_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_1_TPC2_EML_TPC_CFG_BASE 0x7FFF441438ull +#define KERNEL_TENSOR_1_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_1_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_2_TPC2_EML_TPC_CFG_BASE 0x7FFF441470ull +#define KERNEL_TENSOR_2_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_2_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_3_TPC2_EML_TPC_CFG_BASE 0x7FFF4414A8ull +#define KERNEL_TENSOR_3_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_3_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_4_TPC2_EML_TPC_CFG_BASE 0x7FFF4414E0ull +#define KERNEL_TENSOR_4_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_4_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_5_TPC2_EML_TPC_CFG_BASE 0x7FFF441518ull +#define KERNEL_TENSOR_5_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_5_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_6_TPC2_EML_TPC_CFG_BASE 0x7FFF441550ull +#define KERNEL_TENSOR_6_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_6_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_7_TPC2_EML_TPC_CFG_BASE 0x7FFF441588ull +#define KERNEL_TENSOR_7_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_7_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_8_TPC2_EML_TPC_CFG_BASE 0x7FFF4415C0ull +#define KERNEL_TENSOR_8_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_8_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_9_TPC2_EML_TPC_CFG_BASE 0x7FFF4415F8ull +#define KERNEL_TENSOR_9_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_9_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_10_TPC2_EML_TPC_CFG_BASE 0x7FFF441630ull +#define KERNEL_TENSOR_10_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_10_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_11_TPC2_EML_TPC_CFG_BASE 0x7FFF441668ull +#define KERNEL_TENSOR_11_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_11_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_12_TPC2_EML_TPC_CFG_BASE 0x7FFF4416A0ull +#define KERNEL_TENSOR_12_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_12_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_13_TPC2_EML_TPC_CFG_BASE 0x7FFF4416D8ull +#define KERNEL_TENSOR_13_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_13_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_14_TPC2_EML_TPC_CFG_BASE 0x7FFF441710ull +#define KERNEL_TENSOR_14_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_14_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_15_TPC2_EML_TPC_CFG_BASE 0x7FFF441748ull +#define KERNEL_TENSOR_15_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_15_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_SYNC_OBJECT_TPC2_EML_TPC_CFG_BASE 
0x7FFF441780ull +#define KERNEL_SYNC_OBJECT_TPC2_EML_TPC_CFG_MAX_OFFSET 0x8000 +#define KERNEL_SYNC_OBJECT_TPC2_EML_TPC_CFG_SECTION 0x8000 +#define mmKERNEL_TPC2_EML_TPC_CFG_BASE 0x7FFF441788ull +#define KERNEL_TPC2_EML_TPC_CFG_MAX_OFFSET 0xB800 +#define KERNEL_TPC2_EML_TPC_CFG_SECTION 0x2780 +#define mmQM_TENSOR_0_TPC2_EML_TPC_CFG_BASE 0x7FFF441A00ull +#define QM_TENSOR_0_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_0_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_1_TPC2_EML_TPC_CFG_BASE 0x7FFF441A38ull +#define QM_TENSOR_1_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_1_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_2_TPC2_EML_TPC_CFG_BASE 0x7FFF441A70ull +#define QM_TENSOR_2_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_2_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_3_TPC2_EML_TPC_CFG_BASE 0x7FFF441AA8ull +#define QM_TENSOR_3_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_3_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_4_TPC2_EML_TPC_CFG_BASE 0x7FFF441AE0ull +#define QM_TENSOR_4_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_4_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_5_TPC2_EML_TPC_CFG_BASE 0x7FFF441B18ull +#define QM_TENSOR_5_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_5_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_6_TPC2_EML_TPC_CFG_BASE 0x7FFF441B50ull +#define QM_TENSOR_6_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_6_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_7_TPC2_EML_TPC_CFG_BASE 0x7FFF441B88ull +#define QM_TENSOR_7_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_7_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_8_TPC2_EML_TPC_CFG_BASE 0x7FFF441BC0ull +#define QM_TENSOR_8_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_8_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_9_TPC2_EML_TPC_CFG_BASE 0x7FFF441BF8ull +#define QM_TENSOR_9_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_9_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_10_TPC2_EML_TPC_CFG_BASE 0x7FFF441C30ull +#define QM_TENSOR_10_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_10_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_11_TPC2_EML_TPC_CFG_BASE 0x7FFF441C68ull +#define QM_TENSOR_11_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_11_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_12_TPC2_EML_TPC_CFG_BASE 0x7FFF441CA0ull +#define QM_TENSOR_12_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_12_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_13_TPC2_EML_TPC_CFG_BASE 0x7FFF441CD8ull +#define QM_TENSOR_13_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_13_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_14_TPC2_EML_TPC_CFG_BASE 0x7FFF441D10ull +#define QM_TENSOR_14_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_14_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_TENSOR_15_TPC2_EML_TPC_CFG_BASE 0x7FFF441D48ull +#define QM_TENSOR_15_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define QM_TENSOR_15_TPC2_EML_TPC_CFG_SECTION 0x3800 +#define mmQM_SYNC_OBJECT_TPC2_EML_TPC_CFG_BASE 0x7FFF441D80ull +#define QM_SYNC_OBJECT_TPC2_EML_TPC_CFG_MAX_OFFSET 0x8000 +#define QM_SYNC_OBJECT_TPC2_EML_TPC_CFG_SECTION 0x8000 +#define mmQM_TPC2_EML_TPC_CFG_BASE 0x7FFF441D88ull +#define QM_TPC2_EML_TPC_CFG_MAX_OFFSET 0xB800 +#define QM_TPC2_EML_TPC_CFG_SECTION 0x2780 +#define mmTPC2_EML_TPC_QM_BASE 0x7FFF442000ull +#define TPC2_EML_TPC_QM_MAX_OFFSET 0xD040 +#define TPC2_EML_TPC_QM_SECTION 0x1BD000 +#define mmTPC2_EML_CS_BASE 0x7FFF5FF000ull +#define 
TPC2_EML_CS_MAX_OFFSET 0x1000 +#define TPC2_EML_CS_SECTION 0x1000 +#define mmTPC3_ROM_TABLE_BASE 0x7FFF600000ull +#define TPC3_ROM_TABLE_MAX_OFFSET 0x1000 +#define TPC3_ROM_TABLE_SECTION 0x1000 +#define mmTPC3_EML_SPMU_BASE 0x7FFF601000ull +#define TPC3_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC3_EML_SPMU_SECTION 0x1000 +#define mmTPC3_EML_ETF_BASE 0x7FFF602000ull +#define TPC3_EML_ETF_MAX_OFFSET 0x1000 +#define TPC3_EML_ETF_SECTION 0x1000 +#define mmTPC3_EML_STM_BASE 0x7FFF603000ull +#define TPC3_EML_STM_MAX_OFFSET 0x1000 +#define TPC3_EML_STM_SECTION 0x2000 +#define mmTPC3_EML_CTI_BASE 0x7FFF605000ull +#define TPC3_EML_CTI_MAX_OFFSET 0x1000 +#define TPC3_EML_CTI_SECTION 0x1000 +#define mmTPC3_EML_FUNNEL_BASE 0x7FFF606000ull +#define TPC3_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC3_EML_FUNNEL_SECTION 0x1000 +#define mmTPC3_EML_BUSMON_0_BASE 0x7FFF607000ull +#define TPC3_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC3_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC3_EML_BUSMON_1_BASE 0x7FFF608000ull +#define TPC3_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC3_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC3_EML_BUSMON_2_BASE 0x7FFF609000ull +#define TPC3_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC3_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC3_EML_BUSMON_3_BASE 0x7FFF60A000ull +#define TPC3_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC3_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC3_EML_CFG_BASE 0x7FFF640000ull +#define TPC3_EML_CFG_MAX_OFFSET 0x3380 +#define TPC3_EML_CFG_SECTION 0x1000 +#define mmTPC3_EML_TPC_CFG_BASE 0x7FFF641000ull +#define TPC3_EML_TPC_CFG_MAX_OFFSET 0xE400 +#define TPC3_EML_TPC_CFG_SECTION 0x4000 +#define mmKERNEL_TENSOR_0_TPC3_EML_TPC_CFG_BASE 0x7FFF641400ull +#define KERNEL_TENSOR_0_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_0_TPC3_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_1_TPC3_EML_TPC_CFG_BASE 0x7FFF641438ull +#define KERNEL_TENSOR_1_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_1_TPC3_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_2_TPC3_EML_TPC_CFG_BASE 0x7FFF641470ull +#define KERNEL_TENSOR_2_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_2_TPC3_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_3_TPC3_EML_TPC_CFG_BASE 0x7FFF6414A8ull +#define KERNEL_TENSOR_3_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_3_TPC3_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_4_TPC3_EML_TPC_CFG_BASE 0x7FFF6414E0ull +#define KERNEL_TENSOR_4_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_4_TPC3_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_5_TPC3_EML_TPC_CFG_BASE 0x7FFF641518ull +#define KERNEL_TENSOR_5_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_5_TPC3_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_6_TPC3_EML_TPC_CFG_BASE 0x7FFF641550ull +#define KERNEL_TENSOR_6_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_6_TPC3_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_7_TPC3_EML_TPC_CFG_BASE 0x7FFF641588ull +#define KERNEL_TENSOR_7_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_7_TPC3_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_8_TPC3_EML_TPC_CFG_BASE 0x7FFF6415C0ull +#define KERNEL_TENSOR_8_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_8_TPC3_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_9_TPC3_EML_TPC_CFG_BASE 0x7FFF6415F8ull +#define KERNEL_TENSOR_9_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800 +#define KERNEL_TENSOR_9_TPC3_EML_TPC_CFG_SECTION 0x3800 +#define mmKERNEL_TENSOR_10_TPC3_EML_TPC_CFG_BASE 0x7FFF641630ull +#define 
+#define KERNEL_TENSOR_10_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC3_EML_TPC_CFG_BASE 0x7FFF641668ull
+#define KERNEL_TENSOR_11_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC3_EML_TPC_CFG_BASE 0x7FFF6416A0ull
+#define KERNEL_TENSOR_12_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC3_EML_TPC_CFG_BASE 0x7FFF6416D8ull
+#define KERNEL_TENSOR_13_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC3_EML_TPC_CFG_BASE 0x7FFF641710ull
+#define KERNEL_TENSOR_14_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC3_EML_TPC_CFG_BASE 0x7FFF641748ull
+#define KERNEL_TENSOR_15_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC3_EML_TPC_CFG_BASE 0x7FFF641780ull
+#define KERNEL_SYNC_OBJECT_TPC3_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC3_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC3_EML_TPC_CFG_BASE 0x7FFF641788ull
+#define KERNEL_TPC3_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC3_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC3_EML_TPC_CFG_BASE 0x7FFF641A00ull
+#define QM_TENSOR_0_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC3_EML_TPC_CFG_BASE 0x7FFF641A38ull
+#define QM_TENSOR_1_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC3_EML_TPC_CFG_BASE 0x7FFF641A70ull
+#define QM_TENSOR_2_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC3_EML_TPC_CFG_BASE 0x7FFF641AA8ull
+#define QM_TENSOR_3_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC3_EML_TPC_CFG_BASE 0x7FFF641AE0ull
+#define QM_TENSOR_4_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC3_EML_TPC_CFG_BASE 0x7FFF641B18ull
+#define QM_TENSOR_5_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC3_EML_TPC_CFG_BASE 0x7FFF641B50ull
+#define QM_TENSOR_6_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC3_EML_TPC_CFG_BASE 0x7FFF641B88ull
+#define QM_TENSOR_7_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC3_EML_TPC_CFG_BASE 0x7FFF641BC0ull
+#define QM_TENSOR_8_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC3_EML_TPC_CFG_BASE 0x7FFF641BF8ull
+#define QM_TENSOR_9_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC3_EML_TPC_CFG_BASE 0x7FFF641C30ull
+#define QM_TENSOR_10_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC3_EML_TPC_CFG_BASE 0x7FFF641C68ull
+#define QM_TENSOR_11_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC3_EML_TPC_CFG_BASE 0x7FFF641CA0ull
+#define QM_TENSOR_12_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC3_EML_TPC_CFG_BASE 0x7FFF641CD8ull
+#define QM_TENSOR_13_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC3_EML_TPC_CFG_BASE 0x7FFF641D10ull
+#define QM_TENSOR_14_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC3_EML_TPC_CFG_BASE 0x7FFF641D48ull
+#define QM_TENSOR_15_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC3_EML_TPC_CFG_BASE 0x7FFF641D80ull
+#define QM_SYNC_OBJECT_TPC3_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC3_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC3_EML_TPC_CFG_BASE 0x7FFF641D88ull
+#define QM_TPC3_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC3_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC3_EML_TPC_QM_BASE 0x7FFF642000ull
+#define TPC3_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC3_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC3_EML_CS_BASE 0x7FFF7FF000ull
+#define TPC3_EML_CS_MAX_OFFSET 0x1000
+#define TPC3_EML_CS_SECTION 0x1000
+#define mmTPC4_ROM_TABLE_BASE 0x7FFF800000ull
+#define TPC4_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC4_ROM_TABLE_SECTION 0x1000
+#define mmTPC4_EML_SPMU_BASE 0x7FFF801000ull
+#define TPC4_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC4_EML_SPMU_SECTION 0x1000
+#define mmTPC4_EML_ETF_BASE 0x7FFF802000ull
+#define TPC4_EML_ETF_MAX_OFFSET 0x1000
+#define TPC4_EML_ETF_SECTION 0x1000
+#define mmTPC4_EML_STM_BASE 0x7FFF803000ull
+#define TPC4_EML_STM_MAX_OFFSET 0x1000
+#define TPC4_EML_STM_SECTION 0x2000
+#define mmTPC4_EML_CTI_BASE 0x7FFF805000ull
+#define TPC4_EML_CTI_MAX_OFFSET 0x1000
+#define TPC4_EML_CTI_SECTION 0x1000
+#define mmTPC4_EML_FUNNEL_BASE 0x7FFF806000ull
+#define TPC4_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC4_EML_FUNNEL_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_0_BASE 0x7FFF807000ull
+#define TPC4_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_1_BASE 0x7FFF808000ull
+#define TPC4_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_2_BASE 0x7FFF809000ull
+#define TPC4_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_3_BASE 0x7FFF80A000ull
+#define TPC4_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC4_EML_CFG_BASE 0x7FFF840000ull
+#define TPC4_EML_CFG_MAX_OFFSET 0x3380
+#define TPC4_EML_CFG_SECTION 0x1000
+#define mmTPC4_EML_TPC_CFG_BASE 0x7FFF841000ull
+#define TPC4_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC4_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC4_EML_TPC_CFG_BASE 0x7FFF841400ull
+#define KERNEL_TENSOR_0_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC4_EML_TPC_CFG_BASE 0x7FFF841438ull
+#define KERNEL_TENSOR_1_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC4_EML_TPC_CFG_BASE 0x7FFF841470ull
+#define KERNEL_TENSOR_2_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC4_EML_TPC_CFG_BASE 0x7FFF8414A8ull
+#define KERNEL_TENSOR_3_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC4_EML_TPC_CFG_BASE 0x7FFF8414E0ull
+#define KERNEL_TENSOR_4_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC4_EML_TPC_CFG_BASE 0x7FFF841518ull
+#define KERNEL_TENSOR_5_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC4_EML_TPC_CFG_BASE 0x7FFF841550ull
+#define KERNEL_TENSOR_6_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC4_EML_TPC_CFG_BASE 0x7FFF841588ull
+#define KERNEL_TENSOR_7_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC4_EML_TPC_CFG_BASE 0x7FFF8415C0ull
+#define KERNEL_TENSOR_8_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC4_EML_TPC_CFG_BASE 0x7FFF8415F8ull
+#define KERNEL_TENSOR_9_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC4_EML_TPC_CFG_BASE 0x7FFF841630ull
+#define KERNEL_TENSOR_10_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC4_EML_TPC_CFG_BASE 0x7FFF841668ull
+#define KERNEL_TENSOR_11_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC4_EML_TPC_CFG_BASE 0x7FFF8416A0ull
+#define KERNEL_TENSOR_12_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC4_EML_TPC_CFG_BASE 0x7FFF8416D8ull
+#define KERNEL_TENSOR_13_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC4_EML_TPC_CFG_BASE 0x7FFF841710ull
+#define KERNEL_TENSOR_14_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC4_EML_TPC_CFG_BASE 0x7FFF841748ull
+#define KERNEL_TENSOR_15_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC4_EML_TPC_CFG_BASE 0x7FFF841780ull
+#define KERNEL_SYNC_OBJECT_TPC4_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC4_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC4_EML_TPC_CFG_BASE 0x7FFF841788ull
+#define KERNEL_TPC4_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC4_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC4_EML_TPC_CFG_BASE 0x7FFF841A00ull
+#define QM_TENSOR_0_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC4_EML_TPC_CFG_BASE 0x7FFF841A38ull
+#define QM_TENSOR_1_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC4_EML_TPC_CFG_BASE 0x7FFF841A70ull
+#define QM_TENSOR_2_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC4_EML_TPC_CFG_BASE 0x7FFF841AA8ull
+#define QM_TENSOR_3_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC4_EML_TPC_CFG_BASE 0x7FFF841AE0ull
+#define QM_TENSOR_4_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC4_EML_TPC_CFG_BASE 0x7FFF841B18ull
+#define QM_TENSOR_5_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC4_EML_TPC_CFG_BASE 0x7FFF841B50ull
+#define QM_TENSOR_6_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC4_EML_TPC_CFG_BASE 0x7FFF841B88ull
+#define QM_TENSOR_7_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC4_EML_TPC_CFG_BASE 0x7FFF841BC0ull
+#define QM_TENSOR_8_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC4_EML_TPC_CFG_BASE 0x7FFF841BF8ull
+#define QM_TENSOR_9_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC4_EML_TPC_CFG_BASE 0x7FFF841C30ull
+#define QM_TENSOR_10_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC4_EML_TPC_CFG_BASE 0x7FFF841C68ull
+#define QM_TENSOR_11_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC4_EML_TPC_CFG_BASE 0x7FFF841CA0ull
+#define QM_TENSOR_12_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC4_EML_TPC_CFG_BASE 0x7FFF841CD8ull
+#define QM_TENSOR_13_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC4_EML_TPC_CFG_BASE 0x7FFF841D10ull
+#define QM_TENSOR_14_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC4_EML_TPC_CFG_BASE 0x7FFF841D48ull
+#define QM_TENSOR_15_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC4_EML_TPC_CFG_BASE 0x7FFF841D80ull
+#define QM_SYNC_OBJECT_TPC4_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC4_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC4_EML_TPC_CFG_BASE 0x7FFF841D88ull
+#define QM_TPC4_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC4_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC4_EML_TPC_QM_BASE 0x7FFF842000ull
+#define TPC4_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC4_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC4_EML_CS_BASE 0x7FFF9FF000ull
+#define TPC4_EML_CS_MAX_OFFSET 0x1000
+#define TPC4_EML_CS_SECTION 0x1000
+#define mmTPC5_ROM_TABLE_BASE 0x7FFFA00000ull
+#define TPC5_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC5_ROM_TABLE_SECTION 0x1000
+#define mmTPC5_EML_SPMU_BASE 0x7FFFA01000ull
+#define TPC5_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC5_EML_SPMU_SECTION 0x1000
+#define mmTPC5_EML_ETF_BASE 0x7FFFA02000ull
+#define TPC5_EML_ETF_MAX_OFFSET 0x1000
+#define TPC5_EML_ETF_SECTION 0x1000
+#define mmTPC5_EML_STM_BASE 0x7FFFA03000ull
+#define TPC5_EML_STM_MAX_OFFSET 0x1000
+#define TPC5_EML_STM_SECTION 0x2000
+#define mmTPC5_EML_CTI_BASE 0x7FFFA05000ull
+#define TPC5_EML_CTI_MAX_OFFSET 0x1000
+#define TPC5_EML_CTI_SECTION 0x1000
+#define mmTPC5_EML_FUNNEL_BASE 0x7FFFA06000ull
+#define TPC5_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC5_EML_FUNNEL_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_0_BASE 0x7FFFA07000ull
+#define TPC5_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_1_BASE 0x7FFFA08000ull
+#define TPC5_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_2_BASE 0x7FFFA09000ull
+#define TPC5_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_3_BASE 0x7FFFA0A000ull
+#define TPC5_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC5_EML_CFG_BASE 0x7FFFA40000ull
+#define TPC5_EML_CFG_MAX_OFFSET 0x3380
+#define TPC5_EML_CFG_SECTION 0x1000
+#define mmTPC5_EML_TPC_CFG_BASE 0x7FFFA41000ull
+#define TPC5_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC5_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC5_EML_TPC_CFG_BASE 0x7FFFA41400ull
+#define KERNEL_TENSOR_0_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC5_EML_TPC_CFG_BASE 0x7FFFA41438ull
+#define KERNEL_TENSOR_1_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC5_EML_TPC_CFG_BASE 0x7FFFA41470ull
+#define KERNEL_TENSOR_2_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC5_EML_TPC_CFG_BASE 0x7FFFA414A8ull
+#define KERNEL_TENSOR_3_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC5_EML_TPC_CFG_BASE 0x7FFFA414E0ull
+#define KERNEL_TENSOR_4_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC5_EML_TPC_CFG_BASE 0x7FFFA41518ull
+#define KERNEL_TENSOR_5_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC5_EML_TPC_CFG_BASE 0x7FFFA41550ull
+#define KERNEL_TENSOR_6_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC5_EML_TPC_CFG_BASE 0x7FFFA41588ull
+#define KERNEL_TENSOR_7_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC5_EML_TPC_CFG_BASE 0x7FFFA415C0ull
+#define KERNEL_TENSOR_8_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC5_EML_TPC_CFG_BASE 0x7FFFA415F8ull
+#define KERNEL_TENSOR_9_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC5_EML_TPC_CFG_BASE 0x7FFFA41630ull
+#define KERNEL_TENSOR_10_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC5_EML_TPC_CFG_BASE 0x7FFFA41668ull
+#define KERNEL_TENSOR_11_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC5_EML_TPC_CFG_BASE 0x7FFFA416A0ull
+#define KERNEL_TENSOR_12_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC5_EML_TPC_CFG_BASE 0x7FFFA416D8ull
+#define KERNEL_TENSOR_13_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC5_EML_TPC_CFG_BASE 0x7FFFA41710ull
+#define KERNEL_TENSOR_14_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC5_EML_TPC_CFG_BASE 0x7FFFA41748ull
+#define KERNEL_TENSOR_15_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC5_EML_TPC_CFG_BASE 0x7FFFA41780ull
+#define KERNEL_SYNC_OBJECT_TPC5_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC5_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC5_EML_TPC_CFG_BASE 0x7FFFA41788ull
+#define KERNEL_TPC5_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC5_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC5_EML_TPC_CFG_BASE 0x7FFFA41A00ull
+#define QM_TENSOR_0_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC5_EML_TPC_CFG_BASE 0x7FFFA41A38ull
+#define QM_TENSOR_1_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC5_EML_TPC_CFG_BASE 0x7FFFA41A70ull
+#define QM_TENSOR_2_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC5_EML_TPC_CFG_BASE 0x7FFFA41AA8ull
+#define QM_TENSOR_3_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC5_EML_TPC_CFG_BASE 0x7FFFA41AE0ull
+#define QM_TENSOR_4_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC5_EML_TPC_CFG_BASE 0x7FFFA41B18ull
+#define QM_TENSOR_5_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC5_EML_TPC_CFG_BASE 0x7FFFA41B50ull
+#define QM_TENSOR_6_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC5_EML_TPC_CFG_BASE 0x7FFFA41B88ull
+#define QM_TENSOR_7_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC5_EML_TPC_CFG_BASE 0x7FFFA41BC0ull
+#define QM_TENSOR_8_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC5_EML_TPC_CFG_BASE 0x7FFFA41BF8ull
+#define QM_TENSOR_9_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC5_EML_TPC_CFG_BASE 0x7FFFA41C30ull
+#define QM_TENSOR_10_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC5_EML_TPC_CFG_BASE 0x7FFFA41C68ull
+#define QM_TENSOR_11_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC5_EML_TPC_CFG_BASE 0x7FFFA41CA0ull
+#define QM_TENSOR_12_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC5_EML_TPC_CFG_BASE 0x7FFFA41CD8ull
+#define QM_TENSOR_13_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC5_EML_TPC_CFG_BASE 0x7FFFA41D10ull
+#define QM_TENSOR_14_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC5_EML_TPC_CFG_BASE 0x7FFFA41D48ull
+#define QM_TENSOR_15_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC5_EML_TPC_CFG_BASE 0x7FFFA41D80ull
+#define QM_SYNC_OBJECT_TPC5_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC5_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC5_EML_TPC_CFG_BASE 0x7FFFA41D88ull
+#define QM_TPC5_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC5_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC5_EML_TPC_QM_BASE 0x7FFFA42000ull
+#define TPC5_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC5_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC5_EML_CS_BASE 0x7FFFBFF000ull
+#define TPC5_EML_CS_MAX_OFFSET 0x1000
+#define TPC5_EML_CS_SECTION 0x1000
+#define mmTPC6_ROM_TABLE_BASE 0x7FFFC00000ull
+#define TPC6_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC6_ROM_TABLE_SECTION 0x1000
+#define mmTPC6_EML_SPMU_BASE 0x7FFFC01000ull
+#define TPC6_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC6_EML_SPMU_SECTION 0x1000
+#define mmTPC6_EML_ETF_BASE 0x7FFFC02000ull
+#define TPC6_EML_ETF_MAX_OFFSET 0x1000
+#define TPC6_EML_ETF_SECTION 0x1000
+#define mmTPC6_EML_STM_BASE 0x7FFFC03000ull
+#define TPC6_EML_STM_MAX_OFFSET 0x1000
+#define TPC6_EML_STM_SECTION 0x2000
+#define mmTPC6_EML_CTI_BASE 0x7FFFC05000ull
+#define TPC6_EML_CTI_MAX_OFFSET 0x1000
+#define TPC6_EML_CTI_SECTION 0x1000
+#define mmTPC6_EML_FUNNEL_BASE 0x7FFFC06000ull
+#define TPC6_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC6_EML_FUNNEL_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_0_BASE 0x7FFFC07000ull
+#define TPC6_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_1_BASE 0x7FFFC08000ull
+#define TPC6_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_2_BASE 0x7FFFC09000ull
+#define TPC6_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_3_BASE 0x7FFFC0A000ull
+#define TPC6_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC6_EML_CFG_BASE 0x7FFFC40000ull
+#define TPC6_EML_CFG_MAX_OFFSET 0x3380
+#define TPC6_EML_CFG_SECTION 0x1000
+#define mmTPC6_EML_TPC_CFG_BASE 0x7FFFC41000ull
+#define TPC6_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC6_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC6_EML_TPC_CFG_BASE 0x7FFFC41400ull
+#define KERNEL_TENSOR_0_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC6_EML_TPC_CFG_BASE 0x7FFFC41438ull
+#define KERNEL_TENSOR_1_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC6_EML_TPC_CFG_BASE 0x7FFFC41470ull
+#define KERNEL_TENSOR_2_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC6_EML_TPC_CFG_BASE 0x7FFFC414A8ull
+#define KERNEL_TENSOR_3_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC6_EML_TPC_CFG_BASE 0x7FFFC414E0ull
+#define KERNEL_TENSOR_4_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC6_EML_TPC_CFG_BASE 0x7FFFC41518ull
+#define KERNEL_TENSOR_5_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC6_EML_TPC_CFG_BASE 0x7FFFC41550ull
+#define KERNEL_TENSOR_6_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC6_EML_TPC_CFG_BASE 0x7FFFC41588ull
+#define KERNEL_TENSOR_7_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC6_EML_TPC_CFG_BASE 0x7FFFC415C0ull
+#define KERNEL_TENSOR_8_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC6_EML_TPC_CFG_BASE 0x7FFFC415F8ull
+#define KERNEL_TENSOR_9_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC6_EML_TPC_CFG_BASE 0x7FFFC41630ull
+#define KERNEL_TENSOR_10_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC6_EML_TPC_CFG_BASE 0x7FFFC41668ull
+#define KERNEL_TENSOR_11_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC6_EML_TPC_CFG_BASE 0x7FFFC416A0ull
+#define KERNEL_TENSOR_12_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC6_EML_TPC_CFG_BASE 0x7FFFC416D8ull
+#define KERNEL_TENSOR_13_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC6_EML_TPC_CFG_BASE 0x7FFFC41710ull
+#define KERNEL_TENSOR_14_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC6_EML_TPC_CFG_BASE 0x7FFFC41748ull
+#define KERNEL_TENSOR_15_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC6_EML_TPC_CFG_BASE 0x7FFFC41780ull
+#define KERNEL_SYNC_OBJECT_TPC6_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC6_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC6_EML_TPC_CFG_BASE 0x7FFFC41788ull
+#define KERNEL_TPC6_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC6_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC6_EML_TPC_CFG_BASE 0x7FFFC41A00ull
+#define QM_TENSOR_0_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC6_EML_TPC_CFG_BASE 0x7FFFC41A38ull
+#define QM_TENSOR_1_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC6_EML_TPC_CFG_BASE 0x7FFFC41A70ull
+#define QM_TENSOR_2_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC6_EML_TPC_CFG_BASE 0x7FFFC41AA8ull
+#define QM_TENSOR_3_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC6_EML_TPC_CFG_BASE 0x7FFFC41AE0ull
+#define QM_TENSOR_4_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC6_EML_TPC_CFG_BASE 0x7FFFC41B18ull
+#define QM_TENSOR_5_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC6_EML_TPC_CFG_BASE 0x7FFFC41B50ull
+#define QM_TENSOR_6_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC6_EML_TPC_CFG_BASE 0x7FFFC41B88ull
+#define QM_TENSOR_7_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC6_EML_TPC_CFG_BASE 0x7FFFC41BC0ull
+#define QM_TENSOR_8_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC6_EML_TPC_CFG_BASE 0x7FFFC41BF8ull
+#define QM_TENSOR_9_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC6_EML_TPC_CFG_BASE 0x7FFFC41C30ull
+#define QM_TENSOR_10_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC6_EML_TPC_CFG_BASE 0x7FFFC41C68ull
+#define QM_TENSOR_11_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC6_EML_TPC_CFG_BASE 0x7FFFC41CA0ull
+#define QM_TENSOR_12_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC6_EML_TPC_CFG_BASE 0x7FFFC41CD8ull
+#define QM_TENSOR_13_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC6_EML_TPC_CFG_BASE 0x7FFFC41D10ull
+#define QM_TENSOR_14_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC6_EML_TPC_CFG_BASE 0x7FFFC41D48ull
+#define QM_TENSOR_15_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC6_EML_TPC_CFG_BASE 0x7FFFC41D80ull
+#define QM_SYNC_OBJECT_TPC6_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC6_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC6_EML_TPC_CFG_BASE 0x7FFFC41D88ull
+#define QM_TPC6_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC6_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC6_EML_TPC_QM_BASE 0x7FFFC42000ull
+#define TPC6_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC6_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC6_EML_CS_BASE 0x7FFFDFF000ull
+#define TPC6_EML_CS_MAX_OFFSET 0x1000
+#define TPC6_EML_CS_SECTION 0x1000
+#define mmTPC7_ROM_TABLE_BASE 0x7FFFE00000ull
+#define TPC7_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC7_ROM_TABLE_SECTION 0x1000
+#define mmTPC7_EML_SPMU_BASE 0x7FFFE01000ull
+#define TPC7_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC7_EML_SPMU_SECTION 0x1000
+#define mmTPC7_EML_ETF_BASE 0x7FFFE02000ull
+#define TPC7_EML_ETF_MAX_OFFSET 0x1000
+#define TPC7_EML_ETF_SECTION 0x1000
+#define mmTPC7_EML_STM_BASE 0x7FFFE03000ull
+#define TPC7_EML_STM_MAX_OFFSET 0x1000
+#define TPC7_EML_STM_SECTION 0x2000
+#define mmTPC7_EML_CTI_BASE 0x7FFFE05000ull
+#define TPC7_EML_CTI_MAX_OFFSET 0x1000
+#define TPC7_EML_CTI_SECTION 0x1000
+#define mmTPC7_EML_FUNNEL_BASE 0x7FFFE06000ull
+#define TPC7_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC7_EML_FUNNEL_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_0_BASE 0x7FFFE07000ull
+#define TPC7_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_1_BASE 0x7FFFE08000ull
+#define TPC7_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_2_BASE 0x7FFFE09000ull
+#define TPC7_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_3_BASE 0x7FFFE0A000ull
+#define TPC7_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC7_EML_CFG_BASE 0x7FFFE40000ull
+#define TPC7_EML_CFG_MAX_OFFSET 0x3380
+#define TPC7_EML_CFG_SECTION 0x1000
+#define mmTPC7_EML_TPC_CFG_BASE 0x7FFFE41000ull
+#define TPC7_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC7_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC7_EML_TPC_CFG_BASE 0x7FFFE41400ull
+#define KERNEL_TENSOR_0_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC7_EML_TPC_CFG_BASE 0x7FFFE41438ull
+#define KERNEL_TENSOR_1_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC7_EML_TPC_CFG_BASE 0x7FFFE41470ull
+#define KERNEL_TENSOR_2_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC7_EML_TPC_CFG_BASE 0x7FFFE414A8ull
+#define KERNEL_TENSOR_3_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC7_EML_TPC_CFG_BASE 0x7FFFE414E0ull
+#define KERNEL_TENSOR_4_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC7_EML_TPC_CFG_BASE 0x7FFFE41518ull
+#define KERNEL_TENSOR_5_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC7_EML_TPC_CFG_BASE 0x7FFFE41550ull
+#define KERNEL_TENSOR_6_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC7_EML_TPC_CFG_BASE 0x7FFFE41588ull
+#define KERNEL_TENSOR_7_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC7_EML_TPC_CFG_BASE 0x7FFFE415C0ull
+#define KERNEL_TENSOR_8_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC7_EML_TPC_CFG_BASE 0x7FFFE415F8ull
+#define KERNEL_TENSOR_9_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC7_EML_TPC_CFG_BASE 0x7FFFE41630ull
+#define KERNEL_TENSOR_10_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC7_EML_TPC_CFG_BASE 0x7FFFE41668ull
+#define KERNEL_TENSOR_11_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC7_EML_TPC_CFG_BASE 0x7FFFE416A0ull
+#define KERNEL_TENSOR_12_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC7_EML_TPC_CFG_BASE 0x7FFFE416D8ull
+#define KERNEL_TENSOR_13_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC7_EML_TPC_CFG_BASE 0x7FFFE41710ull
+#define KERNEL_TENSOR_14_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC7_EML_TPC_CFG_BASE 0x7FFFE41748ull
+#define KERNEL_TENSOR_15_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC7_EML_TPC_CFG_BASE 0x7FFFE41780ull
+#define KERNEL_SYNC_OBJECT_TPC7_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC7_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC7_EML_TPC_CFG_BASE 0x7FFFE41788ull
+#define KERNEL_TPC7_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC7_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC7_EML_TPC_CFG_BASE 0x7FFFE41A00ull
+#define QM_TENSOR_0_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC7_EML_TPC_CFG_BASE 0x7FFFE41A38ull
+#define QM_TENSOR_1_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC7_EML_TPC_CFG_BASE 0x7FFFE41A70ull
+#define QM_TENSOR_2_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC7_EML_TPC_CFG_BASE 0x7FFFE41AA8ull
+#define QM_TENSOR_3_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC7_EML_TPC_CFG_BASE 0x7FFFE41AE0ull
+#define QM_TENSOR_4_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC7_EML_TPC_CFG_BASE 0x7FFFE41B18ull
+#define QM_TENSOR_5_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC7_EML_TPC_CFG_BASE 0x7FFFE41B50ull
+#define QM_TENSOR_6_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC7_EML_TPC_CFG_BASE 0x7FFFE41B88ull
+#define QM_TENSOR_7_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC7_EML_TPC_CFG_BASE 0x7FFFE41BC0ull
+#define QM_TENSOR_8_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC7_EML_TPC_CFG_BASE 0x7FFFE41BF8ull
+#define QM_TENSOR_9_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC7_EML_TPC_CFG_BASE 0x7FFFE41C30ull
+#define QM_TENSOR_10_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC7_EML_TPC_CFG_BASE 0x7FFFE41C68ull
+#define QM_TENSOR_11_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC7_EML_TPC_CFG_BASE 0x7FFFE41CA0ull
+#define QM_TENSOR_12_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC7_EML_TPC_CFG_BASE 0x7FFFE41CD8ull
+#define QM_TENSOR_13_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC7_EML_TPC_CFG_BASE 0x7FFFE41D10ull
+#define QM_TENSOR_14_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC7_EML_TPC_CFG_BASE 0x7FFFE41D48ull
+#define QM_TENSOR_15_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC7_EML_TPC_CFG_BASE 0x7FFFE41D80ull
+#define QM_SYNC_OBJECT_TPC7_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC7_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC7_EML_TPC_CFG_BASE 0x7FFFE41D88ull
+#define QM_TPC7_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC7_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC7_EML_TPC_QM_BASE 0x7FFFE42000ull
+#define TPC7_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC7_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC7_EML_CS_BASE 0x7FFFFFF000ull
+#define TPC7_EML_CS_MAX_OFFSET 0x1000
+
+#endif /* GAUDI_BLOCKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
new file mode 100644
index 000000000..f92dc53af
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef ASIC_REG_GAUDI_REGS_H_
+#define ASIC_REG_GAUDI_REGS_H_
+
+#include "gaudi_blocks.h"
+#include "psoc_global_conf_regs.h"
+#include "psoc_timestamp_regs.h"
+#include "cpu_if_regs.h"
+#include "mmu_up_regs.h"
+#include "stlb_regs.h"
+#include "dma0_qm_regs.h"
+#include "dma1_qm_regs.h"
+#include "dma2_qm_regs.h"
+#include "dma3_qm_regs.h"
+#include "dma4_qm_regs.h"
+#include "dma5_qm_regs.h"
+#include "dma6_qm_regs.h"
+#include "dma7_qm_regs.h"
+#include "dma0_core_regs.h"
+#include "dma1_core_regs.h"
+#include "dma2_core_regs.h"
+#include "dma3_core_regs.h"
+#include "dma4_core_regs.h"
+#include "dma5_core_regs.h"
+#include "dma6_core_regs.h"
+#include "dma7_core_regs.h"
+#include "mme0_ctrl_regs.h"
+#include "mme1_ctrl_regs.h"
+#include "mme2_ctrl_regs.h"
+#include "mme3_ctrl_regs.h"
+#include "mme0_qm_regs.h"
+#include "mme2_qm_regs.h"
+#include "tpc0_cfg_regs.h"
+#include "tpc1_cfg_regs.h"
+#include "tpc2_cfg_regs.h"
+#include "tpc3_cfg_regs.h"
+#include "tpc4_cfg_regs.h"
+#include "tpc5_cfg_regs.h"
+#include "tpc6_cfg_regs.h"
+#include "tpc7_cfg_regs.h"
+#include "tpc0_qm_regs.h"
+#include "tpc1_qm_regs.h"
+#include "tpc2_qm_regs.h"
+#include "tpc3_qm_regs.h"
+#include "tpc4_qm_regs.h"
+#include "tpc5_qm_regs.h"
+#include "tpc6_qm_regs.h"
+#include "tpc7_qm_regs.h"
+#include "dma_if_e_n_down_ch0_regs.h"
+#include "dma_if_e_n_down_ch1_regs.h"
+#include "dma_if_e_s_down_ch0_regs.h"
+#include "dma_if_e_s_down_ch1_regs.h"
+#include "dma_if_w_n_down_ch0_regs.h"
+#include "dma_if_w_n_down_ch1_regs.h"
+#include "dma_if_w_s_down_ch0_regs.h"
+#include "dma_if_w_s_down_ch1_regs.h"
+#include "dma_if_e_n_regs.h"
+#include "dma_if_e_s_regs.h"
+#include "dma_if_w_n_regs.h"
+#include "dma_if_w_s_regs.h"
+#include "nif_rtr_ctrl_0_regs.h"
+#include "nif_rtr_ctrl_1_regs.h"
+#include "nif_rtr_ctrl_2_regs.h"
+#include "nif_rtr_ctrl_3_regs.h"
+#include "nif_rtr_ctrl_4_regs.h"
+#include "nif_rtr_ctrl_5_regs.h"
+#include "nif_rtr_ctrl_6_regs.h"
+#include "nif_rtr_ctrl_7_regs.h"
+#include "sif_rtr_ctrl_0_regs.h"
+#include "sif_rtr_ctrl_1_regs.h"
+#include "sif_rtr_ctrl_2_regs.h"
+#include "sif_rtr_ctrl_3_regs.h"
+#include "sif_rtr_ctrl_4_regs.h"
+#include "sif_rtr_ctrl_5_regs.h"
+#include "sif_rtr_ctrl_6_regs.h"
+#include "sif_rtr_ctrl_7_regs.h"
+#include "psoc_etr_regs.h"
+
+#include "dma0_qm_masks.h"
+#include "mme0_qm_masks.h"
+#include "tpc0_qm_masks.h"
+#include "dma0_core_masks.h"
+#include "tpc0_cfg_masks.h"
+#include "psoc_global_conf_masks.h"
+
+#include "psoc_pci_pll_regs.h"
+#include "psoc_hbm_pll_regs.h"
+#include "psoc_cpu_pll_regs.h"
+
+#define GAUDI_ECC_MEM_SEL_OFFSET 0xF18
+#define GAUDI_ECC_ADDRESS_OFFSET 0xF1C
+#define GAUDI_ECC_SYNDROME_OFFSET 0xF20
+#define GAUDI_ECC_MEM_INFO_CLR_OFFSET 0xF28
+#define GAUDI_ECC_MEM_INFO_CLR_SERR_MASK BIT(8)
+#define GAUDI_ECC_MEM_INFO_CLR_DERR_MASK BIT(9)
+#define GAUDI_ECC_SERR0_OFFSET 0xF30
+#define GAUDI_ECC_DERR0_OFFSET 0xF40
+
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 0x492000
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 0x494000
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 0x494800
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 0x495000
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 0x495800
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 0x496000
+#define mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0 0x4B2000
+#define mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0 0x4B6000
+#define mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0 0x4D2000
+#define mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0 0x4D6000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 0x4F2000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 0x4F2004
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 0x4F3FFC
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 0x4F4000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0 0x4F6000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_511 0x4F67FC
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AW 0x300400
+#define mmSIF_RTR_1_LBW_RANGE_PROT_HIT_AW 0x310400
+#define mmSIF_RTR_2_LBW_RANGE_PROT_HIT_AW 0x320400
+#define mmSIF_RTR_3_LBW_RANGE_PROT_HIT_AW 0x330400
+#define mmSIF_RTR_4_LBW_RANGE_PROT_HIT_AW 0x340400
+#define mmSIF_RTR_5_LBW_RANGE_PROT_HIT_AW 0x350400
+#define mmSIF_RTR_6_LBW_RANGE_PROT_HIT_AW 0x360400
+#define mmSIF_RTR_7_LBW_RANGE_PROT_HIT_AW 0x370400
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AR 0x300490
+#define mmSIF_RTR_1_LBW_RANGE_PROT_HIT_AR 0x310490
+#define mmSIF_RTR_2_LBW_RANGE_PROT_HIT_AR 0x320490
+#define mmSIF_RTR_3_LBW_RANGE_PROT_HIT_AR 0x330490
+#define mmSIF_RTR_4_LBW_RANGE_PROT_HIT_AR 0x340490
+#define mmSIF_RTR_5_LBW_RANGE_PROT_HIT_AR 0x350490
+#define mmSIF_RTR_6_LBW_RANGE_PROT_HIT_AR 0x360490
+#define mmSIF_RTR_7_LBW_RANGE_PROT_HIT_AR 0x370490
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0 0x300410
+#define mmSIF_RTR_1_LBW_RANGE_PROT_MIN_AW_0 0x310410
+#define mmSIF_RTR_2_LBW_RANGE_PROT_MIN_AW_0 0x320410
+#define mmSIF_RTR_3_LBW_RANGE_PROT_MIN_AW_0 0x330410
+#define mmSIF_RTR_4_LBW_RANGE_PROT_MIN_AW_0 0x340410
+#define mmSIF_RTR_5_LBW_RANGE_PROT_MIN_AW_0 0x350410
+#define mmSIF_RTR_6_LBW_RANGE_PROT_MIN_AW_0 0x360410
+#define mmSIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0 0x370410
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0 0x300450
+#define mmSIF_RTR_1_LBW_RANGE_PROT_MAX_AW_0 0x310450
+#define mmSIF_RTR_2_LBW_RANGE_PROT_MAX_AW_0 0x320450
+#define mmSIF_RTR_3_LBW_RANGE_PROT_MAX_AW_0 0x330450
+#define mmSIF_RTR_4_LBW_RANGE_PROT_MAX_AW_0 0x340450
+#define mmSIF_RTR_5_LBW_RANGE_PROT_MAX_AW_0 0x350450
+#define mmSIF_RTR_6_LBW_RANGE_PROT_MAX_AW_0 0x360450
+#define mmSIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0 0x370450
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0 0x3004A0
+#define mmSIF_RTR_1_LBW_RANGE_PROT_MIN_AR_0 0x3104A0
+#define mmSIF_RTR_2_LBW_RANGE_PROT_MIN_AR_0 0x3204A0
+#define mmSIF_RTR_3_LBW_RANGE_PROT_MIN_AR_0 0x3304A0
+#define mmSIF_RTR_4_LBW_RANGE_PROT_MIN_AR_0 0x3404A0
+#define mmSIF_RTR_5_LBW_RANGE_PROT_MIN_AR_0 0x3504A0
+#define mmSIF_RTR_6_LBW_RANGE_PROT_MIN_AR_0 0x3604A0
+#define mmSIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0 0x3704A0
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0 0x3004E0
+#define mmSIF_RTR_1_LBW_RANGE_PROT_MAX_AR_0 0x3104E0
+#define mmSIF_RTR_2_LBW_RANGE_PROT_MAX_AR_0 0x3204E0
+#define mmSIF_RTR_3_LBW_RANGE_PROT_MAX_AR_0 0x3304E0
+#define mmSIF_RTR_4_LBW_RANGE_PROT_MAX_AR_0 0x3404E0
+#define mmSIF_RTR_5_LBW_RANGE_PROT_MAX_AR_0 0x3504E0
+#define mmSIF_RTR_6_LBW_RANGE_PROT_MAX_AR_0 0x3604E0
+#define mmSIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0 0x3704E0
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_HIT_AW 0x380400
+#define mmNIF_RTR_1_LBW_RANGE_PROT_HIT_AW 0x390400
+#define mmNIF_RTR_2_LBW_RANGE_PROT_HIT_AW 0x3A0400
+#define mmNIF_RTR_3_LBW_RANGE_PROT_HIT_AW 0x3B0400
+#define mmNIF_RTR_4_LBW_RANGE_PROT_HIT_AW 0x3C0400
+#define mmNIF_RTR_5_LBW_RANGE_PROT_HIT_AW 0x3D0400
+#define mmNIF_RTR_6_LBW_RANGE_PROT_HIT_AW 0x3E0400
+#define mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AW 0x3F0400
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_HIT_AR 0x380490
+#define mmNIF_RTR_1_LBW_RANGE_PROT_HIT_AR 0x390490
+#define mmNIF_RTR_2_LBW_RANGE_PROT_HIT_AR 0x3A0490
+#define mmNIF_RTR_3_LBW_RANGE_PROT_HIT_AR 0x3B0490
+#define mmNIF_RTR_4_LBW_RANGE_PROT_HIT_AR 0x3C0490
+#define mmNIF_RTR_5_LBW_RANGE_PROT_HIT_AR 0x3D0490
+#define mmNIF_RTR_6_LBW_RANGE_PROT_HIT_AR 0x3E0490
+#define mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AR 0x3F0490
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0 0x380410
+#define mmNIF_RTR_1_LBW_RANGE_PROT_MIN_AW_0 0x390410
+#define mmNIF_RTR_2_LBW_RANGE_PROT_MIN_AW_0 0x3A0410
+#define mmNIF_RTR_3_LBW_RANGE_PROT_MIN_AW_0 0x3B0410
+#define mmNIF_RTR_4_LBW_RANGE_PROT_MIN_AW_0 0x3C0410
+#define mmNIF_RTR_5_LBW_RANGE_PROT_MIN_AW_0 0x3D0410
+#define mmNIF_RTR_6_LBW_RANGE_PROT_MIN_AW_0 0x3E0410
+#define mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0 0x3F0410
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0 0x380450
+#define mmNIF_RTR_1_LBW_RANGE_PROT_MAX_AW_0 0x390450
+#define mmNIF_RTR_2_LBW_RANGE_PROT_MAX_AW_0 0x3A0450
+#define mmNIF_RTR_3_LBW_RANGE_PROT_MAX_AW_0 0x3B0450
+#define mmNIF_RTR_4_LBW_RANGE_PROT_MAX_AW_0 0x3C0450
+#define mmNIF_RTR_5_LBW_RANGE_PROT_MAX_AW_0 0x3D0450
+#define mmNIF_RTR_6_LBW_RANGE_PROT_MAX_AW_0 0x3E0450
+#define mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0 0x3F0450
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0 0x3804A0
+#define mmNIF_RTR_1_LBW_RANGE_PROT_MIN_AR_0 0x3904A0
+#define mmNIF_RTR_2_LBW_RANGE_PROT_MIN_AR_0 0x3A04A0
+#define mmNIF_RTR_3_LBW_RANGE_PROT_MIN_AR_0 0x3B04A0
+#define mmNIF_RTR_4_LBW_RANGE_PROT_MIN_AR_0 0x3C04A0
+#define mmNIF_RTR_5_LBW_RANGE_PROT_MIN_AR_0 0x3D04A0
+#define mmNIF_RTR_6_LBW_RANGE_PROT_MIN_AR_0 0x3E04A0
+#define mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0 0x3F04A0
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0 0x3804E0
+#define mmNIF_RTR_1_LBW_RANGE_PROT_MAX_AR_0 0x3904E0
+#define mmNIF_RTR_2_LBW_RANGE_PROT_MAX_AR_0 0x3A04E0
+#define mmNIF_RTR_3_LBW_RANGE_PROT_MAX_AR_0 0x3B04E0
+#define mmNIF_RTR_4_LBW_RANGE_PROT_MAX_AR_0 0x3C04E0
+#define mmNIF_RTR_5_LBW_RANGE_PROT_MAX_AR_0 0x3D04E0
+#define mmNIF_RTR_6_LBW_RANGE_PROT_MAX_AR_0 0x3E04E0
+#define mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0 0x3F04E0
+
+#define mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_0 0x489030
+#define mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_1 0x489034
+
+#define mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_0 0x4A9030
+#define mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_1 0x4A9034
+
+#define mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_0 0x4C9030
+#define mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_1 0x4C9034
+
+#define mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_0 0x4E9030
+#define mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_1 0x4E9034
+
+#define mmMME1_QM_GLBL_CFG0 0xE8000
+#define mmMME1_QM_GLBL_STS0 0xE8038
+
+#define mmMME0_SBAB_SB_STALL 0x4002C
+#define mmMME0_SBAB_ARUSER0 0x40034
+#define mmMME0_SBAB_ARUSER1 0x40038
+#define mmMME0_SBAB_PROT 0x40050
+
+#define mmMME1_SBAB_SB_STALL 0xC002C
+#define mmMME1_SBAB_ARUSER0 0xC0034
+#define mmMME1_SBAB_ARUSER1 0xC0038
+#define mmMME1_SBAB_PROT 0xC0050
+
+#define mmMME2_SBAB_SB_STALL 0x14002C
+#define mmMME2_SBAB_ARUSER0 0x140034
+#define mmMME2_SBAB_ARUSER1 0x140038
+#define mmMME2_SBAB_PROT 0x140050
+
+#define mmMME3_SBAB_SB_STALL 0x1C002C
+#define mmMME3_SBAB_ARUSER0 0x1C0034
+#define mmMME3_SBAB_ARUSER1 0x1C0038
+#define mmMME3_SBAB_PROT 0x1C0050
+
+#define mmMME0_ACC_ACC_STALL 0x20028
+#define mmMME0_ACC_WBC 0x20038
+#define mmMME0_ACC_PROT 0x20050
+
+#define mmMME1_ACC_ACC_STALL 0xA0028
+#define mmMME1_ACC_WBC 0xA0038
+#define mmMME1_ACC_PROT 0xA0050
+
+#define mmMME2_ACC_ACC_STALL 0x120028
+#define mmMME2_ACC_WBC 0x120038
+#define mmMME2_ACC_PROT 0x120050
+
+#define mmMME3_ACC_ACC_STALL 0x1A0028
+#define mmMME3_ACC_WBC 0x1A0038
+#define mmMME3_ACC_PROT 0x1A0050
+
+#define mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR 0x800040
+
+#define mmPSOC_EFUSE_READ 0xC4A000
+#define mmPSOC_EFUSE_DATA_0 0xC4A080
+
+#define mmPCIE_WRAP_MAX_OUTSTAND 0xC01B20
+#define mmPCIE_WRAP_LBW_PROT_OVR 0xC01B48
+#define mmPCIE_WRAP_HBW_DRAIN_CFG 0xC01D54
+#define mmPCIE_WRAP_LBW_DRAIN_CFG 0xC01D5C
+
+#define mmPCIE_MSI_INTR_0 0xC13000
+
+#define mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG 0xC02000
+
+#define mmPCIE_AUX_FLR_CTRL 0xC07394
+#define mmPCIE_AUX_DBI 0xC07490
+
+#endif /* ASIC_REG_GAUDI_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h
new file mode 100644
index 000000000..083d073a0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h
@@ -0,0 +1,1456 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_MME0_CTRL_REGS_H_
+#define ASIC_REG_MME0_CTRL_REGS_H_
+
+/*
+ *****************************************
+ *   MME0_CTRL (Prototype: MME)
+ *****************************************
+ */
+
+#define mmMME0_CTRL_ARCH_STATUS 0x60000
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_HIGH_S 0x60008
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_HIGH_L 0x6000C
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_HIGH_O 0x60010
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_LOW_S 0x60014
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_LOW_L 0x60018
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_LOW_O 0x6001C
+
+#define mmMME0_CTRL_ARCH_HEADER_LOW 0x60020
+
+#define mmMME0_CTRL_ARCH_HEADER_HIGH 0x60024
+
+#define mmMME0_CTRL_ARCH_CONV_KERNEL_SIZE_MINUS_1 0x60028
+
+#define mmMME0_CTRL_ARCH_CONV_ASSOCIATED_DIMS_LOW 0x6002C
+
+#define mmMME0_CTRL_ARCH_CONV_ASSOCIATED_DIMS_HIGH 0x60030
+
+#define mmMME0_CTRL_ARCH_NUM_ITERATIONS_MINUS_1 0x60034
+
+#define mmMME0_CTRL_ARCH_OUTER_LOOP 0x60038
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_0 0x6003C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_1 0x60040
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_2 0x60044
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_3 0x60048
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_4 0x6004C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_0 0x60050
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_1 0x60054
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_2 0x60058
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_3 0x6005C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_4 0x60060
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_ROI_SIZE_0 0x60064
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_ROI_SIZE_1 0x60068
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_ROI_SIZE_2 0x6006C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_ROI_SIZE_3 0x60070
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_0 0x60074
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_1 0x60078
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_2 0x6007C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_3 0x60080
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60084
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_0 0x60088
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_1 0x6008C
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_2 0x60090
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_3 0x60094
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_4 0x60098
+
+#define mmMME0_CTRL_ARCH_AGU_S_START_OFFSET_0 0x6009C
+
+#define mmMME0_CTRL_ARCH_AGU_S_START_OFFSET_1 0x600A0
+
+#define mmMME0_CTRL_ARCH_AGU_S_START_OFFSET_2 0x600A4
+
+#define mmMME0_CTRL_ARCH_AGU_S_START_OFFSET_3 0x600A8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_0 0x600AC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_1 0x600B0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_2 0x600B4
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_3 0x600B8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_4 0x600BC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_0 0x600C0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_1 0x600C4
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_2 0x600C8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_3 0x600CC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_4 0x600D0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_ROI_SIZE_0 0x600D4
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_ROI_SIZE_1 0x600D8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_ROI_SIZE_2 0x600DC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_ROI_SIZE_3 0x600E0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_0 0x600E4
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_1 0x600E8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_2 0x600EC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_3 0x600F0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x600F4
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x600F8
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x600FC
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60100
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60104
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60108
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_0 0x6010C
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_1 0x60110
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_2 0x60114
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_3 0x60118
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x6011C
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x60120
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x60124
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x60128
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x6012C
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_0 0x60130
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_1 0x60134
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_2 0x60138
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_3 0x6013C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_0 0x60140
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_1 0x60144
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_2 0x60148
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_3 0x6014C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_4 0x60150
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_0 0x60154
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_1 0x60158
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_2 0x6015C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_3 0x60160
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_4 0x60164
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_ROI_SIZE_0 0x60168
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_ROI_SIZE_1 0x6016C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_ROI_SIZE_2 0x60170
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_ROI_SIZE_3 0x60174
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_0 0x60178
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_1 0x6017C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_2 0x60180
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_3 0x60184
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60188
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x6018C
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60190
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60194
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60198
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x6019C
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_0 0x601A0
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_1 0x601A4
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_2 0x601A8
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_3 0x601AC
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x601B0
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x601B4
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x601B8
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x601BC
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x601C0
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_0 0x601C4
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_1 0x601C8
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_2 0x601CC
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_3 0x601D0
+
+#define mmMME0_CTRL_ARCH_DESC_SB_REPEAT 0x601D4
+
+#define mmMME0_CTRL_ARCH_DESC_RATE_LIMITER 0x601D8
+
+#define mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x601DC
+
+#define mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x601E0
+
+#define mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_HIGH 0x601E4
+
+#define mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_DATA 0x601E8
+
+#define mmMME0_CTRL_ARCH_DESC_AXI_USER_DATA 0x601EC
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_S 0x601F0
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_L_LOCAL 0x601F4
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_L_REMOTE 0x601F8
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_O_LOCAL 0x601FC
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_O_REMOTE 0x60200
+
+#define mmMME0_CTRL_ARCH_DESC_PADDING_VALUE_S 0x60204
+
+#define mmMME0_CTRL_ARCH_DESC_PADDING_VALUE_L 0x60208
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_S 0x6020C
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_L_LOCAL 0x60210
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_L_REMOTE 0x60214
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_O_LOCAL 0x60218
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_O_REMOTE 0x6021C
+
+#define mmMME0_CTRL_ARCH_DESC_PCU_RL_SATURATION 0x60220
+
+#define mmMME0_CTRL_ARCH_DESC_DUMMY 0x60224
+
+#define mmMME0_CTRL_CMD 0x60280
+
+#define mmMME0_CTRL_STATUS1 0x60284
+
+#define mmMME0_CTRL_RESET 0x60288
+
+#define mmMME0_CTRL_QM_STALL 0x6028C
+
+#define mmMME0_CTRL_SYNC_OBJECT_FIFO_TH 0x60290
+
+#define mmMME0_CTRL_EUS_ROLLUP_CNT_ADD 0x60294
+
+#define mmMME0_CTRL_INTR_CAUSE 0x60298
+
+#define mmMME0_CTRL_INTR_MASK 0x6029C
+
+#define mmMME0_CTRL_LOG_SHADOW 0x602A0
+
+#define mmMME0_CTRL_PCU_RL_DESC0 0x602A4
+
+#define mmMME0_CTRL_PCU_RL_TOKEN_UPDATE 0x602A8
+
+#define mmMME0_CTRL_PCU_RL_TH 0x602AC
+
+#define mmMME0_CTRL_PCU_RL_MIN 0x602B0
+
+#define mmMME0_CTRL_PCU_RL_CTRL_EN 0x602B4
+
+#define mmMME0_CTRL_PCU_RL_HISTORY_LOG_SIZE 0x602B8
+
+#define mmMME0_CTRL_PCU_DUMMY_A_BF16 0x602BC
+
+#define mmMME0_CTRL_PCU_DUMMY_B_BF16 0x602C0
+
+#define mmMME0_CTRL_PCU_DUMMY_A_FP32_ODD 0x602C4
+
+#define mmMME0_CTRL_PCU_DUMMY_A_FP32_EVEN 0x602C8
+
+#define mmMME0_CTRL_PCU_DUMMY_B_FP32_ODD 0x602CC
+
+#define mmMME0_CTRL_PCU_DUMMY_B_FP32_EVEN 0x602D0
+
+#define mmMME0_CTRL_PROT 0x602D4
+
+#define mmMME0_CTRL_EU_POWER_SAVE_DISABLE 0x602D8
+
+#define mmMME0_CTRL_CS_DBG_BLOCK_ID 0x602DC
0x602DC + +#define mmMME0_CTRL_CS_DBG_STATUS_DROP_CNT 0x602E0 + +#define mmMME0_CTRL_TE_CLOSE_CGATE 0x602E4 + +#define mmMME0_CTRL_AGU_SM_INFLIGHT_CNTR 0x602E8 + +#define mmMME0_CTRL_AGU_SM_TOTAL_CNTR 0x602EC + +#define mmMME0_CTRL_EZSYNC_OUT_CREDIT 0x602F0 + +#define mmMME0_CTRL_PCU_RL_SAT_SEC 0x602F4 + +#define mmMME0_CTRL_AGU_SYNC_MSG_AXI_USER 0x602F8 + +#define mmMME0_CTRL_QM_SLV_LBW_CLK_EN 0x602FC + +#define mmMME0_CTRL_SHADOW_0_STATUS 0x60400 + +#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_HIGH_S 0x60408 + +#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_HIGH_L 0x6040C + +#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_HIGH_O 0x60410 + +#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_LOW_S 0x60414 + +#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_LOW_L 0x60418 + +#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_LOW_O 0x6041C + +#define mmMME0_CTRL_SHADOW_0_HEADER_LOW 0x60420 + +#define mmMME0_CTRL_SHADOW_0_HEADER_HIGH 0x60424 + +#define mmMME0_CTRL_SHADOW_0_CONV_KERNEL_SIZE_MINUS_1 0x60428 + +#define mmMME0_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_LOW 0x6042C + +#define mmMME0_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_HIGH 0x60430 + +#define mmMME0_CTRL_SHADOW_0_NUM_ITERATIONS_MINUS_1 0x60434 + +#define mmMME0_CTRL_SHADOW_0_OUTER_LOOP 0x60438 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_0 0x6043C + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_1 0x60440 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_2 0x60444 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_3 0x60448 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_4 0x6044C + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_0 0x60450 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_1 0x60454 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_2 0x60458 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_3 0x6045C + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_4 0x60460 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_0 0x60464 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_1 0x60468 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_2 0x6046C + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_3 0x60470 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_0 0x60474 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_1 0x60478 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_2 0x6047C + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_3 0x60480 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60484 + +#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_0 0x60488 + +#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_1 0x6048C + +#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_2 0x60490 + +#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_3 0x60494 + +#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_4 0x60498 + +#define mmMME0_CTRL_SHADOW_0_AGU_S_START_OFFSET_0 0x6049C + +#define mmMME0_CTRL_SHADOW_0_AGU_S_START_OFFSET_1 0x604A0 + +#define mmMME0_CTRL_SHADOW_0_AGU_S_START_OFFSET_2 0x604A4 + +#define mmMME0_CTRL_SHADOW_0_AGU_S_START_OFFSET_3 0x604A8 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_0 0x604AC + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_1 0x604B0 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_2 0x604B4 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_3 0x604B8 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_4 0x604BC + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_0 0x604C0 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_1 0x604C4 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_2 0x604C8 + +#define 
mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_3 0x604CC + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_4 0x604D0 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_0 0x604D4 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_1 0x604D8 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_2 0x604DC + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_3 0x604E0 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_0 0x604E4 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_1 0x604E8 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_2 0x604EC + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_3 0x604F0 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x604F4 + +#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x604F8 + +#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x604FC + +#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60500 + +#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60504 + +#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60508 + +#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_0 0x6050C + +#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_1 0x60510 + +#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_2 0x60514 + +#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_3 0x60518 + +#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x6051C + +#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x60520 + +#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x60524 + +#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x60528 + +#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x6052C + +#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_0 0x60530 + +#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_1 0x60534 + +#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_2 0x60538 + +#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_3 0x6053C + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_0 0x60540 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_1 0x60544 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_2 0x60548 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_3 0x6054C + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_4 0x60550 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_0 0x60554 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_1 0x60558 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_2 0x6055C + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_3 0x60560 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_4 0x60564 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_0 0x60568 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_1 0x6056C + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_2 0x60570 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_3 0x60574 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_0 0x60578 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_1 0x6057C + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_2 0x60580 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_3 0x60584 + +#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60588 + +#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x6058C + +#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60590 + +#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60594 + +#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60598 + +#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x6059C + +#define 
mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_0 0x605A0 + +#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_1 0x605A4 + +#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_2 0x605A8 + +#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_3 0x605AC + +#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x605B0 + +#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x605B4 + +#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x605B8 + +#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x605BC + +#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x605C0 + +#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_0 0x605C4 + +#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_1 0x605C8 + +#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_2 0x605CC + +#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_3 0x605D0 + +#define mmMME0_CTRL_SHADOW_0_DESC_SB_REPEAT 0x605D4 + +#define mmMME0_CTRL_SHADOW_0_DESC_RATE_LIMITER 0x605D8 + +#define mmMME0_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x605DC + +#define mmMME0_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x605E0 + +#define mmMME0_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_HIGH 0x605E4 + +#define mmMME0_CTRL_SHADOW_0_DESC_SYNC_OBJECT_DATA 0x605E8 + +#define mmMME0_CTRL_SHADOW_0_DESC_AXI_USER_DATA 0x605EC + +#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_S 0x605F0 + +#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_L_LOCAL 0x605F4 + +#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_L_REMOTE 0x605F8 + +#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_O_LOCAL 0x605FC + +#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_O_REMOTE 0x60600 + +#define mmMME0_CTRL_SHADOW_0_DESC_PADDING_VALUE_S 0x60604 + +#define mmMME0_CTRL_SHADOW_0_DESC_PADDING_VALUE_L 0x60608 + +#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_S 0x6060C + +#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_LOCAL 0x60610 + +#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_REMOTE 0x60614 + +#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_LOCAL 0x60618 + +#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_REMOTE 0x6061C + +#define mmMME0_CTRL_SHADOW_0_DESC_PCU_RL_SATURATION 0x60620 + +#define mmMME0_CTRL_SHADOW_0_DESC_DUMMY 0x60624 + +#define mmMME0_CTRL_SHADOW_1_STATUS 0x60680 + +#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_HIGH_S 0x60688 + +#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_HIGH_L 0x6068C + +#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_HIGH_O 0x60690 + +#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_LOW_S 0x60694 + +#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_LOW_L 0x60698 + +#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_LOW_O 0x6069C + +#define mmMME0_CTRL_SHADOW_1_HEADER_LOW 0x606A0 + +#define mmMME0_CTRL_SHADOW_1_HEADER_HIGH 0x606A4 + +#define mmMME0_CTRL_SHADOW_1_CONV_KERNEL_SIZE_MINUS_1 0x606A8 + +#define mmMME0_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_LOW 0x606AC + +#define mmMME0_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_HIGH 0x606B0 + +#define mmMME0_CTRL_SHADOW_1_NUM_ITERATIONS_MINUS_1 0x606B4 + +#define mmMME0_CTRL_SHADOW_1_OUTER_LOOP 0x606B8 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_0 0x606BC + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_1 0x606C0 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_2 0x606C4 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_3 0x606C8 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_4 0x606CC + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_0 0x606D0 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_1 0x606D4 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_2 0x606D8 + +#define 
mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_3 0x606DC + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_4 0x606E0 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_0 0x606E4 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_1 0x606E8 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_2 0x606EC + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_3 0x606F0 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_0 0x606F4 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_1 0x606F8 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_2 0x606FC + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_3 0x60700 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60704 + +#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_0 0x60708 + +#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_1 0x6070C + +#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_2 0x60710 + +#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_3 0x60714 + +#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_4 0x60718 + +#define mmMME0_CTRL_SHADOW_1_AGU_S_START_OFFSET_0 0x6071C + +#define mmMME0_CTRL_SHADOW_1_AGU_S_START_OFFSET_1 0x60720 + +#define mmMME0_CTRL_SHADOW_1_AGU_S_START_OFFSET_2 0x60724 + +#define mmMME0_CTRL_SHADOW_1_AGU_S_START_OFFSET_3 0x60728 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_0 0x6072C + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_1 0x60730 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_2 0x60734 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_3 0x60738 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_4 0x6073C + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_0 0x60740 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_1 0x60744 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_2 0x60748 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_3 0x6074C + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_4 0x60750 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_0 0x60754 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_1 0x60758 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_2 0x6075C + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_3 0x60760 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_0 0x60764 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_1 0x60768 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_2 0x6076C + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_3 0x60770 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x60774 + +#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x60778 + +#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x6077C + +#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60780 + +#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60784 + +#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60788 + +#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_0 0x6078C + +#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_1 0x60790 + +#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_2 0x60794 + +#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_3 0x60798 + +#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x6079C + +#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x607A0 + +#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x607A4 + +#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x607A8 + +#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x607AC + +#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_0 0x607B0 + +#define 
mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_1 0x607B4 + +#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_2 0x607B8 + +#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_3 0x607BC + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_0 0x607C0 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_1 0x607C4 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_2 0x607C8 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_3 0x607CC + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_4 0x607D0 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_0 0x607D4 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_1 0x607D8 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_2 0x607DC + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_3 0x607E0 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_4 0x607E4 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_0 0x607E8 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_1 0x607EC + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_2 0x607F0 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_3 0x607F4 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_0 0x607F8 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_1 0x607FC + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_2 0x60800 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_3 0x60804 + +#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60808 + +#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x6080C + +#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60810 + +#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60814 + +#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60818 + +#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x6081C + +#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_0 0x60820 + +#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_1 0x60824 + +#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_2 0x60828 + +#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_3 0x6082C + +#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x60830 + +#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x60834 + +#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x60838 + +#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x6083C + +#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x60840 + +#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_0 0x60844 + +#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_1 0x60848 + +#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_2 0x6084C + +#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_3 0x60850 + +#define mmMME0_CTRL_SHADOW_1_DESC_SB_REPEAT 0x60854 + +#define mmMME0_CTRL_SHADOW_1_DESC_RATE_LIMITER 0x60858 + +#define mmMME0_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x6085C + +#define mmMME0_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x60860 + +#define mmMME0_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_HIGH 0x60864 + +#define mmMME0_CTRL_SHADOW_1_DESC_SYNC_OBJECT_DATA 0x60868 + +#define mmMME0_CTRL_SHADOW_1_DESC_AXI_USER_DATA 0x6086C + +#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_S 0x60870 + +#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_L_LOCAL 0x60874 + +#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_L_REMOTE 0x60878 + +#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_O_LOCAL 0x6087C + +#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_O_REMOTE 0x60880 + +#define mmMME0_CTRL_SHADOW_1_DESC_PADDING_VALUE_S 0x60884 + +#define mmMME0_CTRL_SHADOW_1_DESC_PADDING_VALUE_L 0x60888 + +#define 
mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_S 0x6088C + +#define mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_LOCAL 0x60890 + +#define mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_REMOTE 0x60894 + +#define mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_LOCAL 0x60898 + +#define mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_REMOTE 0x6089C + +#define mmMME0_CTRL_SHADOW_1_DESC_PCU_RL_SATURATION 0x608A0 + +#define mmMME0_CTRL_SHADOW_1_DESC_DUMMY 0x608A4 + +#define mmMME0_CTRL_SHADOW_2_STATUS 0x60900 + +#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_HIGH_S 0x60908 + +#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_HIGH_L 0x6090C + +#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_HIGH_O 0x60910 + +#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_LOW_S 0x60914 + +#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_LOW_L 0x60918 + +#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_LOW_O 0x6091C + +#define mmMME0_CTRL_SHADOW_2_HEADER_LOW 0x60920 + +#define mmMME0_CTRL_SHADOW_2_HEADER_HIGH 0x60924 + +#define mmMME0_CTRL_SHADOW_2_CONV_KERNEL_SIZE_MINUS_1 0x60928 + +#define mmMME0_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_LOW 0x6092C + +#define mmMME0_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_HIGH 0x60930 + +#define mmMME0_CTRL_SHADOW_2_NUM_ITERATIONS_MINUS_1 0x60934 + +#define mmMME0_CTRL_SHADOW_2_OUTER_LOOP 0x60938 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_0 0x6093C + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_1 0x60940 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_2 0x60944 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_3 0x60948 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_4 0x6094C + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_0 0x60950 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_1 0x60954 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_2 0x60958 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_3 0x6095C + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_4 0x60960 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_0 0x60964 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_1 0x60968 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_2 0x6096C + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_3 0x60970 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_0 0x60974 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_1 0x60978 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_2 0x6097C + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_3 0x60980 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60984 + +#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_0 0x60988 + +#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_1 0x6098C + +#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_2 0x60990 + +#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_3 0x60994 + +#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_4 0x60998 + +#define mmMME0_CTRL_SHADOW_2_AGU_S_START_OFFSET_0 0x6099C + +#define mmMME0_CTRL_SHADOW_2_AGU_S_START_OFFSET_1 0x609A0 + +#define mmMME0_CTRL_SHADOW_2_AGU_S_START_OFFSET_2 0x609A4 + +#define mmMME0_CTRL_SHADOW_2_AGU_S_START_OFFSET_3 0x609A8 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_0 0x609AC + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_1 0x609B0 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_2 0x609B4 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_3 0x609B8 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_4 0x609BC + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_0 0x609C0 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_1 0x609C4 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_2 0x609C8 + +#define 
mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_3 0x609CC + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_4 0x609D0 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_0 0x609D4 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_1 0x609D8 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_2 0x609DC + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_3 0x609E0 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_0 0x609E4 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_1 0x609E8 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_2 0x609EC + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_3 0x609F0 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x609F4 + +#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x609F8 + +#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x609FC + +#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60A00 + +#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60A04 + +#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60A08 + +#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_0 0x60A0C + +#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_1 0x60A10 + +#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_2 0x60A14 + +#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_3 0x60A18 + +#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x60A1C + +#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x60A20 + +#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x60A24 + +#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x60A28 + +#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x60A2C + +#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_0 0x60A30 + +#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_1 0x60A34 + +#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_2 0x60A38 + +#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_3 0x60A3C + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_0 0x60A40 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_1 0x60A44 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_2 0x60A48 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_3 0x60A4C + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_4 0x60A50 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_0 0x60A54 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_1 0x60A58 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_2 0x60A5C + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_3 0x60A60 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_4 0x60A64 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_0 0x60A68 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_1 0x60A6C + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_2 0x60A70 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_3 0x60A74 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_0 0x60A78 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_1 0x60A7C + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_2 0x60A80 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_3 0x60A84 + +#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60A88 + +#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x60A8C + +#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60A90 + +#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60A94 + +#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60A98 + +#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x60A9C + +#define 
mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_0 0x60AA0 + +#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_1 0x60AA4 + +#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_2 0x60AA8 + +#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_3 0x60AAC + +#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x60AB0 + +#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x60AB4 + +#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x60AB8 + +#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x60ABC + +#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x60AC0 + +#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_0 0x60AC4 + +#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_1 0x60AC8 + +#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_2 0x60ACC + +#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_3 0x60AD0 + +#define mmMME0_CTRL_SHADOW_2_DESC_SB_REPEAT 0x60AD4 + +#define mmMME0_CTRL_SHADOW_2_DESC_RATE_LIMITER 0x60AD8 + +#define mmMME0_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x60ADC + +#define mmMME0_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x60AE0 + +#define mmMME0_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_HIGH 0x60AE4 + +#define mmMME0_CTRL_SHADOW_2_DESC_SYNC_OBJECT_DATA 0x60AE8 + +#define mmMME0_CTRL_SHADOW_2_DESC_AXI_USER_DATA 0x60AEC + +#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_S 0x60AF0 + +#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_L_LOCAL 0x60AF4 + +#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_L_REMOTE 0x60AF8 + +#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_O_LOCAL 0x60AFC + +#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_O_REMOTE 0x60B00 + +#define mmMME0_CTRL_SHADOW_2_DESC_PADDING_VALUE_S 0x60B04 + +#define mmMME0_CTRL_SHADOW_2_DESC_PADDING_VALUE_L 0x60B08 + +#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_S 0x60B0C + +#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_LOCAL 0x60B10 + +#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_REMOTE 0x60B14 + +#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_LOCAL 0x60B18 + +#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_REMOTE 0x60B1C + +#define mmMME0_CTRL_SHADOW_2_DESC_PCU_RL_SATURATION 0x60B20 + +#define mmMME0_CTRL_SHADOW_2_DESC_DUMMY 0x60B24 + +#define mmMME0_CTRL_SHADOW_3_STATUS 0x60B80 + +#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_HIGH_S 0x60B88 + +#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_HIGH_L 0x60B8C + +#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_HIGH_O 0x60B90 + +#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_LOW_S 0x60B94 + +#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_LOW_L 0x60B98 + +#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_LOW_O 0x60B9C + +#define mmMME0_CTRL_SHADOW_3_HEADER_LOW 0x60BA0 + +#define mmMME0_CTRL_SHADOW_3_HEADER_HIGH 0x60BA4 + +#define mmMME0_CTRL_SHADOW_3_CONV_KERNEL_SIZE_MINUS_1 0x60BA8 + +#define mmMME0_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_LOW 0x60BAC + +#define mmMME0_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_HIGH 0x60BB0 + +#define mmMME0_CTRL_SHADOW_3_NUM_ITERATIONS_MINUS_1 0x60BB4 + +#define mmMME0_CTRL_SHADOW_3_OUTER_LOOP 0x60BB8 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_0 0x60BBC + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_1 0x60BC0 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_2 0x60BC4 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_3 0x60BC8 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_4 0x60BCC + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_0 0x60BD0 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_1 0x60BD4 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_2 0x60BD8 + +#define 
mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_3 0x60BDC + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_4 0x60BE0 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_0 0x60BE4 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_1 0x60BE8 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_2 0x60BEC + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_3 0x60BF0 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_0 0x60BF4 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_1 0x60BF8 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_2 0x60BFC + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_3 0x60C00 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60C04 + +#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_0 0x60C08 + +#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_1 0x60C0C + +#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_2 0x60C10 + +#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_3 0x60C14 + +#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_4 0x60C18 + +#define mmMME0_CTRL_SHADOW_3_AGU_S_START_OFFSET_0 0x60C1C + +#define mmMME0_CTRL_SHADOW_3_AGU_S_START_OFFSET_1 0x60C20 + +#define mmMME0_CTRL_SHADOW_3_AGU_S_START_OFFSET_2 0x60C24 + +#define mmMME0_CTRL_SHADOW_3_AGU_S_START_OFFSET_3 0x60C28 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_0 0x60C2C + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_1 0x60C30 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_2 0x60C34 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_3 0x60C38 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_4 0x60C3C + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_0 0x60C40 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_1 0x60C44 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_2 0x60C48 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_3 0x60C4C + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_4 0x60C50 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_0 0x60C54 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_1 0x60C58 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_2 0x60C5C + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_3 0x60C60 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_0 0x60C64 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_1 0x60C68 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_2 0x60C6C + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_3 0x60C70 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x60C74 + +#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x60C78 + +#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x60C7C + +#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60C80 + +#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60C84 + +#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60C88 + +#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_0 0x60C8C + +#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_1 0x60C90 + +#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_2 0x60C94 + +#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_3 0x60C98 + +#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x60C9C + +#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x60CA0 + +#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x60CA4 + +#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x60CA8 + +#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x60CAC + +#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_0 0x60CB0 + +#define 
mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_1 0x60CB4 + +#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_2 0x60CB8 + +#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_3 0x60CBC + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_0 0x60CC0 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_1 0x60CC4 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_2 0x60CC8 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_3 0x60CCC + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_4 0x60CD0 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_0 0x60CD4 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_1 0x60CD8 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_2 0x60CDC + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_3 0x60CE0 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_4 0x60CE4 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_0 0x60CE8 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_1 0x60CEC + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_2 0x60CF0 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_3 0x60CF4 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_0 0x60CF8 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_1 0x60CFC + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_2 0x60D00 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_3 0x60D04 + +#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60D08 + +#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x60D0C + +#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60D10 + +#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60D14 + +#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60D18 + +#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x60D1C + +#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_0 0x60D20 + +#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_1 0x60D24 + +#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_2 0x60D28 + +#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_3 0x60D2C + +#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x60D30 + +#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x60D34 + +#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x60D38 + +#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x60D3C + +#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x60D40 + +#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_0 0x60D44 + +#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_1 0x60D48 + +#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_2 0x60D4C + +#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_3 0x60D50 + +#define mmMME0_CTRL_SHADOW_3_DESC_SB_REPEAT 0x60D54 + +#define mmMME0_CTRL_SHADOW_3_DESC_RATE_LIMITER 0x60D58 + +#define mmMME0_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x60D5C + +#define mmMME0_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x60D60 + +#define mmMME0_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_HIGH 0x60D64 + +#define mmMME0_CTRL_SHADOW_3_DESC_SYNC_OBJECT_DATA 0x60D68 + +#define mmMME0_CTRL_SHADOW_3_DESC_AXI_USER_DATA 0x60D6C + +#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_S 0x60D70 + +#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_L_LOCAL 0x60D74 + +#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_L_REMOTE 0x60D78 + +#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_O_LOCAL 0x60D7C + +#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_O_REMOTE 0x60D80 + +#define mmMME0_CTRL_SHADOW_3_DESC_PADDING_VALUE_S 0x60D84 + +#define mmMME0_CTRL_SHADOW_3_DESC_PADDING_VALUE_L 0x60D88 + +#define 
mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_S 0x60D8C
+
+#define mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_LOCAL 0x60D90
+
+#define mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_REMOTE 0x60D94
+
+#define mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_LOCAL 0x60D98
+
+#define mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_REMOTE 0x60D9C
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PCU_RL_SATURATION 0x60DA0
+
+#define mmMME0_CTRL_SHADOW_3_DESC_DUMMY 0x60DA4
+
+#endif /* ASIC_REG_MME0_CTRL_REGS_H_ */
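The file above defines byte offsets into the device's register space, and the four SHADOW descriptor banks repeat one layout at a fixed stride: SHADOW_0_STATUS sits at 0x60400, SHADOW_1 at 0x60680, SHADOW_2 at 0x60900 and SHADOW_3 at 0x60B80, i.e. 0x280 bytes apart (the ARCH_* block appears to mirror the same layout 0x400 bytes below SHADOW_0; compare mmMME0_CTRL_ARCH_DESC_DUMMY at 0x60224 with mmMME0_CTRL_SHADOW_0_DESC_DUMMY at 0x60624). A minimal sketch of how such offsets are typically consumed, assuming a hypothetical ioremap()'d base pointer mme_mmio and a stride constant inferred from the offsets above; this is an illustration, not code from the driver itself:

#include <linux/io.h>
#include <linux/types.h>

/* Inferred from the offsets above: consecutive shadow banks are 0x280 apart. */
#define MME0_CTRL_SHADOW_BANK_STRIDE	0x280

/*
 * Read the STATUS register of shadow bank 0..3; "mme_mmio" is a
 * hypothetical ioremap()'d mapping of the device's register space.
 */
static u32 mme0_ctrl_shadow_status(void __iomem *mme_mmio, unsigned int bank)
{
	u32 off = mmMME0_CTRL_SHADOW_0_STATUS +
			bank * MME0_CTRL_SHADOW_BANK_STRIDE;

	return readl(mme_mmio + off);
}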
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h
new file mode 100644
index 000000000..e6dd30ce0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h
@@ -0,0 +1,800 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_MME0_QM_MASKS_H_
+#define ASIC_REG_MME0_QM_MASKS_H_
+
+/*
+ *****************************************
+ *   MME0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+/* MME0_QM_GLBL_CFG0 */
+#define MME0_QM_GLBL_CFG0_PQF_EN_SHIFT 0
+#define MME0_QM_GLBL_CFG0_PQF_EN_MASK 0xF
+#define MME0_QM_GLBL_CFG0_CQF_EN_SHIFT 4
+#define MME0_QM_GLBL_CFG0_CQF_EN_MASK 0x1F0
+#define MME0_QM_GLBL_CFG0_CP_EN_SHIFT 9
+#define MME0_QM_GLBL_CFG0_CP_EN_MASK 0x3E00
+
+/* MME0_QM_GLBL_CFG1 */
+#define MME0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define MME0_QM_GLBL_CFG1_PQF_STOP_MASK 0xF
+#define MME0_QM_GLBL_CFG1_CQF_STOP_SHIFT 4
+#define MME0_QM_GLBL_CFG1_CQF_STOP_MASK 0x1F0
+#define MME0_QM_GLBL_CFG1_CP_STOP_SHIFT 9
+#define MME0_QM_GLBL_CFG1_CP_STOP_MASK 0x3E00
+#define MME0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 16
+#define MME0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0xF0000
+#define MME0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 20
+#define MME0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x1F00000
+#define MME0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 25
+#define MME0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x3E000000
+
+/* MME0_QM_GLBL_PROT */
+#define MME0_QM_GLBL_PROT_PQF_SHIFT 0
+#define MME0_QM_GLBL_PROT_PQF_MASK 0xF
+#define MME0_QM_GLBL_PROT_CQF_SHIFT 4
+#define MME0_QM_GLBL_PROT_CQF_MASK 0x1F0
+#define MME0_QM_GLBL_PROT_CP_SHIFT 9
+#define MME0_QM_GLBL_PROT_CP_MASK 0x3E00
+#define MME0_QM_GLBL_PROT_ERR_SHIFT 14
+#define MME0_QM_GLBL_PROT_ERR_MASK 0x4000
+#define MME0_QM_GLBL_PROT_ARB_SHIFT 15
+#define MME0_QM_GLBL_PROT_ARB_MASK 0x8000
+
+/* MME0_QM_GLBL_ERR_CFG */
+#define MME0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 0
+#define MME0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0xF
+#define MME0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define MME0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x1F0
+#define MME0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 9
+#define MME0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x3E00
+#define MME0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 16
+#define MME0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0xF0000
+#define MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 20
+#define MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x1F00000
+#define MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 25
+#define MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x3E000000
+#define MME0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT 31
+#define MME0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK 0x80000000
+
+/* MME0_QM_GLBL_SECURE_PROPS */
+#define MME0_QM_GLBL_SECURE_PROPS_0_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_0_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_1_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_1_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_2_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_2_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_3_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_3_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_4_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_4_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_0_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_0_MMBP_MASK 0x400
+#define MME0_QM_GLBL_SECURE_PROPS_1_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_1_MMBP_MASK 0x400
+#define MME0_QM_GLBL_SECURE_PROPS_2_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_2_MMBP_MASK 0x400
+#define MME0_QM_GLBL_SECURE_PROPS_3_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_3_MMBP_MASK 0x400
+#define MME0_QM_GLBL_SECURE_PROPS_4_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_4_MMBP_MASK 0x400
+
+/* MME0_QM_GLBL_NON_SECURE_PROPS */
+#define MME0_QM_GLBL_NON_SECURE_PROPS_0_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_1_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_1_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_2_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_2_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_3_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_3_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_4_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_4_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_MASK 0x400
+#define MME0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_MASK 0x400
+#define MME0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_MASK 0x400
+#define MME0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_MASK 0x400
+#define MME0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_MASK 0x400
+
+/* MME0_QM_GLBL_STS0 */
+#define MME0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define MME0_QM_GLBL_STS0_PQF_IDLE_MASK 0xF
+#define MME0_QM_GLBL_STS0_CQF_IDLE_SHIFT 4
+#define MME0_QM_GLBL_STS0_CQF_IDLE_MASK 0x1F0
+#define MME0_QM_GLBL_STS0_CP_IDLE_SHIFT 9
+#define MME0_QM_GLBL_STS0_CP_IDLE_MASK 0x3E00
+#define MME0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 16
+#define MME0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0xF0000
+#define MME0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 20
+#define MME0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x1F00000
+#define MME0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 25
+#define MME0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x3E000000
+#define MME0_QM_GLBL_STS0_ARB_IS_STOP_SHIFT 31
+#define MME0_QM_GLBL_STS0_ARB_IS_STOP_MASK 0x80000000
+
+/* MME0_QM_GLBL_STS1 */
+#define MME0_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define MME0_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define MME0_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define MME0_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define MME0_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define MME0_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define MME0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define MME0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define MME0_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define MME0_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define MME0_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define MME0_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define MME0_QM_GLBL_STS1_CP_WREG_ERR_SHIFT 6
+#define MME0_QM_GLBL_STS1_CP_WREG_ERR_MASK 0x40
+#define MME0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_SHIFT 8
+#define MME0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_MASK 0x100
+#define
MME0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_SHIFT 9 +#define MME0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_MASK 0x200 +#define MME0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_SHIFT 10 +#define MME0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_MASK 0x400 +#define MME0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_SHIFT 11 +#define MME0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_MASK 0x800 +#define MME0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_SHIFT 12 +#define MME0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define MME0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_SHIFT 13 +#define MME0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define MME0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_SHIFT 14 +#define MME0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define MME0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_SHIFT 15 +#define MME0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* MME0_QM_GLBL_STS1_4 */ +#define MME0_QM_GLBL_STS1_4_CQF_RD_ERR_SHIFT 1 +#define MME0_QM_GLBL_STS1_4_CQF_RD_ERR_MASK 0x2 +#define MME0_QM_GLBL_STS1_4_CP_RD_ERR_SHIFT 2 +#define MME0_QM_GLBL_STS1_4_CP_RD_ERR_MASK 0x4 +#define MME0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_SHIFT 3 +#define MME0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_MASK 0x8 +#define MME0_QM_GLBL_STS1_4_CP_STOP_OP_SHIFT 4 +#define MME0_QM_GLBL_STS1_4_CP_STOP_OP_MASK 0x10 +#define MME0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_SHIFT 5 +#define MME0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_MASK 0x20 +#define MME0_QM_GLBL_STS1_4_CP_WREG_ERR_SHIFT 6 +#define MME0_QM_GLBL_STS1_4_CP_WREG_ERR_MASK 0x40 +#define MME0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_SHIFT 8 +#define MME0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_MASK 0x100 +#define MME0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_SHIFT 9 +#define MME0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_MASK 0x200 +#define MME0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_SHIFT 10 +#define MME0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_MASK 0x400 +#define MME0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_SHIFT 11 +#define MME0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_MASK 0x800 +#define MME0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_SHIFT 12 +#define MME0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define MME0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_SHIFT 13 +#define MME0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define MME0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_SHIFT 14 +#define MME0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define MME0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_SHIFT 15 +#define MME0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* MME0_QM_GLBL_MSG_EN */ +#define MME0_QM_GLBL_MSG_EN_PQF_RD_ERR_SHIFT 0 +#define MME0_QM_GLBL_MSG_EN_PQF_RD_ERR_MASK 0x1 +#define MME0_QM_GLBL_MSG_EN_CQF_RD_ERR_SHIFT 1 +#define MME0_QM_GLBL_MSG_EN_CQF_RD_ERR_MASK 0x2 +#define MME0_QM_GLBL_MSG_EN_CP_RD_ERR_SHIFT 2 +#define MME0_QM_GLBL_MSG_EN_CP_RD_ERR_MASK 0x4 +#define MME0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_SHIFT 3 +#define MME0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_MASK 0x8 +#define MME0_QM_GLBL_MSG_EN_CP_STOP_OP_SHIFT 4 +#define MME0_QM_GLBL_MSG_EN_CP_STOP_OP_MASK 0x10 +#define MME0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_SHIFT 5 +#define MME0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_MASK 0x20 +#define MME0_QM_GLBL_MSG_EN_CP_WREG_ERR_SHIFT 6 +#define MME0_QM_GLBL_MSG_EN_CP_WREG_ERR_MASK 0x40 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_SHIFT 8 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_MASK 0x100 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_SHIFT 9 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_MASK 0x200 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_SHIFT 10 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_MASK 0x400 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_SHIFT 11 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_MASK 0x800 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_SHIFT 12 
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_SHIFT 13 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_SHIFT 14 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_SHIFT 15 +#define MME0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* MME0_QM_GLBL_MSG_EN_4 */ +#define MME0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_SHIFT 1 +#define MME0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_MASK 0x2 +#define MME0_QM_GLBL_MSG_EN_4_CP_RD_ERR_SHIFT 2 +#define MME0_QM_GLBL_MSG_EN_4_CP_RD_ERR_MASK 0x4 +#define MME0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_SHIFT 3 +#define MME0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_MASK 0x8 +#define MME0_QM_GLBL_MSG_EN_4_CP_STOP_OP_SHIFT 4 +#define MME0_QM_GLBL_MSG_EN_4_CP_STOP_OP_MASK 0x10 +#define MME0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_SHIFT 5 +#define MME0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_MASK 0x20 +#define MME0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_SHIFT 6 +#define MME0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_MASK 0x40 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_SHIFT 8 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_MASK 0x100 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_SHIFT 9 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_MASK 0x200 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_SHIFT 10 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_MASK 0x400 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_SHIFT 11 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_MASK 0x800 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_SHIFT 12 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_SHIFT 13 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_SHIFT 14 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_SHIFT 15 +#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* MME0_QM_PQ_BASE_LO */ +#define MME0_QM_PQ_BASE_LO_VAL_SHIFT 0 +#define MME0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_PQ_BASE_HI */ +#define MME0_QM_PQ_BASE_HI_VAL_SHIFT 0 +#define MME0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_PQ_SIZE */ +#define MME0_QM_PQ_SIZE_VAL_SHIFT 0 +#define MME0_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_PQ_PI */ +#define MME0_QM_PQ_PI_VAL_SHIFT 0 +#define MME0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_PQ_CI */ +#define MME0_QM_PQ_CI_VAL_SHIFT 0 +#define MME0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_PQ_CFG0 */ +#define MME0_QM_PQ_CFG0_RESERVED_SHIFT 0 +#define MME0_QM_PQ_CFG0_RESERVED_MASK 0x1 + +/* MME0_QM_PQ_CFG1 */ +#define MME0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0 +#define MME0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define MME0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define MME0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* MME0_QM_PQ_ARUSER_31_11 */ +#define MME0_QM_PQ_ARUSER_31_11_VAL_SHIFT 0 +#define MME0_QM_PQ_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* MME0_QM_PQ_STS0 */ +#define MME0_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0 +#define MME0_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF +#define MME0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16 +#define MME0_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000 + +/* MME0_QM_PQ_STS1 */ +#define MME0_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0 +#define MME0_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF +#define MME0_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30 +#define MME0_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000 +#define MME0_QM_PQ_STS1_PQ_BUSY_SHIFT 31 +#define MME0_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000 + +/* 
MME0_QM_CQ_CFG0 */ +#define MME0_QM_CQ_CFG0_RESERVED_SHIFT 0 +#define MME0_QM_CQ_CFG0_RESERVED_MASK 0x1 + +/* MME0_QM_CQ_CFG1 */ +#define MME0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0 +#define MME0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define MME0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define MME0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* MME0_QM_CQ_ARUSER_31_11 */ +#define MME0_QM_CQ_ARUSER_31_11_VAL_SHIFT 0 +#define MME0_QM_CQ_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* MME0_QM_CQ_STS0 */ +#define MME0_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0 +#define MME0_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF +#define MME0_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16 +#define MME0_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000 + +/* MME0_QM_CQ_STS1 */ +#define MME0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0 +#define MME0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF +#define MME0_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30 +#define MME0_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000 +#define MME0_QM_CQ_STS1_CQ_BUSY_SHIFT 31 +#define MME0_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000 + +/* MME0_QM_CQ_PTR_LO_0 */ +#define MME0_QM_CQ_PTR_LO_0_VAL_SHIFT 0 +#define MME0_QM_CQ_PTR_LO_0_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_PTR_HI_0 */ +#define MME0_QM_CQ_PTR_HI_0_VAL_SHIFT 0 +#define MME0_QM_CQ_PTR_HI_0_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_TSIZE_0 */ +#define MME0_QM_CQ_TSIZE_0_VAL_SHIFT 0 +#define MME0_QM_CQ_TSIZE_0_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_CTL_0 */ +#define MME0_QM_CQ_CTL_0_RPT_SHIFT 0 +#define MME0_QM_CQ_CTL_0_RPT_MASK 0xFFFF +#define MME0_QM_CQ_CTL_0_CTL_SHIFT 16 +#define MME0_QM_CQ_CTL_0_CTL_MASK 0xFFFF0000 + +/* MME0_QM_CQ_PTR_LO_1 */ +#define MME0_QM_CQ_PTR_LO_1_VAL_SHIFT 0 +#define MME0_QM_CQ_PTR_LO_1_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_PTR_HI_1 */ +#define MME0_QM_CQ_PTR_HI_1_VAL_SHIFT 0 +#define MME0_QM_CQ_PTR_HI_1_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_TSIZE_1 */ +#define MME0_QM_CQ_TSIZE_1_VAL_SHIFT 0 +#define MME0_QM_CQ_TSIZE_1_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_CTL_1 */ +#define MME0_QM_CQ_CTL_1_RPT_SHIFT 0 +#define MME0_QM_CQ_CTL_1_RPT_MASK 0xFFFF +#define MME0_QM_CQ_CTL_1_CTL_SHIFT 16 +#define MME0_QM_CQ_CTL_1_CTL_MASK 0xFFFF0000 + +/* MME0_QM_CQ_PTR_LO_2 */ +#define MME0_QM_CQ_PTR_LO_2_VAL_SHIFT 0 +#define MME0_QM_CQ_PTR_LO_2_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_PTR_HI_2 */ +#define MME0_QM_CQ_PTR_HI_2_VAL_SHIFT 0 +#define MME0_QM_CQ_PTR_HI_2_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_TSIZE_2 */ +#define MME0_QM_CQ_TSIZE_2_VAL_SHIFT 0 +#define MME0_QM_CQ_TSIZE_2_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_CTL_2 */ +#define MME0_QM_CQ_CTL_2_RPT_SHIFT 0 +#define MME0_QM_CQ_CTL_2_RPT_MASK 0xFFFF +#define MME0_QM_CQ_CTL_2_CTL_SHIFT 16 +#define MME0_QM_CQ_CTL_2_CTL_MASK 0xFFFF0000 + +/* MME0_QM_CQ_PTR_LO_3 */ +#define MME0_QM_CQ_PTR_LO_3_VAL_SHIFT 0 +#define MME0_QM_CQ_PTR_LO_3_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_PTR_HI_3 */ +#define MME0_QM_CQ_PTR_HI_3_VAL_SHIFT 0 +#define MME0_QM_CQ_PTR_HI_3_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_TSIZE_3 */ +#define MME0_QM_CQ_TSIZE_3_VAL_SHIFT 0 +#define MME0_QM_CQ_TSIZE_3_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_CTL_3 */ +#define MME0_QM_CQ_CTL_3_RPT_SHIFT 0 +#define MME0_QM_CQ_CTL_3_RPT_MASK 0xFFFF +#define MME0_QM_CQ_CTL_3_CTL_SHIFT 16 +#define MME0_QM_CQ_CTL_3_CTL_MASK 0xFFFF0000 + +/* MME0_QM_CQ_PTR_LO_4 */ +#define MME0_QM_CQ_PTR_LO_4_VAL_SHIFT 0 +#define MME0_QM_CQ_PTR_LO_4_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_PTR_HI_4 */ +#define MME0_QM_CQ_PTR_HI_4_VAL_SHIFT 0 +#define MME0_QM_CQ_PTR_HI_4_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_TSIZE_4 */ +#define MME0_QM_CQ_TSIZE_4_VAL_SHIFT 0 +#define MME0_QM_CQ_TSIZE_4_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_CTL_4 */ 
+#define MME0_QM_CQ_CTL_4_RPT_SHIFT 0 +#define MME0_QM_CQ_CTL_4_RPT_MASK 0xFFFF +#define MME0_QM_CQ_CTL_4_CTL_SHIFT 16 +#define MME0_QM_CQ_CTL_4_CTL_MASK 0xFFFF0000 + +/* MME0_QM_CQ_PTR_LO_STS */ +#define MME0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0 +#define MME0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_PTR_HI_STS */ +#define MME0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0 +#define MME0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_TSIZE_STS */ +#define MME0_QM_CQ_TSIZE_STS_VAL_SHIFT 0 +#define MME0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CQ_CTL_STS */ +#define MME0_QM_CQ_CTL_STS_RPT_SHIFT 0 +#define MME0_QM_CQ_CTL_STS_RPT_MASK 0xFFFF +#define MME0_QM_CQ_CTL_STS_CTL_SHIFT 16 +#define MME0_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000 + +/* MME0_QM_CQ_IFIFO_CNT */ +#define MME0_QM_CQ_IFIFO_CNT_VAL_SHIFT 0 +#define MME0_QM_CQ_IFIFO_CNT_VAL_MASK 0x3 + +/* MME0_QM_CP_MSG_BASE0_ADDR_LO */ +#define MME0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0 +#define MME0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CP_MSG_BASE0_ADDR_HI */ +#define MME0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0 +#define MME0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CP_MSG_BASE1_ADDR_LO */ +#define MME0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0 +#define MME0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CP_MSG_BASE1_ADDR_HI */ +#define MME0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0 +#define MME0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CP_MSG_BASE2_ADDR_LO */ +#define MME0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0 +#define MME0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CP_MSG_BASE2_ADDR_HI */ +#define MME0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0 +#define MME0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CP_MSG_BASE3_ADDR_LO */ +#define MME0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0 +#define MME0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CP_MSG_BASE3_ADDR_HI */ +#define MME0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0 +#define MME0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CP_LDMA_TSIZE_OFFSET */ +#define MME0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0 +#define MME0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */ +#define MME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0 +#define MME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CP_LDMA_DST_BASE_LO_OFFSET */ +#define MME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0 +#define MME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CP_FENCE0_RDATA */ +#define MME0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0 +#define MME0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF + +/* MME0_QM_CP_FENCE1_RDATA */ +#define MME0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0 +#define MME0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF + +/* MME0_QM_CP_FENCE2_RDATA */ +#define MME0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0 +#define MME0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF + +/* MME0_QM_CP_FENCE3_RDATA */ +#define MME0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0 +#define MME0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF + +/* MME0_QM_CP_FENCE0_CNT */ +#define MME0_QM_CP_FENCE0_CNT_VAL_SHIFT 0 +#define MME0_QM_CP_FENCE0_CNT_VAL_MASK 0x3FFF + +/* MME0_QM_CP_FENCE1_CNT */ +#define MME0_QM_CP_FENCE1_CNT_VAL_SHIFT 0 +#define MME0_QM_CP_FENCE1_CNT_VAL_MASK 0x3FFF + +/* MME0_QM_CP_FENCE2_CNT */ +#define MME0_QM_CP_FENCE2_CNT_VAL_SHIFT 0 +#define MME0_QM_CP_FENCE2_CNT_VAL_MASK 0x3FFF + +/* MME0_QM_CP_FENCE3_CNT */ +#define MME0_QM_CP_FENCE3_CNT_VAL_SHIFT 0 +#define MME0_QM_CP_FENCE3_CNT_VAL_MASK 0x3FFF + +/* MME0_QM_CP_STS */ +#define 
MME0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0 +#define MME0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF +#define MME0_QM_CP_STS_ERDY_SHIFT 16 +#define MME0_QM_CP_STS_ERDY_MASK 0x10000 +#define MME0_QM_CP_STS_RRDY_SHIFT 17 +#define MME0_QM_CP_STS_RRDY_MASK 0x20000 +#define MME0_QM_CP_STS_MRDY_SHIFT 18 +#define MME0_QM_CP_STS_MRDY_MASK 0x40000 +#define MME0_QM_CP_STS_SW_STOP_SHIFT 19 +#define MME0_QM_CP_STS_SW_STOP_MASK 0x80000 +#define MME0_QM_CP_STS_FENCE_ID_SHIFT 20 +#define MME0_QM_CP_STS_FENCE_ID_MASK 0x300000 +#define MME0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22 +#define MME0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000 + +/* MME0_QM_CP_CURRENT_INST_LO */ +#define MME0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0 +#define MME0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CP_CURRENT_INST_HI */ +#define MME0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0 +#define MME0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_CP_BARRIER_CFG */ +#define MME0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0 +#define MME0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF +#define MME0_QM_CP_BARRIER_CFG_RBGUARD_SHIFT 16 +#define MME0_QM_CP_BARRIER_CFG_RBGUARD_MASK 0xF0000 + +/* MME0_QM_CP_DBG_0 */ +#define MME0_QM_CP_DBG_0_CS_SHIFT 0 +#define MME0_QM_CP_DBG_0_CS_MASK 0xF +#define MME0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_SHIFT 4 +#define MME0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_MASK 0x10 +#define MME0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_SHIFT 5 +#define MME0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_MASK 0x20 +#define MME0_QM_CP_DBG_0_MREB_STALL_SHIFT 6 +#define MME0_QM_CP_DBG_0_MREB_STALL_MASK 0x40 +#define MME0_QM_CP_DBG_0_STALL_SHIFT 7 +#define MME0_QM_CP_DBG_0_STALL_MASK 0x80 + +/* MME0_QM_CP_ARUSER_31_11 */ +#define MME0_QM_CP_ARUSER_31_11_VAL_SHIFT 0 +#define MME0_QM_CP_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* MME0_QM_CP_AWUSER_31_11 */ +#define MME0_QM_CP_AWUSER_31_11_VAL_SHIFT 0 +#define MME0_QM_CP_AWUSER_31_11_VAL_MASK 0x1FFFFF + +/* MME0_QM_ARB_CFG_0 */ +#define MME0_QM_ARB_CFG_0_TYPE_SHIFT 0 +#define MME0_QM_ARB_CFG_0_TYPE_MASK 0x1 +#define MME0_QM_ARB_CFG_0_IS_MASTER_SHIFT 4 +#define MME0_QM_ARB_CFG_0_IS_MASTER_MASK 0x10 +#define MME0_QM_ARB_CFG_0_EN_SHIFT 8 +#define MME0_QM_ARB_CFG_0_EN_MASK 0x100 +#define MME0_QM_ARB_CFG_0_MASK_SHIFT 12 +#define MME0_QM_ARB_CFG_0_MASK_MASK 0xF000 +#define MME0_QM_ARB_CFG_0_MST_MSG_NOSTALL_SHIFT 16 +#define MME0_QM_ARB_CFG_0_MST_MSG_NOSTALL_MASK 0x10000 + +/* MME0_QM_ARB_CHOISE_Q_PUSH */ +#define MME0_QM_ARB_CHOISE_Q_PUSH_VAL_SHIFT 0 +#define MME0_QM_ARB_CHOISE_Q_PUSH_VAL_MASK 0x3 + +/* MME0_QM_ARB_WRR_WEIGHT */ +#define MME0_QM_ARB_WRR_WEIGHT_VAL_SHIFT 0 +#define MME0_QM_ARB_WRR_WEIGHT_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_ARB_CFG_1 */ +#define MME0_QM_ARB_CFG_1_CLR_SHIFT 0 +#define MME0_QM_ARB_CFG_1_CLR_MASK 0x1 + +/* MME0_QM_ARB_MST_AVAIL_CRED */ +#define MME0_QM_ARB_MST_AVAIL_CRED_VAL_SHIFT 0 +#define MME0_QM_ARB_MST_AVAIL_CRED_VAL_MASK 0x7F + +/* MME0_QM_ARB_MST_CRED_INC */ +#define MME0_QM_ARB_MST_CRED_INC_VAL_SHIFT 0 +#define MME0_QM_ARB_MST_CRED_INC_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_ARB_MST_CHOISE_PUSH_OFST */ +#define MME0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_SHIFT 0 +#define MME0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_ARB_SLV_MASTER_INC_CRED_OFST */ +#define MME0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_SHIFT 0 +#define MME0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_ARB_MST_SLAVE_EN */ +#define MME0_QM_ARB_MST_SLAVE_EN_VAL_SHIFT 0 +#define MME0_QM_ARB_MST_SLAVE_EN_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_ARB_MST_QUIET_PER */ +#define MME0_QM_ARB_MST_QUIET_PER_VAL_SHIFT 0 +#define 
MME0_QM_ARB_MST_QUIET_PER_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_ARB_SLV_CHOISE_WDT */ +#define MME0_QM_ARB_SLV_CHOISE_WDT_VAL_SHIFT 0 +#define MME0_QM_ARB_SLV_CHOISE_WDT_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_ARB_SLV_ID */ +#define MME0_QM_ARB_SLV_ID_VAL_SHIFT 0 +#define MME0_QM_ARB_SLV_ID_VAL_MASK 0x1F + +/* MME0_QM_ARB_MSG_MAX_INFLIGHT */ +#define MME0_QM_ARB_MSG_MAX_INFLIGHT_VAL_SHIFT 0 +#define MME0_QM_ARB_MSG_MAX_INFLIGHT_VAL_MASK 0x3F + +/* MME0_QM_ARB_MSG_AWUSER_31_11 */ +#define MME0_QM_ARB_MSG_AWUSER_31_11_VAL_SHIFT 0 +#define MME0_QM_ARB_MSG_AWUSER_31_11_VAL_MASK 0x1FFFFF + +/* MME0_QM_ARB_MSG_AWUSER_SEC_PROP */ +#define MME0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_SHIFT 0 +#define MME0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_MASK 0x3FF +#define MME0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_SHIFT 10 +#define MME0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_MASK 0x400 + +/* MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP */ +#define MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_SHIFT 0 +#define MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_MASK 0x3FF +#define MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_SHIFT 10 +#define MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_MASK 0x400 + +/* MME0_QM_ARB_BASE_LO */ +#define MME0_QM_ARB_BASE_LO_VAL_SHIFT 0 +#define MME0_QM_ARB_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_ARB_BASE_HI */ +#define MME0_QM_ARB_BASE_HI_VAL_SHIFT 0 +#define MME0_QM_ARB_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_ARB_STATE_STS */ +#define MME0_QM_ARB_STATE_STS_VAL_SHIFT 0 +#define MME0_QM_ARB_STATE_STS_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_ARB_CHOISE_FULLNESS_STS */ +#define MME0_QM_ARB_CHOISE_FULLNESS_STS_VAL_SHIFT 0 +#define MME0_QM_ARB_CHOISE_FULLNESS_STS_VAL_MASK 0x7F + +/* MME0_QM_ARB_MSG_STS */ +#define MME0_QM_ARB_MSG_STS_FULL_SHIFT 0 +#define MME0_QM_ARB_MSG_STS_FULL_MASK 0x1 +#define MME0_QM_ARB_MSG_STS_NO_INFLIGHT_SHIFT 1 +#define MME0_QM_ARB_MSG_STS_NO_INFLIGHT_MASK 0x2 + +/* MME0_QM_ARB_SLV_CHOISE_Q_HEAD */ +#define MME0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_SHIFT 0 +#define MME0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_MASK 0x3 + +/* MME0_QM_ARB_ERR_CAUSE */ +#define MME0_QM_ARB_ERR_CAUSE_CHOISE_OVF_SHIFT 0 +#define MME0_QM_ARB_ERR_CAUSE_CHOISE_OVF_MASK 0x1 +#define MME0_QM_ARB_ERR_CAUSE_CHOISE_WDT_SHIFT 1 +#define MME0_QM_ARB_ERR_CAUSE_CHOISE_WDT_MASK 0x2 +#define MME0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_SHIFT 2 +#define MME0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_MASK 0x4 + +/* MME0_QM_ARB_ERR_MSG_EN */ +#define MME0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_SHIFT 0 +#define MME0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1 +#define MME0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_SHIFT 1 +#define MME0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2 +#define MME0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_SHIFT 2 +#define MME0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4 + +/* MME0_QM_ARB_ERR_STS_DRP */ +#define MME0_QM_ARB_ERR_STS_DRP_VAL_SHIFT 0 +#define MME0_QM_ARB_ERR_STS_DRP_VAL_MASK 0x3 + +/* MME0_QM_ARB_MST_CRED_STS */ +#define MME0_QM_ARB_MST_CRED_STS_VAL_SHIFT 0 +#define MME0_QM_ARB_MST_CRED_STS_VAL_MASK 0x7F + +/* MME0_QM_CGM_CFG */ +#define MME0_QM_CGM_CFG_IDLE_TH_SHIFT 0 +#define MME0_QM_CGM_CFG_IDLE_TH_MASK 0xFFF +#define MME0_QM_CGM_CFG_G2F_TH_SHIFT 16 +#define MME0_QM_CGM_CFG_G2F_TH_MASK 0xFF0000 +#define MME0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT 24 +#define MME0_QM_CGM_CFG_CP_IDLE_MASK_MASK 0x1F000000 +#define MME0_QM_CGM_CFG_EN_SHIFT 31 +#define MME0_QM_CGM_CFG_EN_MASK 0x80000000 + +/* MME0_QM_CGM_STS */ +#define MME0_QM_CGM_STS_ST_SHIFT 0 +#define MME0_QM_CGM_STS_ST_MASK 0x3 +#define MME0_QM_CGM_STS_CG_SHIFT 4 +#define MME0_QM_CGM_STS_CG_MASK 0x10 +#define MME0_QM_CGM_STS_AGENT_IDLE_SHIFT 8 +#define 
MME0_QM_CGM_STS_AGENT_IDLE_MASK 0x100 +#define MME0_QM_CGM_STS_AXI_IDLE_SHIFT 9 +#define MME0_QM_CGM_STS_AXI_IDLE_MASK 0x200 +#define MME0_QM_CGM_STS_CP_IDLE_SHIFT 10 +#define MME0_QM_CGM_STS_CP_IDLE_MASK 0x400 + +/* MME0_QM_CGM_CFG1 */ +#define MME0_QM_CGM_CFG1_MASK_TH_SHIFT 0 +#define MME0_QM_CGM_CFG1_MASK_TH_MASK 0xFF + +/* MME0_QM_LOCAL_RANGE_BASE */ +#define MME0_QM_LOCAL_RANGE_BASE_VAL_SHIFT 0 +#define MME0_QM_LOCAL_RANGE_BASE_VAL_MASK 0xFFFF + +/* MME0_QM_LOCAL_RANGE_SIZE */ +#define MME0_QM_LOCAL_RANGE_SIZE_VAL_SHIFT 0 +#define MME0_QM_LOCAL_RANGE_SIZE_VAL_MASK 0xFFFF + +/* MME0_QM_CSMR_STRICT_PRIO_CFG */ +#define MME0_QM_CSMR_STRICT_PRIO_CFG_TYPE_SHIFT 0 +#define MME0_QM_CSMR_STRICT_PRIO_CFG_TYPE_MASK 0x1 + +/* MME0_QM_HBW_RD_RATE_LIM_CFG_1 */ +#define MME0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0 +#define MME0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF +#define MME0_QM_HBW_RD_RATE_LIM_CFG_1_EN_SHIFT 31 +#define MME0_QM_HBW_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000 + +/* MME0_QM_LBW_WR_RATE_LIM_CFG_0 */ +#define MME0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0 +#define MME0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF +#define MME0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_SHIFT 16 +#define MME0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000 + +/* MME0_QM_LBW_WR_RATE_LIM_CFG_1 */ +#define MME0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0 +#define MME0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF +#define MME0_QM_LBW_WR_RATE_LIM_CFG_1_EN_SHIFT 31 +#define MME0_QM_LBW_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000 + +/* MME0_QM_HBW_RD_RATE_LIM_CFG_0 */ +#define MME0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0 +#define MME0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF +#define MME0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_SHIFT 16 +#define MME0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000 + +/* MME0_QM_GLBL_AXCACHE */ +#define MME0_QM_GLBL_AXCACHE_AR_SHIFT 0 +#define MME0_QM_GLBL_AXCACHE_AR_MASK 0xF +#define MME0_QM_GLBL_AXCACHE_AW_SHIFT 16 +#define MME0_QM_GLBL_AXCACHE_AW_MASK 0xF0000 + +/* MME0_QM_IND_GW_APB_CFG */ +#define MME0_QM_IND_GW_APB_CFG_ADDR_SHIFT 0 +#define MME0_QM_IND_GW_APB_CFG_ADDR_MASK 0x7FFFFFFF +#define MME0_QM_IND_GW_APB_CFG_CMD_SHIFT 31 +#define MME0_QM_IND_GW_APB_CFG_CMD_MASK 0x80000000 + +/* MME0_QM_IND_GW_APB_WDATA */ +#define MME0_QM_IND_GW_APB_WDATA_VAL_SHIFT 0 +#define MME0_QM_IND_GW_APB_WDATA_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_IND_GW_APB_RDATA */ +#define MME0_QM_IND_GW_APB_RDATA_VAL_SHIFT 0 +#define MME0_QM_IND_GW_APB_RDATA_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_IND_GW_APB_STATUS */ +#define MME0_QM_IND_GW_APB_STATUS_RDY_SHIFT 0 +#define MME0_QM_IND_GW_APB_STATUS_RDY_MASK 0x1 +#define MME0_QM_IND_GW_APB_STATUS_ERR_SHIFT 1 +#define MME0_QM_IND_GW_APB_STATUS_ERR_MASK 0x2 + +/* MME0_QM_GLBL_ERR_ADDR_LO */ +#define MME0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0 +#define MME0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_GLBL_ERR_ADDR_HI */ +#define MME0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0 +#define MME0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_GLBL_ERR_WDATA */ +#define MME0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0 +#define MME0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF + +/* MME0_QM_GLBL_MEM_INIT_BUSY */ +#define MME0_QM_GLBL_MEM_INIT_BUSY_RBUF_SHIFT 0 +#define MME0_QM_GLBL_MEM_INIT_BUSY_RBUF_MASK 0xF + +#endif /* ASIC_REG_MME0_QM_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h new file mode 100644 index 000000000..4f078b328 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h @@ -0,0 +1,834 @@ +/* 
SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME0_QM_REGS_H_ +#define ASIC_REG_MME0_QM_REGS_H_ + +/* + ***************************************** + * MME0_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmMME0_QM_GLBL_CFG0 0x68000 + +#define mmMME0_QM_GLBL_CFG1 0x68004 + +#define mmMME0_QM_GLBL_PROT 0x68008 + +#define mmMME0_QM_GLBL_ERR_CFG 0x6800C + +#define mmMME0_QM_GLBL_SECURE_PROPS_0 0x68010 + +#define mmMME0_QM_GLBL_SECURE_PROPS_1 0x68014 + +#define mmMME0_QM_GLBL_SECURE_PROPS_2 0x68018 + +#define mmMME0_QM_GLBL_SECURE_PROPS_3 0x6801C + +#define mmMME0_QM_GLBL_SECURE_PROPS_4 0x68020 + +#define mmMME0_QM_GLBL_NON_SECURE_PROPS_0 0x68024 + +#define mmMME0_QM_GLBL_NON_SECURE_PROPS_1 0x68028 + +#define mmMME0_QM_GLBL_NON_SECURE_PROPS_2 0x6802C + +#define mmMME0_QM_GLBL_NON_SECURE_PROPS_3 0x68030 + +#define mmMME0_QM_GLBL_NON_SECURE_PROPS_4 0x68034 + +#define mmMME0_QM_GLBL_STS0 0x68038 + +#define mmMME0_QM_GLBL_STS1_0 0x68040 + +#define mmMME0_QM_GLBL_STS1_1 0x68044 + +#define mmMME0_QM_GLBL_STS1_2 0x68048 + +#define mmMME0_QM_GLBL_STS1_3 0x6804C + +#define mmMME0_QM_GLBL_STS1_4 0x68050 + +#define mmMME0_QM_GLBL_MSG_EN_0 0x68054 + +#define mmMME0_QM_GLBL_MSG_EN_1 0x68058 + +#define mmMME0_QM_GLBL_MSG_EN_2 0x6805C + +#define mmMME0_QM_GLBL_MSG_EN_3 0x68060 + +#define mmMME0_QM_GLBL_MSG_EN_4 0x68068 + +#define mmMME0_QM_PQ_BASE_LO_0 0x68070 + +#define mmMME0_QM_PQ_BASE_LO_1 0x68074 + +#define mmMME0_QM_PQ_BASE_LO_2 0x68078 + +#define mmMME0_QM_PQ_BASE_LO_3 0x6807C + +#define mmMME0_QM_PQ_BASE_HI_0 0x68080 + +#define mmMME0_QM_PQ_BASE_HI_1 0x68084 + +#define mmMME0_QM_PQ_BASE_HI_2 0x68088 + +#define mmMME0_QM_PQ_BASE_HI_3 0x6808C + +#define mmMME0_QM_PQ_SIZE_0 0x68090 + +#define mmMME0_QM_PQ_SIZE_1 0x68094 + +#define mmMME0_QM_PQ_SIZE_2 0x68098 + +#define mmMME0_QM_PQ_SIZE_3 0x6809C + +#define mmMME0_QM_PQ_PI_0 0x680A0 + +#define mmMME0_QM_PQ_PI_1 0x680A4 + +#define mmMME0_QM_PQ_PI_2 0x680A8 + +#define mmMME0_QM_PQ_PI_3 0x680AC + +#define mmMME0_QM_PQ_CI_0 0x680B0 + +#define mmMME0_QM_PQ_CI_1 0x680B4 + +#define mmMME0_QM_PQ_CI_2 0x680B8 + +#define mmMME0_QM_PQ_CI_3 0x680BC + +#define mmMME0_QM_PQ_CFG0_0 0x680C0 + +#define mmMME0_QM_PQ_CFG0_1 0x680C4 + +#define mmMME0_QM_PQ_CFG0_2 0x680C8 + +#define mmMME0_QM_PQ_CFG0_3 0x680CC + +#define mmMME0_QM_PQ_CFG1_0 0x680D0 + +#define mmMME0_QM_PQ_CFG1_1 0x680D4 + +#define mmMME0_QM_PQ_CFG1_2 0x680D8 + +#define mmMME0_QM_PQ_CFG1_3 0x680DC + +#define mmMME0_QM_PQ_ARUSER_31_11_0 0x680E0 + +#define mmMME0_QM_PQ_ARUSER_31_11_1 0x680E4 + +#define mmMME0_QM_PQ_ARUSER_31_11_2 0x680E8 + +#define mmMME0_QM_PQ_ARUSER_31_11_3 0x680EC + +#define mmMME0_QM_PQ_STS0_0 0x680F0 + +#define mmMME0_QM_PQ_STS0_1 0x680F4 + +#define mmMME0_QM_PQ_STS0_2 0x680F8 + +#define mmMME0_QM_PQ_STS0_3 0x680FC + +#define mmMME0_QM_PQ_STS1_0 0x68100 + +#define mmMME0_QM_PQ_STS1_1 0x68104 + +#define mmMME0_QM_PQ_STS1_2 0x68108 + +#define mmMME0_QM_PQ_STS1_3 0x6810C + +#define mmMME0_QM_CQ_CFG0_0 0x68110 + +#define mmMME0_QM_CQ_CFG0_1 0x68114 + +#define mmMME0_QM_CQ_CFG0_2 0x68118 + +#define mmMME0_QM_CQ_CFG0_3 0x6811C + +#define mmMME0_QM_CQ_CFG0_4 0x68120 + +#define mmMME0_QM_CQ_CFG1_0 0x68124 + +#define mmMME0_QM_CQ_CFG1_1 0x68128 + +#define mmMME0_QM_CQ_CFG1_2 0x6812C + +#define mmMME0_QM_CQ_CFG1_3 0x68130 + +#define 
mmMME0_QM_CQ_CFG1_4 0x68134 + +#define mmMME0_QM_CQ_ARUSER_31_11_0 0x68138 + +#define mmMME0_QM_CQ_ARUSER_31_11_1 0x6813C + +#define mmMME0_QM_CQ_ARUSER_31_11_2 0x68140 + +#define mmMME0_QM_CQ_ARUSER_31_11_3 0x68144 + +#define mmMME0_QM_CQ_ARUSER_31_11_4 0x68148 + +#define mmMME0_QM_CQ_STS0_0 0x6814C + +#define mmMME0_QM_CQ_STS0_1 0x68150 + +#define mmMME0_QM_CQ_STS0_2 0x68154 + +#define mmMME0_QM_CQ_STS0_3 0x68158 + +#define mmMME0_QM_CQ_STS0_4 0x6815C + +#define mmMME0_QM_CQ_STS1_0 0x68160 + +#define mmMME0_QM_CQ_STS1_1 0x68164 + +#define mmMME0_QM_CQ_STS1_2 0x68168 + +#define mmMME0_QM_CQ_STS1_3 0x6816C + +#define mmMME0_QM_CQ_STS1_4 0x68170 + +#define mmMME0_QM_CQ_PTR_LO_0 0x68174 + +#define mmMME0_QM_CQ_PTR_HI_0 0x68178 + +#define mmMME0_QM_CQ_TSIZE_0 0x6817C + +#define mmMME0_QM_CQ_CTL_0 0x68180 + +#define mmMME0_QM_CQ_PTR_LO_1 0x68184 + +#define mmMME0_QM_CQ_PTR_HI_1 0x68188 + +#define mmMME0_QM_CQ_TSIZE_1 0x6818C + +#define mmMME0_QM_CQ_CTL_1 0x68190 + +#define mmMME0_QM_CQ_PTR_LO_2 0x68194 + +#define mmMME0_QM_CQ_PTR_HI_2 0x68198 + +#define mmMME0_QM_CQ_TSIZE_2 0x6819C + +#define mmMME0_QM_CQ_CTL_2 0x681A0 + +#define mmMME0_QM_CQ_PTR_LO_3 0x681A4 + +#define mmMME0_QM_CQ_PTR_HI_3 0x681A8 + +#define mmMME0_QM_CQ_TSIZE_3 0x681AC + +#define mmMME0_QM_CQ_CTL_3 0x681B0 + +#define mmMME0_QM_CQ_PTR_LO_4 0x681B4 + +#define mmMME0_QM_CQ_PTR_HI_4 0x681B8 + +#define mmMME0_QM_CQ_TSIZE_4 0x681BC + +#define mmMME0_QM_CQ_CTL_4 0x681C0 + +#define mmMME0_QM_CQ_PTR_LO_STS_0 0x681C4 + +#define mmMME0_QM_CQ_PTR_LO_STS_1 0x681C8 + +#define mmMME0_QM_CQ_PTR_LO_STS_2 0x681CC + +#define mmMME0_QM_CQ_PTR_LO_STS_3 0x681D0 + +#define mmMME0_QM_CQ_PTR_LO_STS_4 0x681D4 + +#define mmMME0_QM_CQ_PTR_HI_STS_0 0x681D8 + +#define mmMME0_QM_CQ_PTR_HI_STS_1 0x681DC + +#define mmMME0_QM_CQ_PTR_HI_STS_2 0x681E0 + +#define mmMME0_QM_CQ_PTR_HI_STS_3 0x681E4 + +#define mmMME0_QM_CQ_PTR_HI_STS_4 0x681E8 + +#define mmMME0_QM_CQ_TSIZE_STS_0 0x681EC + +#define mmMME0_QM_CQ_TSIZE_STS_1 0x681F0 + +#define mmMME0_QM_CQ_TSIZE_STS_2 0x681F4 + +#define mmMME0_QM_CQ_TSIZE_STS_3 0x681F8 + +#define mmMME0_QM_CQ_TSIZE_STS_4 0x681FC + +#define mmMME0_QM_CQ_CTL_STS_0 0x68200 + +#define mmMME0_QM_CQ_CTL_STS_1 0x68204 + +#define mmMME0_QM_CQ_CTL_STS_2 0x68208 + +#define mmMME0_QM_CQ_CTL_STS_3 0x6820C + +#define mmMME0_QM_CQ_CTL_STS_4 0x68210 + +#define mmMME0_QM_CQ_IFIFO_CNT_0 0x68214 + +#define mmMME0_QM_CQ_IFIFO_CNT_1 0x68218 + +#define mmMME0_QM_CQ_IFIFO_CNT_2 0x6821C + +#define mmMME0_QM_CQ_IFIFO_CNT_3 0x68220 + +#define mmMME0_QM_CQ_IFIFO_CNT_4 0x68224 + +#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 0x68228 + +#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_1 0x6822C + +#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_2 0x68230 + +#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_3 0x68234 + +#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_4 0x68238 + +#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 0x6823C + +#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_1 0x68240 + +#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_2 0x68244 + +#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_3 0x68248 + +#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_4 0x6824C + +#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 0x68250 + +#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_1 0x68254 + +#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_2 0x68258 + +#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_3 0x6825C + +#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_4 0x68260 + +#define mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 0x68264 + +#define mmMME0_QM_CP_MSG_BASE1_ADDR_HI_1 0x68268 + +#define mmMME0_QM_CP_MSG_BASE1_ADDR_HI_2 0x6826C + +#define mmMME0_QM_CP_MSG_BASE1_ADDR_HI_3 0x68270 + +#define 
mmMME0_QM_CP_MSG_BASE1_ADDR_HI_4 0x68274 + +#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_0 0x68278 + +#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_1 0x6827C + +#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 0x68280 + +#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_3 0x68284 + +#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_4 0x68288 + +#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_0 0x6828C + +#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_1 0x68290 + +#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_2 0x68294 + +#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_3 0x68298 + +#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_4 0x6829C + +#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_0 0x682A0 + +#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_1 0x682A4 + +#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_2 0x682A8 + +#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_3 0x682AC + +#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_4 0x682B0 + +#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_0 0x682B4 + +#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_1 0x682B8 + +#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_2 0x682BC + +#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_3 0x682C0 + +#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_4 0x682C4 + +#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 0x682C8 + +#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_1 0x682CC + +#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_2 0x682D0 + +#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_3 0x682D4 + +#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_4 0x682D8 + +#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x682E0 + +#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x682E4 + +#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x682E8 + +#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x682EC + +#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x682F0 + +#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x682F4 + +#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x682F8 + +#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x682FC + +#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x68300 + +#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x68304 + +#define mmMME0_QM_CP_FENCE0_RDATA_0 0x68308 + +#define mmMME0_QM_CP_FENCE0_RDATA_1 0x6830C + +#define mmMME0_QM_CP_FENCE0_RDATA_2 0x68310 + +#define mmMME0_QM_CP_FENCE0_RDATA_3 0x68314 + +#define mmMME0_QM_CP_FENCE0_RDATA_4 0x68318 + +#define mmMME0_QM_CP_FENCE1_RDATA_0 0x6831C + +#define mmMME0_QM_CP_FENCE1_RDATA_1 0x68320 + +#define mmMME0_QM_CP_FENCE1_RDATA_2 0x68324 + +#define mmMME0_QM_CP_FENCE1_RDATA_3 0x68328 + +#define mmMME0_QM_CP_FENCE1_RDATA_4 0x6832C + +#define mmMME0_QM_CP_FENCE2_RDATA_0 0x68330 + +#define mmMME0_QM_CP_FENCE2_RDATA_1 0x68334 + +#define mmMME0_QM_CP_FENCE2_RDATA_2 0x68338 + +#define mmMME0_QM_CP_FENCE2_RDATA_3 0x6833C + +#define mmMME0_QM_CP_FENCE2_RDATA_4 0x68340 + +#define mmMME0_QM_CP_FENCE3_RDATA_0 0x68344 + +#define mmMME0_QM_CP_FENCE3_RDATA_1 0x68348 + +#define mmMME0_QM_CP_FENCE3_RDATA_2 0x6834C + +#define mmMME0_QM_CP_FENCE3_RDATA_3 0x68350 + +#define mmMME0_QM_CP_FENCE3_RDATA_4 0x68354 + +#define mmMME0_QM_CP_FENCE0_CNT_0 0x68358 + +#define mmMME0_QM_CP_FENCE0_CNT_1 0x6835C + +#define mmMME0_QM_CP_FENCE0_CNT_2 0x68360 + +#define mmMME0_QM_CP_FENCE0_CNT_3 0x68364 + +#define mmMME0_QM_CP_FENCE0_CNT_4 0x68368 + +#define mmMME0_QM_CP_FENCE1_CNT_0 0x6836C + +#define mmMME0_QM_CP_FENCE1_CNT_1 0x68370 + +#define mmMME0_QM_CP_FENCE1_CNT_2 0x68374 + +#define mmMME0_QM_CP_FENCE1_CNT_3 0x68378 + +#define mmMME0_QM_CP_FENCE1_CNT_4 0x6837C + +#define mmMME0_QM_CP_FENCE2_CNT_0 0x68380 + +#define mmMME0_QM_CP_FENCE2_CNT_1 0x68384 + +#define mmMME0_QM_CP_FENCE2_CNT_2 0x68388 + +#define mmMME0_QM_CP_FENCE2_CNT_3 0x6838C + +#define mmMME0_QM_CP_FENCE2_CNT_4 0x68390 + +#define 
mmMME0_QM_CP_FENCE3_CNT_0 0x68394 + +#define mmMME0_QM_CP_FENCE3_CNT_1 0x68398 + +#define mmMME0_QM_CP_FENCE3_CNT_2 0x6839C + +#define mmMME0_QM_CP_FENCE3_CNT_3 0x683A0 + +#define mmMME0_QM_CP_FENCE3_CNT_4 0x683A4 + +#define mmMME0_QM_CP_STS_0 0x683A8 + +#define mmMME0_QM_CP_STS_1 0x683AC + +#define mmMME0_QM_CP_STS_2 0x683B0 + +#define mmMME0_QM_CP_STS_3 0x683B4 + +#define mmMME0_QM_CP_STS_4 0x683B8 + +#define mmMME0_QM_CP_CURRENT_INST_LO_0 0x683BC + +#define mmMME0_QM_CP_CURRENT_INST_LO_1 0x683C0 + +#define mmMME0_QM_CP_CURRENT_INST_LO_2 0x683C4 + +#define mmMME0_QM_CP_CURRENT_INST_LO_3 0x683C8 + +#define mmMME0_QM_CP_CURRENT_INST_LO_4 0x683CC + +#define mmMME0_QM_CP_CURRENT_INST_HI_0 0x683D0 + +#define mmMME0_QM_CP_CURRENT_INST_HI_1 0x683D4 + +#define mmMME0_QM_CP_CURRENT_INST_HI_2 0x683D8 + +#define mmMME0_QM_CP_CURRENT_INST_HI_3 0x683DC + +#define mmMME0_QM_CP_CURRENT_INST_HI_4 0x683E0 + +#define mmMME0_QM_CP_BARRIER_CFG_0 0x683F4 + +#define mmMME0_QM_CP_BARRIER_CFG_1 0x683F8 + +#define mmMME0_QM_CP_BARRIER_CFG_2 0x683FC + +#define mmMME0_QM_CP_BARRIER_CFG_3 0x68400 + +#define mmMME0_QM_CP_BARRIER_CFG_4 0x68404 + +#define mmMME0_QM_CP_DBG_0_0 0x68408 + +#define mmMME0_QM_CP_DBG_0_1 0x6840C + +#define mmMME0_QM_CP_DBG_0_2 0x68410 + +#define mmMME0_QM_CP_DBG_0_3 0x68414 + +#define mmMME0_QM_CP_DBG_0_4 0x68418 + +#define mmMME0_QM_CP_ARUSER_31_11_0 0x6841C + +#define mmMME0_QM_CP_ARUSER_31_11_1 0x68420 + +#define mmMME0_QM_CP_ARUSER_31_11_2 0x68424 + +#define mmMME0_QM_CP_ARUSER_31_11_3 0x68428 + +#define mmMME0_QM_CP_ARUSER_31_11_4 0x6842C + +#define mmMME0_QM_CP_AWUSER_31_11_0 0x68430 + +#define mmMME0_QM_CP_AWUSER_31_11_1 0x68434 + +#define mmMME0_QM_CP_AWUSER_31_11_2 0x68438 + +#define mmMME0_QM_CP_AWUSER_31_11_3 0x6843C + +#define mmMME0_QM_CP_AWUSER_31_11_4 0x68440 + +#define mmMME0_QM_ARB_CFG_0 0x68A00 + +#define mmMME0_QM_ARB_CHOISE_Q_PUSH 0x68A04 + +#define mmMME0_QM_ARB_WRR_WEIGHT_0 0x68A08 + +#define mmMME0_QM_ARB_WRR_WEIGHT_1 0x68A0C + +#define mmMME0_QM_ARB_WRR_WEIGHT_2 0x68A10 + +#define mmMME0_QM_ARB_WRR_WEIGHT_3 0x68A14 + +#define mmMME0_QM_ARB_CFG_1 0x68A18 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_0 0x68A20 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_1 0x68A24 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_2 0x68A28 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_3 0x68A2C + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_4 0x68A30 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_5 0x68A34 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_6 0x68A38 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_7 0x68A3C + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_8 0x68A40 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_9 0x68A44 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_10 0x68A48 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_11 0x68A4C + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_12 0x68A50 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_13 0x68A54 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_14 0x68A58 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_15 0x68A5C + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_16 0x68A60 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_17 0x68A64 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_18 0x68A68 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_19 0x68A6C + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_20 0x68A70 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_21 0x68A74 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_22 0x68A78 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_23 0x68A7C + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_24 0x68A80 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_25 0x68A84 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_26 0x68A88 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_27 0x68A8C + +#define 
mmMME0_QM_ARB_MST_AVAIL_CRED_28 0x68A90 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_29 0x68A94 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_30 0x68A98 + +#define mmMME0_QM_ARB_MST_AVAIL_CRED_31 0x68A9C + +#define mmMME0_QM_ARB_MST_CRED_INC 0x68AA0 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x68AA4 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x68AA8 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x68AAC + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x68AB0 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x68AB4 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x68AB8 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x68ABC + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x68AC0 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x68AC4 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x68AC8 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x68ACC + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x68AD0 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x68AD4 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x68AD8 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x68ADC + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x68AE0 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x68AE4 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x68AE8 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x68AEC + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x68AF0 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x68AF4 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x68AF8 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x68AFC + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x68B00 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x68B04 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x68B08 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x68B0C + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x68B10 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x68B14 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x68B18 + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x68B1C + +#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x68B20 + +#define mmMME0_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x68B28 + +#define mmMME0_QM_ARB_MST_SLAVE_EN 0x68B2C + +#define mmMME0_QM_ARB_MST_QUIET_PER 0x68B34 + +#define mmMME0_QM_ARB_SLV_CHOISE_WDT 0x68B38 + +#define mmMME0_QM_ARB_SLV_ID 0x68B3C + +#define mmMME0_QM_ARB_MSG_MAX_INFLIGHT 0x68B44 + +#define mmMME0_QM_ARB_MSG_AWUSER_31_11 0x68B48 + +#define mmMME0_QM_ARB_MSG_AWUSER_SEC_PROP 0x68B4C + +#define mmMME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x68B50 + +#define mmMME0_QM_ARB_BASE_LO 0x68B54 + +#define mmMME0_QM_ARB_BASE_HI 0x68B58 + +#define mmMME0_QM_ARB_STATE_STS 0x68B80 + +#define mmMME0_QM_ARB_CHOISE_FULLNESS_STS 0x68B84 + +#define mmMME0_QM_ARB_MSG_STS 0x68B88 + +#define mmMME0_QM_ARB_SLV_CHOISE_Q_HEAD 0x68B8C + +#define mmMME0_QM_ARB_ERR_CAUSE 0x68B9C + +#define mmMME0_QM_ARB_ERR_MSG_EN 0x68BA0 + +#define mmMME0_QM_ARB_ERR_STS_DRP 0x68BA8 + +#define mmMME0_QM_ARB_MST_CRED_STS_0 0x68BB0 + +#define mmMME0_QM_ARB_MST_CRED_STS_1 0x68BB4 + +#define mmMME0_QM_ARB_MST_CRED_STS_2 0x68BB8 + +#define mmMME0_QM_ARB_MST_CRED_STS_3 0x68BBC + +#define mmMME0_QM_ARB_MST_CRED_STS_4 0x68BC0 + +#define mmMME0_QM_ARB_MST_CRED_STS_5 0x68BC4 + +#define mmMME0_QM_ARB_MST_CRED_STS_6 0x68BC8 + +#define mmMME0_QM_ARB_MST_CRED_STS_7 0x68BCC + +#define mmMME0_QM_ARB_MST_CRED_STS_8 0x68BD0 + +#define mmMME0_QM_ARB_MST_CRED_STS_9 0x68BD4 + +#define mmMME0_QM_ARB_MST_CRED_STS_10 0x68BD8 + +#define mmMME0_QM_ARB_MST_CRED_STS_11 0x68BDC + +#define mmMME0_QM_ARB_MST_CRED_STS_12 0x68BE0 + +#define 
mmMME0_QM_ARB_MST_CRED_STS_13 0x68BE4 + +#define mmMME0_QM_ARB_MST_CRED_STS_14 0x68BE8 + +#define mmMME0_QM_ARB_MST_CRED_STS_15 0x68BEC + +#define mmMME0_QM_ARB_MST_CRED_STS_16 0x68BF0 + +#define mmMME0_QM_ARB_MST_CRED_STS_17 0x68BF4 + +#define mmMME0_QM_ARB_MST_CRED_STS_18 0x68BF8 + +#define mmMME0_QM_ARB_MST_CRED_STS_19 0x68BFC + +#define mmMME0_QM_ARB_MST_CRED_STS_20 0x68C00 + +#define mmMME0_QM_ARB_MST_CRED_STS_21 0x68C04 + +#define mmMME0_QM_ARB_MST_CRED_STS_22 0x68C08 + +#define mmMME0_QM_ARB_MST_CRED_STS_23 0x68C0C + +#define mmMME0_QM_ARB_MST_CRED_STS_24 0x68C10 + +#define mmMME0_QM_ARB_MST_CRED_STS_25 0x68C14 + +#define mmMME0_QM_ARB_MST_CRED_STS_26 0x68C18 + +#define mmMME0_QM_ARB_MST_CRED_STS_27 0x68C1C + +#define mmMME0_QM_ARB_MST_CRED_STS_28 0x68C20 + +#define mmMME0_QM_ARB_MST_CRED_STS_29 0x68C24 + +#define mmMME0_QM_ARB_MST_CRED_STS_30 0x68C28 + +#define mmMME0_QM_ARB_MST_CRED_STS_31 0x68C2C + +#define mmMME0_QM_CGM_CFG 0x68C70 + +#define mmMME0_QM_CGM_STS 0x68C74 + +#define mmMME0_QM_CGM_CFG1 0x68C78 + +#define mmMME0_QM_LOCAL_RANGE_BASE 0x68C80 + +#define mmMME0_QM_LOCAL_RANGE_SIZE 0x68C84 + +#define mmMME0_QM_CSMR_STRICT_PRIO_CFG 0x68C90 + +#define mmMME0_QM_HBW_RD_RATE_LIM_CFG_1 0x68C94 + +#define mmMME0_QM_LBW_WR_RATE_LIM_CFG_0 0x68C98 + +#define mmMME0_QM_LBW_WR_RATE_LIM_CFG_1 0x68C9C + +#define mmMME0_QM_HBW_RD_RATE_LIM_CFG_0 0x68CA0 + +#define mmMME0_QM_GLBL_AXCACHE 0x68CA4 + +#define mmMME0_QM_IND_GW_APB_CFG 0x68CB0 + +#define mmMME0_QM_IND_GW_APB_WDATA 0x68CB4 + +#define mmMME0_QM_IND_GW_APB_RDATA 0x68CB8 + +#define mmMME0_QM_IND_GW_APB_STATUS 0x68CBC + +#define mmMME0_QM_GLBL_ERR_ADDR_LO 0x68CD0 + +#define mmMME0_QM_GLBL_ERR_ADDR_HI 0x68CD4 + +#define mmMME0_QM_GLBL_ERR_WDATA 0x68CD8 + +#define mmMME0_QM_GLBL_MEM_INIT_BUSY 0x68D00 + +#endif /* ASIC_REG_MME0_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h new file mode 100644 index 000000000..6c07f7d45 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h @@ -0,0 +1,1456 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME1_CTRL_REGS_H_ +#define ASIC_REG_MME1_CTRL_REGS_H_ + +/* + ***************************************** + * MME1_CTRL (Prototype: MME) + ***************************************** + */ + +#define mmMME1_CTRL_ARCH_STATUS 0xE0000 + +#define mmMME1_CTRL_ARCH_BASE_ADDR_HIGH_S 0xE0008 + +#define mmMME1_CTRL_ARCH_BASE_ADDR_HIGH_L 0xE000C + +#define mmMME1_CTRL_ARCH_BASE_ADDR_HIGH_O 0xE0010 + +#define mmMME1_CTRL_ARCH_BASE_ADDR_LOW_S 0xE0014 + +#define mmMME1_CTRL_ARCH_BASE_ADDR_LOW_L 0xE0018 + +#define mmMME1_CTRL_ARCH_BASE_ADDR_LOW_O 0xE001C + +#define mmMME1_CTRL_ARCH_HEADER_LOW 0xE0020 + +#define mmMME1_CTRL_ARCH_HEADER_HIGH 0xE0024 + +#define mmMME1_CTRL_ARCH_CONV_KERNEL_SIZE_MINUS_1 0xE0028 + +#define mmMME1_CTRL_ARCH_CONV_ASSOCIATED_DIMS_LOW 0xE002C + +#define mmMME1_CTRL_ARCH_CONV_ASSOCIATED_DIMS_HIGH 0xE0030 + +#define mmMME1_CTRL_ARCH_NUM_ITERATIONS_MINUS_1 0xE0034 + +#define mmMME1_CTRL_ARCH_OUTER_LOOP 0xE0038 + +#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_0 0xE003C + +#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_1 0xE0040 + +#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_2 0xE0044 + +#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_3 0xE0048 + +#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_4 0xE004C + +#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_0 0xE0050 + +#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_1 0xE0054 + +#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_2 0xE0058 + +#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_3 0xE005C + +#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_4 0xE0060 + +#define mmMME1_CTRL_ARCH_TENSOR_S_ROI_SIZE_0 0xE0064 + +#define mmMME1_CTRL_ARCH_TENSOR_S_ROI_SIZE_1 0xE0068 + +#define mmMME1_CTRL_ARCH_TENSOR_S_ROI_SIZE_2 0xE006C + +#define mmMME1_CTRL_ARCH_TENSOR_S_ROI_SIZE_3 0xE0070 + +#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_0 0xE0074 + +#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_1 0xE0078 + +#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_2 0xE007C + +#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_3 0xE0080 + +#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0084 + +#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_0 0xE0088 + +#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_1 0xE008C + +#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_2 0xE0090 + +#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_3 0xE0094 + +#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_4 0xE0098 + +#define mmMME1_CTRL_ARCH_AGU_S_START_OFFSET_0 0xE009C + +#define mmMME1_CTRL_ARCH_AGU_S_START_OFFSET_1 0xE00A0 + +#define mmMME1_CTRL_ARCH_AGU_S_START_OFFSET_2 0xE00A4 + +#define mmMME1_CTRL_ARCH_AGU_S_START_OFFSET_3 0xE00A8 + +#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_0 0xE00AC + +#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_1 0xE00B0 + +#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_2 0xE00B4 + +#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_3 0xE00B8 + +#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_4 0xE00BC + +#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_0 0xE00C0 + +#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_1 0xE00C4 + +#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_2 0xE00C8 + +#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_3 0xE00CC + +#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_4 0xE00D0 + +#define mmMME1_CTRL_ARCH_TENSOR_L_ROI_SIZE_0 0xE00D4 + +#define mmMME1_CTRL_ARCH_TENSOR_L_ROI_SIZE_1 0xE00D8 + +#define mmMME1_CTRL_ARCH_TENSOR_L_ROI_SIZE_2 0xE00DC + 
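The mm* constants in this hunk are byte offsets of MME1_CTRL registers within the device's MMIO space, not absolute addresses. As a minimal illustration (not part of the patch), a driver that had ioremap()ed the register region could read the architecture status register as below; the helper name and the regs_base mapping are assumptions for the sketch, not the driver's actual access path:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: 'regs_base' is assumed to be an ioremap()ed
 * pointer to the start of the register space these mm* offsets index.
 */
static u32 mme1_ctrl_read_arch_status(void __iomem *regs_base)
{
	/* mmMME1_CTRL_ARCH_STATUS (0xE0000) is defined in this hunk */
	return readl(regs_base + mmMME1_CTRL_ARCH_STATUS);
}
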
+#define mmMME1_CTRL_ARCH_TENSOR_L_ROI_SIZE_3 0xE00E0 + +#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_0 0xE00E4 + +#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_1 0xE00E8 + +#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_2 0xE00EC + +#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_3 0xE00F0 + +#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE00F4 + +#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE00F8 + +#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE00FC + +#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0100 + +#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0104 + +#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0108 + +#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_0 0xE010C + +#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_1 0xE0110 + +#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_2 0xE0114 + +#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_3 0xE0118 + +#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE011C + +#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE0120 + +#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE0124 + +#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE0128 + +#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE012C + +#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_0 0xE0130 + +#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_1 0xE0134 + +#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_2 0xE0138 + +#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_3 0xE013C + +#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_0 0xE0140 + +#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_1 0xE0144 + +#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_2 0xE0148 + +#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_3 0xE014C + +#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_4 0xE0150 + +#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_0 0xE0154 + +#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_1 0xE0158 + +#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_2 0xE015C + +#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_3 0xE0160 + +#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_4 0xE0164 + +#define mmMME1_CTRL_ARCH_TENSOR_O_ROI_SIZE_0 0xE0168 + +#define mmMME1_CTRL_ARCH_TENSOR_O_ROI_SIZE_1 0xE016C + +#define mmMME1_CTRL_ARCH_TENSOR_O_ROI_SIZE_2 0xE0170 + +#define mmMME1_CTRL_ARCH_TENSOR_O_ROI_SIZE_3 0xE0174 + +#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_0 0xE0178 + +#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_1 0xE017C + +#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_2 0xE0180 + +#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_3 0xE0184 + +#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0188 + +#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE018C + +#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0190 + +#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0194 + +#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0198 + +#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE019C + +#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_0 0xE01A0 + +#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_1 0xE01A4 + +#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_2 0xE01A8 + +#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_3 0xE01AC + +#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE01B0 + +#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE01B4 + +#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE01B8 + +#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE01BC + +#define 
mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE01C0 + +#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_0 0xE01C4 + +#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_1 0xE01C8 + +#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_2 0xE01CC + +#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_3 0xE01D0 + +#define mmMME1_CTRL_ARCH_DESC_SB_REPEAT 0xE01D4 + +#define mmMME1_CTRL_ARCH_DESC_RATE_LIMITER 0xE01D8 + +#define mmMME1_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE01DC + +#define mmMME1_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE01E0 + +#define mmMME1_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_HIGH 0xE01E4 + +#define mmMME1_CTRL_ARCH_DESC_SYNC_OBJECT_DATA 0xE01E8 + +#define mmMME1_CTRL_ARCH_DESC_AXI_USER_DATA 0xE01EC + +#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_S 0xE01F0 + +#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_L_LOCAL 0xE01F4 + +#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_L_REMOTE 0xE01F8 + +#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_O_LOCAL 0xE01FC + +#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_O_REMOTE 0xE0200 + +#define mmMME1_CTRL_ARCH_DESC_PADDING_VALUE_S 0xE0204 + +#define mmMME1_CTRL_ARCH_DESC_PADDING_VALUE_L 0xE0208 + +#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_S 0xE020C + +#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_L_LOCAL 0xE0210 + +#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_L_REMOTE 0xE0214 + +#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_O_LOCAL 0xE0218 + +#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_O_REMOTE 0xE021C + +#define mmMME1_CTRL_ARCH_DESC_PCU_RL_SATURATION 0xE0220 + +#define mmMME1_CTRL_ARCH_DESC_DUMMY 0xE0224 + +#define mmMME1_CTRL_CMD 0xE0280 + +#define mmMME1_CTRL_STATUS1 0xE0284 + +#define mmMME1_CTRL_RESET 0xE0288 + +#define mmMME1_CTRL_QM_STALL 0xE028C + +#define mmMME1_CTRL_SYNC_OBJECT_FIFO_TH 0xE0290 + +#define mmMME1_CTRL_EUS_ROLLUP_CNT_ADD 0xE0294 + +#define mmMME1_CTRL_INTR_CAUSE 0xE0298 + +#define mmMME1_CTRL_INTR_MASK 0xE029C + +#define mmMME1_CTRL_LOG_SHADOW 0xE02A0 + +#define mmMME1_CTRL_PCU_RL_DESC0 0xE02A4 + +#define mmMME1_CTRL_PCU_RL_TOKEN_UPDATE 0xE02A8 + +#define mmMME1_CTRL_PCU_RL_TH 0xE02AC + +#define mmMME1_CTRL_PCU_RL_MIN 0xE02B0 + +#define mmMME1_CTRL_PCU_RL_CTRL_EN 0xE02B4 + +#define mmMME1_CTRL_PCU_RL_HISTORY_LOG_SIZE 0xE02B8 + +#define mmMME1_CTRL_PCU_DUMMY_A_BF16 0xE02BC + +#define mmMME1_CTRL_PCU_DUMMY_B_BF16 0xE02C0 + +#define mmMME1_CTRL_PCU_DUMMY_A_FP32_ODD 0xE02C4 + +#define mmMME1_CTRL_PCU_DUMMY_A_FP32_EVEN 0xE02C8 + +#define mmMME1_CTRL_PCU_DUMMY_B_FP32_ODD 0xE02CC + +#define mmMME1_CTRL_PCU_DUMMY_B_FP32_EVEN 0xE02D0 + +#define mmMME1_CTRL_PROT 0xE02D4 + +#define mmMME1_CTRL_EU_POWER_SAVE_DISABLE 0xE02D8 + +#define mmMME1_CTRL_CS_DBG_BLOCK_ID 0xE02DC + +#define mmMME1_CTRL_CS_DBG_STATUS_DROP_CNT 0xE02E0 + +#define mmMME1_CTRL_TE_CLOSE_CGATE 0xE02E4 + +#define mmMME1_CTRL_AGU_SM_INFLIGHT_CNTR 0xE02E8 + +#define mmMME1_CTRL_AGU_SM_TOTAL_CNTR 0xE02EC + +#define mmMME1_CTRL_EZSYNC_OUT_CREDIT 0xE02F0 + +#define mmMME1_CTRL_PCU_RL_SAT_SEC 0xE02F4 + +#define mmMME1_CTRL_AGU_SYNC_MSG_AXI_USER 0xE02F8 + +#define mmMME1_CTRL_QM_SLV_LBW_CLK_EN 0xE02FC + +#define mmMME1_CTRL_SHADOW_0_STATUS 0xE0400 + +#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_HIGH_S 0xE0408 + +#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_HIGH_L 0xE040C + +#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_HIGH_O 0xE0410 + +#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_LOW_S 0xE0414 + +#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_LOW_L 0xE0418 + +#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_LOW_O 0xE041C + +#define mmMME1_CTRL_SHADOW_0_HEADER_LOW 0xE0420 + +#define mmMME1_CTRL_SHADOW_0_HEADER_HIGH 0xE0424 + +#define 
mmMME1_CTRL_SHADOW_0_CONV_KERNEL_SIZE_MINUS_1 0xE0428 + +#define mmMME1_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_LOW 0xE042C + +#define mmMME1_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_HIGH 0xE0430 + +#define mmMME1_CTRL_SHADOW_0_NUM_ITERATIONS_MINUS_1 0xE0434 + +#define mmMME1_CTRL_SHADOW_0_OUTER_LOOP 0xE0438 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_0 0xE043C + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_1 0xE0440 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_2 0xE0444 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_3 0xE0448 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_4 0xE044C + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_0 0xE0450 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_1 0xE0454 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_2 0xE0458 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_3 0xE045C + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_4 0xE0460 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_0 0xE0464 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_1 0xE0468 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_2 0xE046C + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_3 0xE0470 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_0 0xE0474 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_1 0xE0478 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_2 0xE047C + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_3 0xE0480 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0484 + +#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_0 0xE0488 + +#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_1 0xE048C + +#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_2 0xE0490 + +#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_3 0xE0494 + +#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_4 0xE0498 + +#define mmMME1_CTRL_SHADOW_0_AGU_S_START_OFFSET_0 0xE049C + +#define mmMME1_CTRL_SHADOW_0_AGU_S_START_OFFSET_1 0xE04A0 + +#define mmMME1_CTRL_SHADOW_0_AGU_S_START_OFFSET_2 0xE04A4 + +#define mmMME1_CTRL_SHADOW_0_AGU_S_START_OFFSET_3 0xE04A8 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_0 0xE04AC + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_1 0xE04B0 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_2 0xE04B4 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_3 0xE04B8 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_4 0xE04BC + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_0 0xE04C0 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_1 0xE04C4 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_2 0xE04C8 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_3 0xE04CC + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_4 0xE04D0 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_0 0xE04D4 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_1 0xE04D8 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_2 0xE04DC + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_3 0xE04E0 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_0 0xE04E4 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_1 0xE04E8 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_2 0xE04EC + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_3 0xE04F0 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE04F4 + +#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE04F8 + +#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE04FC + +#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0500 + +#define 
mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0504 + +#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0508 + +#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_0 0xE050C + +#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_1 0xE0510 + +#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_2 0xE0514 + +#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_3 0xE0518 + +#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE051C + +#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE0520 + +#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE0524 + +#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE0528 + +#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE052C + +#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_0 0xE0530 + +#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_1 0xE0534 + +#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_2 0xE0538 + +#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_3 0xE053C + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_0 0xE0540 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_1 0xE0544 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_2 0xE0548 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_3 0xE054C + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_4 0xE0550 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_0 0xE0554 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_1 0xE0558 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_2 0xE055C + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_3 0xE0560 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_4 0xE0564 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_0 0xE0568 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_1 0xE056C + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_2 0xE0570 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_3 0xE0574 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_0 0xE0578 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_1 0xE057C + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_2 0xE0580 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_3 0xE0584 + +#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0588 + +#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE058C + +#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0590 + +#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0594 + +#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0598 + +#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE059C + +#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_0 0xE05A0 + +#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_1 0xE05A4 + +#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_2 0xE05A8 + +#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_3 0xE05AC + +#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE05B0 + +#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE05B4 + +#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE05B8 + +#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE05BC + +#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE05C0 + +#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_0 0xE05C4 + +#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_1 0xE05C8 + +#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_2 0xE05CC + +#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_3 0xE05D0 + +#define mmMME1_CTRL_SHADOW_0_DESC_SB_REPEAT 0xE05D4 + 
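The *_SHIFT/*_MASK pairs in the mme0_qm_masks.h hunk earlier in this patch give each field's bit offset together with its in-place mask, so a field is recovered by masking first and shifting second. A minimal sketch of that pattern, using the 2-bit ST field of MME0_QM_CGM_STS as defined above (the helper name is invented for illustration):

#include <linux/types.h>

/* Illustrative only: decode the ST field (bits [1:0]) of a raw
 * MME0_QM_CGM_STS value with the SHIFT/MASK pair from the masks hunk.
 */
static inline u32 mme0_qm_cgm_sts_st(u32 sts)
{
	return (sts & MME0_QM_CGM_STS_ST_MASK) >> MME0_QM_CGM_STS_ST_SHIFT;
}
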
+#define mmMME1_CTRL_SHADOW_0_DESC_RATE_LIMITER 0xE05D8 + +#define mmMME1_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE05DC + +#define mmMME1_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE05E0 + +#define mmMME1_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_HIGH 0xE05E4 + +#define mmMME1_CTRL_SHADOW_0_DESC_SYNC_OBJECT_DATA 0xE05E8 + +#define mmMME1_CTRL_SHADOW_0_DESC_AXI_USER_DATA 0xE05EC + +#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_S 0xE05F0 + +#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_L_LOCAL 0xE05F4 + +#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_L_REMOTE 0xE05F8 + +#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_O_LOCAL 0xE05FC + +#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_O_REMOTE 0xE0600 + +#define mmMME1_CTRL_SHADOW_0_DESC_PADDING_VALUE_S 0xE0604 + +#define mmMME1_CTRL_SHADOW_0_DESC_PADDING_VALUE_L 0xE0608 + +#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_S 0xE060C + +#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_LOCAL 0xE0610 + +#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_REMOTE 0xE0614 + +#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_LOCAL 0xE0618 + +#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_REMOTE 0xE061C + +#define mmMME1_CTRL_SHADOW_0_DESC_PCU_RL_SATURATION 0xE0620 + +#define mmMME1_CTRL_SHADOW_0_DESC_DUMMY 0xE0624 + +#define mmMME1_CTRL_SHADOW_1_STATUS 0xE0680 + +#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_HIGH_S 0xE0688 + +#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_HIGH_L 0xE068C + +#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_HIGH_O 0xE0690 + +#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_LOW_S 0xE0694 + +#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_LOW_L 0xE0698 + +#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_LOW_O 0xE069C + +#define mmMME1_CTRL_SHADOW_1_HEADER_LOW 0xE06A0 + +#define mmMME1_CTRL_SHADOW_1_HEADER_HIGH 0xE06A4 + +#define mmMME1_CTRL_SHADOW_1_CONV_KERNEL_SIZE_MINUS_1 0xE06A8 + +#define mmMME1_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_LOW 0xE06AC + +#define mmMME1_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_HIGH 0xE06B0 + +#define mmMME1_CTRL_SHADOW_1_NUM_ITERATIONS_MINUS_1 0xE06B4 + +#define mmMME1_CTRL_SHADOW_1_OUTER_LOOP 0xE06B8 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_0 0xE06BC + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_1 0xE06C0 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_2 0xE06C4 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_3 0xE06C8 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_4 0xE06CC + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_0 0xE06D0 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_1 0xE06D4 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_2 0xE06D8 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_3 0xE06DC + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_4 0xE06E0 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_0 0xE06E4 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_1 0xE06E8 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_2 0xE06EC + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_3 0xE06F0 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_0 0xE06F4 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_1 0xE06F8 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_2 0xE06FC + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_3 0xE0700 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0704 + +#define mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_0 0xE0708 + +#define mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_1 0xE070C + +#define mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_2 0xE0710 + +#define mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_3 0xE0714 + +#define 
mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_4 0xE0718 + +#define mmMME1_CTRL_SHADOW_1_AGU_S_START_OFFSET_0 0xE071C + +#define mmMME1_CTRL_SHADOW_1_AGU_S_START_OFFSET_1 0xE0720 + +#define mmMME1_CTRL_SHADOW_1_AGU_S_START_OFFSET_2 0xE0724 + +#define mmMME1_CTRL_SHADOW_1_AGU_S_START_OFFSET_3 0xE0728 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_0 0xE072C + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_1 0xE0730 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_2 0xE0734 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_3 0xE0738 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_4 0xE073C + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_0 0xE0740 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_1 0xE0744 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_2 0xE0748 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_3 0xE074C + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_4 0xE0750 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_0 0xE0754 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_1 0xE0758 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_2 0xE075C + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_3 0xE0760 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_0 0xE0764 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_1 0xE0768 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_2 0xE076C + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_3 0xE0770 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE0774 + +#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE0778 + +#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE077C + +#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0780 + +#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0784 + +#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0788 + +#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_0 0xE078C + +#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_1 0xE0790 + +#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_2 0xE0794 + +#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_3 0xE0798 + +#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE079C + +#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE07A0 + +#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE07A4 + +#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE07A8 + +#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE07AC + +#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_0 0xE07B0 + +#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_1 0xE07B4 + +#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_2 0xE07B8 + +#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_3 0xE07BC + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_0 0xE07C0 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_1 0xE07C4 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_2 0xE07C8 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_3 0xE07CC + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_4 0xE07D0 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_0 0xE07D4 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_1 0xE07D8 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_2 0xE07DC + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_3 0xE07E0 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_4 0xE07E4 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_0 0xE07E8 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_1 0xE07EC + +#define 
mmMME1_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_2 0xE07F0 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_3 0xE07F4 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_0 0xE07F8 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_1 0xE07FC + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_2 0xE0800 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_3 0xE0804 + +#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0808 + +#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE080C + +#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0810 + +#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0814 + +#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0818 + +#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE081C + +#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_0 0xE0820 + +#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_1 0xE0824 + +#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_2 0xE0828 + +#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_3 0xE082C + +#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE0830 + +#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE0834 + +#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE0838 + +#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE083C + +#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE0840 + +#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_0 0xE0844 + +#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_1 0xE0848 + +#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_2 0xE084C + +#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_3 0xE0850 + +#define mmMME1_CTRL_SHADOW_1_DESC_SB_REPEAT 0xE0854 + +#define mmMME1_CTRL_SHADOW_1_DESC_RATE_LIMITER 0xE0858 + +#define mmMME1_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE085C + +#define mmMME1_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE0860 + +#define mmMME1_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_HIGH 0xE0864 + +#define mmMME1_CTRL_SHADOW_1_DESC_SYNC_OBJECT_DATA 0xE0868 + +#define mmMME1_CTRL_SHADOW_1_DESC_AXI_USER_DATA 0xE086C + +#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_S 0xE0870 + +#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_L_LOCAL 0xE0874 + +#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_L_REMOTE 0xE0878 + +#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_O_LOCAL 0xE087C + +#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_O_REMOTE 0xE0880 + +#define mmMME1_CTRL_SHADOW_1_DESC_PADDING_VALUE_S 0xE0884 + +#define mmMME1_CTRL_SHADOW_1_DESC_PADDING_VALUE_L 0xE0888 + +#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_S 0xE088C + +#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_LOCAL 0xE0890 + +#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_REMOTE 0xE0894 + +#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_LOCAL 0xE0898 + +#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_REMOTE 0xE089C + +#define mmMME1_CTRL_SHADOW_1_DESC_PCU_RL_SATURATION 0xE08A0 + +#define mmMME1_CTRL_SHADOW_1_DESC_DUMMY 0xE08A4 + +#define mmMME1_CTRL_SHADOW_2_STATUS 0xE0900 + +#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_HIGH_S 0xE0908 + +#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_HIGH_L 0xE090C + +#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_HIGH_O 0xE0910 + +#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_LOW_S 0xE0914 + +#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_LOW_L 0xE0918 + +#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_LOW_O 0xE091C + +#define mmMME1_CTRL_SHADOW_2_HEADER_LOW 0xE0920 + +#define mmMME1_CTRL_SHADOW_2_HEADER_HIGH 0xE0924 + +#define 
mmMME1_CTRL_SHADOW_2_CONV_KERNEL_SIZE_MINUS_1 0xE0928 + +#define mmMME1_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_LOW 0xE092C + +#define mmMME1_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_HIGH 0xE0930 + +#define mmMME1_CTRL_SHADOW_2_NUM_ITERATIONS_MINUS_1 0xE0934 + +#define mmMME1_CTRL_SHADOW_2_OUTER_LOOP 0xE0938 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_0 0xE093C + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_1 0xE0940 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_2 0xE0944 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_3 0xE0948 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_4 0xE094C + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_0 0xE0950 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_1 0xE0954 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_2 0xE0958 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_3 0xE095C + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_4 0xE0960 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_0 0xE0964 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_1 0xE0968 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_2 0xE096C + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_3 0xE0970 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_0 0xE0974 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_1 0xE0978 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_2 0xE097C + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_3 0xE0980 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0984 + +#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_0 0xE0988 + +#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_1 0xE098C + +#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_2 0xE0990 + +#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_3 0xE0994 + +#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_4 0xE0998 + +#define mmMME1_CTRL_SHADOW_2_AGU_S_START_OFFSET_0 0xE099C + +#define mmMME1_CTRL_SHADOW_2_AGU_S_START_OFFSET_1 0xE09A0 + +#define mmMME1_CTRL_SHADOW_2_AGU_S_START_OFFSET_2 0xE09A4 + +#define mmMME1_CTRL_SHADOW_2_AGU_S_START_OFFSET_3 0xE09A8 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_0 0xE09AC + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_1 0xE09B0 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_2 0xE09B4 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_3 0xE09B8 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_4 0xE09BC + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_0 0xE09C0 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_1 0xE09C4 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_2 0xE09C8 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_3 0xE09CC + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_4 0xE09D0 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_0 0xE09D4 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_1 0xE09D8 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_2 0xE09DC + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_3 0xE09E0 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_0 0xE09E4 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_1 0xE09E8 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_2 0xE09EC + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_3 0xE09F0 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE09F4 + +#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE09F8 + +#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE09FC + +#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0A00 + +#define 
mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0A04 + +#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0A08 + +#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_0 0xE0A0C + +#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_1 0xE0A10 + +#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_2 0xE0A14 + +#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_3 0xE0A18 + +#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE0A1C + +#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE0A20 + +#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE0A24 + +#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE0A28 + +#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE0A2C + +#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_0 0xE0A30 + +#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_1 0xE0A34 + +#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_2 0xE0A38 + +#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_3 0xE0A3C + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_0 0xE0A40 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_1 0xE0A44 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_2 0xE0A48 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_3 0xE0A4C + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_4 0xE0A50 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_0 0xE0A54 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_1 0xE0A58 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_2 0xE0A5C + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_3 0xE0A60 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_4 0xE0A64 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_0 0xE0A68 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_1 0xE0A6C + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_2 0xE0A70 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_3 0xE0A74 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_0 0xE0A78 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_1 0xE0A7C + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_2 0xE0A80 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_3 0xE0A84 + +#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0A88 + +#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE0A8C + +#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0A90 + +#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0A94 + +#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0A98 + +#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE0A9C + +#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_0 0xE0AA0 + +#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_1 0xE0AA4 + +#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_2 0xE0AA8 + +#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_3 0xE0AAC + +#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE0AB0 + +#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE0AB4 + +#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE0AB8 + +#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE0ABC + +#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE0AC0 + +#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_0 0xE0AC4 + +#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_1 0xE0AC8 + +#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_2 0xE0ACC + +#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_3 0xE0AD0 + +#define mmMME1_CTRL_SHADOW_2_DESC_SB_REPEAT 0xE0AD4 + 
+#define mmMME1_CTRL_SHADOW_2_DESC_RATE_LIMITER 0xE0AD8 + +#define mmMME1_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE0ADC + +#define mmMME1_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE0AE0 + +#define mmMME1_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_HIGH 0xE0AE4 + +#define mmMME1_CTRL_SHADOW_2_DESC_SYNC_OBJECT_DATA 0xE0AE8 + +#define mmMME1_CTRL_SHADOW_2_DESC_AXI_USER_DATA 0xE0AEC + +#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_S 0xE0AF0 + +#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_L_LOCAL 0xE0AF4 + +#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_L_REMOTE 0xE0AF8 + +#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_O_LOCAL 0xE0AFC + +#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_O_REMOTE 0xE0B00 + +#define mmMME1_CTRL_SHADOW_2_DESC_PADDING_VALUE_S 0xE0B04 + +#define mmMME1_CTRL_SHADOW_2_DESC_PADDING_VALUE_L 0xE0B08 + +#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_S 0xE0B0C + +#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_LOCAL 0xE0B10 + +#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_REMOTE 0xE0B14 + +#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_LOCAL 0xE0B18 + +#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_REMOTE 0xE0B1C + +#define mmMME1_CTRL_SHADOW_2_DESC_PCU_RL_SATURATION 0xE0B20 + +#define mmMME1_CTRL_SHADOW_2_DESC_DUMMY 0xE0B24 + +#define mmMME1_CTRL_SHADOW_3_STATUS 0xE0B80 + +#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_HIGH_S 0xE0B88 + +#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_HIGH_L 0xE0B8C + +#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_HIGH_O 0xE0B90 + +#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_LOW_S 0xE0B94 + +#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_LOW_L 0xE0B98 + +#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_LOW_O 0xE0B9C + +#define mmMME1_CTRL_SHADOW_3_HEADER_LOW 0xE0BA0 + +#define mmMME1_CTRL_SHADOW_3_HEADER_HIGH 0xE0BA4 + +#define mmMME1_CTRL_SHADOW_3_CONV_KERNEL_SIZE_MINUS_1 0xE0BA8 + +#define mmMME1_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_LOW 0xE0BAC + +#define mmMME1_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_HIGH 0xE0BB0 + +#define mmMME1_CTRL_SHADOW_3_NUM_ITERATIONS_MINUS_1 0xE0BB4 + +#define mmMME1_CTRL_SHADOW_3_OUTER_LOOP 0xE0BB8 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_0 0xE0BBC + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_1 0xE0BC0 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_2 0xE0BC4 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_3 0xE0BC8 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_4 0xE0BCC + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_0 0xE0BD0 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_1 0xE0BD4 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_2 0xE0BD8 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_3 0xE0BDC + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_4 0xE0BE0 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_0 0xE0BE4 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_1 0xE0BE8 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_2 0xE0BEC + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_3 0xE0BF0 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_0 0xE0BF4 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_1 0xE0BF8 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_2 0xE0BFC + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_3 0xE0C00 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0C04 + +#define mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_0 0xE0C08 + +#define mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_1 0xE0C0C + +#define mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_2 0xE0C10 + +#define mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_3 0xE0C14 + +#define 
mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_4 0xE0C18 + +#define mmMME1_CTRL_SHADOW_3_AGU_S_START_OFFSET_0 0xE0C1C + +#define mmMME1_CTRL_SHADOW_3_AGU_S_START_OFFSET_1 0xE0C20 + +#define mmMME1_CTRL_SHADOW_3_AGU_S_START_OFFSET_2 0xE0C24 + +#define mmMME1_CTRL_SHADOW_3_AGU_S_START_OFFSET_3 0xE0C28 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_0 0xE0C2C + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_1 0xE0C30 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_2 0xE0C34 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_3 0xE0C38 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_4 0xE0C3C + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_0 0xE0C40 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_1 0xE0C44 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_2 0xE0C48 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_3 0xE0C4C + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_4 0xE0C50 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_0 0xE0C54 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_1 0xE0C58 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_2 0xE0C5C + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_3 0xE0C60 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_0 0xE0C64 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_1 0xE0C68 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_2 0xE0C6C + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_3 0xE0C70 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE0C74 + +#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE0C78 + +#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE0C7C + +#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0C80 + +#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0C84 + +#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0C88 + +#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_0 0xE0C8C + +#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_1 0xE0C90 + +#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_2 0xE0C94 + +#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_3 0xE0C98 + +#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE0C9C + +#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE0CA0 + +#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE0CA4 + +#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE0CA8 + +#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE0CAC + +#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_0 0xE0CB0 + +#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_1 0xE0CB4 + +#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_2 0xE0CB8 + +#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_3 0xE0CBC + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_0 0xE0CC0 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_1 0xE0CC4 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_2 0xE0CC8 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_3 0xE0CCC + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_4 0xE0CD0 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_0 0xE0CD4 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_1 0xE0CD8 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_2 0xE0CDC + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_3 0xE0CE0 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_4 0xE0CE4 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_0 0xE0CE8 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_1 0xE0CEC + +#define 
mmMME1_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_2 0xE0CF0 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_3 0xE0CF4 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_0 0xE0CF8 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_1 0xE0CFC + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_2 0xE0D00 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_3 0xE0D04 + +#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0D08 + +#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE0D0C + +#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0D10 + +#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0D14 + +#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0D18 + +#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE0D1C + +#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_0 0xE0D20 + +#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_1 0xE0D24 + +#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_2 0xE0D28 + +#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_3 0xE0D2C + +#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE0D30 + +#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE0D34 + +#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE0D38 + +#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE0D3C + +#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE0D40 + +#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_0 0xE0D44 + +#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_1 0xE0D48 + +#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_2 0xE0D4C + +#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_3 0xE0D50 + +#define mmMME1_CTRL_SHADOW_3_DESC_SB_REPEAT 0xE0D54 + +#define mmMME1_CTRL_SHADOW_3_DESC_RATE_LIMITER 0xE0D58 + +#define mmMME1_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE0D5C + +#define mmMME1_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE0D60 + +#define mmMME1_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_HIGH 0xE0D64 + +#define mmMME1_CTRL_SHADOW_3_DESC_SYNC_OBJECT_DATA 0xE0D68 + +#define mmMME1_CTRL_SHADOW_3_DESC_AXI_USER_DATA 0xE0D6C + +#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_S 0xE0D70 + +#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_L_LOCAL 0xE0D74 + +#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_L_REMOTE 0xE0D78 + +#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_O_LOCAL 0xE0D7C + +#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_O_REMOTE 0xE0D80 + +#define mmMME1_CTRL_SHADOW_3_DESC_PADDING_VALUE_S 0xE0D84 + +#define mmMME1_CTRL_SHADOW_3_DESC_PADDING_VALUE_L 0xE0D88 + +#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_S 0xE0D8C + +#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_LOCAL 0xE0D90 + +#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_REMOTE 0xE0D94 + +#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_LOCAL 0xE0D98 + +#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_REMOTE 0xE0D9C + +#define mmMME1_CTRL_SHADOW_3_DESC_PCU_RL_SATURATION 0xE0DA0 + +#define mmMME1_CTRL_SHADOW_3_DESC_DUMMY 0xE0DA4 + +#endif /* ASIC_REG_MME1_CTRL_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h new file mode 100644 index 000000000..a1f2eb8b9 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h @@ -0,0 +1,1456 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME2_CTRL_REGS_H_ +#define ASIC_REG_MME2_CTRL_REGS_H_ + +/* + ***************************************** + * MME2_CTRL (Prototype: MME) + ***************************************** + */ + +#define mmMME2_CTRL_ARCH_STATUS 0x160000 + +#define mmMME2_CTRL_ARCH_BASE_ADDR_HIGH_S 0x160008 + +#define mmMME2_CTRL_ARCH_BASE_ADDR_HIGH_L 0x16000C + +#define mmMME2_CTRL_ARCH_BASE_ADDR_HIGH_O 0x160010 + +#define mmMME2_CTRL_ARCH_BASE_ADDR_LOW_S 0x160014 + +#define mmMME2_CTRL_ARCH_BASE_ADDR_LOW_L 0x160018 + +#define mmMME2_CTRL_ARCH_BASE_ADDR_LOW_O 0x16001C + +#define mmMME2_CTRL_ARCH_HEADER_LOW 0x160020 + +#define mmMME2_CTRL_ARCH_HEADER_HIGH 0x160024 + +#define mmMME2_CTRL_ARCH_CONV_KERNEL_SIZE_MINUS_1 0x160028 + +#define mmMME2_CTRL_ARCH_CONV_ASSOCIATED_DIMS_LOW 0x16002C + +#define mmMME2_CTRL_ARCH_CONV_ASSOCIATED_DIMS_HIGH 0x160030 + +#define mmMME2_CTRL_ARCH_NUM_ITERATIONS_MINUS_1 0x160034 + +#define mmMME2_CTRL_ARCH_OUTER_LOOP 0x160038 + +#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_0 0x16003C + +#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_1 0x160040 + +#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_2 0x160044 + +#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_3 0x160048 + +#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_4 0x16004C + +#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_0 0x160050 + +#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_1 0x160054 + +#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_2 0x160058 + +#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_3 0x16005C + +#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_4 0x160060 + +#define mmMME2_CTRL_ARCH_TENSOR_S_ROI_SIZE_0 0x160064 + +#define mmMME2_CTRL_ARCH_TENSOR_S_ROI_SIZE_1 0x160068 + +#define mmMME2_CTRL_ARCH_TENSOR_S_ROI_SIZE_2 0x16006C + +#define mmMME2_CTRL_ARCH_TENSOR_S_ROI_SIZE_3 0x160070 + +#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_0 0x160074 + +#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_1 0x160078 + +#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_2 0x16007C + +#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_3 0x160080 + +#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160084 + +#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_0 0x160088 + +#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_1 0x16008C + +#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_2 0x160090 + +#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_3 0x160094 + +#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_4 0x160098 + +#define mmMME2_CTRL_ARCH_AGU_S_START_OFFSET_0 0x16009C + +#define mmMME2_CTRL_ARCH_AGU_S_START_OFFSET_1 0x1600A0 + +#define mmMME2_CTRL_ARCH_AGU_S_START_OFFSET_2 0x1600A4 + +#define mmMME2_CTRL_ARCH_AGU_S_START_OFFSET_3 0x1600A8 + +#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_0 0x1600AC + +#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_1 0x1600B0 + +#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_2 0x1600B4 + +#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_3 0x1600B8 + +#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_4 0x1600BC + +#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_0 0x1600C0 + +#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_1 0x1600C4 + +#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_2 0x1600C8 + +#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_3 0x1600CC + +#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_4 0x1600D0 + +#define mmMME2_CTRL_ARCH_TENSOR_L_ROI_SIZE_0 0x1600D4 + +#define mmMME2_CTRL_ARCH_TENSOR_L_ROI_SIZE_1 0x1600D8 + 
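/*
 * Illustrative sketch, not part of the generated header: registers with a
 * _0.._4 suffix (e.g. mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_0..4 at
 * 0x16003C..0x16004C above) form contiguous 4-byte-strided arrays, so
 * per-dimension descriptor fields can be programmed in a loop.  The
 * ioremap'ed base pointer, the helper name and the use of plain writel()
 * are assumptions for the example, not the driver's actual access path.
 */
#include <linux/io.h>
#include <linux/types.h>

static void mme2_set_tensor_s_valid_elements(void __iomem *cfg_base,
					     const u32 valid[5])
{
	int i;

	/* cfg_base is assumed to map the device config space at offset 0 */
	for (i = 0; i < 5; i++)
		writel(valid[i],
		       cfg_base + mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_0 +
		       i * sizeof(u32));
}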
+#define mmMME2_CTRL_ARCH_TENSOR_L_ROI_SIZE_2 0x1600DC + +#define mmMME2_CTRL_ARCH_TENSOR_L_ROI_SIZE_3 0x1600E0 + +#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_0 0x1600E4 + +#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_1 0x1600E8 + +#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_2 0x1600EC + +#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_3 0x1600F0 + +#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1600F4 + +#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1600F8 + +#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1600FC + +#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160100 + +#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160104 + +#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160108 + +#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_0 0x16010C + +#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_1 0x160110 + +#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_2 0x160114 + +#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_3 0x160118 + +#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x16011C + +#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x160120 + +#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x160124 + +#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x160128 + +#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x16012C + +#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_0 0x160130 + +#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_1 0x160134 + +#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_2 0x160138 + +#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_3 0x16013C + +#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_0 0x160140 + +#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_1 0x160144 + +#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_2 0x160148 + +#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_3 0x16014C + +#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_4 0x160150 + +#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_0 0x160154 + +#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_1 0x160158 + +#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_2 0x16015C + +#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_3 0x160160 + +#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_4 0x160164 + +#define mmMME2_CTRL_ARCH_TENSOR_O_ROI_SIZE_0 0x160168 + +#define mmMME2_CTRL_ARCH_TENSOR_O_ROI_SIZE_1 0x16016C + +#define mmMME2_CTRL_ARCH_TENSOR_O_ROI_SIZE_2 0x160170 + +#define mmMME2_CTRL_ARCH_TENSOR_O_ROI_SIZE_3 0x160174 + +#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_0 0x160178 + +#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_1 0x16017C + +#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_2 0x160180 + +#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_3 0x160184 + +#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160188 + +#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x16018C + +#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160190 + +#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160194 + +#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160198 + +#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x16019C + +#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_0 0x1601A0 + +#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_1 0x1601A4 + +#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_2 0x1601A8 + +#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_3 0x1601AC + +#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1601B0 + +#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1601B4 + +#define 
mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1601B8 + +#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1601BC + +#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1601C0 + +#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_0 0x1601C4 + +#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_1 0x1601C8 + +#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_2 0x1601CC + +#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_3 0x1601D0 + +#define mmMME2_CTRL_ARCH_DESC_SB_REPEAT 0x1601D4 + +#define mmMME2_CTRL_ARCH_DESC_RATE_LIMITER 0x1601D8 + +#define mmMME2_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1601DC + +#define mmMME2_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1601E0 + +#define mmMME2_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_HIGH 0x1601E4 + +#define mmMME2_CTRL_ARCH_DESC_SYNC_OBJECT_DATA 0x1601E8 + +#define mmMME2_CTRL_ARCH_DESC_AXI_USER_DATA 0x1601EC + +#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_S 0x1601F0 + +#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_L_LOCAL 0x1601F4 + +#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_L_REMOTE 0x1601F8 + +#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_O_LOCAL 0x1601FC + +#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_O_REMOTE 0x160200 + +#define mmMME2_CTRL_ARCH_DESC_PADDING_VALUE_S 0x160204 + +#define mmMME2_CTRL_ARCH_DESC_PADDING_VALUE_L 0x160208 + +#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_S 0x16020C + +#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_L_LOCAL 0x160210 + +#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_L_REMOTE 0x160214 + +#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_O_LOCAL 0x160218 + +#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_O_REMOTE 0x16021C + +#define mmMME2_CTRL_ARCH_DESC_PCU_RL_SATURATION 0x160220 + +#define mmMME2_CTRL_ARCH_DESC_DUMMY 0x160224 + +#define mmMME2_CTRL_CMD 0x160280 + +#define mmMME2_CTRL_STATUS1 0x160284 + +#define mmMME2_CTRL_RESET 0x160288 + +#define mmMME2_CTRL_QM_STALL 0x16028C + +#define mmMME2_CTRL_SYNC_OBJECT_FIFO_TH 0x160290 + +#define mmMME2_CTRL_EUS_ROLLUP_CNT_ADD 0x160294 + +#define mmMME2_CTRL_INTR_CAUSE 0x160298 + +#define mmMME2_CTRL_INTR_MASK 0x16029C + +#define mmMME2_CTRL_LOG_SHADOW 0x1602A0 + +#define mmMME2_CTRL_PCU_RL_DESC0 0x1602A4 + +#define mmMME2_CTRL_PCU_RL_TOKEN_UPDATE 0x1602A8 + +#define mmMME2_CTRL_PCU_RL_TH 0x1602AC + +#define mmMME2_CTRL_PCU_RL_MIN 0x1602B0 + +#define mmMME2_CTRL_PCU_RL_CTRL_EN 0x1602B4 + +#define mmMME2_CTRL_PCU_RL_HISTORY_LOG_SIZE 0x1602B8 + +#define mmMME2_CTRL_PCU_DUMMY_A_BF16 0x1602BC + +#define mmMME2_CTRL_PCU_DUMMY_B_BF16 0x1602C0 + +#define mmMME2_CTRL_PCU_DUMMY_A_FP32_ODD 0x1602C4 + +#define mmMME2_CTRL_PCU_DUMMY_A_FP32_EVEN 0x1602C8 + +#define mmMME2_CTRL_PCU_DUMMY_B_FP32_ODD 0x1602CC + +#define mmMME2_CTRL_PCU_DUMMY_B_FP32_EVEN 0x1602D0 + +#define mmMME2_CTRL_PROT 0x1602D4 + +#define mmMME2_CTRL_EU_POWER_SAVE_DISABLE 0x1602D8 + +#define mmMME2_CTRL_CS_DBG_BLOCK_ID 0x1602DC + +#define mmMME2_CTRL_CS_DBG_STATUS_DROP_CNT 0x1602E0 + +#define mmMME2_CTRL_TE_CLOSE_CGATE 0x1602E4 + +#define mmMME2_CTRL_AGU_SM_INFLIGHT_CNTR 0x1602E8 + +#define mmMME2_CTRL_AGU_SM_TOTAL_CNTR 0x1602EC + +#define mmMME2_CTRL_EZSYNC_OUT_CREDIT 0x1602F0 + +#define mmMME2_CTRL_PCU_RL_SAT_SEC 0x1602F4 + +#define mmMME2_CTRL_AGU_SYNC_MSG_AXI_USER 0x1602F8 + +#define mmMME2_CTRL_QM_SLV_LBW_CLK_EN 0x1602FC + +#define mmMME2_CTRL_SHADOW_0_STATUS 0x160400 + +#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_HIGH_S 0x160408 + +#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_HIGH_L 0x16040C + +#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_HIGH_O 0x160410 + +#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_LOW_S 0x160414 + +#define 
mmMME2_CTRL_SHADOW_0_BASE_ADDR_LOW_L 0x160418 + +#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_LOW_O 0x16041C + +#define mmMME2_CTRL_SHADOW_0_HEADER_LOW 0x160420 + +#define mmMME2_CTRL_SHADOW_0_HEADER_HIGH 0x160424 + +#define mmMME2_CTRL_SHADOW_0_CONV_KERNEL_SIZE_MINUS_1 0x160428 + +#define mmMME2_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_LOW 0x16042C + +#define mmMME2_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_HIGH 0x160430 + +#define mmMME2_CTRL_SHADOW_0_NUM_ITERATIONS_MINUS_1 0x160434 + +#define mmMME2_CTRL_SHADOW_0_OUTER_LOOP 0x160438 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_0 0x16043C + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_1 0x160440 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_2 0x160444 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_3 0x160448 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_4 0x16044C + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_0 0x160450 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_1 0x160454 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_2 0x160458 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_3 0x16045C + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_4 0x160460 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_0 0x160464 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_1 0x160468 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_2 0x16046C + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_3 0x160470 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_0 0x160474 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_1 0x160478 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_2 0x16047C + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_3 0x160480 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160484 + +#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_0 0x160488 + +#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_1 0x16048C + +#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_2 0x160490 + +#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_3 0x160494 + +#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_4 0x160498 + +#define mmMME2_CTRL_SHADOW_0_AGU_S_START_OFFSET_0 0x16049C + +#define mmMME2_CTRL_SHADOW_0_AGU_S_START_OFFSET_1 0x1604A0 + +#define mmMME2_CTRL_SHADOW_0_AGU_S_START_OFFSET_2 0x1604A4 + +#define mmMME2_CTRL_SHADOW_0_AGU_S_START_OFFSET_3 0x1604A8 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_0 0x1604AC + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_1 0x1604B0 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_2 0x1604B4 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_3 0x1604B8 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_4 0x1604BC + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_0 0x1604C0 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_1 0x1604C4 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_2 0x1604C8 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_3 0x1604CC + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_4 0x1604D0 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_0 0x1604D4 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_1 0x1604D8 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_2 0x1604DC + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_3 0x1604E0 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_0 0x1604E4 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_1 0x1604E8 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_2 0x1604EC + +#define mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_3 0x1604F0 + +#define 
mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1604F4 + +#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1604F8 + +#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1604FC + +#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160500 + +#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160504 + +#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160508 + +#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_0 0x16050C + +#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_1 0x160510 + +#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_2 0x160514 + +#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_3 0x160518 + +#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x16051C + +#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x160520 + +#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x160524 + +#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x160528 + +#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x16052C + +#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_0 0x160530 + +#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_1 0x160534 + +#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_2 0x160538 + +#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_3 0x16053C + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_0 0x160540 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_1 0x160544 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_2 0x160548 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_3 0x16054C + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_4 0x160550 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_0 0x160554 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_1 0x160558 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_2 0x16055C + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_3 0x160560 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_4 0x160564 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_0 0x160568 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_1 0x16056C + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_2 0x160570 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_3 0x160574 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_0 0x160578 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_1 0x16057C + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_2 0x160580 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_3 0x160584 + +#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160588 + +#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x16058C + +#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160590 + +#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160594 + +#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160598 + +#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x16059C + +#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_0 0x1605A0 + +#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_1 0x1605A4 + +#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_2 0x1605A8 + +#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_3 0x1605AC + +#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1605B0 + +#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1605B4 + +#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1605B8 + +#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1605BC + +#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1605C0 + 
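/*
 * Illustrative sketch, not part of the generated header: this MME2_CTRL
 * block repeats the MME1_CTRL layout at a higher base (MME1_CTRL registers
 * sit at 0xE0xxx, MME2_CTRL at 0x160xxx, i.e. 0x80000 apart), so one
 * engine's register offset can be derived from another's.  The stride
 * constant and helper name are inferred from the two blocks shown in this
 * patch, not taken from the driver.
 */
#include <linux/types.h>

#define MME_CTRL_ENGINE_STRIDE	0x80000	/* 0x160000 - 0xE0000 */

static inline u32 mme_ctrl_next_engine_reg(u32 reg)
{
	/* e.g. 0xE0DA4 (mmMME1_CTRL_SHADOW_3_DESC_DUMMY) + stride
	 * == 0x160DA4 (mmMME2_CTRL_SHADOW_3_DESC_DUMMY)
	 */
	return reg + MME_CTRL_ENGINE_STRIDE;
}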
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_0 0x1605C4 + +#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_1 0x1605C8 + +#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_2 0x1605CC + +#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_3 0x1605D0 + +#define mmMME2_CTRL_SHADOW_0_DESC_SB_REPEAT 0x1605D4 + +#define mmMME2_CTRL_SHADOW_0_DESC_RATE_LIMITER 0x1605D8 + +#define mmMME2_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1605DC + +#define mmMME2_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1605E0 + +#define mmMME2_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_HIGH 0x1605E4 + +#define mmMME2_CTRL_SHADOW_0_DESC_SYNC_OBJECT_DATA 0x1605E8 + +#define mmMME2_CTRL_SHADOW_0_DESC_AXI_USER_DATA 0x1605EC + +#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_S 0x1605F0 + +#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_L_LOCAL 0x1605F4 + +#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_L_REMOTE 0x1605F8 + +#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_O_LOCAL 0x1605FC + +#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_O_REMOTE 0x160600 + +#define mmMME2_CTRL_SHADOW_0_DESC_PADDING_VALUE_S 0x160604 + +#define mmMME2_CTRL_SHADOW_0_DESC_PADDING_VALUE_L 0x160608 + +#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_S 0x16060C + +#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_LOCAL 0x160610 + +#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_REMOTE 0x160614 + +#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_LOCAL 0x160618 + +#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_REMOTE 0x16061C + +#define mmMME2_CTRL_SHADOW_0_DESC_PCU_RL_SATURATION 0x160620 + +#define mmMME2_CTRL_SHADOW_0_DESC_DUMMY 0x160624 + +#define mmMME2_CTRL_SHADOW_1_STATUS 0x160680 + +#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_HIGH_S 0x160688 + +#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_HIGH_L 0x16068C + +#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_HIGH_O 0x160690 + +#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_LOW_S 0x160694 + +#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_LOW_L 0x160698 + +#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_LOW_O 0x16069C + +#define mmMME2_CTRL_SHADOW_1_HEADER_LOW 0x1606A0 + +#define mmMME2_CTRL_SHADOW_1_HEADER_HIGH 0x1606A4 + +#define mmMME2_CTRL_SHADOW_1_CONV_KERNEL_SIZE_MINUS_1 0x1606A8 + +#define mmMME2_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_LOW 0x1606AC + +#define mmMME2_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_HIGH 0x1606B0 + +#define mmMME2_CTRL_SHADOW_1_NUM_ITERATIONS_MINUS_1 0x1606B4 + +#define mmMME2_CTRL_SHADOW_1_OUTER_LOOP 0x1606B8 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_0 0x1606BC + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_1 0x1606C0 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_2 0x1606C4 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_3 0x1606C8 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_4 0x1606CC + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_0 0x1606D0 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_1 0x1606D4 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_2 0x1606D8 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_3 0x1606DC + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_4 0x1606E0 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_0 0x1606E4 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_1 0x1606E8 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_2 0x1606EC + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_3 0x1606F0 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_0 0x1606F4 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_1 0x1606F8 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_2 0x1606FC + +#define 
mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_3 0x160700 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160704 + +#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_0 0x160708 + +#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_1 0x16070C + +#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_2 0x160710 + +#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_3 0x160714 + +#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_4 0x160718 + +#define mmMME2_CTRL_SHADOW_1_AGU_S_START_OFFSET_0 0x16071C + +#define mmMME2_CTRL_SHADOW_1_AGU_S_START_OFFSET_1 0x160720 + +#define mmMME2_CTRL_SHADOW_1_AGU_S_START_OFFSET_2 0x160724 + +#define mmMME2_CTRL_SHADOW_1_AGU_S_START_OFFSET_3 0x160728 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_0 0x16072C + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_1 0x160730 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_2 0x160734 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_3 0x160738 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_4 0x16073C + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_0 0x160740 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_1 0x160744 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_2 0x160748 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_3 0x16074C + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_4 0x160750 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_0 0x160754 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_1 0x160758 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_2 0x16075C + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_3 0x160760 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_0 0x160764 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_1 0x160768 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_2 0x16076C + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_3 0x160770 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x160774 + +#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x160778 + +#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x16077C + +#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160780 + +#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160784 + +#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160788 + +#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_0 0x16078C + +#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_1 0x160790 + +#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_2 0x160794 + +#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_3 0x160798 + +#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x16079C + +#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1607A0 + +#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1607A4 + +#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1607A8 + +#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1607AC + +#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_0 0x1607B0 + +#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_1 0x1607B4 + +#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_2 0x1607B8 + +#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_3 0x1607BC + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_0 0x1607C0 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_1 0x1607C4 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_2 0x1607C8 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_3 0x1607CC + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_4 0x1607D0 + +#define 
mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_0 0x1607D4 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_1 0x1607D8 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_2 0x1607DC + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_3 0x1607E0 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_4 0x1607E4 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_0 0x1607E8 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_1 0x1607EC + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_2 0x1607F0 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_3 0x1607F4 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_0 0x1607F8 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_1 0x1607FC + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_2 0x160800 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_3 0x160804 + +#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160808 + +#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x16080C + +#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160810 + +#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160814 + +#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160818 + +#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x16081C + +#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_0 0x160820 + +#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_1 0x160824 + +#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_2 0x160828 + +#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_3 0x16082C + +#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x160830 + +#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x160834 + +#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x160838 + +#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x16083C + +#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x160840 + +#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_0 0x160844 + +#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_1 0x160848 + +#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_2 0x16084C + +#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_3 0x160850 + +#define mmMME2_CTRL_SHADOW_1_DESC_SB_REPEAT 0x160854 + +#define mmMME2_CTRL_SHADOW_1_DESC_RATE_LIMITER 0x160858 + +#define mmMME2_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x16085C + +#define mmMME2_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x160860 + +#define mmMME2_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_HIGH 0x160864 + +#define mmMME2_CTRL_SHADOW_1_DESC_SYNC_OBJECT_DATA 0x160868 + +#define mmMME2_CTRL_SHADOW_1_DESC_AXI_USER_DATA 0x16086C + +#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_S 0x160870 + +#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_L_LOCAL 0x160874 + +#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_L_REMOTE 0x160878 + +#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_O_LOCAL 0x16087C + +#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_O_REMOTE 0x160880 + +#define mmMME2_CTRL_SHADOW_1_DESC_PADDING_VALUE_S 0x160884 + +#define mmMME2_CTRL_SHADOW_1_DESC_PADDING_VALUE_L 0x160888 + +#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_S 0x16088C + +#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_LOCAL 0x160890 + +#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_REMOTE 0x160894 + +#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_LOCAL 0x160898 + +#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_REMOTE 0x16089C + +#define mmMME2_CTRL_SHADOW_1_DESC_PCU_RL_SATURATION 0x1608A0 + +#define mmMME2_CTRL_SHADOW_1_DESC_DUMMY 0x1608A4 + +#define mmMME2_CTRL_SHADOW_2_STATUS 0x160900 + 
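/*
 * Illustrative sketch, not part of the generated header: the four SHADOW_n
 * descriptor banks repeat one register layout at a fixed 0x280-byte stride
 * (SHADOW_0_STATUS 0x160400, SHADOW_1_STATUS 0x160680, SHADOW_2_STATUS
 * 0x160900, SHADOW_3_STATUS 0x160B80), so any bank's register can be
 * derived from its SHADOW_0 counterpart.  The stride macro and helper are
 * inferred from those offsets, not taken from the driver.
 */
#include <linux/types.h>

#define MME2_CTRL_SHADOW_BANK_STRIDE	0x280

static inline u32 mme2_ctrl_shadow_reg(u32 shadow0_reg, u32 bank)
{
	/* e.g. mme2_ctrl_shadow_reg(mmMME2_CTRL_SHADOW_0_DESC_DUMMY, 2)
	 * == 0x160B24 == mmMME2_CTRL_SHADOW_2_DESC_DUMMY
	 */
	return shadow0_reg + bank * MME2_CTRL_SHADOW_BANK_STRIDE;
}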
+#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_HIGH_S 0x160908 + +#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_HIGH_L 0x16090C + +#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_HIGH_O 0x160910 + +#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_LOW_S 0x160914 + +#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_LOW_L 0x160918 + +#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_LOW_O 0x16091C + +#define mmMME2_CTRL_SHADOW_2_HEADER_LOW 0x160920 + +#define mmMME2_CTRL_SHADOW_2_HEADER_HIGH 0x160924 + +#define mmMME2_CTRL_SHADOW_2_CONV_KERNEL_SIZE_MINUS_1 0x160928 + +#define mmMME2_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_LOW 0x16092C + +#define mmMME2_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_HIGH 0x160930 + +#define mmMME2_CTRL_SHADOW_2_NUM_ITERATIONS_MINUS_1 0x160934 + +#define mmMME2_CTRL_SHADOW_2_OUTER_LOOP 0x160938 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_0 0x16093C + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_1 0x160940 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_2 0x160944 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_3 0x160948 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_4 0x16094C + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_0 0x160950 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_1 0x160954 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_2 0x160958 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_3 0x16095C + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_4 0x160960 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_0 0x160964 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_1 0x160968 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_2 0x16096C + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_3 0x160970 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_0 0x160974 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_1 0x160978 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_2 0x16097C + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_3 0x160980 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160984 + +#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_0 0x160988 + +#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_1 0x16098C + +#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_2 0x160990 + +#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_3 0x160994 + +#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_4 0x160998 + +#define mmMME2_CTRL_SHADOW_2_AGU_S_START_OFFSET_0 0x16099C + +#define mmMME2_CTRL_SHADOW_2_AGU_S_START_OFFSET_1 0x1609A0 + +#define mmMME2_CTRL_SHADOW_2_AGU_S_START_OFFSET_2 0x1609A4 + +#define mmMME2_CTRL_SHADOW_2_AGU_S_START_OFFSET_3 0x1609A8 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_0 0x1609AC + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_1 0x1609B0 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_2 0x1609B4 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_3 0x1609B8 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_4 0x1609BC + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_0 0x1609C0 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_1 0x1609C4 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_2 0x1609C8 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_3 0x1609CC + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_4 0x1609D0 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_0 0x1609D4 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_1 0x1609D8 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_2 0x1609DC + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_3 0x1609E0 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_0 0x1609E4 + 
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_1 0x1609E8 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_2 0x1609EC + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_3 0x1609F0 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1609F4 + +#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1609F8 + +#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1609FC + +#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160A00 + +#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160A04 + +#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160A08 + +#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_0 0x160A0C + +#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_1 0x160A10 + +#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_2 0x160A14 + +#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_3 0x160A18 + +#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x160A1C + +#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x160A20 + +#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x160A24 + +#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x160A28 + +#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x160A2C + +#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_0 0x160A30 + +#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_1 0x160A34 + +#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_2 0x160A38 + +#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_3 0x160A3C + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_0 0x160A40 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_1 0x160A44 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_2 0x160A48 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_3 0x160A4C + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_4 0x160A50 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_0 0x160A54 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_1 0x160A58 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_2 0x160A5C + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_3 0x160A60 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_4 0x160A64 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_0 0x160A68 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_1 0x160A6C + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_2 0x160A70 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_3 0x160A74 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_0 0x160A78 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_1 0x160A7C + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_2 0x160A80 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_3 0x160A84 + +#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160A88 + +#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x160A8C + +#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160A90 + +#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160A94 + +#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160A98 + +#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x160A9C + +#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_0 0x160AA0 + +#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_1 0x160AA4 + +#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_2 0x160AA8 + +#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_3 0x160AAC + +#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x160AB0 + +#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x160AB4 + 
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x160AB8 + +#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x160ABC + +#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x160AC0 + +#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_0 0x160AC4 + +#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_1 0x160AC8 + +#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_2 0x160ACC + +#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_3 0x160AD0 + +#define mmMME2_CTRL_SHADOW_2_DESC_SB_REPEAT 0x160AD4 + +#define mmMME2_CTRL_SHADOW_2_DESC_RATE_LIMITER 0x160AD8 + +#define mmMME2_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x160ADC + +#define mmMME2_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x160AE0 + +#define mmMME2_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_HIGH 0x160AE4 + +#define mmMME2_CTRL_SHADOW_2_DESC_SYNC_OBJECT_DATA 0x160AE8 + +#define mmMME2_CTRL_SHADOW_2_DESC_AXI_USER_DATA 0x160AEC + +#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_S 0x160AF0 + +#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_L_LOCAL 0x160AF4 + +#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_L_REMOTE 0x160AF8 + +#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_O_LOCAL 0x160AFC + +#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_O_REMOTE 0x160B00 + +#define mmMME2_CTRL_SHADOW_2_DESC_PADDING_VALUE_S 0x160B04 + +#define mmMME2_CTRL_SHADOW_2_DESC_PADDING_VALUE_L 0x160B08 + +#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_S 0x160B0C + +#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_LOCAL 0x160B10 + +#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_REMOTE 0x160B14 + +#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_LOCAL 0x160B18 + +#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_REMOTE 0x160B1C + +#define mmMME2_CTRL_SHADOW_2_DESC_PCU_RL_SATURATION 0x160B20 + +#define mmMME2_CTRL_SHADOW_2_DESC_DUMMY 0x160B24 + +#define mmMME2_CTRL_SHADOW_3_STATUS 0x160B80 + +#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_HIGH_S 0x160B88 + +#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_HIGH_L 0x160B8C + +#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_HIGH_O 0x160B90 + +#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_LOW_S 0x160B94 + +#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_LOW_L 0x160B98 + +#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_LOW_O 0x160B9C + +#define mmMME2_CTRL_SHADOW_3_HEADER_LOW 0x160BA0 + +#define mmMME2_CTRL_SHADOW_3_HEADER_HIGH 0x160BA4 + +#define mmMME2_CTRL_SHADOW_3_CONV_KERNEL_SIZE_MINUS_1 0x160BA8 + +#define mmMME2_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_LOW 0x160BAC + +#define mmMME2_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_HIGH 0x160BB0 + +#define mmMME2_CTRL_SHADOW_3_NUM_ITERATIONS_MINUS_1 0x160BB4 + +#define mmMME2_CTRL_SHADOW_3_OUTER_LOOP 0x160BB8 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_0 0x160BBC + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_1 0x160BC0 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_2 0x160BC4 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_3 0x160BC8 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_4 0x160BCC + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_0 0x160BD0 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_1 0x160BD4 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_2 0x160BD8 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_3 0x160BDC + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_4 0x160BE0 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_0 0x160BE4 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_1 0x160BE8 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_2 0x160BEC + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_3 0x160BF0 + +#define 
mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_0 0x160BF4 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_1 0x160BF8 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_2 0x160BFC + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_3 0x160C00 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160C04 + +#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_0 0x160C08 + +#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_1 0x160C0C + +#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_2 0x160C10 + +#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_3 0x160C14 + +#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_4 0x160C18 + +#define mmMME2_CTRL_SHADOW_3_AGU_S_START_OFFSET_0 0x160C1C + +#define mmMME2_CTRL_SHADOW_3_AGU_S_START_OFFSET_1 0x160C20 + +#define mmMME2_CTRL_SHADOW_3_AGU_S_START_OFFSET_2 0x160C24 + +#define mmMME2_CTRL_SHADOW_3_AGU_S_START_OFFSET_3 0x160C28 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_0 0x160C2C + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_1 0x160C30 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_2 0x160C34 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_3 0x160C38 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_4 0x160C3C + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_0 0x160C40 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_1 0x160C44 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_2 0x160C48 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_3 0x160C4C + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_4 0x160C50 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_0 0x160C54 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_1 0x160C58 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_2 0x160C5C + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_3 0x160C60 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_0 0x160C64 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_1 0x160C68 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_2 0x160C6C + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_3 0x160C70 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x160C74 + +#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x160C78 + +#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x160C7C + +#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160C80 + +#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160C84 + +#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160C88 + +#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_0 0x160C8C + +#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_1 0x160C90 + +#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_2 0x160C94 + +#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_3 0x160C98 + +#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x160C9C + +#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x160CA0 + +#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x160CA4 + +#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x160CA8 + +#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x160CAC + +#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_0 0x160CB0 + +#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_1 0x160CB4 + +#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_2 0x160CB8 + +#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_3 0x160CBC + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_0 0x160CC0 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_1 0x160CC4 + +#define 
mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_2 0x160CC8 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_3 0x160CCC + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_4 0x160CD0 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_0 0x160CD4 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_1 0x160CD8 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_2 0x160CDC + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_3 0x160CE0 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_4 0x160CE4 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_0 0x160CE8 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_1 0x160CEC + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_2 0x160CF0 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_3 0x160CF4 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_0 0x160CF8 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_1 0x160CFC + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_2 0x160D00 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_3 0x160D04 + +#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160D08 + +#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x160D0C + +#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160D10 + +#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160D14 + +#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160D18 + +#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x160D1C + +#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_0 0x160D20 + +#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_1 0x160D24 + +#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_2 0x160D28 + +#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_3 0x160D2C + +#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x160D30 + +#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x160D34 + +#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x160D38 + +#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x160D3C + +#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x160D40 + +#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_0 0x160D44 + +#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_1 0x160D48 + +#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_2 0x160D4C + +#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_3 0x160D50 + +#define mmMME2_CTRL_SHADOW_3_DESC_SB_REPEAT 0x160D54 + +#define mmMME2_CTRL_SHADOW_3_DESC_RATE_LIMITER 0x160D58 + +#define mmMME2_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x160D5C + +#define mmMME2_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x160D60 + +#define mmMME2_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_HIGH 0x160D64 + +#define mmMME2_CTRL_SHADOW_3_DESC_SYNC_OBJECT_DATA 0x160D68 + +#define mmMME2_CTRL_SHADOW_3_DESC_AXI_USER_DATA 0x160D6C + +#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_S 0x160D70 + +#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_L_LOCAL 0x160D74 + +#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_L_REMOTE 0x160D78 + +#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_O_LOCAL 0x160D7C + +#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_O_REMOTE 0x160D80 + +#define mmMME2_CTRL_SHADOW_3_DESC_PADDING_VALUE_S 0x160D84 + +#define mmMME2_CTRL_SHADOW_3_DESC_PADDING_VALUE_L 0x160D88 + +#define mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_S 0x160D8C + +#define mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_LOCAL 0x160D90 + +#define mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_REMOTE 0x160D94 + +#define mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_LOCAL 0x160D98 + +#define 
mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_REMOTE 0x160D9C + +#define mmMME2_CTRL_SHADOW_3_DESC_PCU_RL_SATURATION 0x160DA0 + +#define mmMME2_CTRL_SHADOW_3_DESC_DUMMY 0x160DA4 + +#endif /* ASIC_REG_MME2_CTRL_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h new file mode 100644 index 000000000..c1ea6a422 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME2_QM_REGS_H_ +#define ASIC_REG_MME2_QM_REGS_H_ + +/* + ***************************************** + * MME2_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmMME2_QM_GLBL_CFG0 0x168000 + +#define mmMME2_QM_GLBL_CFG1 0x168004 + +#define mmMME2_QM_GLBL_PROT 0x168008 + +#define mmMME2_QM_GLBL_ERR_CFG 0x16800C + +#define mmMME2_QM_GLBL_SECURE_PROPS_0 0x168010 + +#define mmMME2_QM_GLBL_SECURE_PROPS_1 0x168014 + +#define mmMME2_QM_GLBL_SECURE_PROPS_2 0x168018 + +#define mmMME2_QM_GLBL_SECURE_PROPS_3 0x16801C + +#define mmMME2_QM_GLBL_SECURE_PROPS_4 0x168020 + +#define mmMME2_QM_GLBL_NON_SECURE_PROPS_0 0x168024 + +#define mmMME2_QM_GLBL_NON_SECURE_PROPS_1 0x168028 + +#define mmMME2_QM_GLBL_NON_SECURE_PROPS_2 0x16802C + +#define mmMME2_QM_GLBL_NON_SECURE_PROPS_3 0x168030 + +#define mmMME2_QM_GLBL_NON_SECURE_PROPS_4 0x168034 + +#define mmMME2_QM_GLBL_STS0 0x168038 + +#define mmMME2_QM_GLBL_STS1_0 0x168040 + +#define mmMME2_QM_GLBL_STS1_1 0x168044 + +#define mmMME2_QM_GLBL_STS1_2 0x168048 + +#define mmMME2_QM_GLBL_STS1_3 0x16804C + +#define mmMME2_QM_GLBL_STS1_4 0x168050 + +#define mmMME2_QM_GLBL_MSG_EN_0 0x168054 + +#define mmMME2_QM_GLBL_MSG_EN_1 0x168058 + +#define mmMME2_QM_GLBL_MSG_EN_2 0x16805C + +#define mmMME2_QM_GLBL_MSG_EN_3 0x168060 + +#define mmMME2_QM_GLBL_MSG_EN_4 0x168068 + +#define mmMME2_QM_PQ_BASE_LO_0 0x168070 + +#define mmMME2_QM_PQ_BASE_LO_1 0x168074 + +#define mmMME2_QM_PQ_BASE_LO_2 0x168078 + +#define mmMME2_QM_PQ_BASE_LO_3 0x16807C + +#define mmMME2_QM_PQ_BASE_HI_0 0x168080 + +#define mmMME2_QM_PQ_BASE_HI_1 0x168084 + +#define mmMME2_QM_PQ_BASE_HI_2 0x168088 + +#define mmMME2_QM_PQ_BASE_HI_3 0x16808C + +#define mmMME2_QM_PQ_SIZE_0 0x168090 + +#define mmMME2_QM_PQ_SIZE_1 0x168094 + +#define mmMME2_QM_PQ_SIZE_2 0x168098 + +#define mmMME2_QM_PQ_SIZE_3 0x16809C + +#define mmMME2_QM_PQ_PI_0 0x1680A0 + +#define mmMME2_QM_PQ_PI_1 0x1680A4 + +#define mmMME2_QM_PQ_PI_2 0x1680A8 + +#define mmMME2_QM_PQ_PI_3 0x1680AC + +#define mmMME2_QM_PQ_CI_0 0x1680B0 + +#define mmMME2_QM_PQ_CI_1 0x1680B4 + +#define mmMME2_QM_PQ_CI_2 0x1680B8 + +#define mmMME2_QM_PQ_CI_3 0x1680BC + +#define mmMME2_QM_PQ_CFG0_0 0x1680C0 + +#define mmMME2_QM_PQ_CFG0_1 0x1680C4 + +#define mmMME2_QM_PQ_CFG0_2 0x1680C8 + +#define mmMME2_QM_PQ_CFG0_3 0x1680CC + +#define mmMME2_QM_PQ_CFG1_0 0x1680D0 + +#define mmMME2_QM_PQ_CFG1_1 0x1680D4 + +#define mmMME2_QM_PQ_CFG1_2 0x1680D8 + +#define mmMME2_QM_PQ_CFG1_3 0x1680DC + +#define mmMME2_QM_PQ_ARUSER_31_11_0 0x1680E0 + +#define mmMME2_QM_PQ_ARUSER_31_11_1 0x1680E4 + +#define mmMME2_QM_PQ_ARUSER_31_11_2 0x1680E8 + +#define mmMME2_QM_PQ_ARUSER_31_11_3 0x1680EC + +#define mmMME2_QM_PQ_STS0_0 0x1680F0 + +#define mmMME2_QM_PQ_STS0_1 0x1680F4 + +#define mmMME2_QM_PQ_STS0_2 
0x1680F8 + +#define mmMME2_QM_PQ_STS0_3 0x1680FC + +#define mmMME2_QM_PQ_STS1_0 0x168100 + +#define mmMME2_QM_PQ_STS1_1 0x168104 + +#define mmMME2_QM_PQ_STS1_2 0x168108 + +#define mmMME2_QM_PQ_STS1_3 0x16810C + +#define mmMME2_QM_CQ_CFG0_0 0x168110 + +#define mmMME2_QM_CQ_CFG0_1 0x168114 + +#define mmMME2_QM_CQ_CFG0_2 0x168118 + +#define mmMME2_QM_CQ_CFG0_3 0x16811C + +#define mmMME2_QM_CQ_CFG0_4 0x168120 + +#define mmMME2_QM_CQ_CFG1_0 0x168124 + +#define mmMME2_QM_CQ_CFG1_1 0x168128 + +#define mmMME2_QM_CQ_CFG1_2 0x16812C + +#define mmMME2_QM_CQ_CFG1_3 0x168130 + +#define mmMME2_QM_CQ_CFG1_4 0x168134 + +#define mmMME2_QM_CQ_ARUSER_31_11_0 0x168138 + +#define mmMME2_QM_CQ_ARUSER_31_11_1 0x16813C + +#define mmMME2_QM_CQ_ARUSER_31_11_2 0x168140 + +#define mmMME2_QM_CQ_ARUSER_31_11_3 0x168144 + +#define mmMME2_QM_CQ_ARUSER_31_11_4 0x168148 + +#define mmMME2_QM_CQ_STS0_0 0x16814C + +#define mmMME2_QM_CQ_STS0_1 0x168150 + +#define mmMME2_QM_CQ_STS0_2 0x168154 + +#define mmMME2_QM_CQ_STS0_3 0x168158 + +#define mmMME2_QM_CQ_STS0_4 0x16815C + +#define mmMME2_QM_CQ_STS1_0 0x168160 + +#define mmMME2_QM_CQ_STS1_1 0x168164 + +#define mmMME2_QM_CQ_STS1_2 0x168168 + +#define mmMME2_QM_CQ_STS1_3 0x16816C + +#define mmMME2_QM_CQ_STS1_4 0x168170 + +#define mmMME2_QM_CQ_PTR_LO_0 0x168174 + +#define mmMME2_QM_CQ_PTR_HI_0 0x168178 + +#define mmMME2_QM_CQ_TSIZE_0 0x16817C + +#define mmMME2_QM_CQ_CTL_0 0x168180 + +#define mmMME2_QM_CQ_PTR_LO_1 0x168184 + +#define mmMME2_QM_CQ_PTR_HI_1 0x168188 + +#define mmMME2_QM_CQ_TSIZE_1 0x16818C + +#define mmMME2_QM_CQ_CTL_1 0x168190 + +#define mmMME2_QM_CQ_PTR_LO_2 0x168194 + +#define mmMME2_QM_CQ_PTR_HI_2 0x168198 + +#define mmMME2_QM_CQ_TSIZE_2 0x16819C + +#define mmMME2_QM_CQ_CTL_2 0x1681A0 + +#define mmMME2_QM_CQ_PTR_LO_3 0x1681A4 + +#define mmMME2_QM_CQ_PTR_HI_3 0x1681A8 + +#define mmMME2_QM_CQ_TSIZE_3 0x1681AC + +#define mmMME2_QM_CQ_CTL_3 0x1681B0 + +#define mmMME2_QM_CQ_PTR_LO_4 0x1681B4 + +#define mmMME2_QM_CQ_PTR_HI_4 0x1681B8 + +#define mmMME2_QM_CQ_TSIZE_4 0x1681BC + +#define mmMME2_QM_CQ_CTL_4 0x1681C0 + +#define mmMME2_QM_CQ_PTR_LO_STS_0 0x1681C4 + +#define mmMME2_QM_CQ_PTR_LO_STS_1 0x1681C8 + +#define mmMME2_QM_CQ_PTR_LO_STS_2 0x1681CC + +#define mmMME2_QM_CQ_PTR_LO_STS_3 0x1681D0 + +#define mmMME2_QM_CQ_PTR_LO_STS_4 0x1681D4 + +#define mmMME2_QM_CQ_PTR_HI_STS_0 0x1681D8 + +#define mmMME2_QM_CQ_PTR_HI_STS_1 0x1681DC + +#define mmMME2_QM_CQ_PTR_HI_STS_2 0x1681E0 + +#define mmMME2_QM_CQ_PTR_HI_STS_3 0x1681E4 + +#define mmMME2_QM_CQ_PTR_HI_STS_4 0x1681E8 + +#define mmMME2_QM_CQ_TSIZE_STS_0 0x1681EC + +#define mmMME2_QM_CQ_TSIZE_STS_1 0x1681F0 + +#define mmMME2_QM_CQ_TSIZE_STS_2 0x1681F4 + +#define mmMME2_QM_CQ_TSIZE_STS_3 0x1681F8 + +#define mmMME2_QM_CQ_TSIZE_STS_4 0x1681FC + +#define mmMME2_QM_CQ_CTL_STS_0 0x168200 + +#define mmMME2_QM_CQ_CTL_STS_1 0x168204 + +#define mmMME2_QM_CQ_CTL_STS_2 0x168208 + +#define mmMME2_QM_CQ_CTL_STS_3 0x16820C + +#define mmMME2_QM_CQ_CTL_STS_4 0x168210 + +#define mmMME2_QM_CQ_IFIFO_CNT_0 0x168214 + +#define mmMME2_QM_CQ_IFIFO_CNT_1 0x168218 + +#define mmMME2_QM_CQ_IFIFO_CNT_2 0x16821C + +#define mmMME2_QM_CQ_IFIFO_CNT_3 0x168220 + +#define mmMME2_QM_CQ_IFIFO_CNT_4 0x168224 + +#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_0 0x168228 + +#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_1 0x16822C + +#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_2 0x168230 + +#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_3 0x168234 + +#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_4 0x168238 + +#define mmMME2_QM_CP_MSG_BASE0_ADDR_HI_0 0x16823C + +#define 
mmMME2_QM_CP_MSG_BASE0_ADDR_HI_1 0x168240 + +#define mmMME2_QM_CP_MSG_BASE0_ADDR_HI_2 0x168244 + +#define mmMME2_QM_CP_MSG_BASE0_ADDR_HI_3 0x168248 + +#define mmMME2_QM_CP_MSG_BASE0_ADDR_HI_4 0x16824C + +#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_0 0x168250 + +#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_1 0x168254 + +#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_2 0x168258 + +#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_3 0x16825C + +#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_4 0x168260 + +#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_0 0x168264 + +#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_1 0x168268 + +#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_2 0x16826C + +#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_3 0x168270 + +#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_4 0x168274 + +#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_0 0x168278 + +#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_1 0x16827C + +#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 0x168280 + +#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_3 0x168284 + +#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_4 0x168288 + +#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_0 0x16828C + +#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_1 0x168290 + +#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_2 0x168294 + +#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_3 0x168298 + +#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_4 0x16829C + +#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_0 0x1682A0 + +#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_1 0x1682A4 + +#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_2 0x1682A8 + +#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_3 0x1682AC + +#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_4 0x1682B0 + +#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_0 0x1682B4 + +#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_1 0x1682B8 + +#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_2 0x1682BC + +#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_3 0x1682C0 + +#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_4 0x1682C4 + +#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_0 0x1682C8 + +#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_1 0x1682CC + +#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_2 0x1682D0 + +#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_3 0x1682D4 + +#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_4 0x1682D8 + +#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x1682E0 + +#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x1682E4 + +#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x1682E8 + +#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x1682EC + +#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x1682F0 + +#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x1682F4 + +#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x1682F8 + +#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x1682FC + +#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x168300 + +#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x168304 + +#define mmMME2_QM_CP_FENCE0_RDATA_0 0x168308 + +#define mmMME2_QM_CP_FENCE0_RDATA_1 0x16830C + +#define mmMME2_QM_CP_FENCE0_RDATA_2 0x168310 + +#define mmMME2_QM_CP_FENCE0_RDATA_3 0x168314 + +#define mmMME2_QM_CP_FENCE0_RDATA_4 0x168318 + +#define mmMME2_QM_CP_FENCE1_RDATA_0 0x16831C + +#define mmMME2_QM_CP_FENCE1_RDATA_1 0x168320 + +#define mmMME2_QM_CP_FENCE1_RDATA_2 0x168324 + +#define mmMME2_QM_CP_FENCE1_RDATA_3 0x168328 + +#define mmMME2_QM_CP_FENCE1_RDATA_4 0x16832C + +#define mmMME2_QM_CP_FENCE2_RDATA_0 0x168330 + +#define mmMME2_QM_CP_FENCE2_RDATA_1 0x168334 + +#define mmMME2_QM_CP_FENCE2_RDATA_2 0x168338 + +#define mmMME2_QM_CP_FENCE2_RDATA_3 0x16833C + +#define mmMME2_QM_CP_FENCE2_RDATA_4 0x168340 + +#define mmMME2_QM_CP_FENCE3_RDATA_0 0x168344 + +#define mmMME2_QM_CP_FENCE3_RDATA_1 0x168348 + +#define mmMME2_QM_CP_FENCE3_RDATA_2 0x16834C + +#define mmMME2_QM_CP_FENCE3_RDATA_3 
0x168350 + +#define mmMME2_QM_CP_FENCE3_RDATA_4 0x168354 + +#define mmMME2_QM_CP_FENCE0_CNT_0 0x168358 + +#define mmMME2_QM_CP_FENCE0_CNT_1 0x16835C + +#define mmMME2_QM_CP_FENCE0_CNT_2 0x168360 + +#define mmMME2_QM_CP_FENCE0_CNT_3 0x168364 + +#define mmMME2_QM_CP_FENCE0_CNT_4 0x168368 + +#define mmMME2_QM_CP_FENCE1_CNT_0 0x16836C + +#define mmMME2_QM_CP_FENCE1_CNT_1 0x168370 + +#define mmMME2_QM_CP_FENCE1_CNT_2 0x168374 + +#define mmMME2_QM_CP_FENCE1_CNT_3 0x168378 + +#define mmMME2_QM_CP_FENCE1_CNT_4 0x16837C + +#define mmMME2_QM_CP_FENCE2_CNT_0 0x168380 + +#define mmMME2_QM_CP_FENCE2_CNT_1 0x168384 + +#define mmMME2_QM_CP_FENCE2_CNT_2 0x168388 + +#define mmMME2_QM_CP_FENCE2_CNT_3 0x16838C + +#define mmMME2_QM_CP_FENCE2_CNT_4 0x168390 + +#define mmMME2_QM_CP_FENCE3_CNT_0 0x168394 + +#define mmMME2_QM_CP_FENCE3_CNT_1 0x168398 + +#define mmMME2_QM_CP_FENCE3_CNT_2 0x16839C + +#define mmMME2_QM_CP_FENCE3_CNT_3 0x1683A0 + +#define mmMME2_QM_CP_FENCE3_CNT_4 0x1683A4 + +#define mmMME2_QM_CP_STS_0 0x1683A8 + +#define mmMME2_QM_CP_STS_1 0x1683AC + +#define mmMME2_QM_CP_STS_2 0x1683B0 + +#define mmMME2_QM_CP_STS_3 0x1683B4 + +#define mmMME2_QM_CP_STS_4 0x1683B8 + +#define mmMME2_QM_CP_CURRENT_INST_LO_0 0x1683BC + +#define mmMME2_QM_CP_CURRENT_INST_LO_1 0x1683C0 + +#define mmMME2_QM_CP_CURRENT_INST_LO_2 0x1683C4 + +#define mmMME2_QM_CP_CURRENT_INST_LO_3 0x1683C8 + +#define mmMME2_QM_CP_CURRENT_INST_LO_4 0x1683CC + +#define mmMME2_QM_CP_CURRENT_INST_HI_0 0x1683D0 + +#define mmMME2_QM_CP_CURRENT_INST_HI_1 0x1683D4 + +#define mmMME2_QM_CP_CURRENT_INST_HI_2 0x1683D8 + +#define mmMME2_QM_CP_CURRENT_INST_HI_3 0x1683DC + +#define mmMME2_QM_CP_CURRENT_INST_HI_4 0x1683E0 + +#define mmMME2_QM_CP_BARRIER_CFG_0 0x1683F4 + +#define mmMME2_QM_CP_BARRIER_CFG_1 0x1683F8 + +#define mmMME2_QM_CP_BARRIER_CFG_2 0x1683FC + +#define mmMME2_QM_CP_BARRIER_CFG_3 0x168400 + +#define mmMME2_QM_CP_BARRIER_CFG_4 0x168404 + +#define mmMME2_QM_CP_DBG_0_0 0x168408 + +#define mmMME2_QM_CP_DBG_0_1 0x16840C + +#define mmMME2_QM_CP_DBG_0_2 0x168410 + +#define mmMME2_QM_CP_DBG_0_3 0x168414 + +#define mmMME2_QM_CP_DBG_0_4 0x168418 + +#define mmMME2_QM_CP_ARUSER_31_11_0 0x16841C + +#define mmMME2_QM_CP_ARUSER_31_11_1 0x168420 + +#define mmMME2_QM_CP_ARUSER_31_11_2 0x168424 + +#define mmMME2_QM_CP_ARUSER_31_11_3 0x168428 + +#define mmMME2_QM_CP_ARUSER_31_11_4 0x16842C + +#define mmMME2_QM_CP_AWUSER_31_11_0 0x168430 + +#define mmMME2_QM_CP_AWUSER_31_11_1 0x168434 + +#define mmMME2_QM_CP_AWUSER_31_11_2 0x168438 + +#define mmMME2_QM_CP_AWUSER_31_11_3 0x16843C + +#define mmMME2_QM_CP_AWUSER_31_11_4 0x168440 + +#define mmMME2_QM_ARB_CFG_0 0x168A00 + +#define mmMME2_QM_ARB_CHOISE_Q_PUSH 0x168A04 + +#define mmMME2_QM_ARB_WRR_WEIGHT_0 0x168A08 + +#define mmMME2_QM_ARB_WRR_WEIGHT_1 0x168A0C + +#define mmMME2_QM_ARB_WRR_WEIGHT_2 0x168A10 + +#define mmMME2_QM_ARB_WRR_WEIGHT_3 0x168A14 + +#define mmMME2_QM_ARB_CFG_1 0x168A18 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_0 0x168A20 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_1 0x168A24 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_2 0x168A28 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_3 0x168A2C + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_4 0x168A30 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_5 0x168A34 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_6 0x168A38 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_7 0x168A3C + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_8 0x168A40 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_9 0x168A44 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_10 0x168A48 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_11 0x168A4C + +#define 
mmMME2_QM_ARB_MST_AVAIL_CRED_12 0x168A50 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_13 0x168A54 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_14 0x168A58 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_15 0x168A5C + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_16 0x168A60 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_17 0x168A64 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_18 0x168A68 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_19 0x168A6C + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_20 0x168A70 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_21 0x168A74 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_22 0x168A78 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_23 0x168A7C + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_24 0x168A80 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_25 0x168A84 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_26 0x168A88 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_27 0x168A8C + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_28 0x168A90 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_29 0x168A94 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_30 0x168A98 + +#define mmMME2_QM_ARB_MST_AVAIL_CRED_31 0x168A9C + +#define mmMME2_QM_ARB_MST_CRED_INC 0x168AA0 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x168AA4 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x168AA8 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x168AAC + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x168AB0 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x168AB4 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x168AB8 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x168ABC + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x168AC0 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x168AC4 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x168AC8 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x168ACC + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x168AD0 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x168AD4 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x168AD8 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x168ADC + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x168AE0 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x168AE4 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x168AE8 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x168AEC + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x168AF0 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x168AF4 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x168AF8 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x168AFC + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x168B00 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x168B04 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x168B08 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x168B0C + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x168B10 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x168B14 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x168B18 + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x168B1C + +#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x168B20 + +#define mmMME2_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x168B28 + +#define mmMME2_QM_ARB_MST_SLAVE_EN 0x168B2C + +#define mmMME2_QM_ARB_MST_QUIET_PER 0x168B34 + +#define mmMME2_QM_ARB_SLV_CHOISE_WDT 0x168B38 + +#define mmMME2_QM_ARB_SLV_ID 0x168B3C + +#define mmMME2_QM_ARB_MSG_MAX_INFLIGHT 0x168B44 + +#define mmMME2_QM_ARB_MSG_AWUSER_31_11 0x168B48 + +#define mmMME2_QM_ARB_MSG_AWUSER_SEC_PROP 0x168B4C + +#define mmMME2_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x168B50 + +#define mmMME2_QM_ARB_BASE_LO 0x168B54 + +#define mmMME2_QM_ARB_BASE_HI 0x168B58 + +#define mmMME2_QM_ARB_STATE_STS 0x168B80 + +#define mmMME2_QM_ARB_CHOISE_FULLNESS_STS 
0x168B84 + +#define mmMME2_QM_ARB_MSG_STS 0x168B88 + +#define mmMME2_QM_ARB_SLV_CHOISE_Q_HEAD 0x168B8C + +#define mmMME2_QM_ARB_ERR_CAUSE 0x168B9C + +#define mmMME2_QM_ARB_ERR_MSG_EN 0x168BA0 + +#define mmMME2_QM_ARB_ERR_STS_DRP 0x168BA8 + +#define mmMME2_QM_ARB_MST_CRED_STS_0 0x168BB0 + +#define mmMME2_QM_ARB_MST_CRED_STS_1 0x168BB4 + +#define mmMME2_QM_ARB_MST_CRED_STS_2 0x168BB8 + +#define mmMME2_QM_ARB_MST_CRED_STS_3 0x168BBC + +#define mmMME2_QM_ARB_MST_CRED_STS_4 0x168BC0 + +#define mmMME2_QM_ARB_MST_CRED_STS_5 0x168BC4 + +#define mmMME2_QM_ARB_MST_CRED_STS_6 0x168BC8 + +#define mmMME2_QM_ARB_MST_CRED_STS_7 0x168BCC + +#define mmMME2_QM_ARB_MST_CRED_STS_8 0x168BD0 + +#define mmMME2_QM_ARB_MST_CRED_STS_9 0x168BD4 + +#define mmMME2_QM_ARB_MST_CRED_STS_10 0x168BD8 + +#define mmMME2_QM_ARB_MST_CRED_STS_11 0x168BDC + +#define mmMME2_QM_ARB_MST_CRED_STS_12 0x168BE0 + +#define mmMME2_QM_ARB_MST_CRED_STS_13 0x168BE4 + +#define mmMME2_QM_ARB_MST_CRED_STS_14 0x168BE8 + +#define mmMME2_QM_ARB_MST_CRED_STS_15 0x168BEC + +#define mmMME2_QM_ARB_MST_CRED_STS_16 0x168BF0 + +#define mmMME2_QM_ARB_MST_CRED_STS_17 0x168BF4 + +#define mmMME2_QM_ARB_MST_CRED_STS_18 0x168BF8 + +#define mmMME2_QM_ARB_MST_CRED_STS_19 0x168BFC + +#define mmMME2_QM_ARB_MST_CRED_STS_20 0x168C00 + +#define mmMME2_QM_ARB_MST_CRED_STS_21 0x168C04 + +#define mmMME2_QM_ARB_MST_CRED_STS_22 0x168C08 + +#define mmMME2_QM_ARB_MST_CRED_STS_23 0x168C0C + +#define mmMME2_QM_ARB_MST_CRED_STS_24 0x168C10 + +#define mmMME2_QM_ARB_MST_CRED_STS_25 0x168C14 + +#define mmMME2_QM_ARB_MST_CRED_STS_26 0x168C18 + +#define mmMME2_QM_ARB_MST_CRED_STS_27 0x168C1C + +#define mmMME2_QM_ARB_MST_CRED_STS_28 0x168C20 + +#define mmMME2_QM_ARB_MST_CRED_STS_29 0x168C24 + +#define mmMME2_QM_ARB_MST_CRED_STS_30 0x168C28 + +#define mmMME2_QM_ARB_MST_CRED_STS_31 0x168C2C + +#define mmMME2_QM_CGM_CFG 0x168C70 + +#define mmMME2_QM_CGM_STS 0x168C74 + +#define mmMME2_QM_CGM_CFG1 0x168C78 + +#define mmMME2_QM_LOCAL_RANGE_BASE 0x168C80 + +#define mmMME2_QM_LOCAL_RANGE_SIZE 0x168C84 + +#define mmMME2_QM_CSMR_STRICT_PRIO_CFG 0x168C90 + +#define mmMME2_QM_HBW_RD_RATE_LIM_CFG_1 0x168C94 + +#define mmMME2_QM_LBW_WR_RATE_LIM_CFG_0 0x168C98 + +#define mmMME2_QM_LBW_WR_RATE_LIM_CFG_1 0x168C9C + +#define mmMME2_QM_HBW_RD_RATE_LIM_CFG_0 0x168CA0 + +#define mmMME2_QM_GLBL_AXCACHE 0x168CA4 + +#define mmMME2_QM_IND_GW_APB_CFG 0x168CB0 + +#define mmMME2_QM_IND_GW_APB_WDATA 0x168CB4 + +#define mmMME2_QM_IND_GW_APB_RDATA 0x168CB8 + +#define mmMME2_QM_IND_GW_APB_STATUS 0x168CBC + +#define mmMME2_QM_GLBL_ERR_ADDR_LO 0x168CD0 + +#define mmMME2_QM_GLBL_ERR_ADDR_HI 0x168CD4 + +#define mmMME2_QM_GLBL_ERR_WDATA 0x168CD8 + +#define mmMME2_QM_GLBL_MEM_INIT_BUSY 0x168D00 + +#endif /* ASIC_REG_MME2_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h new file mode 100644 index 000000000..36f6edc72 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h @@ -0,0 +1,1456 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME3_CTRL_REGS_H_ +#define ASIC_REG_MME3_CTRL_REGS_H_ + +/* + ***************************************** + * MME3_CTRL (Prototype: MME) + ***************************************** + */ + +#define mmMME3_CTRL_ARCH_STATUS 0x1E0000 + +#define mmMME3_CTRL_ARCH_BASE_ADDR_HIGH_S 0x1E0008 + +#define mmMME3_CTRL_ARCH_BASE_ADDR_HIGH_L 0x1E000C + +#define mmMME3_CTRL_ARCH_BASE_ADDR_HIGH_O 0x1E0010 + +#define mmMME3_CTRL_ARCH_BASE_ADDR_LOW_S 0x1E0014 + +#define mmMME3_CTRL_ARCH_BASE_ADDR_LOW_L 0x1E0018 + +#define mmMME3_CTRL_ARCH_BASE_ADDR_LOW_O 0x1E001C + +#define mmMME3_CTRL_ARCH_HEADER_LOW 0x1E0020 + +#define mmMME3_CTRL_ARCH_HEADER_HIGH 0x1E0024 + +#define mmMME3_CTRL_ARCH_CONV_KERNEL_SIZE_MINUS_1 0x1E0028 + +#define mmMME3_CTRL_ARCH_CONV_ASSOCIATED_DIMS_LOW 0x1E002C + +#define mmMME3_CTRL_ARCH_CONV_ASSOCIATED_DIMS_HIGH 0x1E0030 + +#define mmMME3_CTRL_ARCH_NUM_ITERATIONS_MINUS_1 0x1E0034 + +#define mmMME3_CTRL_ARCH_OUTER_LOOP 0x1E0038 + +#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_0 0x1E003C + +#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_1 0x1E0040 + +#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_2 0x1E0044 + +#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_3 0x1E0048 + +#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_4 0x1E004C + +#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_0 0x1E0050 + +#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_1 0x1E0054 + +#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_2 0x1E0058 + +#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_3 0x1E005C + +#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_4 0x1E0060 + +#define mmMME3_CTRL_ARCH_TENSOR_S_ROI_SIZE_0 0x1E0064 + +#define mmMME3_CTRL_ARCH_TENSOR_S_ROI_SIZE_1 0x1E0068 + +#define mmMME3_CTRL_ARCH_TENSOR_S_ROI_SIZE_2 0x1E006C + +#define mmMME3_CTRL_ARCH_TENSOR_S_ROI_SIZE_3 0x1E0070 + +#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_0 0x1E0074 + +#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_1 0x1E0078 + +#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_2 0x1E007C + +#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_3 0x1E0080 + +#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0084 + +#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_0 0x1E0088 + +#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_1 0x1E008C + +#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_2 0x1E0090 + +#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_3 0x1E0094 + +#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_4 0x1E0098 + +#define mmMME3_CTRL_ARCH_AGU_S_START_OFFSET_0 0x1E009C + +#define mmMME3_CTRL_ARCH_AGU_S_START_OFFSET_1 0x1E00A0 + +#define mmMME3_CTRL_ARCH_AGU_S_START_OFFSET_2 0x1E00A4 + +#define mmMME3_CTRL_ARCH_AGU_S_START_OFFSET_3 0x1E00A8 + +#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_0 0x1E00AC + +#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_1 0x1E00B0 + +#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_2 0x1E00B4 + +#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_3 0x1E00B8 + +#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_4 0x1E00BC + +#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_0 0x1E00C0 + +#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_1 0x1E00C4 + +#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_2 0x1E00C8 + +#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_3 0x1E00CC + +#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_4 0x1E00D0 + +#define mmMME3_CTRL_ARCH_TENSOR_L_ROI_SIZE_0 0x1E00D4 + +#define mmMME3_CTRL_ARCH_TENSOR_L_ROI_SIZE_1 0x1E00D8 + 
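(Illustrative sketch, not part of the auto-generated header: the per-dimension register arrays above, e.g. mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_0..4, sit at consecutive 4-byte offsets, so element i of such an array is reachable from the _0 define alone. The helper name below is hypothetical; u32 comes from <linux/types.h>.)

#include <linux/types.h>

/* Hypothetical helper, illustrative only: dims 0..4 are 4 bytes apart
 * (0x1E0050, 0x1E0054, ..., 0x1E0060 per the defines above).
 */
static inline u32 mme3_tensor_s_loop_stride_reg(unsigned int dim)
{
	return mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_0 + dim * 4;
}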
+#define mmMME3_CTRL_ARCH_TENSOR_L_ROI_SIZE_2 0x1E00DC + +#define mmMME3_CTRL_ARCH_TENSOR_L_ROI_SIZE_3 0x1E00E0 + +#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_0 0x1E00E4 + +#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_1 0x1E00E8 + +#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_2 0x1E00EC + +#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_3 0x1E00F0 + +#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E00F4 + +#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E00F8 + +#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E00FC + +#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0100 + +#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0104 + +#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0108 + +#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_0 0x1E010C + +#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_1 0x1E0110 + +#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_2 0x1E0114 + +#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_3 0x1E0118 + +#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E011C + +#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E0120 + +#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E0124 + +#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E0128 + +#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E012C + +#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_0 0x1E0130 + +#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_1 0x1E0134 + +#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_2 0x1E0138 + +#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_3 0x1E013C + +#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_0 0x1E0140 + +#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_1 0x1E0144 + +#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_2 0x1E0148 + +#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_3 0x1E014C + +#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_4 0x1E0150 + +#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_0 0x1E0154 + +#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_1 0x1E0158 + +#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_2 0x1E015C + +#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_3 0x1E0160 + +#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_4 0x1E0164 + +#define mmMME3_CTRL_ARCH_TENSOR_O_ROI_SIZE_0 0x1E0168 + +#define mmMME3_CTRL_ARCH_TENSOR_O_ROI_SIZE_1 0x1E016C + +#define mmMME3_CTRL_ARCH_TENSOR_O_ROI_SIZE_2 0x1E0170 + +#define mmMME3_CTRL_ARCH_TENSOR_O_ROI_SIZE_3 0x1E0174 + +#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_0 0x1E0178 + +#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_1 0x1E017C + +#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_2 0x1E0180 + +#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_3 0x1E0184 + +#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0188 + +#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E018C + +#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0190 + +#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0194 + +#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0198 + +#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E019C + +#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_0 0x1E01A0 + +#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_1 0x1E01A4 + +#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_2 0x1E01A8 + +#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_3 0x1E01AC + +#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E01B0 + +#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E01B4 + +#define 
mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E01B8 + +#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E01BC + +#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E01C0 + +#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_0 0x1E01C4 + +#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_1 0x1E01C8 + +#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_2 0x1E01CC + +#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_3 0x1E01D0 + +#define mmMME3_CTRL_ARCH_DESC_SB_REPEAT 0x1E01D4 + +#define mmMME3_CTRL_ARCH_DESC_RATE_LIMITER 0x1E01D8 + +#define mmMME3_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E01DC + +#define mmMME3_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E01E0 + +#define mmMME3_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E01E4 + +#define mmMME3_CTRL_ARCH_DESC_SYNC_OBJECT_DATA 0x1E01E8 + +#define mmMME3_CTRL_ARCH_DESC_AXI_USER_DATA 0x1E01EC + +#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_S 0x1E01F0 + +#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_L_LOCAL 0x1E01F4 + +#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_L_REMOTE 0x1E01F8 + +#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_O_LOCAL 0x1E01FC + +#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_O_REMOTE 0x1E0200 + +#define mmMME3_CTRL_ARCH_DESC_PADDING_VALUE_S 0x1E0204 + +#define mmMME3_CTRL_ARCH_DESC_PADDING_VALUE_L 0x1E0208 + +#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_S 0x1E020C + +#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_L_LOCAL 0x1E0210 + +#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_L_REMOTE 0x1E0214 + +#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_O_LOCAL 0x1E0218 + +#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_O_REMOTE 0x1E021C + +#define mmMME3_CTRL_ARCH_DESC_PCU_RL_SATURATION 0x1E0220 + +#define mmMME3_CTRL_ARCH_DESC_DUMMY 0x1E0224 + +#define mmMME3_CTRL_CMD 0x1E0280 + +#define mmMME3_CTRL_STATUS1 0x1E0284 + +#define mmMME3_CTRL_RESET 0x1E0288 + +#define mmMME3_CTRL_QM_STALL 0x1E028C + +#define mmMME3_CTRL_SYNC_OBJECT_FIFO_TH 0x1E0290 + +#define mmMME3_CTRL_EUS_ROLLUP_CNT_ADD 0x1E0294 + +#define mmMME3_CTRL_INTR_CAUSE 0x1E0298 + +#define mmMME3_CTRL_INTR_MASK 0x1E029C + +#define mmMME3_CTRL_LOG_SHADOW 0x1E02A0 + +#define mmMME3_CTRL_PCU_RL_DESC0 0x1E02A4 + +#define mmMME3_CTRL_PCU_RL_TOKEN_UPDATE 0x1E02A8 + +#define mmMME3_CTRL_PCU_RL_TH 0x1E02AC + +#define mmMME3_CTRL_PCU_RL_MIN 0x1E02B0 + +#define mmMME3_CTRL_PCU_RL_CTRL_EN 0x1E02B4 + +#define mmMME3_CTRL_PCU_RL_HISTORY_LOG_SIZE 0x1E02B8 + +#define mmMME3_CTRL_PCU_DUMMY_A_BF16 0x1E02BC + +#define mmMME3_CTRL_PCU_DUMMY_B_BF16 0x1E02C0 + +#define mmMME3_CTRL_PCU_DUMMY_A_FP32_ODD 0x1E02C4 + +#define mmMME3_CTRL_PCU_DUMMY_A_FP32_EVEN 0x1E02C8 + +#define mmMME3_CTRL_PCU_DUMMY_B_FP32_ODD 0x1E02CC + +#define mmMME3_CTRL_PCU_DUMMY_B_FP32_EVEN 0x1E02D0 + +#define mmMME3_CTRL_PROT 0x1E02D4 + +#define mmMME3_CTRL_EU_POWER_SAVE_DISABLE 0x1E02D8 + +#define mmMME3_CTRL_CS_DBG_BLOCK_ID 0x1E02DC + +#define mmMME3_CTRL_CS_DBG_STATUS_DROP_CNT 0x1E02E0 + +#define mmMME3_CTRL_TE_CLOSE_CGATE 0x1E02E4 + +#define mmMME3_CTRL_AGU_SM_INFLIGHT_CNTR 0x1E02E8 + +#define mmMME3_CTRL_AGU_SM_TOTAL_CNTR 0x1E02EC + +#define mmMME3_CTRL_EZSYNC_OUT_CREDIT 0x1E02F0 + +#define mmMME3_CTRL_PCU_RL_SAT_SEC 0x1E02F4 + +#define mmMME3_CTRL_AGU_SYNC_MSG_AXI_USER 0x1E02F8 + +#define mmMME3_CTRL_QM_SLV_LBW_CLK_EN 0x1E02FC + +#define mmMME3_CTRL_SHADOW_0_STATUS 0x1E0400 + +#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_HIGH_S 0x1E0408 + +#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_HIGH_L 0x1E040C + +#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_HIGH_O 0x1E0410 + +#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_LOW_S 0x1E0414 + +#define 
mmMME3_CTRL_SHADOW_0_BASE_ADDR_LOW_L 0x1E0418 + +#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_LOW_O 0x1E041C + +#define mmMME3_CTRL_SHADOW_0_HEADER_LOW 0x1E0420 + +#define mmMME3_CTRL_SHADOW_0_HEADER_HIGH 0x1E0424 + +#define mmMME3_CTRL_SHADOW_0_CONV_KERNEL_SIZE_MINUS_1 0x1E0428 + +#define mmMME3_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_LOW 0x1E042C + +#define mmMME3_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_HIGH 0x1E0430 + +#define mmMME3_CTRL_SHADOW_0_NUM_ITERATIONS_MINUS_1 0x1E0434 + +#define mmMME3_CTRL_SHADOW_0_OUTER_LOOP 0x1E0438 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_0 0x1E043C + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_1 0x1E0440 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_2 0x1E0444 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_3 0x1E0448 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_4 0x1E044C + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_0 0x1E0450 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_1 0x1E0454 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_2 0x1E0458 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_3 0x1E045C + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_4 0x1E0460 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_0 0x1E0464 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_1 0x1E0468 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_2 0x1E046C + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_3 0x1E0470 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_0 0x1E0474 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_1 0x1E0478 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_2 0x1E047C + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_3 0x1E0480 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0484 + +#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_0 0x1E0488 + +#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_1 0x1E048C + +#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_2 0x1E0490 + +#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_3 0x1E0494 + +#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_4 0x1E0498 + +#define mmMME3_CTRL_SHADOW_0_AGU_S_START_OFFSET_0 0x1E049C + +#define mmMME3_CTRL_SHADOW_0_AGU_S_START_OFFSET_1 0x1E04A0 + +#define mmMME3_CTRL_SHADOW_0_AGU_S_START_OFFSET_2 0x1E04A4 + +#define mmMME3_CTRL_SHADOW_0_AGU_S_START_OFFSET_3 0x1E04A8 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_0 0x1E04AC + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_1 0x1E04B0 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_2 0x1E04B4 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_3 0x1E04B8 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_4 0x1E04BC + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_0 0x1E04C0 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_1 0x1E04C4 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_2 0x1E04C8 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_3 0x1E04CC + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_4 0x1E04D0 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_0 0x1E04D4 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_1 0x1E04D8 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_2 0x1E04DC + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_3 0x1E04E0 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_0 0x1E04E4 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_1 0x1E04E8 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_2 0x1E04EC + +#define mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_3 0x1E04F0 + +#define 
mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E04F4 + +#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E04F8 + +#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E04FC + +#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0500 + +#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0504 + +#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0508 + +#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_0 0x1E050C + +#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_1 0x1E0510 + +#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_2 0x1E0514 + +#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_3 0x1E0518 + +#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E051C + +#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E0520 + +#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E0524 + +#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E0528 + +#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E052C + +#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_0 0x1E0530 + +#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_1 0x1E0534 + +#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_2 0x1E0538 + +#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_3 0x1E053C + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_0 0x1E0540 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_1 0x1E0544 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_2 0x1E0548 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_3 0x1E054C + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_4 0x1E0550 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_0 0x1E0554 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_1 0x1E0558 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_2 0x1E055C + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_3 0x1E0560 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_4 0x1E0564 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_0 0x1E0568 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_1 0x1E056C + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_2 0x1E0570 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_3 0x1E0574 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_0 0x1E0578 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_1 0x1E057C + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_2 0x1E0580 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_3 0x1E0584 + +#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0588 + +#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E058C + +#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0590 + +#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0594 + +#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0598 + +#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E059C + +#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_0 0x1E05A0 + +#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_1 0x1E05A4 + +#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_2 0x1E05A8 + +#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_3 0x1E05AC + +#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E05B0 + +#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E05B4 + +#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E05B8 + +#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E05BC + +#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E05C0 + 
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_0 0x1E05C4 + +#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_1 0x1E05C8 + +#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_2 0x1E05CC + +#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_3 0x1E05D0 + +#define mmMME3_CTRL_SHADOW_0_DESC_SB_REPEAT 0x1E05D4 + +#define mmMME3_CTRL_SHADOW_0_DESC_RATE_LIMITER 0x1E05D8 + +#define mmMME3_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E05DC + +#define mmMME3_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E05E0 + +#define mmMME3_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E05E4 + +#define mmMME3_CTRL_SHADOW_0_DESC_SYNC_OBJECT_DATA 0x1E05E8 + +#define mmMME3_CTRL_SHADOW_0_DESC_AXI_USER_DATA 0x1E05EC + +#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_S 0x1E05F0 + +#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_L_LOCAL 0x1E05F4 + +#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_L_REMOTE 0x1E05F8 + +#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_O_LOCAL 0x1E05FC + +#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_O_REMOTE 0x1E0600 + +#define mmMME3_CTRL_SHADOW_0_DESC_PADDING_VALUE_S 0x1E0604 + +#define mmMME3_CTRL_SHADOW_0_DESC_PADDING_VALUE_L 0x1E0608 + +#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_S 0x1E060C + +#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_LOCAL 0x1E0610 + +#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_REMOTE 0x1E0614 + +#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_LOCAL 0x1E0618 + +#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_REMOTE 0x1E061C + +#define mmMME3_CTRL_SHADOW_0_DESC_PCU_RL_SATURATION 0x1E0620 + +#define mmMME3_CTRL_SHADOW_0_DESC_DUMMY 0x1E0624 + +#define mmMME3_CTRL_SHADOW_1_STATUS 0x1E0680 + +#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_HIGH_S 0x1E0688 + +#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_HIGH_L 0x1E068C + +#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_HIGH_O 0x1E0690 + +#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_LOW_S 0x1E0694 + +#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_LOW_L 0x1E0698 + +#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_LOW_O 0x1E069C + +#define mmMME3_CTRL_SHADOW_1_HEADER_LOW 0x1E06A0 + +#define mmMME3_CTRL_SHADOW_1_HEADER_HIGH 0x1E06A4 + +#define mmMME3_CTRL_SHADOW_1_CONV_KERNEL_SIZE_MINUS_1 0x1E06A8 + +#define mmMME3_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_LOW 0x1E06AC + +#define mmMME3_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_HIGH 0x1E06B0 + +#define mmMME3_CTRL_SHADOW_1_NUM_ITERATIONS_MINUS_1 0x1E06B4 + +#define mmMME3_CTRL_SHADOW_1_OUTER_LOOP 0x1E06B8 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_0 0x1E06BC + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_1 0x1E06C0 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_2 0x1E06C4 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_3 0x1E06C8 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_4 0x1E06CC + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_0 0x1E06D0 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_1 0x1E06D4 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_2 0x1E06D8 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_3 0x1E06DC + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_4 0x1E06E0 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_0 0x1E06E4 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_1 0x1E06E8 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_2 0x1E06EC + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_3 0x1E06F0 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_0 0x1E06F4 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_1 0x1E06F8 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_2 0x1E06FC + +#define 
mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_3 0x1E0700 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0704 + +#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_0 0x1E0708 + +#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_1 0x1E070C + +#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_2 0x1E0710 + +#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_3 0x1E0714 + +#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_4 0x1E0718 + +#define mmMME3_CTRL_SHADOW_1_AGU_S_START_OFFSET_0 0x1E071C + +#define mmMME3_CTRL_SHADOW_1_AGU_S_START_OFFSET_1 0x1E0720 + +#define mmMME3_CTRL_SHADOW_1_AGU_S_START_OFFSET_2 0x1E0724 + +#define mmMME3_CTRL_SHADOW_1_AGU_S_START_OFFSET_3 0x1E0728 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_0 0x1E072C + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_1 0x1E0730 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_2 0x1E0734 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_3 0x1E0738 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_4 0x1E073C + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_0 0x1E0740 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_1 0x1E0744 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_2 0x1E0748 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_3 0x1E074C + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_4 0x1E0750 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_0 0x1E0754 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_1 0x1E0758 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_2 0x1E075C + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_3 0x1E0760 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_0 0x1E0764 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_1 0x1E0768 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_2 0x1E076C + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_3 0x1E0770 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E0774 + +#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E0778 + +#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E077C + +#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0780 + +#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0784 + +#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0788 + +#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_0 0x1E078C + +#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_1 0x1E0790 + +#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_2 0x1E0794 + +#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_3 0x1E0798 + +#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E079C + +#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E07A0 + +#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E07A4 + +#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E07A8 + +#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E07AC + +#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_0 0x1E07B0 + +#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_1 0x1E07B4 + +#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_2 0x1E07B8 + +#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_3 0x1E07BC + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_0 0x1E07C0 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_1 0x1E07C4 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_2 0x1E07C8 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_3 0x1E07CC + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_4 0x1E07D0 + +#define 
mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_0 0x1E07D4 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_1 0x1E07D8 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_2 0x1E07DC + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_3 0x1E07E0 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_4 0x1E07E4 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_0 0x1E07E8 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_1 0x1E07EC + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_2 0x1E07F0 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_3 0x1E07F4 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_0 0x1E07F8 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_1 0x1E07FC + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_2 0x1E0800 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_3 0x1E0804 + +#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0808 + +#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E080C + +#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0810 + +#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0814 + +#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0818 + +#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E081C + +#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_0 0x1E0820 + +#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_1 0x1E0824 + +#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_2 0x1E0828 + +#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_3 0x1E082C + +#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E0830 + +#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E0834 + +#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E0838 + +#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E083C + +#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E0840 + +#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_0 0x1E0844 + +#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_1 0x1E0848 + +#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_2 0x1E084C + +#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_3 0x1E0850 + +#define mmMME3_CTRL_SHADOW_1_DESC_SB_REPEAT 0x1E0854 + +#define mmMME3_CTRL_SHADOW_1_DESC_RATE_LIMITER 0x1E0858 + +#define mmMME3_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E085C + +#define mmMME3_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E0860 + +#define mmMME3_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E0864 + +#define mmMME3_CTRL_SHADOW_1_DESC_SYNC_OBJECT_DATA 0x1E0868 + +#define mmMME3_CTRL_SHADOW_1_DESC_AXI_USER_DATA 0x1E086C + +#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_S 0x1E0870 + +#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_L_LOCAL 0x1E0874 + +#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_L_REMOTE 0x1E0878 + +#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_O_LOCAL 0x1E087C + +#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_O_REMOTE 0x1E0880 + +#define mmMME3_CTRL_SHADOW_1_DESC_PADDING_VALUE_S 0x1E0884 + +#define mmMME3_CTRL_SHADOW_1_DESC_PADDING_VALUE_L 0x1E0888 + +#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_S 0x1E088C + +#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_LOCAL 0x1E0890 + +#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_REMOTE 0x1E0894 + +#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_LOCAL 0x1E0898 + +#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_REMOTE 0x1E089C + +#define mmMME3_CTRL_SHADOW_1_DESC_PCU_RL_SATURATION 0x1E08A0 + +#define mmMME3_CTRL_SHADOW_1_DESC_DUMMY 0x1E08A4 + +#define mmMME3_CTRL_SHADOW_2_STATUS 0x1E0900 + 
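(Illustrative sketch, not part of the auto-generated header: the SHADOW descriptor banks mirror the ARCH descriptor layout at a fixed stride — SHADOW_0_STATUS is 0x1E0400, SHADOW_1_STATUS 0x1E0680, SHADOW_2_STATUS 0x1E0900, i.e. 0x280 bytes per bank — so any SHADOW_n register can be derived from its SHADOW_0 counterpart. Names below are hypothetical.)

#include <linux/types.h>

/* Derived from the SHADOW_n_STATUS offsets listed in this header */
#define MME3_CTRL_SHADOW_BANK_STRIDE 0x280

/* Hypothetical helper: map a SHADOW_0_* define to bank 0..3 */
static inline u32 mme3_shadow_reg(u32 shadow0_reg, unsigned int bank)
{
	return shadow0_reg + bank * MME3_CTRL_SHADOW_BANK_STRIDE;
}

(For instance, mme3_shadow_reg(mmMME3_CTRL_SHADOW_0_STATUS, 3) gives 0x1E0B80, matching mmMME3_CTRL_SHADOW_3_STATUS listed further below.)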
+#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_HIGH_S 0x1E0908 + +#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_HIGH_L 0x1E090C + +#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_HIGH_O 0x1E0910 + +#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_LOW_S 0x1E0914 + +#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_LOW_L 0x1E0918 + +#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_LOW_O 0x1E091C + +#define mmMME3_CTRL_SHADOW_2_HEADER_LOW 0x1E0920 + +#define mmMME3_CTRL_SHADOW_2_HEADER_HIGH 0x1E0924 + +#define mmMME3_CTRL_SHADOW_2_CONV_KERNEL_SIZE_MINUS_1 0x1E0928 + +#define mmMME3_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_LOW 0x1E092C + +#define mmMME3_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_HIGH 0x1E0930 + +#define mmMME3_CTRL_SHADOW_2_NUM_ITERATIONS_MINUS_1 0x1E0934 + +#define mmMME3_CTRL_SHADOW_2_OUTER_LOOP 0x1E0938 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_0 0x1E093C + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_1 0x1E0940 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_2 0x1E0944 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_3 0x1E0948 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_4 0x1E094C + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_0 0x1E0950 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_1 0x1E0954 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_2 0x1E0958 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_3 0x1E095C + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_4 0x1E0960 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_0 0x1E0964 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_1 0x1E0968 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_2 0x1E096C + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_3 0x1E0970 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_0 0x1E0974 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_1 0x1E0978 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_2 0x1E097C + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_3 0x1E0980 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0984 + +#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_0 0x1E0988 + +#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_1 0x1E098C + +#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_2 0x1E0990 + +#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_3 0x1E0994 + +#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_4 0x1E0998 + +#define mmMME3_CTRL_SHADOW_2_AGU_S_START_OFFSET_0 0x1E099C + +#define mmMME3_CTRL_SHADOW_2_AGU_S_START_OFFSET_1 0x1E09A0 + +#define mmMME3_CTRL_SHADOW_2_AGU_S_START_OFFSET_2 0x1E09A4 + +#define mmMME3_CTRL_SHADOW_2_AGU_S_START_OFFSET_3 0x1E09A8 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_0 0x1E09AC + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_1 0x1E09B0 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_2 0x1E09B4 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_3 0x1E09B8 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_4 0x1E09BC + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_0 0x1E09C0 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_1 0x1E09C4 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_2 0x1E09C8 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_3 0x1E09CC + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_4 0x1E09D0 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_0 0x1E09D4 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_1 0x1E09D8 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_2 0x1E09DC + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_3 0x1E09E0 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_0 0x1E09E4 + 
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_1 0x1E09E8 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_2 0x1E09EC + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_3 0x1E09F0 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E09F4 + +#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E09F8 + +#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E09FC + +#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0A00 + +#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0A04 + +#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0A08 + +#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_0 0x1E0A0C + +#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_1 0x1E0A10 + +#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_2 0x1E0A14 + +#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_3 0x1E0A18 + +#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E0A1C + +#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E0A20 + +#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E0A24 + +#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E0A28 + +#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E0A2C + +#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_0 0x1E0A30 + +#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_1 0x1E0A34 + +#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_2 0x1E0A38 + +#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_3 0x1E0A3C + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_0 0x1E0A40 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_1 0x1E0A44 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_2 0x1E0A48 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_3 0x1E0A4C + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_4 0x1E0A50 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_0 0x1E0A54 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_1 0x1E0A58 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_2 0x1E0A5C + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_3 0x1E0A60 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_4 0x1E0A64 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_0 0x1E0A68 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_1 0x1E0A6C + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_2 0x1E0A70 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_3 0x1E0A74 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_0 0x1E0A78 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_1 0x1E0A7C + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_2 0x1E0A80 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_3 0x1E0A84 + +#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0A88 + +#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E0A8C + +#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0A90 + +#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0A94 + +#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0A98 + +#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E0A9C + +#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_0 0x1E0AA0 + +#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_1 0x1E0AA4 + +#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_2 0x1E0AA8 + +#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_3 0x1E0AAC + +#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E0AB0 + +#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E0AB4 + 
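(Illustrative sketch, not part of the auto-generated header: comparing this file with the MME2 headers earlier in the diff, MME2_CTRL sits at 0x160000 and MME3_CTRL at 0x1E0000, so the two engine instances differ by a 0x80000 stride and every offset here is its MME2 counterpart plus that stride. The names below are hypothetical; the stride is read off these two headers only.)

#include <linux/types.h>

/* 0x1E0000 - 0x160000, observed from the MME2 and MME3 CTRL headers */
#define MME_CTRL_INSTANCE_STRIDE 0x80000

/* Hypothetical helper: derive the MME3 register from its MME2 define */
static inline u32 mme_ctrl_next_instance(u32 mme2_reg)
{
	return mme2_reg + MME_CTRL_INSTANCE_STRIDE;
}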
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E0AB8 + +#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E0ABC + +#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E0AC0 + +#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_0 0x1E0AC4 + +#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_1 0x1E0AC8 + +#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_2 0x1E0ACC + +#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_3 0x1E0AD0 + +#define mmMME3_CTRL_SHADOW_2_DESC_SB_REPEAT 0x1E0AD4 + +#define mmMME3_CTRL_SHADOW_2_DESC_RATE_LIMITER 0x1E0AD8 + +#define mmMME3_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E0ADC + +#define mmMME3_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E0AE0 + +#define mmMME3_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E0AE4 + +#define mmMME3_CTRL_SHADOW_2_DESC_SYNC_OBJECT_DATA 0x1E0AE8 + +#define mmMME3_CTRL_SHADOW_2_DESC_AXI_USER_DATA 0x1E0AEC + +#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_S 0x1E0AF0 + +#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_L_LOCAL 0x1E0AF4 + +#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_L_REMOTE 0x1E0AF8 + +#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_O_LOCAL 0x1E0AFC + +#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_O_REMOTE 0x1E0B00 + +#define mmMME3_CTRL_SHADOW_2_DESC_PADDING_VALUE_S 0x1E0B04 + +#define mmMME3_CTRL_SHADOW_2_DESC_PADDING_VALUE_L 0x1E0B08 + +#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_S 0x1E0B0C + +#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_LOCAL 0x1E0B10 + +#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_REMOTE 0x1E0B14 + +#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_LOCAL 0x1E0B18 + +#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_REMOTE 0x1E0B1C + +#define mmMME3_CTRL_SHADOW_2_DESC_PCU_RL_SATURATION 0x1E0B20 + +#define mmMME3_CTRL_SHADOW_2_DESC_DUMMY 0x1E0B24 + +#define mmMME3_CTRL_SHADOW_3_STATUS 0x1E0B80 + +#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_HIGH_S 0x1E0B88 + +#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_HIGH_L 0x1E0B8C + +#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_HIGH_O 0x1E0B90 + +#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_LOW_S 0x1E0B94 + +#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_LOW_L 0x1E0B98 + +#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_LOW_O 0x1E0B9C + +#define mmMME3_CTRL_SHADOW_3_HEADER_LOW 0x1E0BA0 + +#define mmMME3_CTRL_SHADOW_3_HEADER_HIGH 0x1E0BA4 + +#define mmMME3_CTRL_SHADOW_3_CONV_KERNEL_SIZE_MINUS_1 0x1E0BA8 + +#define mmMME3_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_LOW 0x1E0BAC + +#define mmMME3_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_HIGH 0x1E0BB0 + +#define mmMME3_CTRL_SHADOW_3_NUM_ITERATIONS_MINUS_1 0x1E0BB4 + +#define mmMME3_CTRL_SHADOW_3_OUTER_LOOP 0x1E0BB8 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_0 0x1E0BBC + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_1 0x1E0BC0 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_2 0x1E0BC4 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_3 0x1E0BC8 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_4 0x1E0BCC + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_0 0x1E0BD0 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_1 0x1E0BD4 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_2 0x1E0BD8 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_3 0x1E0BDC + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_4 0x1E0BE0 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_0 0x1E0BE4 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_1 0x1E0BE8 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_2 0x1E0BEC + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_3 0x1E0BF0 + +#define 
mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_0 0x1E0BF4 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_1 0x1E0BF8 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_2 0x1E0BFC + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_3 0x1E0C00 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0C04 + +#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_0 0x1E0C08 + +#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_1 0x1E0C0C + +#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_2 0x1E0C10 + +#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_3 0x1E0C14 + +#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_4 0x1E0C18 + +#define mmMME3_CTRL_SHADOW_3_AGU_S_START_OFFSET_0 0x1E0C1C + +#define mmMME3_CTRL_SHADOW_3_AGU_S_START_OFFSET_1 0x1E0C20 + +#define mmMME3_CTRL_SHADOW_3_AGU_S_START_OFFSET_2 0x1E0C24 + +#define mmMME3_CTRL_SHADOW_3_AGU_S_START_OFFSET_3 0x1E0C28 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_0 0x1E0C2C + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_1 0x1E0C30 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_2 0x1E0C34 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_3 0x1E0C38 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_4 0x1E0C3C + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_0 0x1E0C40 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_1 0x1E0C44 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_2 0x1E0C48 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_3 0x1E0C4C + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_4 0x1E0C50 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_0 0x1E0C54 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_1 0x1E0C58 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_2 0x1E0C5C + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_3 0x1E0C60 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_0 0x1E0C64 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_1 0x1E0C68 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_2 0x1E0C6C + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_3 0x1E0C70 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E0C74 + +#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E0C78 + +#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E0C7C + +#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0C80 + +#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0C84 + +#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0C88 + +#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_0 0x1E0C8C + +#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_1 0x1E0C90 + +#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_2 0x1E0C94 + +#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_3 0x1E0C98 + +#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E0C9C + +#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E0CA0 + +#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E0CA4 + +#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E0CA8 + +#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E0CAC + +#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_0 0x1E0CB0 + +#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_1 0x1E0CB4 + +#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_2 0x1E0CB8 + +#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_3 0x1E0CBC + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_0 0x1E0CC0 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_1 0x1E0CC4 + +#define 
mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_2 0x1E0CC8 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_3 0x1E0CCC + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_4 0x1E0CD0 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_0 0x1E0CD4 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_1 0x1E0CD8 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_2 0x1E0CDC + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_3 0x1E0CE0 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_4 0x1E0CE4 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_0 0x1E0CE8 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_1 0x1E0CEC + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_2 0x1E0CF0 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_3 0x1E0CF4 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_0 0x1E0CF8 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_1 0x1E0CFC + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_2 0x1E0D00 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_3 0x1E0D04 + +#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0D08 + +#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E0D0C + +#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0D10 + +#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0D14 + +#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0D18 + +#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E0D1C + +#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_0 0x1E0D20 + +#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_1 0x1E0D24 + +#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_2 0x1E0D28 + +#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_3 0x1E0D2C + +#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E0D30 + +#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E0D34 + +#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E0D38 + +#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E0D3C + +#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E0D40 + +#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_0 0x1E0D44 + +#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_1 0x1E0D48 + +#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_2 0x1E0D4C + +#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_3 0x1E0D50 + +#define mmMME3_CTRL_SHADOW_3_DESC_SB_REPEAT 0x1E0D54 + +#define mmMME3_CTRL_SHADOW_3_DESC_RATE_LIMITER 0x1E0D58 + +#define mmMME3_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E0D5C + +#define mmMME3_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E0D60 + +#define mmMME3_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E0D64 + +#define mmMME3_CTRL_SHADOW_3_DESC_SYNC_OBJECT_DATA 0x1E0D68 + +#define mmMME3_CTRL_SHADOW_3_DESC_AXI_USER_DATA 0x1E0D6C + +#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_S 0x1E0D70 + +#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_L_LOCAL 0x1E0D74 + +#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_L_REMOTE 0x1E0D78 + +#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_O_LOCAL 0x1E0D7C + +#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_O_REMOTE 0x1E0D80 + +#define mmMME3_CTRL_SHADOW_3_DESC_PADDING_VALUE_S 0x1E0D84 + +#define mmMME3_CTRL_SHADOW_3_DESC_PADDING_VALUE_L 0x1E0D88 + +#define mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_S 0x1E0D8C + +#define mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_LOCAL 0x1E0D90 + +#define mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_REMOTE 0x1E0D94 + +#define mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_LOCAL 0x1E0D98 + +#define 
mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_REMOTE 0x1E0D9C + +#define mmMME3_CTRL_SHADOW_3_DESC_PCU_RL_SATURATION 0x1E0DA0 + +#define mmMME3_CTRL_SHADOW_3_DESC_DUMMY 0x1E0DA4 + +#endif /* ASIC_REG_MME3_CTRL_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h new file mode 100644 index 000000000..61465b599 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MMU_UP_REGS_H_ +#define ASIC_REG_MMU_UP_REGS_H_ + +/* + ***************************************** + * MMU_UP (Prototype: MMU) + ***************************************** + */ + +#define mmMMU_UP_MMU_ENABLE 0xC1100C + +#define mmMMU_UP_FORCE_ORDERING 0xC11010 + +#define mmMMU_UP_FEATURE_ENABLE 0xC11014 + +#define mmMMU_UP_VA_ORDERING_MASK_31_7 0xC11018 + +#define mmMMU_UP_VA_ORDERING_MASK_49_32 0xC1101C + +#define mmMMU_UP_LOG2_DDR_SIZE 0xC11020 + +#define mmMMU_UP_SCRAMBLER 0xC11024 + +#define mmMMU_UP_MEM_INIT_BUSY 0xC11028 + +#define mmMMU_UP_SPI_MASK 0xC1102C + +#define mmMMU_UP_SPI_CAUSE 0xC11030 + +#define mmMMU_UP_PAGE_ERROR_CAPTURE 0xC11034 + +#define mmMMU_UP_PAGE_ERROR_CAPTURE_VA 0xC11038 + +#define mmMMU_UP_ACCESS_ERROR_CAPTURE 0xC1103C + +#define mmMMU_UP_ACCESS_ERROR_CAPTURE_VA 0xC11040 + +#define mmMMU_UP_SPI_INTERRUPT_CLR 0xC11044 + +#define mmMMU_UP_SPI_INTERRUPT_MASK 0xC11048 + +#define mmMMU_UP_DBG_MEM_WRAP_RM 0xC1104C + +#define mmMMU_UP_SPI_CAUSE_CLR 0xC11050 + +#define mmMMU_UP_SLICE_CREDIT 0xC11054 + +#define mmMMU_UP_PIPE_CREDIT 0xC11058 + +#define mmMMU_UP_RAZWI_WRITE_VLD 0xC1105C + +#define mmMMU_UP_RAZWI_WRITE_ID 0xC11060 + +#define mmMMU_UP_RAZWI_READ_VLD 0xC11064 + +#define mmMMU_UP_RAZWI_READ_ID 0xC11068 + +#define mmMMU_UP_MMU_BYPASS 0xC1106C + +#endif /* ASIC_REG_MMU_UP_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h new file mode 100644 index 000000000..2efa2a54d --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h @@ -0,0 +1,896 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
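The MMU_UP block above is a flat register map: each macro is a byte offset into the device's configuration space, so a driver touches these registers through ordinary MMIO accessors. A minimal sketch of that pattern, assuming a hypothetical cfg_base pointer to the mapped configuration BAR (the bit layouts and the driver's own accessor macros are not described by this header, only the offsets are):

	/* Sketch only: cfg_base and the register semantics are illustrative. */
	#include <linux/io.h>
	#include <linux/types.h>

	static void mmu_up_ack_spi_irq(void __iomem *cfg_base)
	{
		u32 cause;

		/* Read the pending interrupt cause, then clear exactly those bits. */
		cause = readl(cfg_base + mmMMU_UP_SPI_CAUSE);
		writel(cause, cfg_base + mmMMU_UP_SPI_CAUSE_CLR);
	}
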
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h
new file mode 100644
index 000000000..2efa2a54d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_0_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_0_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_0_PERM_SEL 0x386108
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_0 0x386114
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_1 0x386118
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_2 0x38611C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_3 0x386120
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_4 0x386124
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_5 0x386128
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_6 0x38612C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_7 0x386130
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_8 0x386134
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_9 0x386138
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_10 0x38613C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_11 0x386140
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_12 0x386144
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_13 0x386148
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_14 0x38614C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_15 0x386150
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_16 0x386154
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_17 0x386158
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_18 0x38615C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_19 0x386160
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_20 0x386164
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_21 0x386168
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_22 0x38616C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_23 0x386170
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_24 0x386174
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_25 0x386178
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_26 0x38617C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_27 0x386180
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_0 0x386184
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_1 0x386188
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_2 0x38618C
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_3 0x386190
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_4 0x386194
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_5 0x386198
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_6 0x38619C
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_7 0x3861A0
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_8 0x3861A4
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_9 0x3861A8
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_10 0x3861AC
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_11 0x3861B0
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_12 0x3861B4
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_13 0x3861B8
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_14 0x3861BC
+
+#define mmNIF_RTR_CTRL_0_SCRAM_SRAM_EN 0x38626C
+
+#define mmNIF_RTR_CTRL_0_RL_HBM_EN 0x386274
+
+#define mmNIF_RTR_CTRL_0_RL_HBM_SAT 0x386278
+
+#define mmNIF_RTR_CTRL_0_RL_HBM_RST 0x38627C
+
+#define mmNIF_RTR_CTRL_0_RL_HBM_TIMEOUT 0x386280
+
+#define mmNIF_RTR_CTRL_0_SCRAM_HBM_EN 0x386284
+
+#define mmNIF_RTR_CTRL_0_RL_PCI_EN 0x386288
+
+#define mmNIF_RTR_CTRL_0_RL_PCI_SAT 0x38628C
+
+#define mmNIF_RTR_CTRL_0_RL_PCI_RST 0x386290
+
+#define mmNIF_RTR_CTRL_0_RL_PCI_TIMEOUT 0x386294
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_EN 0x38629C
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_SAT 0x3862A0
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_RST 0x3862A4
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_TIMEOUT 0x3862AC
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_RED 0x3862B4
+
+#define mmNIF_RTR_CTRL_0_E2E_HBM_EN 0x3862EC
+
+#define mmNIF_RTR_CTRL_0_E2E_PCI_EN 0x3862F0
+
+#define mmNIF_RTR_CTRL_0_E2E_HBM_WR_SIZE 0x3862F4
+
+#define mmNIF_RTR_CTRL_0_E2E_PCI_WR_SIZE 0x3862F8
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_PCI_CTR_SET_EN 0x386404
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_PCI_CTR_SET 0x386408
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_PCI_CTR_WRAP 0x38640C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_PCI_CTR_CNT 0x386410
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM_CTR_SET_EN 0x386414
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM_CTR_SET 0x386418
+
+#define mmNIF_RTR_CTRL_0_E2E_HBM_RD_SIZE 0x38641C
+
+#define mmNIF_RTR_CTRL_0_E2E_PCI_RD_SIZE 0x386420
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_PCI_CTR_SET_EN 0x386424
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_PCI_CTR_SET 0x386428
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_PCI_CTR_WRAP 0x38642C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_PCI_CTR_CNT 0x386430
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM_CTR_SET_EN 0x386434
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM_CTR_SET 0x386438
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_SEL_0 0x386450
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_SEL_1 0x386454
+
+#define mmNIF_RTR_CTRL_0_NON_LIN_EN 0x386480
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_0 0x386500
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_1 0x386504
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_2 0x386508
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_3 0x38650C
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_4 0x386510
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_0 0x386514
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_1 0x386520
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_2 0x386524
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_3 0x386528
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_4 0x38652C
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_5 0x386530
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_6 0x386534
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_7 0x386538
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_8 0x38653C
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_9 0x386540
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_0 0x386550
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_1 0x386554
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_2 0x386558
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_3 0x38655C
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_4 0x386560
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_5 0x386564
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_6 0x386568
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_7 0x38656C
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_8 0x386570
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_9 0x386574
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_10 0x386578
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_11 0x38657C
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_12 0x386580
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_13 0x386584
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_14 0x386588
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_15 0x38658C
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_16 0x386590
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_17 0x386594
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_18 0x386598
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0 0x3865E4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_1 0x3865E8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_2 0x3865EC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_3 0x3865F0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_4 0x3865F4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_5 0x3865F8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_6 0x3865FC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_7 0x386600
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_8 0x386604
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_9 0x386608
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_10 0x38660C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_11 0x386610
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_12 0x386614
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_13 0x386618
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_14 0x38661C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_15 0x386620
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0 0x386624
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_1 0x386628
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_2 0x38662C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_3 0x386630
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_4 0x386634
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_5 0x386638
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_6 0x38663C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_7 0x386640
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_8 0x386644
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_9 0x386648
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_10 0x38664C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_11 0x386650
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_12 0x386654
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_13 0x386658
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_14 0x38665C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_15 0x386660
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0 0x386664
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_1 0x386668
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_2 0x38666C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_3 0x386670
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_4 0x386674
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_5 0x386678
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_6 0x38667C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_7 0x386680
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_8 0x386684
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_9 0x386688
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_10 0x38668C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_11 0x386690
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_12 0x386694
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_13 0x386698
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_14 0x38669C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_15 0x3866A0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0 0x3866A4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_1 0x3866A8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_2 0x3866AC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_3 0x3866B0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_4 0x3866B4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_5 0x3866B8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_6 0x3866BC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_7 0x3866C0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_8 0x3866C4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_9 0x3866C8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_10 0x3866CC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_11 0x3866D0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_12 0x3866D4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_13 0x3866D8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_14 0x3866DC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_15 0x3866E0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_0 0x3866E4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_1 0x3866E8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_2 0x3866EC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_3 0x3866F0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_4 0x3866F4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_5 0x3866F8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_6 0x3866FC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_7 0x386700
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_8 0x386704
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_9 0x386708
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_10 0x38670C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_11 0x386710
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_12 0x386714
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_13 0x386718
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_14 0x38671C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_15 0x386720
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_0 0x386724
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_1 0x386728
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_2 0x38672C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_3 0x386730
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_4 0x386734
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_5 0x386738
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_6 0x38673C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_7 0x386740
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_8 0x386744
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_9 0x386748
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_10 0x38674C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_11 0x386750
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_12 0x386754
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_13 0x386758
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_14 0x38675C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_15 0x386760
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_0 0x386764
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_1 0x386768
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_2 0x38676C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_3 0x386770
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_4 0x386774
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_5 0x386778
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_6 0x38677C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_7 0x386780
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_8 0x386784
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_9 0x386788
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_10 0x38678C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_11 0x386790
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_12 0x386794
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_13 0x386798
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_14 0x38679C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_15 0x3867A0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_0 0x3867A4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_1 0x3867A8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_2 0x3867AC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_3 0x3867B0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_4 0x3867B4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_5 0x3867B8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_6 0x3867BC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_7 0x3867C0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_8 0x3867C4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_9 0x3867C8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_10 0x3867CC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_11 0x3867D0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_12 0x3867D4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_13 0x3867D8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_14 0x3867DC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_15 0x3867E0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_0 0x386824
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_1 0x386828
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_2 0x38682C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_3 0x386830
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_4 0x386834
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_5 0x386838
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_6 0x38683C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_7 0x386840
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_8 0x386844
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_9 0x386848
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_10 0x38684C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_11 0x386850
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_12 0x386854
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_13 0x386858
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_14 0x38685C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_15 0x386860
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_0 0x386864
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_1 0x386868
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_2 0x38686C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_3 0x386870
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_4 0x386874
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_5 0x386878
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_6 0x38687C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_7 0x386880
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_8 0x386884
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_9 0x386888
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_10 0x38688C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_11 0x386890
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_12 0x386894
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_13 0x386898
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_14 0x38689C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_15 0x3868A0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_0 0x3868A4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_1 0x3868A8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_2 0x3868AC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_3 0x3868B0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_4 0x3868B4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_5 0x3868B8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_6 0x3868BC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_7 0x3868C0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_8 0x3868C4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_9 0x3868C8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_10 0x3868CC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_11 0x3868D0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_12 0x3868D4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_13 0x3868D8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_14 0x3868DC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_15 0x3868E0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_0 0x3868E4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_1 0x3868E8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_2 0x3868EC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_3 0x3868F0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_4 0x3868F4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_5 0x3868F8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_6 0x3868FC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_7 0x386900
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_8 0x386904
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_9 0x386908
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_10 0x38690C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_11 0x386910
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_12 0x386914
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_13 0x386918
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_14 0x38691C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_15 0x386920
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_0 0x386924
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_1 0x386928
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_2 0x38692C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_3 0x386930
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_4 0x386934
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_5 0x386938
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_6 0x38693C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_7 0x386940
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_8 0x386944
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_9 0x386948
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_10 0x38694C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_11 0x386950
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_12 0x386954
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_13 0x386958
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_14 0x38695C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_15 0x386960
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_0 0x386964
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_1 0x386968
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_2 0x38696C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_3 0x386970
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_4 0x386974
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_5 0x386978
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_6 0x38697C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_7 0x386980
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_8 0x386984
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_9 0x386988
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_10 0x38698C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_11 0x386990
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_12 0x386994
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_13 0x386998
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_14 0x38699C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_15 0x3869A0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_0 0x3869A4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_1 0x3869A8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_2 0x3869AC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_3 0x3869B0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_4 0x3869B4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_5 0x3869B8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_6 0x3869BC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_7 0x3869C0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_8 0x3869C4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_9 0x3869C8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_10 0x3869CC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_11 0x3869D0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_12 0x3869D4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_13 0x3869D8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_14 0x3869DC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_15 0x3869E0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_0 0x3869E4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_1 0x3869E8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_2 0x3869EC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_3 0x3869F0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_4 0x3869F4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_5 0x3869F8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_6 0x3869FC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_7 0x386A00
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_8 0x386A04
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_9 0x386A08
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_10 0x386A0C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_11 0x386A10
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_12 0x386A14
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_13 0x386A18
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_14 0x386A1C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_15 0x386A20
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_HIT_AW 0x386A64
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_HIT_AR 0x386A68
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_HIT_AW 0x386A6C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_HIT_AR 0x386A70
+
+#define mmNIF_RTR_CTRL_0_RGL_CFG 0x386B64
+
+#define mmNIF_RTR_CTRL_0_RGL_SHIFT 0x386B68
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_0 0x386B6C
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_1 0x386B70
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_2 0x386B74
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_3 0x386B78
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_4 0x386B7C
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_5 0x386B80
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_6 0x386B84
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_7 0x386B88
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_0 0x386BAC
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_1 0x386BB0
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_2 0x386BB4
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_3 0x386BB8
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_4 0x386BBC
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_5 0x386BC0
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_6 0x386BC4
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_7 0x386BC8
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_0 0x386BEC
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_1 0x386BF0
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_2 0x386BF4
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_3 0x386BF8
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_4 0x386BFC
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_5 0x386C00
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_6 0x386C04
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_7 0x386C08
+
+#define mmNIF_RTR_CTRL_0_RGL_WDT 0x386C2C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM0_CH0_CTR_WRAP 0x386C30
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM0_CH1_CTR_WRAP 0x386C34
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM1_CH0_CTR_WRAP 0x386C38
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM1_CH1_CTR_WRAP 0x386C3C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM2_CH0_CTR_WRAP 0x386C40
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM2_CH1_CTR_WRAP 0x386C44
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM3_CH0_CTR_WRAP 0x386C48
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM3_CH1_CTR_WRAP 0x386C4C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM0_CH0_CTR_CNT 0x386C50
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM0_CH1_CTR_CNT 0x386C54
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM1_CH0_CTR_CNT 0x386C58
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM1_CH1_CTR_CNT 0x386C5C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM2_CH0_CTR_CNT 0x386C60
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM2_CH1_CTR_CNT 0x386C64
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM3_CH0_CTR_CNT 0x386C68
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM3_CH1_CTR_CNT 0x386C6C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM0_CH0_CTR_WRAP 0x386C70
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM0_CH1_CTR_WRAP 0x386C74
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM1_CH0_CTR_WRAP 0x386C78
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM1_CH1_CTR_WRAP 0x386C7C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM2_CH0_CTR_WRAP 0x386C80
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM2_CH1_CTR_WRAP 0x386C84
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM3_CH0_CTR_WRAP 0x386C88
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM3_CH1_CTR_WRAP 0x386C8C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM0_CH0_CTR_CNT 0x386C90
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM0_CH1_CTR_CNT 0x386C94
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM1_CH0_CTR_CNT 0x386C98
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM1_CH1_CTR_CNT 0x386C9C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM2_CH0_CTR_CNT 0x386CA0
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM2_CH1_CTR_CNT 0x386CA4
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM3_CH0_CTR_CNT 0x386CA8
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM3_CH1_CTR_CNT 0x386CAC
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_0 0x386CB0
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_1 0x386CB4
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_2 0x386CB8
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_3 0x386CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_0_REGS_H_ */
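The RANGE_SEC and RANGE_PRIV registers above come in LOW/HIGH pairs with sixteen entries each, spaced 4 bytes apart, which is what lets a driver program a 64-bit secure address window per entry by splitting base and mask across the pair. A minimal sketch of that indexing, assuming a hypothetical cfg pointer to the mapped configuration space and inferring the base/mask semantics from the register names only (this header fixes nothing but the offsets):

	/* Sketch only: set_sec_aw_range() is illustrative, not the driver's API. */
	#include <linux/io.h>
	#include <linux/kernel.h>

	static void set_sec_aw_range(void __iomem *cfg, int i, u64 base, u64 mask)
	{
		/* Entries _0.._15 are consecutive 32-bit registers, hence i * 4. */
		writel(lower_32_bits(base),
		       cfg + mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0 + (i * 4));
		writel(upper_32_bits(base),
		       cfg + mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0 + (i * 4));
		writel(lower_32_bits(mask),
		       cfg + mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0 + (i * 4));
		writel(upper_32_bits(mask),
		       cfg + mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0 + (i * 4));
	}
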
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h
new file mode 100644
index 000000000..a6047d4e2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_1_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_1_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_1_PERM_SEL 0x396108
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_0 0x396114
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_1 0x396118
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_2 0x39611C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_3 0x396120
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_4 0x396124
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_5 0x396128
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_6 0x39612C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_7 0x396130
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_8 0x396134
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_9 0x396138
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_10 0x39613C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_11 0x396140
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_12 0x396144
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_13 0x396148
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_14 0x39614C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_15 0x396150
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_16 0x396154
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_17 0x396158
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_18 0x39615C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_19 0x396160
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_20 0x396164
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_21 0x396168
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_22 0x39616C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_23 0x396170
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_24 0x396174
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_25 0x396178
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_26 0x39617C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_27 0x396180
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_0 0x396184
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_1 0x396188
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_2 0x39618C
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_3 0x396190
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_4 0x396194
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_5 0x396198
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_6 0x39619C
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_7 0x3961A0
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_8 0x3961A4
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_9 0x3961A8
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_10 0x3961AC
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_11 0x3961B0
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_12 0x3961B4
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_13 0x3961B8
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_14 0x3961BC
+
+#define mmNIF_RTR_CTRL_1_SCRAM_SRAM_EN 0x39626C
+
+#define mmNIF_RTR_CTRL_1_RL_HBM_EN 0x396274
+
+#define mmNIF_RTR_CTRL_1_RL_HBM_SAT 0x396278
+
+#define mmNIF_RTR_CTRL_1_RL_HBM_RST 0x39627C
+
+#define mmNIF_RTR_CTRL_1_RL_HBM_TIMEOUT 0x396280
+
+#define mmNIF_RTR_CTRL_1_SCRAM_HBM_EN 0x396284
+
+#define mmNIF_RTR_CTRL_1_RL_PCI_EN 0x396288
+
+#define mmNIF_RTR_CTRL_1_RL_PCI_SAT 0x39628C
+
+#define mmNIF_RTR_CTRL_1_RL_PCI_RST 0x396290
+
+#define mmNIF_RTR_CTRL_1_RL_PCI_TIMEOUT 0x396294
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_EN 0x39629C
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_SAT 0x3962A0
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_RST 0x3962A4
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_TIMEOUT 0x3962AC
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_RED 0x3962B4
+
+#define mmNIF_RTR_CTRL_1_E2E_HBM_EN 0x3962EC
+
+#define mmNIF_RTR_CTRL_1_E2E_PCI_EN 0x3962F0
+
+#define mmNIF_RTR_CTRL_1_E2E_HBM_WR_SIZE 0x3962F4
+
+#define mmNIF_RTR_CTRL_1_E2E_PCI_WR_SIZE 0x3962F8
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_PCI_CTR_SET_EN 0x396404
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_PCI_CTR_SET 0x396408
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_PCI_CTR_WRAP 0x39640C
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_PCI_CTR_CNT 0x396410
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM_CTR_SET_EN 0x396414
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM_CTR_SET 0x396418
+
+#define mmNIF_RTR_CTRL_1_E2E_HBM_RD_SIZE 0x39641C
+
+#define mmNIF_RTR_CTRL_1_E2E_PCI_RD_SIZE 0x396420
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_PCI_CTR_SET_EN 0x396424
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_PCI_CTR_SET 0x396428
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_PCI_CTR_WRAP 0x39642C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_PCI_CTR_CNT 0x396430
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM_CTR_SET_EN 0x396434
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM_CTR_SET 0x396438
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_SEL_0 0x396450
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_SEL_1 0x396454
+
+#define mmNIF_RTR_CTRL_1_NON_LIN_EN 0x396480
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_0 0x396500
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_1 0x396504
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_2 0x396508
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_3 0x39650C
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_4 0x396510
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_0 0x396514
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_1 0x396520
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_2 0x396524
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_3 0x396528
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_4 0x39652C
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_5 0x396530
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_6 0x396534
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_7 0x396538
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_8 0x39653C
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_9 0x396540
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_0 0x396550
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_1 0x396554
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_2 0x396558
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_3 0x39655C
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_4 0x396560
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_5 0x396564
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_6 0x396568
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_7 0x39656C
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_8 0x396570
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_9 0x396574
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_10 0x396578
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_11 0x39657C
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_12 0x396580
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_13 0x396584
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_14 0x396588
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_15 0x39658C
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_16 0x396590
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_17 0x396594
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_18 0x396598
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_0 0x3965E4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_1 0x3965E8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_2 0x3965EC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_3 0x3965F0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_4 0x3965F4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_5 0x3965F8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_6 0x3965FC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_7 0x396600
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_8 0x396604
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_9 0x396608
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_10 0x39660C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_11 0x396610
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_12 0x396614
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_13 0x396618
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_14 0x39661C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_15 0x396620
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_0 0x396624
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_1 0x396628
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_2 0x39662C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_3 0x396630
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_4 0x396634
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_5 0x396638
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_6 0x39663C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_7 0x396640
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_8 0x396644
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_9 0x396648
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_10 0x39664C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_11 0x396650
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_12 0x396654
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_13 0x396658
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_14 0x39665C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_15 0x396660
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_0 0x396664
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_1 0x396668
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_2 0x39666C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_3 0x396670
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_4 0x396674
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_5 0x396678
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_6 0x39667C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_7 0x396680
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_8 0x396684
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_9 0x396688
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_10 0x39668C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_11 0x396690
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_12 0x396694
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_13 0x396698
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_14 0x39669C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_15 0x3966A0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_0 0x3966A4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_1 0x3966A8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_2 0x3966AC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_3 0x3966B0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_4 0x3966B4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_5 0x3966B8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_6 0x3966BC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_7 0x3966C0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_8 0x3966C4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_9 0x3966C8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_10 0x3966CC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_11 0x3966D0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_12 0x3966D4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_13 0x3966D8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_14 0x3966DC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_15 0x3966E0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_0 0x3966E4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_1 0x3966E8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_2 0x3966EC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_3 0x3966F0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_4 0x3966F4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_5 0x3966F8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_6 0x3966FC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_7 0x396700
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_8 0x396704
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_9 0x396708
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_10 0x39670C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_11 0x396710
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_12 0x396714
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_13 0x396718
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_14 0x39671C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_15 0x396720
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_0 0x396724
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_1 0x396728
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_2 0x39672C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_3 0x396730
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_4 0x396734
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_5 0x396738
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_6 0x39673C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_7 0x396740
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_8 0x396744
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_9 0x396748
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_10 0x39674C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_11 0x396750
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_12 0x396754
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_13 0x396758
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_14 0x39675C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_15 0x396760
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_0 0x396764
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_1 0x396768
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_2 0x39676C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_3 0x396770
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_4 0x396774
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_5 0x396778
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_6 0x39677C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_7 0x396780
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_8 0x396784
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_9 0x396788
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_10 0x39678C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_11 0x396790
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_12 0x396794
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_13 0x396798
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_14 0x39679C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_15 0x3967A0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_0 0x3967A4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_1 0x3967A8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_2 0x3967AC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_3 0x3967B0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_4 0x3967B4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_5 0x3967B8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_6 0x3967BC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_7 0x3967C0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_8 0x3967C4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_9 0x3967C8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_10 0x3967CC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_11 0x3967D0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_12 0x3967D4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_13 0x3967D8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_14 0x3967DC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_15 0x3967E0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_0 0x396824
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_1 0x396828
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_2 0x39682C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_3 0x396830
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_4 0x396834
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_5 0x396838
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_6 0x39683C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_7 0x396840
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_8 0x396844
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_9 0x396848
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_10 0x39684C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_11 0x396850
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_12 0x396854
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_13 0x396858
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_14 0x39685C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_15 0x396860
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_0 0x396864
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_1 0x396868
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_2 0x39686C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_3 0x396870
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_4 0x396874
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_5 0x396878
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_6 0x39687C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_7 0x396880
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_8 0x396884
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_9 0x396888
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_10 0x39688C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_11 0x396890
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_12 0x396894
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_13 0x396898
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_14 0x39689C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_15 0x3968A0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_0 0x3968A4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_1 0x3968A8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_2 0x3968AC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_3 0x3968B0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_4 0x3968B4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_5 0x3968B8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_6 0x3968BC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_7 0x3968C0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_8 0x3968C4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_9 0x3968C8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_10 0x3968CC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_11 0x3968D0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_12 0x3968D4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_13 0x3968D8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_14 0x3968DC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_15 0x3968E0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_0 0x3968E4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_1 0x3968E8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_2 0x3968EC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_3 0x3968F0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_4 0x3968F4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_5 0x3968F8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_6 0x3968FC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_7 0x396900
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_8 0x396904
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_9 0x396908
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_10 0x39690C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_11 0x396910
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_12 0x396914
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_13 0x396918
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_14 0x39691C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_15 0x396920
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_0 0x396924
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_1 0x396928
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_2 0x39692C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_3 0x396930
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_4 0x396934
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_5 0x396938
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_6 0x39693C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_7 0x396940
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_8 0x396944
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_9 0x396948
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_10 0x39694C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_11 0x396950
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_12 0x396954
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_13 0x396958
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_14 0x39695C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_15 0x396960
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_0 0x396964
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_1 0x396968
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_2 0x39696C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_3 0x396970
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_4 0x396974
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_5 0x396978
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_6 0x39697C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_7 0x396980
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_8 0x396984
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_9 0x396988
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_10 0x39698C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_11 0x396990
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_12 0x396994
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_13 0x396998
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_14 0x39699C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_15 0x3969A0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_0 0x3969A4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_1 0x3969A8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_2 0x3969AC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_3 0x3969B0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_4 0x3969B4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_5 0x3969B8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_6 0x3969BC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_7 0x3969C0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_8 0x3969C4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_9 0x3969C8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_10 0x3969CC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_11 0x3969D0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_12 0x3969D4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_13 0x3969D8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_14 0x3969DC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_15 0x3969E0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_0 0x3969E4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_1 0x3969E8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_2 0x3969EC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_3 0x3969F0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_4 0x3969F4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_5 0x3969F8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_6 0x3969FC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_7 0x396A00
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_8 0x396A04
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_9 0x396A08
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_10 0x396A0C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_11 0x396A10
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_12 0x396A14
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_13 0x396A18
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_14 0x396A1C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_15 0x396A20
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AW 0x396A64
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AR 0x396A68
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_HIT_AW 0x396A6C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_HIT_AR 0x396A70
+
+#define mmNIF_RTR_CTRL_1_RGL_CFG 0x396B64
+
+#define mmNIF_RTR_CTRL_1_RGL_SHIFT 0x396B68
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_0 0x396B6C
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_1 0x396B70
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_2 0x396B74
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_3 0x396B78
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_4 0x396B7C
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_5 0x396B80
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_6 0x396B84
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_7 0x396B88
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_0 0x396BAC
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_1 0x396BB0
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_2 0x396BB4
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_3 0x396BB8
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_4 0x396BBC
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_5 0x396BC0
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_6 0x396BC4
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_7 0x396BC8
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_0 0x396BEC
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_1 0x396BF0
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_2 0x396BF4
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_3 0x396BF8
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_4 0x396BFC
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_5 0x396C00
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_6 0x396C04
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_7 0x396C08
+
+#define mmNIF_RTR_CTRL_1_RGL_WDT 0x396C2C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM0_CH0_CTR_WRAP 0x396C30
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM0_CH1_CTR_WRAP 0x396C34
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM1_CH0_CTR_WRAP 0x396C38
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM1_CH1_CTR_WRAP 0x396C3C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM2_CH0_CTR_WRAP 0x396C40
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM2_CH1_CTR_WRAP 0x396C44
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM3_CH0_CTR_WRAP 0x396C48
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM3_CH1_CTR_WRAP 0x396C4C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM0_CH0_CTR_CNT 0x396C50
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM0_CH1_CTR_CNT 0x396C54
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM1_CH0_CTR_CNT 0x396C58
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM1_CH1_CTR_CNT 0x396C5C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM2_CH0_CTR_CNT 0x396C60
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM2_CH1_CTR_CNT 0x396C64
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM3_CH0_CTR_CNT 0x396C68 + +#define mmNIF_RTR_CTRL_1_E2E_AR_HBM3_CH1_CTR_CNT 0x396C6C + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM0_CH0_CTR_WRAP 0x396C70 + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM0_CH1_CTR_WRAP 0x396C74 + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM1_CH0_CTR_WRAP 0x396C78 + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM1_CH1_CTR_WRAP 0x396C7C + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM2_CH0_CTR_WRAP 0x396C80 + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM2_CH1_CTR_WRAP 0x396C84 + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM3_CH0_CTR_WRAP 0x396C88 + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM3_CH1_CTR_WRAP 0x396C8C + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM0_CH0_CTR_CNT 0x396C90 + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM0_CH1_CTR_CNT 0x396C94 + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM1_CH0_CTR_CNT 0x396C98 + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM1_CH1_CTR_CNT 0x396C9C + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM2_CH0_CTR_CNT 0x396CA0 + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM2_CH1_CTR_CNT 0x396CA4 + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM3_CH0_CTR_CNT 0x396CA8 + +#define mmNIF_RTR_CTRL_1_E2E_AW_HBM3_CH1_CTR_CNT 0x396CAC + +#define mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_0 0x396CB0 + +#define mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_1 0x396CB4 + +#define mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_2 0x396CB8 + +#define mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_3 0x396CBC + +#endif /* ASIC_REG_NIF_RTR_CTRL_1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h new file mode 100644 index 000000000..9de8442f9 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h @@ -0,0 +1,896 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
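Editorial note: the NIF_RTR_CTRL banks in this series are instances of the same RTR_CTRL prototype spaced 0x10000 apart (PERM_SEL, for example, sits at 0x396108, 0x3A6108, 0x3B6108 and 0x3C6108 for instances 1 through 4). A minimal standalone sketch of deriving any instance's register from the NIF_RTR_CTRL_1 offset; the helper name is illustrative and not part of these headers:

#include <assert.h>
#include <stdint.h>

/* Instances visible here are 0x10000 apart; 'instance' is 1-based
 * to match the register names above. Hypothetical helper. */
static uint32_t nif_rtr_ctrl_reg(uint32_t ctrl_1_reg, int instance)
{
	return ctrl_1_reg + (uint32_t)(instance - 1) * 0x10000;
}

int main(void)
{
	/* mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AW -> CTRL_3 equivalent */
	assert(nif_rtr_ctrl_reg(0x396A64, 3) == 0x3B6A64);
	return 0;
}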
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h
new file mode 100644
index 000000000..9de8442f9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_2_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_2_REGS_H_
+
+/*
+ *****************************************
+ *   NIF_RTR_CTRL_2 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_2_PERM_SEL 0x3A6108
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_0 0x3A6114
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_1 0x3A6118
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_2 0x3A611C
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_3 0x3A6120
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_4 0x3A6124
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_5 0x3A6128
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_6 0x3A612C
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_7 0x3A6130
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_8 0x3A6134
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_9 0x3A6138
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_10 0x3A613C
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_11 0x3A6140
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_12 0x3A6144
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_13 0x3A6148
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_14 0x3A614C
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_15 0x3A6150
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_16 0x3A6154
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_17 0x3A6158
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_18 0x3A615C
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_19 0x3A6160
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_20 0x3A6164
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_21 0x3A6168
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_22 0x3A616C
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_23 0x3A6170
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_24 0x3A6174
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_25 0x3A6178
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_26 0x3A617C
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_27 0x3A6180
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_0 0x3A6184
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_1 0x3A6188
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_2 0x3A618C
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_3 0x3A6190
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_4 0x3A6194
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_5 0x3A6198
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_6 0x3A619C
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_7 0x3A61A0
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_8 0x3A61A4
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_9 0x3A61A8
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_10 0x3A61AC
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_11 0x3A61B0
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_12 0x3A61B4
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_13 0x3A61B8
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_14 0x3A61BC
+#define mmNIF_RTR_CTRL_2_SCRAM_SRAM_EN 0x3A626C
+#define mmNIF_RTR_CTRL_2_RL_HBM_EN 0x3A6274
+#define mmNIF_RTR_CTRL_2_RL_HBM_SAT 0x3A6278
+#define mmNIF_RTR_CTRL_2_RL_HBM_RST 0x3A627C
+#define mmNIF_RTR_CTRL_2_RL_HBM_TIMEOUT 0x3A6280
+#define mmNIF_RTR_CTRL_2_SCRAM_HBM_EN 0x3A6284
+#define mmNIF_RTR_CTRL_2_RL_PCI_EN 0x3A6288
+#define mmNIF_RTR_CTRL_2_RL_PCI_SAT 0x3A628C
+#define mmNIF_RTR_CTRL_2_RL_PCI_RST 0x3A6290
+#define mmNIF_RTR_CTRL_2_RL_PCI_TIMEOUT 0x3A6294
+#define mmNIF_RTR_CTRL_2_RL_SRAM_EN 0x3A629C
+#define mmNIF_RTR_CTRL_2_RL_SRAM_SAT 0x3A62A0
+#define mmNIF_RTR_CTRL_2_RL_SRAM_RST 0x3A62A4
+#define mmNIF_RTR_CTRL_2_RL_SRAM_TIMEOUT 0x3A62AC
+#define mmNIF_RTR_CTRL_2_RL_SRAM_RED 0x3A62B4
+#define mmNIF_RTR_CTRL_2_E2E_HBM_EN 0x3A62EC
+#define mmNIF_RTR_CTRL_2_E2E_PCI_EN 0x3A62F0
+#define mmNIF_RTR_CTRL_2_E2E_HBM_WR_SIZE 0x3A62F4
+#define mmNIF_RTR_CTRL_2_E2E_PCI_WR_SIZE 0x3A62F8
+#define mmNIF_RTR_CTRL_2_E2E_AW_PCI_CTR_SET_EN 0x3A6404
+#define mmNIF_RTR_CTRL_2_E2E_AW_PCI_CTR_SET 0x3A6408
+#define mmNIF_RTR_CTRL_2_E2E_AW_PCI_CTR_WRAP 0x3A640C
+#define mmNIF_RTR_CTRL_2_E2E_AW_PCI_CTR_CNT 0x3A6410
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM_CTR_SET_EN 0x3A6414
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM_CTR_SET 0x3A6418
+#define mmNIF_RTR_CTRL_2_E2E_HBM_RD_SIZE 0x3A641C
+#define mmNIF_RTR_CTRL_2_E2E_PCI_RD_SIZE 0x3A6420
+#define mmNIF_RTR_CTRL_2_E2E_AR_PCI_CTR_SET_EN 0x3A6424
+#define mmNIF_RTR_CTRL_2_E2E_AR_PCI_CTR_SET 0x3A6428
+#define mmNIF_RTR_CTRL_2_E2E_AR_PCI_CTR_WRAP 0x3A642C
+#define mmNIF_RTR_CTRL_2_E2E_AR_PCI_CTR_CNT 0x3A6430
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM_CTR_SET_EN 0x3A6434
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM_CTR_SET 0x3A6438
+#define mmNIF_RTR_CTRL_2_NL_HBM_SEL_0 0x3A6450
+#define mmNIF_RTR_CTRL_2_NL_HBM_SEL_1 0x3A6454
+#define mmNIF_RTR_CTRL_2_NON_LIN_EN 0x3A6480
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_0 0x3A6500
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_1 0x3A6504
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_2 0x3A6508
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_3 0x3A650C
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_4 0x3A6510
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_0 0x3A6514
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_1 0x3A6520
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_2 0x3A6524
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_3 0x3A6528
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_4 0x3A652C
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_5 0x3A6530
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_6 0x3A6534
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_7 0x3A6538
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_8 0x3A653C
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_9 0x3A6540
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_0 0x3A6550
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_1 0x3A6554
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_2 0x3A6558
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_3 0x3A655C
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_4 0x3A6560
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_5 0x3A6564
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_6 0x3A6568
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_7 0x3A656C
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_8 0x3A6570
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_9 0x3A6574
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_10 0x3A6578
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_11 0x3A657C
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_12 0x3A6580
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_13 0x3A6584
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_14 0x3A6588
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_15 0x3A658C
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_16 0x3A6590
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_17 0x3A6594
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_18 0x3A6598
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0 0x3A65E4
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_1 0x3A65E8
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_2 0x3A65EC
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_3 0x3A65F0
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_4 0x3A65F4
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_5 0x3A65F8
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_6 0x3A65FC
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_7 0x3A6600
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_8 0x3A6604
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_9 0x3A6608
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_10 0x3A660C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_11 0x3A6610
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_12 0x3A6614
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_13 0x3A6618
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_14 0x3A661C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_15 0x3A6620
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0 0x3A6624
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_1 0x3A6628
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_2 0x3A662C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_3 0x3A6630
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_4 0x3A6634
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_5 0x3A6638
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_6 0x3A663C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_7 0x3A6640
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_8 0x3A6644
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_9 0x3A6648
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_10 0x3A664C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_11 0x3A6650
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_12 0x3A6654
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_13 0x3A6658
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_14 0x3A665C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_15 0x3A6660
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0 0x3A6664
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_1 0x3A6668
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_2 0x3A666C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_3 0x3A6670
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_4 0x3A6674
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_5 0x3A6678
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_6 0x3A667C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_7 0x3A6680
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_8 0x3A6684
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_9 0x3A6688
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_10 0x3A668C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_11 0x3A6690
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_12 0x3A6694
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_13 0x3A6698
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_14 0x3A669C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_15 0x3A66A0
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0 0x3A66A4
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_1 0x3A66A8
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_2 0x3A66AC
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_3 0x3A66B0
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_4 0x3A66B4
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_5 0x3A66B8
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_6 0x3A66BC
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_7 0x3A66C0
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_8 0x3A66C4
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_9 0x3A66C8
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_10 0x3A66CC
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_11 0x3A66D0
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_12 0x3A66D4
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_13 0x3A66D8
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_14 0x3A66DC
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_15 0x3A66E0
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_0 0x3A66E4
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_1 0x3A66E8
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_2 0x3A66EC
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_3 0x3A66F0
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_4 0x3A66F4
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_5 0x3A66F8
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_6 0x3A66FC
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_7 0x3A6700
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_8 0x3A6704
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_9 0x3A6708
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_10 0x3A670C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_11 0x3A6710
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_12 0x3A6714
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_13 0x3A6718
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_14 0x3A671C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_15 0x3A6720
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_0 0x3A6724
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_1 0x3A6728
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_2 0x3A672C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_3 0x3A6730
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_4 0x3A6734
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_5 0x3A6738
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_6 0x3A673C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_7 0x3A6740
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_8 0x3A6744
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_9 0x3A6748
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_10 0x3A674C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_11 0x3A6750
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_12 0x3A6754
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_13 0x3A6758
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_14 0x3A675C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_15 0x3A6760
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_0 0x3A6764
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_1 0x3A6768
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_2 0x3A676C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_3 0x3A6770
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_4 0x3A6774
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_5 0x3A6778
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_6 0x3A677C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_7 0x3A6780
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_8 0x3A6784
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_9 0x3A6788
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_10 0x3A678C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_11 0x3A6790
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_12 0x3A6794
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_13 0x3A6798
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_14 0x3A679C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_15 0x3A67A0
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_0 0x3A67A4
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_1 0x3A67A8
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_2 0x3A67AC
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_3 0x3A67B0
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_4 0x3A67B4
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_5 0x3A67B8
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_6 0x3A67BC
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_7 0x3A67C0
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_8 0x3A67C4
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_9 0x3A67C8
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_10 0x3A67CC
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_11 0x3A67D0
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_12 0x3A67D4
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_13 0x3A67D8
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_14 0x3A67DC
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_15 0x3A67E0
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_0 0x3A6824
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_1 0x3A6828
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_2 0x3A682C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_3 0x3A6830
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_4 0x3A6834
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_5 0x3A6838
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_6 0x3A683C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_7 0x3A6840
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_8 0x3A6844
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_9 0x3A6848
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_10 0x3A684C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_11 0x3A6850
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_12 0x3A6854
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_13 0x3A6858
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_14 0x3A685C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_15 0x3A6860
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_0 0x3A6864
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_1 0x3A6868
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_2 0x3A686C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_3 0x3A6870
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_4 0x3A6874
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_5 0x3A6878
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_6 0x3A687C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_7 0x3A6880
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_8 0x3A6884
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_9 0x3A6888
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_10 0x3A688C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_11 0x3A6890
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_12 0x3A6894
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_13 0x3A6898
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_14 0x3A689C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_15 0x3A68A0
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_0 0x3A68A4
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_1 0x3A68A8
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_2 0x3A68AC
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_3 0x3A68B0
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_4 0x3A68B4
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_5 0x3A68B8
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_6 0x3A68BC
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_7 0x3A68C0
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_8 0x3A68C4
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_9 0x3A68C8
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_10 0x3A68CC
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_11 0x3A68D0
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_12 0x3A68D4
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_13 0x3A68D8
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_14 0x3A68DC
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_15 0x3A68E0
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_0 0x3A68E4
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_1 0x3A68E8
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_2 0x3A68EC
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_3 0x3A68F0
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_4 0x3A68F4
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_5 0x3A68F8
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_6 0x3A68FC
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_7 0x3A6900
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_8 0x3A6904
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_9 0x3A6908
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_10 0x3A690C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_11 0x3A6910
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_12 0x3A6914
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_13 0x3A6918
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_14 0x3A691C
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_15 0x3A6920
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_0 0x3A6924
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_1 0x3A6928
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_2 0x3A692C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_3 0x3A6930
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_4 0x3A6934
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_5 0x3A6938
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_6 0x3A693C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_7 0x3A6940
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_8 0x3A6944
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_9 0x3A6948
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_10 0x3A694C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_11 0x3A6950
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_12 0x3A6954
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_13 0x3A6958
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_14 0x3A695C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_15 0x3A6960
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_0 0x3A6964
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_1 0x3A6968
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_2 0x3A696C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_3 0x3A6970
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_4 0x3A6974
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_5 0x3A6978
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_6 0x3A697C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_7 0x3A6980
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_8 0x3A6984
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_9 0x3A6988
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_10 0x3A698C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_11 0x3A6990
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_12 0x3A6994
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_13 0x3A6998
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_14 0x3A699C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_15 0x3A69A0
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_0 0x3A69A4
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_1 0x3A69A8
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_2 0x3A69AC
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_3 0x3A69B0
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_4 0x3A69B4
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_5 0x3A69B8
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_6 0x3A69BC
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_7 0x3A69C0
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_8 0x3A69C4
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_9 0x3A69C8
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_10 0x3A69CC
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_11 0x3A69D0
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_12 0x3A69D4
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_13 0x3A69D8
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_14 0x3A69DC
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_15 0x3A69E0
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_0 0x3A69E4
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_1 0x3A69E8
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_2 0x3A69EC
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_3 0x3A69F0
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_4 0x3A69F4
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_5 0x3A69F8
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_6 0x3A69FC
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_7 0x3A6A00
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_8 0x3A6A04
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_9 0x3A6A08
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_10 0x3A6A0C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_11 0x3A6A10
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_12 0x3A6A14
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_13 0x3A6A18
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_14 0x3A6A1C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_15 0x3A6A20
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_HIT_AW 0x3A6A64
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_HIT_AR 0x3A6A68
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_HIT_AW 0x3A6A6C
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_HIT_AR 0x3A6A70
+#define mmNIF_RTR_CTRL_2_RGL_CFG 0x3A6B64
+#define mmNIF_RTR_CTRL_2_RGL_SHIFT 0x3A6B68
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_0 0x3A6B6C
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_1 0x3A6B70
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_2 0x3A6B74
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_3 0x3A6B78
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_4 0x3A6B7C
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_5 0x3A6B80
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_6 0x3A6B84
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_7 0x3A6B88
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_0 0x3A6BAC
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_1 0x3A6BB0
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_2 0x3A6BB4
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_3 0x3A6BB8
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_4 0x3A6BBC
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_5 0x3A6BC0
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_6 0x3A6BC4
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_7 0x3A6BC8
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_0 0x3A6BEC
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_1 0x3A6BF0
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_2 0x3A6BF4
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_3 0x3A6BF8
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_4 0x3A6BFC
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_5 0x3A6C00
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_6 0x3A6C04
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_7 0x3A6C08
+#define mmNIF_RTR_CTRL_2_RGL_WDT 0x3A6C2C
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM0_CH0_CTR_WRAP 0x3A6C30
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM0_CH1_CTR_WRAP 0x3A6C34
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM1_CH0_CTR_WRAP 0x3A6C38
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM1_CH1_CTR_WRAP 0x3A6C3C
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM2_CH0_CTR_WRAP 0x3A6C40
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM2_CH1_CTR_WRAP 0x3A6C44
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM3_CH0_CTR_WRAP 0x3A6C48
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM3_CH1_CTR_WRAP 0x3A6C4C
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM0_CH0_CTR_CNT 0x3A6C50
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM0_CH1_CTR_CNT 0x3A6C54
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM1_CH0_CTR_CNT 0x3A6C58
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM1_CH1_CTR_CNT 0x3A6C5C
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM2_CH0_CTR_CNT 0x3A6C60
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM2_CH1_CTR_CNT 0x3A6C64
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM3_CH0_CTR_CNT 0x3A6C68
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM3_CH1_CTR_CNT 0x3A6C6C
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM0_CH0_CTR_WRAP 0x3A6C70
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM0_CH1_CTR_WRAP 0x3A6C74
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM1_CH0_CTR_WRAP 0x3A6C78
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM1_CH1_CTR_WRAP 0x3A6C7C
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM2_CH0_CTR_WRAP 0x3A6C80
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM2_CH1_CTR_WRAP 0x3A6C84
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM3_CH0_CTR_WRAP 0x3A6C88
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM3_CH1_CTR_WRAP 0x3A6C8C
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM0_CH0_CTR_CNT 0x3A6C90
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM0_CH1_CTR_CNT 0x3A6C94
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM1_CH0_CTR_CNT 0x3A6C98
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM1_CH1_CTR_CNT 0x3A6C9C
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM2_CH0_CTR_CNT 0x3A6CA0
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM2_CH1_CTR_CNT 0x3A6CA4
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM3_CH0_CTR_CNT 0x3A6CA8
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM3_CH1_CTR_CNT 0x3A6CAC
+#define mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_0 0x3A6CB0
+#define mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_1 0x3A6CB4
+#define mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_2 0x3A6CB8
+#define mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_3 0x3A6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_2_REGS_H_ */
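Editorial note: the RANGE_SEC_* and RANGE_PRIV_* registers in each bank form arrays of 16 entries spaced 4 bytes apart, in BASE/MASK, LOW/HIGH and AW/AR variants. A minimal standalone sketch of indexing such an array; the helper name is illustrative, not part of these headers:

#include <assert.h>
#include <stdint.h>

/* Entry i of any 16-entry range array sits at base + 4 * i. */
static uint32_t range_reg(uint32_t array_base, int i)
{
	return array_base + 4 * (uint32_t)i;
}

int main(void)
{
	/* last entry of NIF_RTR_CTRL_2's SEC_BASE_LOW_AW array */
	assert(range_reg(0x3A65E4, 15) == 0x3A6620);
	return 0;
}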
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h
new file mode 100644
index 000000000..34fd47685
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_3_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_3_REGS_H_
+
+/*
+ *****************************************
+ *   NIF_RTR_CTRL_3 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_3_PERM_SEL 0x3B6108
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_0 0x3B6114
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_1 0x3B6118
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_2 0x3B611C
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_3 0x3B6120
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_4 0x3B6124
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_5 0x3B6128
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_6 0x3B612C
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_7 0x3B6130
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_8 0x3B6134
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_9 0x3B6138
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_10 0x3B613C
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_11 0x3B6140
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_12 0x3B6144
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_13 0x3B6148
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_14 0x3B614C
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_15 0x3B6150
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_16 0x3B6154
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_17 0x3B6158
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_18 0x3B615C
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_19 0x3B6160
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_20 0x3B6164
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_21 0x3B6168
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_22 0x3B616C
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_23 0x3B6170
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_24 0x3B6174
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_25 0x3B6178
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_26 0x3B617C
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_27 0x3B6180
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_0 0x3B6184
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_1 0x3B6188
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_2 0x3B618C
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_3 0x3B6190
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_4 0x3B6194
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_5 0x3B6198
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_6 0x3B619C
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_7 0x3B61A0
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_8 0x3B61A4
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_9 0x3B61A8
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_10 0x3B61AC
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_11 0x3B61B0
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_12 0x3B61B4
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_13 0x3B61B8
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_14 0x3B61BC
+#define mmNIF_RTR_CTRL_3_SCRAM_SRAM_EN 0x3B626C
+#define mmNIF_RTR_CTRL_3_RL_HBM_EN 0x3B6274
+#define mmNIF_RTR_CTRL_3_RL_HBM_SAT 0x3B6278
+#define mmNIF_RTR_CTRL_3_RL_HBM_RST 0x3B627C
+#define mmNIF_RTR_CTRL_3_RL_HBM_TIMEOUT 0x3B6280
+#define mmNIF_RTR_CTRL_3_SCRAM_HBM_EN 0x3B6284
+#define mmNIF_RTR_CTRL_3_RL_PCI_EN 0x3B6288
+#define mmNIF_RTR_CTRL_3_RL_PCI_SAT 0x3B628C
+#define mmNIF_RTR_CTRL_3_RL_PCI_RST 0x3B6290
+#define mmNIF_RTR_CTRL_3_RL_PCI_TIMEOUT 0x3B6294
+#define mmNIF_RTR_CTRL_3_RL_SRAM_EN 0x3B629C
+#define mmNIF_RTR_CTRL_3_RL_SRAM_SAT 0x3B62A0
+#define mmNIF_RTR_CTRL_3_RL_SRAM_RST 0x3B62A4
+#define mmNIF_RTR_CTRL_3_RL_SRAM_TIMEOUT 0x3B62AC
+#define mmNIF_RTR_CTRL_3_RL_SRAM_RED 0x3B62B4
+#define mmNIF_RTR_CTRL_3_E2E_HBM_EN 0x3B62EC
+#define mmNIF_RTR_CTRL_3_E2E_PCI_EN 0x3B62F0
+#define mmNIF_RTR_CTRL_3_E2E_HBM_WR_SIZE 0x3B62F4
+#define mmNIF_RTR_CTRL_3_E2E_PCI_WR_SIZE 0x3B62F8
+#define mmNIF_RTR_CTRL_3_E2E_AW_PCI_CTR_SET_EN 0x3B6404
+#define mmNIF_RTR_CTRL_3_E2E_AW_PCI_CTR_SET 0x3B6408
+#define mmNIF_RTR_CTRL_3_E2E_AW_PCI_CTR_WRAP 0x3B640C
+#define mmNIF_RTR_CTRL_3_E2E_AW_PCI_CTR_CNT 0x3B6410
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM_CTR_SET_EN 0x3B6414
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM_CTR_SET 0x3B6418
+#define mmNIF_RTR_CTRL_3_E2E_HBM_RD_SIZE 0x3B641C
+#define mmNIF_RTR_CTRL_3_E2E_PCI_RD_SIZE 0x3B6420
+#define mmNIF_RTR_CTRL_3_E2E_AR_PCI_CTR_SET_EN 0x3B6424
+#define mmNIF_RTR_CTRL_3_E2E_AR_PCI_CTR_SET 0x3B6428
+#define mmNIF_RTR_CTRL_3_E2E_AR_PCI_CTR_WRAP 0x3B642C
+#define mmNIF_RTR_CTRL_3_E2E_AR_PCI_CTR_CNT 0x3B6430
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM_CTR_SET_EN 0x3B6434
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM_CTR_SET 0x3B6438
+#define mmNIF_RTR_CTRL_3_NL_HBM_SEL_0 0x3B6450
+#define mmNIF_RTR_CTRL_3_NL_HBM_SEL_1 0x3B6454
+#define mmNIF_RTR_CTRL_3_NON_LIN_EN 0x3B6480
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_0 0x3B6500
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_1 0x3B6504
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_2 0x3B6508
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_3 0x3B650C
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_4 0x3B6510
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_0 0x3B6514
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_1 0x3B6520
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_2 0x3B6524
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_3 0x3B6528
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_4 0x3B652C
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_5 0x3B6530
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_6 0x3B6534
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_7 0x3B6538
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_8 0x3B653C
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_9 0x3B6540
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_0 0x3B6550
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_1 0x3B6554
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_2 0x3B6558
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_3 0x3B655C
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_4 0x3B6560
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_5 0x3B6564
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_6 0x3B6568
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_7 0x3B656C
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_8 0x3B6570
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_9 0x3B6574
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_10 0x3B6578
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_11 0x3B657C
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_12 0x3B6580
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_13 0x3B6584
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_14 0x3B6588
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_15 0x3B658C
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_16 0x3B6590
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_17 0x3B6594
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_18 0x3B6598
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_0 0x3B65E4
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_1 0x3B65E8
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_2 0x3B65EC
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_3 0x3B65F0
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_4 0x3B65F4
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_5 0x3B65F8
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_6 0x3B65FC
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_7 0x3B6600
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_8 0x3B6604
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_9 0x3B6608
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_10 0x3B660C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_11 0x3B6610
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_12 0x3B6614
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_13 0x3B6618
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_14 0x3B661C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_15 0x3B6620
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_0 0x3B6624
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_1 0x3B6628
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_2 0x3B662C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_3 0x3B6630
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_4 0x3B6634
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_5 0x3B6638
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_6 0x3B663C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_7 0x3B6640
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_8 0x3B6644
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_9 0x3B6648
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_10 0x3B664C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_11 0x3B6650
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_12 0x3B6654
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_13 0x3B6658
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_14 0x3B665C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_15 0x3B6660
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_0 0x3B6664
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_1 0x3B6668
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_2 0x3B666C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_3 0x3B6670
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_4 0x3B6674
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_5 0x3B6678
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_6 0x3B667C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_7 0x3B6680
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_8 0x3B6684
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_9 0x3B6688
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_10 0x3B668C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_11 0x3B6690
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_12 0x3B6694
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_13 0x3B6698
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_14 0x3B669C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_15 0x3B66A0
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_0 0x3B66A4
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_1 0x3B66A8
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_2 0x3B66AC
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_3 0x3B66B0
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_4 0x3B66B4
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_5 0x3B66B8
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_6 0x3B66BC
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_7 0x3B66C0
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_8 0x3B66C4
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_9 0x3B66C8
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_10 0x3B66CC
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_11 0x3B66D0
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_12 0x3B66D4
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_13 0x3B66D8
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_14 0x3B66DC
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_15 0x3B66E0
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_0 0x3B66E4
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_1 0x3B66E8
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_2 0x3B66EC
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_3 0x3B66F0
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_4 0x3B66F4
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_5 0x3B66F8
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_6 0x3B66FC
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_7 0x3B6700
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_8 0x3B6704
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_9 0x3B6708
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_10 0x3B670C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_11 0x3B6710
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_12 0x3B6714
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_13 0x3B6718
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_14 0x3B671C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_15 0x3B6720
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_0 0x3B6724
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_1 0x3B6728
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_2 0x3B672C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_3 0x3B6730
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_4 0x3B6734
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_5 0x3B6738
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_6 0x3B673C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_7 0x3B6740
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_8 0x3B6744
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_9 0x3B6748
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_10 0x3B674C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_11 0x3B6750
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_12 0x3B6754
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_13 0x3B6758
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_14 0x3B675C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_15 0x3B6760
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_0 0x3B6764
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_1 0x3B6768
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_2 0x3B676C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_3 0x3B6770
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_4 0x3B6774
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_5 0x3B6778
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_6 0x3B677C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_7 0x3B6780
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_8 0x3B6784
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_9 0x3B6788
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_10 0x3B678C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_11 0x3B6790
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_12 0x3B6794
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_13 0x3B6798
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_14 0x3B679C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_15 0x3B67A0
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_0 0x3B67A4
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_1 0x3B67A8
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_2 0x3B67AC
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_3 0x3B67B0
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_4 0x3B67B4
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_5 0x3B67B8
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_6 0x3B67BC
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_7 0x3B67C0
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_8 0x3B67C4
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_9 0x3B67C8
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_10 0x3B67CC
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_11 0x3B67D0
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_12 0x3B67D4
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_13 0x3B67D8
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_14 0x3B67DC
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_15 0x3B67E0
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_0 0x3B6824
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_1 0x3B6828
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_2 0x3B682C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_3 0x3B6830
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_4 0x3B6834
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_5 0x3B6838
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_6 0x3B683C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_7 0x3B6840
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_8 0x3B6844
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_9 0x3B6848
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_10 0x3B684C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_11 0x3B6850
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_12 0x3B6854
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_13 0x3B6858
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_14 0x3B685C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_15 0x3B6860
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_0 0x3B6864
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_1 0x3B6868
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_2 0x3B686C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_3 0x3B6870
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_4 0x3B6874
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_5 0x3B6878
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_6 0x3B687C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_7 0x3B6880
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_8 0x3B6884
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_9 0x3B6888
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_10 0x3B688C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_11 0x3B6890
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_12 0x3B6894
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_13 0x3B6898
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_14 0x3B689C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_15 0x3B68A0
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_0 0x3B68A4
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_1 0x3B68A8
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_2 0x3B68AC
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_3 0x3B68B0
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_4 0x3B68B4
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_5 0x3B68B8
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_6 0x3B68BC
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_7 0x3B68C0
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_8 0x3B68C4
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_9 0x3B68C8
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_10 0x3B68CC
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_11 0x3B68D0
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_12 0x3B68D4
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_13 0x3B68D8
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_14 0x3B68DC
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_15 0x3B68E0
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_0 0x3B68E4
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_1 0x3B68E8
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_2 0x3B68EC
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_3 0x3B68F0
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_4 0x3B68F4
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_5 0x3B68F8
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_6 0x3B68FC
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_7 0x3B6900
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_8 0x3B6904
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_9 0x3B6908
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_10 0x3B690C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_11 0x3B6910
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_12 0x3B6914
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_13 0x3B6918
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_14 0x3B691C
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_15 0x3B6920
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_0 0x3B6924
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_1 0x3B6928
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_2 0x3B692C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_3 0x3B6930
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_4 0x3B6934
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_5 0x3B6938
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_6 0x3B693C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_7 0x3B6940
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_8 0x3B6944
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_9 0x3B6948
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_10 0x3B694C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_11 0x3B6950
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_12 0x3B6954
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_13 0x3B6958
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_14 0x3B695C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_15 0x3B6960
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_0 0x3B6964
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_1 0x3B6968
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_2 0x3B696C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_3 0x3B6970
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_4 0x3B6974
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_5 0x3B6978
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_6 0x3B697C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_7 0x3B6980
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_8 0x3B6984
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_9 0x3B6988
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_10 0x3B698C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_11 0x3B6990
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_12 0x3B6994
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_13 0x3B6998
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_14 0x3B699C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_15 0x3B69A0
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_0 0x3B69A4
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_1 0x3B69A8
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_2 0x3B69AC
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_3 0x3B69B0
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_4 0x3B69B4
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_5 0x3B69B8
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_6 0x3B69BC
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_7 0x3B69C0
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_8 0x3B69C4
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_9 0x3B69C8
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_10 0x3B69CC
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_11 0x3B69D0
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_12 0x3B69D4
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_13 0x3B69D8
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_14 0x3B69DC
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_15 0x3B69E0
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_0 0x3B69E4
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_1 0x3B69E8
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_2 0x3B69EC
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_3 0x3B69F0
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_4 0x3B69F4
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_5 0x3B69F8
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_6 0x3B69FC
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_7 0x3B6A00
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_8 0x3B6A04
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_9 0x3B6A08
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_10 0x3B6A0C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_11 0x3B6A10
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_12 0x3B6A14
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_13 0x3B6A18
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_14 0x3B6A1C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_15 0x3B6A20
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_HIT_AW 0x3B6A64
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_HIT_AR 0x3B6A68
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_HIT_AW 0x3B6A6C
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_HIT_AR 0x3B6A70
+#define mmNIF_RTR_CTRL_3_RGL_CFG 0x3B6B64
+#define mmNIF_RTR_CTRL_3_RGL_SHIFT 0x3B6B68
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_0 0x3B6B6C
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_1 0x3B6B70
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_2 0x3B6B74
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_3 0x3B6B78
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_4 0x3B6B7C
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_5 0x3B6B80
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_6 0x3B6B84
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_7 0x3B6B88
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_0 0x3B6BAC
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_1 0x3B6BB0
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_2 0x3B6BB4
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_3 0x3B6BB8
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_4 0x3B6BBC
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_5 0x3B6BC0
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_6 0x3B6BC4
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_7 0x3B6BC8
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_0 0x3B6BEC
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_1 0x3B6BF0
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_2 0x3B6BF4
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_3 0x3B6BF8
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_4 0x3B6BFC
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_5 0x3B6C00
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_6 0x3B6C04
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_7 0x3B6C08
+#define mmNIF_RTR_CTRL_3_RGL_WDT 0x3B6C2C
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM0_CH0_CTR_WRAP 0x3B6C30
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM0_CH1_CTR_WRAP 0x3B6C34
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM1_CH0_CTR_WRAP 0x3B6C38
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM1_CH1_CTR_WRAP 0x3B6C3C
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM2_CH0_CTR_WRAP 0x3B6C40
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM2_CH1_CTR_WRAP 0x3B6C44
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM3_CH0_CTR_WRAP 0x3B6C48
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM3_CH1_CTR_WRAP 0x3B6C4C
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM0_CH0_CTR_CNT 0x3B6C50
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM0_CH1_CTR_CNT 0x3B6C54
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM1_CH0_CTR_CNT 0x3B6C58
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM1_CH1_CTR_CNT 0x3B6C5C
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM2_CH0_CTR_CNT 0x3B6C60
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM2_CH1_CTR_CNT 0x3B6C64
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM3_CH0_CTR_CNT 0x3B6C68
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM3_CH1_CTR_CNT 0x3B6C6C
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM0_CH0_CTR_WRAP 0x3B6C70
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM0_CH1_CTR_WRAP 0x3B6C74
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM1_CH0_CTR_WRAP 0x3B6C78
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM1_CH1_CTR_WRAP 0x3B6C7C
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM2_CH0_CTR_WRAP 0x3B6C80
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM2_CH1_CTR_WRAP 0x3B6C84
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM3_CH0_CTR_WRAP 0x3B6C88
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM3_CH1_CTR_WRAP 0x3B6C8C
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM0_CH0_CTR_CNT 0x3B6C90
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM0_CH1_CTR_CNT 0x3B6C94
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM1_CH0_CTR_CNT 0x3B6C98
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM1_CH1_CTR_CNT 0x3B6C9C
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM2_CH0_CTR_CNT 0x3B6CA0
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM2_CH1_CTR_CNT 0x3B6CA4
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM3_CH0_CTR_CNT 0x3B6CA8
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM3_CH1_CTR_CNT 0x3B6CAC
+#define mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_0 0x3B6CB0
+#define mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_1 0x3B6CB4
+#define mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_2 0x3B6CB8
+#define mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_3 0x3B6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_3_REGS_H_ */
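Editorial note: the E2E counter registers at the end of each bank repeat one layout: eight registers ordered HBM0_CH0 through HBM3_CH1, 4 bytes apart, once for WRAP and once for CNT, for both the AR and AW directions. A standalone sketch, with a hypothetical helper name, of computing one of them:

#include <assert.h>
#include <stdint.h>

/* Counters are ordered HBM0_CH0..HBM3_CH1, 4 bytes apart. */
static uint32_t e2e_ctr_reg(uint32_t hbm0_ch0_reg, int hbm, int ch)
{
	return hbm0_ch0_reg + (uint32_t)(hbm * 2 + ch) * 4;
}

int main(void)
{
	/* NIF_RTR_CTRL_3: AR counter value for HBM3, channel 1 */
	assert(e2e_ctr_reg(0x3B6C50, 3, 1) == 0x3B6C6C);
	return 0;
}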
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_4_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_4_REGS_H_
+
+/*
+ *****************************************
+ *   NIF_RTR_CTRL_4 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_4_PERM_SEL 0x3C6108
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_0 0x3C6114
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_1 0x3C6118
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_2 0x3C611C
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_3 0x3C6120
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_4 0x3C6124
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_5 0x3C6128
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_6 0x3C612C
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_7 0x3C6130
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_8 0x3C6134
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_9 0x3C6138
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_10 0x3C613C
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_11 0x3C6140
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_12 0x3C6144
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_13 0x3C6148
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_14 0x3C614C
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_15 0x3C6150
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_16 0x3C6154
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_17 0x3C6158
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_18 0x3C615C
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_19 0x3C6160
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_20 0x3C6164
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_21 0x3C6168
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_22 0x3C616C
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_23 0x3C6170
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_24 0x3C6174
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_25 0x3C6178
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_26 0x3C617C
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_27 0x3C6180
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_0 0x3C6184
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_1 0x3C6188
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_2 0x3C618C
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_3 0x3C6190
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_4 0x3C6194
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_5 0x3C6198
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_6 0x3C619C
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_7 0x3C61A0
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_8 0x3C61A4
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_9 0x3C61A8
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_10 0x3C61AC
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_11 0x3C61B0
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_12 0x3C61B4
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_13 0x3C61B8
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_14 0x3C61BC
+#define mmNIF_RTR_CTRL_4_SCRAM_SRAM_EN 0x3C626C
+#define mmNIF_RTR_CTRL_4_RL_HBM_EN 0x3C6274
+#define mmNIF_RTR_CTRL_4_RL_HBM_SAT 0x3C6278
+#define mmNIF_RTR_CTRL_4_RL_HBM_RST 0x3C627C
+#define mmNIF_RTR_CTRL_4_RL_HBM_TIMEOUT 0x3C6280
+#define mmNIF_RTR_CTRL_4_SCRAM_HBM_EN 0x3C6284
+#define mmNIF_RTR_CTRL_4_RL_PCI_EN 0x3C6288
+#define mmNIF_RTR_CTRL_4_RL_PCI_SAT 0x3C628C
+#define mmNIF_RTR_CTRL_4_RL_PCI_RST 0x3C6290
+#define mmNIF_RTR_CTRL_4_RL_PCI_TIMEOUT 0x3C6294
+#define mmNIF_RTR_CTRL_4_RL_SRAM_EN 0x3C629C
+#define mmNIF_RTR_CTRL_4_RL_SRAM_SAT 0x3C62A0
+#define mmNIF_RTR_CTRL_4_RL_SRAM_RST 0x3C62A4
+#define mmNIF_RTR_CTRL_4_RL_SRAM_TIMEOUT 0x3C62AC
+#define mmNIF_RTR_CTRL_4_RL_SRAM_RED 0x3C62B4
+#define mmNIF_RTR_CTRL_4_E2E_HBM_EN 0x3C62EC
+#define mmNIF_RTR_CTRL_4_E2E_PCI_EN 0x3C62F0
+#define mmNIF_RTR_CTRL_4_E2E_HBM_WR_SIZE 0x3C62F4
+#define mmNIF_RTR_CTRL_4_E2E_PCI_WR_SIZE 0x3C62F8
+#define mmNIF_RTR_CTRL_4_E2E_AW_PCI_CTR_SET_EN 0x3C6404
+#define mmNIF_RTR_CTRL_4_E2E_AW_PCI_CTR_SET 0x3C6408
+#define mmNIF_RTR_CTRL_4_E2E_AW_PCI_CTR_WRAP 0x3C640C
+#define mmNIF_RTR_CTRL_4_E2E_AW_PCI_CTR_CNT 0x3C6410
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM_CTR_SET_EN 0x3C6414
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM_CTR_SET 0x3C6418
+#define mmNIF_RTR_CTRL_4_E2E_HBM_RD_SIZE 0x3C641C
+#define mmNIF_RTR_CTRL_4_E2E_PCI_RD_SIZE 0x3C6420
+#define mmNIF_RTR_CTRL_4_E2E_AR_PCI_CTR_SET_EN 0x3C6424
+#define mmNIF_RTR_CTRL_4_E2E_AR_PCI_CTR_SET 0x3C6428
+#define mmNIF_RTR_CTRL_4_E2E_AR_PCI_CTR_WRAP 0x3C642C
+#define mmNIF_RTR_CTRL_4_E2E_AR_PCI_CTR_CNT 0x3C6430
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM_CTR_SET_EN 0x3C6434
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM_CTR_SET 0x3C6438
+#define mmNIF_RTR_CTRL_4_NL_HBM_SEL_0 0x3C6450
+#define mmNIF_RTR_CTRL_4_NL_HBM_SEL_1 0x3C6454
+#define mmNIF_RTR_CTRL_4_NON_LIN_EN 0x3C6480
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_0 0x3C6500
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_1 0x3C6504
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_2 0x3C6508
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_3 0x3C650C
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_4 0x3C6510
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_0 0x3C6514
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_1 0x3C6520
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_2 0x3C6524
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_3 0x3C6528
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_4 0x3C652C
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_5 0x3C6530
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_6 0x3C6534
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_7 0x3C6538
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_8 0x3C653C
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_9 0x3C6540
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_0 0x3C6550
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_1 0x3C6554
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_2 0x3C6558
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_3 0x3C655C
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_4 0x3C6560
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_5 0x3C6564
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_6 0x3C6568
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_7 0x3C656C
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_8 0x3C6570
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_9 0x3C6574
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_10 0x3C6578
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_11 0x3C657C
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_12 0x3C6580
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_13 0x3C6584
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_14 0x3C6588
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_15 0x3C658C
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_16 0x3C6590
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_17 0x3C6594
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_18 0x3C6598
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0 0x3C65E4
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_1 0x3C65E8
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_2 0x3C65EC
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_3 0x3C65F0
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_4 0x3C65F4
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_5 0x3C65F8
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_6 0x3C65FC
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_7 0x3C6600
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_8 0x3C6604
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_9 0x3C6608
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_10 0x3C660C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_11 0x3C6610
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_12 0x3C6614
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_13 0x3C6618
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_14 0x3C661C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_15 0x3C6620
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_0 0x3C6624
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_1 0x3C6628
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_2 0x3C662C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_3 0x3C6630
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_4 0x3C6634
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_5 0x3C6638
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_6 0x3C663C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_7 0x3C6640
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_8 0x3C6644
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_9 0x3C6648
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_10 0x3C664C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_11 0x3C6650
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_12 0x3C6654
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_13 0x3C6658
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_14 0x3C665C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_15 0x3C6660
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_0 0x3C6664
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_1 0x3C6668
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_2 0x3C666C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_3 0x3C6670
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_4 0x3C6674
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_5 0x3C6678
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_6 0x3C667C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_7 0x3C6680
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_8 0x3C6684
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_9 0x3C6688
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_10 0x3C668C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_11 0x3C6690
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_12 0x3C6694
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_13 0x3C6698
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_14 0x3C669C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_15 0x3C66A0
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_0 0x3C66A4
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_1 0x3C66A8
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_2 0x3C66AC
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_3 0x3C66B0
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_4 0x3C66B4
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_5 0x3C66B8
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_6 0x3C66BC
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_7 0x3C66C0
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_8 0x3C66C4
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_9 0x3C66C8
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_10 0x3C66CC
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_11 0x3C66D0
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_12 0x3C66D4
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_13 0x3C66D8
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_14 0x3C66DC
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_15 0x3C66E0
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_0 0x3C66E4
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_1 0x3C66E8
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_2 0x3C66EC
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_3 0x3C66F0
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_4 0x3C66F4
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_5 0x3C66F8
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_6 0x3C66FC
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_7 0x3C6700
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_8 0x3C6704
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_9 0x3C6708
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_10 0x3C670C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_11 0x3C6710
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_12 0x3C6714
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_13 0x3C6718
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_14 0x3C671C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_15 0x3C6720
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_0 0x3C6724
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_1 0x3C6728
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_2 0x3C672C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_3 0x3C6730
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_4 0x3C6734
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_5 0x3C6738
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_6 0x3C673C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_7 0x3C6740
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_8 0x3C6744
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_9 0x3C6748
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_10 0x3C674C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_11 0x3C6750
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_12 0x3C6754
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_13 0x3C6758
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_14 0x3C675C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_15 0x3C6760
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_0 0x3C6764
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_1 0x3C6768
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_2 0x3C676C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_3 0x3C6770
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_4 0x3C6774
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_5 0x3C6778
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_6 0x3C677C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_7 0x3C6780
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_8 0x3C6784
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_9 0x3C6788
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_10 0x3C678C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_11 0x3C6790
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_12 0x3C6794
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_13 0x3C6798
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_14 0x3C679C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_15 0x3C67A0
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_0 0x3C67A4
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_1 0x3C67A8
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_2 0x3C67AC
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_3 0x3C67B0
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_4 0x3C67B4
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_5 0x3C67B8
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_6 0x3C67BC
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_7 0x3C67C0
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_8 0x3C67C4
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_9 0x3C67C8
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_10 0x3C67CC
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_11 0x3C67D0
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_12 0x3C67D4
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_13 0x3C67D8
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_14 0x3C67DC
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_15 0x3C67E0
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_0 0x3C6824
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_1 0x3C6828
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_2 0x3C682C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_3 0x3C6830
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_4 0x3C6834
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_5 0x3C6838
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_6 0x3C683C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_7 0x3C6840
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_8 0x3C6844
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_9 0x3C6848
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_10 0x3C684C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_11 0x3C6850
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_12 0x3C6854
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_13 0x3C6858
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_14 0x3C685C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_15 0x3C6860
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_0 0x3C6864
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_1 0x3C6868
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_2 0x3C686C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_3 0x3C6870
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_4 0x3C6874
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_5 0x3C6878
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_6 0x3C687C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_7 0x3C6880
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_8 0x3C6884
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_9 0x3C6888
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_10 0x3C688C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_11 0x3C6890
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_12 0x3C6894
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_13 0x3C6898
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_14 0x3C689C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_15 0x3C68A0
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_0 0x3C68A4
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_1 0x3C68A8
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_2 0x3C68AC
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_3 0x3C68B0
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_4 0x3C68B4
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_5 0x3C68B8
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_6 0x3C68BC
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_7 0x3C68C0
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_8 0x3C68C4
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_9 0x3C68C8
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_10 0x3C68CC
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_11 0x3C68D0
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_12 0x3C68D4
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_13 0x3C68D8
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_14 0x3C68DC
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_15 0x3C68E0
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_0 0x3C68E4
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_1 0x3C68E8
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_2 0x3C68EC
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_3 0x3C68F0
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_4 0x3C68F4
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_5 0x3C68F8
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_6 0x3C68FC
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_7 0x3C6900
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_8 0x3C6904
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_9 0x3C6908
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_10 0x3C690C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_11 0x3C6910
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_12 0x3C6914
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_13 0x3C6918
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_14 0x3C691C
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_15 0x3C6920
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_0 0x3C6924
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_1 0x3C6928
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_2 0x3C692C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_3 0x3C6930
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_4 0x3C6934
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_5 0x3C6938
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_6 0x3C693C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_7 0x3C6940
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_8 0x3C6944
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_9 0x3C6948
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_10 0x3C694C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_11 0x3C6950
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_12 0x3C6954
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_13 0x3C6958
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_14 0x3C695C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_15 0x3C6960
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_0 0x3C6964
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_1 0x3C6968
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_2 0x3C696C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_3 0x3C6970
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_4 0x3C6974
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_5 0x3C6978
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_6 0x3C697C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_7 0x3C6980
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_8 0x3C6984
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_9 0x3C6988
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_10 0x3C698C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_11 0x3C6990
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_12 0x3C6994
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_13 0x3C6998
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_14 0x3C699C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_15 0x3C69A0
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_0 0x3C69A4
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_1 0x3C69A8
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_2 0x3C69AC
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_3 0x3C69B0
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_4 0x3C69B4
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_5 0x3C69B8
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_6 0x3C69BC
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_7 0x3C69C0
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_8 0x3C69C4
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_9 0x3C69C8
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_10 0x3C69CC
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_11 0x3C69D0
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_12 0x3C69D4
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_13 0x3C69D8
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_14 0x3C69DC
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_15 0x3C69E0
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_0 0x3C69E4
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_1 0x3C69E8
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_2 0x3C69EC
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_3 0x3C69F0
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_4 0x3C69F4
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_5 0x3C69F8
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_6 0x3C69FC
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_7 0x3C6A00
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_8 0x3C6A04
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_9 0x3C6A08
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_10 0x3C6A0C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_11 0x3C6A10
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_12 0x3C6A14
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_13 0x3C6A18
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_14 0x3C6A1C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_15 0x3C6A20
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_HIT_AW 0x3C6A64
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_HIT_AR 0x3C6A68
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_HIT_AW 0x3C6A6C
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_HIT_AR 0x3C6A70
+#define mmNIF_RTR_CTRL_4_RGL_CFG 0x3C6B64
+#define mmNIF_RTR_CTRL_4_RGL_SHIFT 0x3C6B68
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_0 0x3C6B6C
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_1 0x3C6B70
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_2 0x3C6B74
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_3 0x3C6B78
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_4 0x3C6B7C
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_5 0x3C6B80
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_6 0x3C6B84
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_7 0x3C6B88
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_0 0x3C6BAC
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_1 0x3C6BB0
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_2 0x3C6BB4
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_3 0x3C6BB8
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_4 0x3C6BBC
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_5 0x3C6BC0
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_6 0x3C6BC4
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_7 0x3C6BC8
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_0 0x3C6BEC
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_1 0x3C6BF0
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_2 0x3C6BF4
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_3 0x3C6BF8
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_4 0x3C6BFC
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_5 0x3C6C00
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_6 0x3C6C04
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_7 0x3C6C08
+#define mmNIF_RTR_CTRL_4_RGL_WDT 0x3C6C2C
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM0_CH0_CTR_WRAP 0x3C6C30
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM0_CH1_CTR_WRAP 0x3C6C34
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM1_CH0_CTR_WRAP 0x3C6C38
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM1_CH1_CTR_WRAP 0x3C6C3C
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM2_CH0_CTR_WRAP 0x3C6C40
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM2_CH1_CTR_WRAP 0x3C6C44
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM3_CH0_CTR_WRAP 0x3C6C48
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM3_CH1_CTR_WRAP 0x3C6C4C
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM0_CH0_CTR_CNT 0x3C6C50
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM0_CH1_CTR_CNT 0x3C6C54
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM1_CH0_CTR_CNT 0x3C6C58
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM1_CH1_CTR_CNT 0x3C6C5C
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM2_CH0_CTR_CNT 0x3C6C60
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM2_CH1_CTR_CNT 0x3C6C64
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM3_CH0_CTR_CNT 0x3C6C68
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM3_CH1_CTR_CNT 0x3C6C6C
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM0_CH0_CTR_WRAP 0x3C6C70
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM0_CH1_CTR_WRAP 0x3C6C74
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM1_CH0_CTR_WRAP 0x3C6C78
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM1_CH1_CTR_WRAP 0x3C6C7C
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM2_CH0_CTR_WRAP 0x3C6C80
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM2_CH1_CTR_WRAP 0x3C6C84
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM3_CH0_CTR_WRAP 0x3C6C88
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM3_CH1_CTR_WRAP 0x3C6C8C
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM0_CH0_CTR_CNT 0x3C6C90
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM0_CH1_CTR_CNT 0x3C6C94
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM1_CH0_CTR_CNT 0x3C6C98
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM1_CH1_CTR_CNT 0x3C6C9C
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM2_CH0_CTR_CNT 0x3C6CA0
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM2_CH1_CTR_CNT 0x3C6CA4
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM3_CH0_CTR_CNT 0x3C6CA8
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM3_CH1_CTR_CNT 0x3C6CAC
+#define mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_0 0x3C6CB0
+#define mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_1 0x3C6CB4
+#define mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_2 0x3C6CB8
+#define mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_3 0x3C6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_4_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h
new file mode 100644
index 000000000..95486b7dd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_5_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_5_REGS_H_
+
+/*
+ *****************************************
+ *   NIF_RTR_CTRL_5 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_5_PERM_SEL 0x3D6108
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_0 0x3D6114
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_1 0x3D6118
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_2 0x3D611C
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_3 0x3D6120
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_4 0x3D6124
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_5 0x3D6128
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_6 0x3D612C
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_7 0x3D6130
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_8 0x3D6134
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_9 0x3D6138
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_10 0x3D613C
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_11 0x3D6140
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_12 0x3D6144
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_13 0x3D6148
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_14 0x3D614C
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_15 0x3D6150
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_16 0x3D6154
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_17 0x3D6158
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_18 0x3D615C
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_19 0x3D6160
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_20 0x3D6164
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_21 0x3D6168
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_22 0x3D616C
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_23 0x3D6170
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_24 0x3D6174
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_25 0x3D6178
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_26 0x3D617C
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_27 0x3D6180
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_0 0x3D6184
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_1 0x3D6188
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_2 0x3D618C
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_3 0x3D6190
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_4 0x3D6194
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_5 0x3D6198
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_6 0x3D619C
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_7 0x3D61A0
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_8 0x3D61A4
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_9 0x3D61A8
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_10 0x3D61AC
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_11 0x3D61B0
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_12 0x3D61B4
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_13 0x3D61B8
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_14 0x3D61BC
+#define mmNIF_RTR_CTRL_5_SCRAM_SRAM_EN 0x3D626C
+#define mmNIF_RTR_CTRL_5_RL_HBM_EN 0x3D6274
+#define mmNIF_RTR_CTRL_5_RL_HBM_SAT 0x3D6278
+#define mmNIF_RTR_CTRL_5_RL_HBM_RST 0x3D627C
+#define mmNIF_RTR_CTRL_5_RL_HBM_TIMEOUT 0x3D6280
+#define mmNIF_RTR_CTRL_5_SCRAM_HBM_EN 0x3D6284
+#define mmNIF_RTR_CTRL_5_RL_PCI_EN 0x3D6288
+#define mmNIF_RTR_CTRL_5_RL_PCI_SAT 0x3D628C
+#define mmNIF_RTR_CTRL_5_RL_PCI_RST 0x3D6290
+#define mmNIF_RTR_CTRL_5_RL_PCI_TIMEOUT 0x3D6294
+#define mmNIF_RTR_CTRL_5_RL_SRAM_EN 0x3D629C
+#define mmNIF_RTR_CTRL_5_RL_SRAM_SAT 0x3D62A0
+#define mmNIF_RTR_CTRL_5_RL_SRAM_RST 0x3D62A4
+#define mmNIF_RTR_CTRL_5_RL_SRAM_TIMEOUT 0x3D62AC
+#define mmNIF_RTR_CTRL_5_RL_SRAM_RED 0x3D62B4
+#define mmNIF_RTR_CTRL_5_E2E_HBM_EN 0x3D62EC
+#define mmNIF_RTR_CTRL_5_E2E_PCI_EN 0x3D62F0
+#define mmNIF_RTR_CTRL_5_E2E_HBM_WR_SIZE 0x3D62F4
+#define mmNIF_RTR_CTRL_5_E2E_PCI_WR_SIZE 0x3D62F8
+#define mmNIF_RTR_CTRL_5_E2E_AW_PCI_CTR_SET_EN 0x3D6404
+#define mmNIF_RTR_CTRL_5_E2E_AW_PCI_CTR_SET 0x3D6408
+#define mmNIF_RTR_CTRL_5_E2E_AW_PCI_CTR_WRAP 0x3D640C
+#define mmNIF_RTR_CTRL_5_E2E_AW_PCI_CTR_CNT 0x3D6410
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM_CTR_SET_EN 0x3D6414
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM_CTR_SET 0x3D6418
+#define mmNIF_RTR_CTRL_5_E2E_HBM_RD_SIZE 0x3D641C
+#define mmNIF_RTR_CTRL_5_E2E_PCI_RD_SIZE 0x3D6420
+#define mmNIF_RTR_CTRL_5_E2E_AR_PCI_CTR_SET_EN 0x3D6424
+#define mmNIF_RTR_CTRL_5_E2E_AR_PCI_CTR_SET 0x3D6428
+#define mmNIF_RTR_CTRL_5_E2E_AR_PCI_CTR_WRAP 0x3D642C
+#define mmNIF_RTR_CTRL_5_E2E_AR_PCI_CTR_CNT 0x3D6430
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM_CTR_SET_EN 0x3D6434
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM_CTR_SET 0x3D6438
+#define mmNIF_RTR_CTRL_5_NL_HBM_SEL_0 0x3D6450
+#define mmNIF_RTR_CTRL_5_NL_HBM_SEL_1 0x3D6454
+#define mmNIF_RTR_CTRL_5_NON_LIN_EN 0x3D6480
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_0 0x3D6500
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_1 0x3D6504
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_2 0x3D6508
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_3 0x3D650C
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_4 0x3D6510
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_0 0x3D6514
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_1 0x3D6520
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_2 0x3D6524
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_3 0x3D6528
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_4 0x3D652C
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_5 0x3D6530
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_6 0x3D6534
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_7 0x3D6538
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_8 0x3D653C
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_9 0x3D6540
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_0 0x3D6550
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_1 0x3D6554
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_2 0x3D6558
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_3 0x3D655C
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_4 0x3D6560
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_5 0x3D6564
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_6 0x3D6568
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_7 0x3D656C
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_8 0x3D6570
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_9 0x3D6574
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_10 0x3D6578
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_11 0x3D657C
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_12 0x3D6580
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_13 0x3D6584
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_14 0x3D6588
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_15 0x3D658C
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_16 0x3D6590
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_17 0x3D6594
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_18 0x3D6598
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0 0x3D65E4
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_1 0x3D65E8
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_2 0x3D65EC
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_3 0x3D65F0
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_4 0x3D65F4
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_5 0x3D65F8
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_6 0x3D65FC
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_7 0x3D6600
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_8 0x3D6604
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_9 0x3D6608
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_10 0x3D660C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_11 0x3D6610
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_12 0x3D6614
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_13 0x3D6618
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_14 0x3D661C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_15 0x3D6620
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0 0x3D6624
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_1 0x3D6628
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_2 0x3D662C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_3 0x3D6630
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_4 0x3D6634
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_5 0x3D6638
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_6 0x3D663C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_7 0x3D6640
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_8 0x3D6644
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_9 0x3D6648
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_10 0x3D664C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_11 0x3D6650
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_12 0x3D6654
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_13 0x3D6658
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_14 0x3D665C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_15 0x3D6660
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0 0x3D6664
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_1 0x3D6668
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_2 0x3D666C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_3 0x3D6670
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_4 0x3D6674
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_5 0x3D6678
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_6 0x3D667C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_7 0x3D6680
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_8 0x3D6684
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_9 0x3D6688
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_10 0x3D668C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_11 0x3D6690
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_12 0x3D6694
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_13 0x3D6698
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_14 0x3D669C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_15 0x3D66A0
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0 0x3D66A4
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_1 0x3D66A8
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_2 0x3D66AC
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_3 0x3D66B0
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_4 0x3D66B4
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_5 0x3D66B8
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_6 0x3D66BC
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_7 0x3D66C0
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_8 0x3D66C4
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_9 0x3D66C8
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_10 0x3D66CC
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_11 0x3D66D0
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_12 0x3D66D4
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_13 0x3D66D8
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_14 0x3D66DC
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_15 0x3D66E0
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_0 0x3D66E4
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_1 0x3D66E8
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_2 0x3D66EC
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_3 0x3D66F0
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_4 0x3D66F4
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_5 0x3D66F8
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_6 0x3D66FC
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_7 0x3D6700
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_8 0x3D6704
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_9 0x3D6708
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_10 0x3D670C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_11 0x3D6710
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_12 0x3D6714
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_13 0x3D6718
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_14 0x3D671C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_15 0x3D6720
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_0 0x3D6724
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_1 0x3D6728
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_2 0x3D672C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_3 0x3D6730
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_4 0x3D6734
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_5 0x3D6738
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_6 0x3D673C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_7 0x3D6740
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_8 0x3D6744
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_9 0x3D6748
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_10 0x3D674C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_11 0x3D6750
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_12 0x3D6754
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_13 0x3D6758
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_14 0x3D675C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_15 0x3D6760
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_0 0x3D6764
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_1 0x3D6768
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_2 0x3D676C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_3 0x3D6770
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_4 0x3D6774
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_5 0x3D6778
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_6 0x3D677C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_7 0x3D6780
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_8 0x3D6784
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_9 0x3D6788
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_10 0x3D678C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_11 0x3D6790
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_12 0x3D6794
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_13 0x3D6798
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_14 0x3D679C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_15 0x3D67A0
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_0 0x3D67A4
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_1 0x3D67A8
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_2 0x3D67AC
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_3 0x3D67B0
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_4 0x3D67B4
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_5 0x3D67B8
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_6 0x3D67BC
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_7 0x3D67C0
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_8 0x3D67C4
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_9 0x3D67C8
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_10 0x3D67CC
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_11 0x3D67D0
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_12 0x3D67D4
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_13 0x3D67D8
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_14 0x3D67DC
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_15 0x3D67E0
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0 0x3D6824
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_1 0x3D6828
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_2 0x3D682C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_3 0x3D6830
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_4 0x3D6834
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_5 0x3D6838
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_6 0x3D683C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_7 0x3D6840
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_8 0x3D6844
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_9 0x3D6848
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_10 0x3D684C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_11 0x3D6850
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_12 0x3D6854
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_13 0x3D6858
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_14 0x3D685C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_15 0x3D6860
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0 0x3D6864
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_1 0x3D6868
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_2 0x3D686C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_3 0x3D6870
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_4 0x3D6874
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_5 0x3D6878
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_6 0x3D687C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_7 0x3D6880
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_8 0x3D6884
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_9 0x3D6888
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_10 0x3D688C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_11 0x3D6890
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_12 0x3D6894
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_13 0x3D6898
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_14 0x3D689C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_15 0x3D68A0
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0 0x3D68A4
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_1 0x3D68A8
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_2 0x3D68AC
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_3 0x3D68B0
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_4 0x3D68B4
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_5 0x3D68B8
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_6 0x3D68BC
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_7 0x3D68C0
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_8 0x3D68C4
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_9 0x3D68C8
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_10 0x3D68CC
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_11 0x3D68D0
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_12 0x3D68D4
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_13 0x3D68D8
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_14 0x3D68DC
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_15 0x3D68E0
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0 0x3D68E4
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_1 0x3D68E8
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_2 0x3D68EC
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_3 0x3D68F0
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_4 0x3D68F4
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_5 0x3D68F8
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_6 0x3D68FC
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_7 0x3D6900
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_8 0x3D6904
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_9 0x3D6908
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_10 0x3D690C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_11 0x3D6910
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_12 0x3D6914
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_13 0x3D6918
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_14 0x3D691C
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_15 0x3D6920
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_0 0x3D6924
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_1 0x3D6928
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_2 0x3D692C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_3 0x3D6930
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_4 0x3D6934
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_5 0x3D6938
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_6 0x3D693C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_7 0x3D6940
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_8 0x3D6944
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_9 0x3D6948
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_10 0x3D694C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_11 0x3D6950
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_12 0x3D6954
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_13 0x3D6958
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_14 0x3D695C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_15 0x3D6960
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_0 0x3D6964
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_1 0x3D6968
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_2 0x3D696C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_3 0x3D6970
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_4 0x3D6974
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_5 0x3D6978
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_6 0x3D697C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_7 0x3D6980
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_8 0x3D6984
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_9 0x3D6988
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_10 0x3D698C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_11 0x3D6990
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_12 0x3D6994
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_13 0x3D6998
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_14 0x3D699C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_15 0x3D69A0
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_0 0x3D69A4
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_1 0x3D69A8
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_2 0x3D69AC
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_3 0x3D69B0
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_4 0x3D69B4
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_5 0x3D69B8
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_6 0x3D69BC
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_7 0x3D69C0
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_8 0x3D69C4
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_9 0x3D69C8
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_10 0x3D69CC
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_11 0x3D69D0
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_12 0x3D69D4
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_13 0x3D69D8
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_14 0x3D69DC
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_15 0x3D69E0
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_0 0x3D69E4
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_1 0x3D69E8
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_2 0x3D69EC
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_3 0x3D69F0
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_4 0x3D69F4
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_5 0x3D69F8
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_6 0x3D69FC
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_7 0x3D6A00
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_8 0x3D6A04
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_9 0x3D6A08
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_10 0x3D6A0C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_11 0x3D6A10
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_12 0x3D6A14
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_13 0x3D6A18
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_14 0x3D6A1C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_15 0x3D6A20
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_HIT_AW 0x3D6A64
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_HIT_AR 0x3D6A68
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_HIT_AW 0x3D6A6C
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_HIT_AR 0x3D6A70
+#define mmNIF_RTR_CTRL_5_RGL_CFG 0x3D6B64
+#define mmNIF_RTR_CTRL_5_RGL_SHIFT 0x3D6B68
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_0 0x3D6B6C
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_1 0x3D6B70
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_2 0x3D6B74
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_3 0x3D6B78
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_4 0x3D6B7C
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_5 0x3D6B80
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_6 0x3D6B84
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_7 0x3D6B88
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_0 0x3D6BAC
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_1 0x3D6BB0
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_2 0x3D6BB4
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_3 0x3D6BB8
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_4 0x3D6BBC
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_5 0x3D6BC0
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_6 0x3D6BC4
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_7 0x3D6BC8
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_0 0x3D6BEC
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_1 0x3D6BF0
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_2 0x3D6BF4
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_3 0x3D6BF8
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_4 0x3D6BFC
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_5 0x3D6C00
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_6 0x3D6C04
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_7 0x3D6C08
+#define mmNIF_RTR_CTRL_5_RGL_WDT 0x3D6C2C
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM0_CH0_CTR_WRAP 0x3D6C30
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM0_CH1_CTR_WRAP 0x3D6C34
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM1_CH0_CTR_WRAP 0x3D6C38
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM1_CH1_CTR_WRAP 0x3D6C3C
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM2_CH0_CTR_WRAP 0x3D6C40
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM2_CH1_CTR_WRAP 0x3D6C44
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM3_CH0_CTR_WRAP 0x3D6C48
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM3_CH1_CTR_WRAP 0x3D6C4C
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM0_CH0_CTR_CNT 0x3D6C50
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM0_CH1_CTR_CNT 0x3D6C54
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM1_CH0_CTR_CNT 0x3D6C58
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM1_CH1_CTR_CNT 0x3D6C5C
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM2_CH0_CTR_CNT 0x3D6C60
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM2_CH1_CTR_CNT 0x3D6C64
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM3_CH0_CTR_CNT 0x3D6C68
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM3_CH1_CTR_CNT 0x3D6C6C
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM0_CH0_CTR_WRAP 0x3D6C70
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM0_CH1_CTR_WRAP 0x3D6C74
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM1_CH0_CTR_WRAP 0x3D6C78
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM1_CH1_CTR_WRAP 0x3D6C7C
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM2_CH0_CTR_WRAP 0x3D6C80
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM2_CH1_CTR_WRAP 0x3D6C84
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM3_CH0_CTR_WRAP 0x3D6C88
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM3_CH1_CTR_WRAP 0x3D6C8C
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM0_CH0_CTR_CNT 0x3D6C90
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM0_CH1_CTR_CNT 0x3D6C94
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM1_CH0_CTR_CNT 0x3D6C98
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM1_CH1_CTR_CNT 0x3D6C9C
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM2_CH0_CTR_CNT 0x3D6CA0
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM2_CH1_CTR_CNT 0x3D6CA4
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM3_CH0_CTR_CNT 0x3D6CA8
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM3_CH1_CTR_CNT 0x3D6CAC
+#define mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_0 0x3D6CB0
+#define mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_1 0x3D6CB4
+#define mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_2 0x3D6CB8
+#define mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_3 0x3D6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_5_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h
new file mode 100644
index 000000000..b79c59887
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_6_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_6_REGS_H_
+
+/*
+ *****************************************
+ *   NIF_RTR_CTRL_6 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_6_PERM_SEL 0x3E6108
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_0 0x3E6114
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_1 0x3E6118
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_2 0x3E611C
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_3 0x3E6120
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_4 0x3E6124
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_5 0x3E6128
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_6 0x3E612C
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_7 0x3E6130
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_8 0x3E6134
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_9 0x3E6138
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_10 0x3E613C
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_11 0x3E6140
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_12 0x3E6144
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_13 0x3E6148
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_14 0x3E614C
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_15 0x3E6150
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_16 0x3E6154
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_17 0x3E6158
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_18 0x3E615C
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_19 0x3E6160
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_20 0x3E6164
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_21 0x3E6168
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_22 0x3E616C
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_23 0x3E6170
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_24 0x3E6174
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_25 0x3E6178
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_26 0x3E617C
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_27 0x3E6180
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_0 0x3E6184
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_1 0x3E6188
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_2 0x3E618C
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_3 0x3E6190
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_4 0x3E6194
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_5 0x3E6198
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_6 0x3E619C
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_7 0x3E61A0
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_8 0x3E61A4
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_9 0x3E61A8
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_10 0x3E61AC
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_11 0x3E61B0
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_12 0x3E61B4
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_13 0x3E61B8
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_14 0x3E61BC
+#define mmNIF_RTR_CTRL_6_SCRAM_SRAM_EN 0x3E626C
+#define mmNIF_RTR_CTRL_6_RL_HBM_EN 0x3E6274
+#define mmNIF_RTR_CTRL_6_RL_HBM_SAT 0x3E6278
+#define mmNIF_RTR_CTRL_6_RL_HBM_RST 0x3E627C
+#define mmNIF_RTR_CTRL_6_RL_HBM_TIMEOUT 0x3E6280
+#define mmNIF_RTR_CTRL_6_SCRAM_HBM_EN 0x3E6284
+#define mmNIF_RTR_CTRL_6_RL_PCI_EN 0x3E6288
+#define mmNIF_RTR_CTRL_6_RL_PCI_SAT 0x3E628C
+#define mmNIF_RTR_CTRL_6_RL_PCI_RST 0x3E6290
+#define mmNIF_RTR_CTRL_6_RL_PCI_TIMEOUT 0x3E6294
+#define mmNIF_RTR_CTRL_6_RL_SRAM_EN 0x3E629C
+#define mmNIF_RTR_CTRL_6_RL_SRAM_SAT 0x3E62A0
+#define mmNIF_RTR_CTRL_6_RL_SRAM_RST 0x3E62A4
+#define mmNIF_RTR_CTRL_6_RL_SRAM_TIMEOUT 0x3E62AC
+#define mmNIF_RTR_CTRL_6_RL_SRAM_RED 0x3E62B4
+#define mmNIF_RTR_CTRL_6_E2E_HBM_EN 0x3E62EC
+#define mmNIF_RTR_CTRL_6_E2E_PCI_EN 0x3E62F0
+#define mmNIF_RTR_CTRL_6_E2E_HBM_WR_SIZE 0x3E62F4
+#define mmNIF_RTR_CTRL_6_E2E_PCI_WR_SIZE 0x3E62F8
+#define mmNIF_RTR_CTRL_6_E2E_AW_PCI_CTR_SET_EN 0x3E6404
+#define mmNIF_RTR_CTRL_6_E2E_AW_PCI_CTR_SET 0x3E6408
+#define mmNIF_RTR_CTRL_6_E2E_AW_PCI_CTR_WRAP 0x3E640C
+#define mmNIF_RTR_CTRL_6_E2E_AW_PCI_CTR_CNT 0x3E6410
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM_CTR_SET_EN 0x3E6414
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM_CTR_SET 0x3E6418
+#define mmNIF_RTR_CTRL_6_E2E_HBM_RD_SIZE 0x3E641C
+#define mmNIF_RTR_CTRL_6_E2E_PCI_RD_SIZE 0x3E6420
+#define mmNIF_RTR_CTRL_6_E2E_AR_PCI_CTR_SET_EN 0x3E6424
+#define mmNIF_RTR_CTRL_6_E2E_AR_PCI_CTR_SET 0x3E6428
+#define mmNIF_RTR_CTRL_6_E2E_AR_PCI_CTR_WRAP 0x3E642C
+#define mmNIF_RTR_CTRL_6_E2E_AR_PCI_CTR_CNT 0x3E6430
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM_CTR_SET_EN 0x3E6434
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM_CTR_SET 0x3E6438
+#define mmNIF_RTR_CTRL_6_NL_HBM_SEL_0 0x3E6450
+#define mmNIF_RTR_CTRL_6_NL_HBM_SEL_1 0x3E6454
+#define mmNIF_RTR_CTRL_6_NON_LIN_EN 0x3E6480
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_0 0x3E6500
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_1 0x3E6504
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_2 0x3E6508
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_3 0x3E650C
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_4 0x3E6510
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_0 0x3E6514
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_1 0x3E6520
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_2 0x3E6524
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_3 0x3E6528
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_4 0x3E652C
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_5 0x3E6530
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_6 0x3E6534
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_7 0x3E6538
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_8 0x3E653C
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_9 0x3E6540
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_0 0x3E6550
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_1 0x3E6554
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_2 0x3E6558
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_3 0x3E655C
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_4 0x3E6560
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_5 0x3E6564
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_6 0x3E6568
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_7 0x3E656C
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_8 0x3E6570
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_9 0x3E6574
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_10 0x3E6578
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_11 0x3E657C
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_12 0x3E6580
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_13 0x3E6584
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_14 0x3E6588
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_15 0x3E658C
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_16 0x3E6590
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_17 0x3E6594
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_18 0x3E6598
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_0 0x3E65E4
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_1 0x3E65E8
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_2 0x3E65EC
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_3 0x3E65F0
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_4 0x3E65F4
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_5 0x3E65F8
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_6 0x3E65FC
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_7 0x3E6600
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_8 0x3E6604
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_9 0x3E6608
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_10 0x3E660C
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_11 0x3E6610
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_12 0x3E6614
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_13 0x3E6618
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_14 0x3E661C
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_15 0x3E6620
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_0 0x3E6624
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_1 0x3E6628
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_2 0x3E662C
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_3 0x3E6630
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_4 0x3E6634
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_5 0x3E6638
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_6 0x3E663C
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_7 0x3E6640
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_8 0x3E6644
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_9 0x3E6648
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_10 0x3E664C
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_11 0x3E6650
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_12 0x3E6654
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_13 0x3E6658
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_14 0x3E665C
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_15 0x3E6660
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_0 0x3E6664
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_1 0x3E6668
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_2 0x3E666C
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_3 0x3E6670
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_4 0x3E6674
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_5 0x3E6678
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_6 0x3E667C
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_7 0x3E6680
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_8 0x3E6684
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_9 0x3E6688
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_10 0x3E668C
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_11 0x3E6690
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_12 0x3E6694
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_13 0x3E6698
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_14 0x3E669C
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_15 0x3E66A0
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_0 0x3E66A4
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_1 0x3E66A8
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_2 0x3E66AC
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_3 0x3E66B0
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_4 0x3E66B4
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_5 0x3E66B8
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_6 0x3E66BC
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_7 0x3E66C0
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_8 0x3E66C4
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_9 0x3E66C8
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_10 0x3E66CC
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_11 0x3E66D0
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_12 0x3E66D4
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_13 0x3E66D8
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_14 0x3E66DC
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_15 0x3E66E0
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_0 0x3E66E4
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_1 0x3E66E8
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_2 0x3E66EC
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_3 0x3E66F0
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_4 0x3E66F4
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_5 0x3E66F8
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_6 0x3E66FC
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_7 0x3E6700
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_8 0x3E6704
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_9 0x3E6708
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_10 0x3E670C
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_11 0x3E6710
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_12 0x3E6714
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_13 0x3E6718
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_14 0x3E671C
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_15 0x3E6720
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_0 0x3E6724
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_1 0x3E6728
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_2 0x3E672C
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_3 0x3E6730
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_4 0x3E6734
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_5 0x3E6738
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_6 0x3E673C
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_7 0x3E6740
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_8 0x3E6744
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_9 0x3E6748
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_10 0x3E674C
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_11 0x3E6750
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_12 0x3E6754
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_13 0x3E6758
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_14 0x3E675C
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_15 0x3E6760
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_0 0x3E6764
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_1 0x3E6768
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_2 0x3E676C
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_3 0x3E6770
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_4 0x3E6774
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_5 0x3E6778
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_6 0x3E677C
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_7 0x3E6780
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_8 0x3E6784
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_9 0x3E6788
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_10 0x3E678C
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_11 0x3E6790
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_12 0x3E6794
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_13 0x3E6798
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_14 0x3E679C
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_15 0x3E67A0
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_0 0x3E67A4
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_1 0x3E67A8
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_2 0x3E67AC
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_3 0x3E67B0
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_4 0x3E67B4
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_5 0x3E67B8
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_6 0x3E67BC
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_7 0x3E67C0
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_8 0x3E67C4
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_9 0x3E67C8
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_10 0x3E67CC
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_11 0x3E67D0
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_12 0x3E67D4
+#define
mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_13 0x3E67D8 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_14 0x3E67DC + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_15 0x3E67E0 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_0 0x3E6824 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_1 0x3E6828 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_2 0x3E682C + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_3 0x3E6830 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_4 0x3E6834 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_5 0x3E6838 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_6 0x3E683C + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_7 0x3E6840 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_8 0x3E6844 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_9 0x3E6848 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_10 0x3E684C + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_11 0x3E6850 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_12 0x3E6854 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_13 0x3E6858 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_14 0x3E685C + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_15 0x3E6860 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_0 0x3E6864 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_1 0x3E6868 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_2 0x3E686C + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_3 0x3E6870 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_4 0x3E6874 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_5 0x3E6878 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_6 0x3E687C + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_7 0x3E6880 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_8 0x3E6884 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_9 0x3E6888 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_10 0x3E688C + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_11 0x3E6890 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_12 0x3E6894 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_13 0x3E6898 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_14 0x3E689C + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_15 0x3E68A0 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_0 0x3E68A4 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_1 0x3E68A8 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_2 0x3E68AC + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_3 0x3E68B0 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_4 0x3E68B4 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_5 0x3E68B8 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_6 0x3E68BC + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_7 0x3E68C0 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_8 0x3E68C4 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_9 0x3E68C8 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_10 0x3E68CC + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_11 0x3E68D0 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_12 0x3E68D4 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_13 0x3E68D8 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_14 0x3E68DC + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_15 0x3E68E0 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_0 0x3E68E4 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_1 0x3E68E8 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_2 0x3E68EC + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_3 0x3E68F0 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_4 0x3E68F4 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_5 0x3E68F8 + +#define 
mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_6 0x3E68FC + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_7 0x3E6900 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_8 0x3E6904 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_9 0x3E6908 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_10 0x3E690C + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_11 0x3E6910 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_12 0x3E6914 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_13 0x3E6918 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_14 0x3E691C + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_15 0x3E6920 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_0 0x3E6924 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_1 0x3E6928 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_2 0x3E692C + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_3 0x3E6930 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_4 0x3E6934 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_5 0x3E6938 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_6 0x3E693C + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_7 0x3E6940 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_8 0x3E6944 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_9 0x3E6948 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_10 0x3E694C + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_11 0x3E6950 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_12 0x3E6954 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_13 0x3E6958 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_14 0x3E695C + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_15 0x3E6960 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_0 0x3E6964 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_1 0x3E6968 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_2 0x3E696C + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_3 0x3E6970 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_4 0x3E6974 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_5 0x3E6978 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_6 0x3E697C + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_7 0x3E6980 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_8 0x3E6984 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_9 0x3E6988 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_10 0x3E698C + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_11 0x3E6990 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_12 0x3E6994 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_13 0x3E6998 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_14 0x3E699C + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_15 0x3E69A0 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_0 0x3E69A4 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_1 0x3E69A8 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_2 0x3E69AC + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_3 0x3E69B0 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_4 0x3E69B4 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_5 0x3E69B8 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_6 0x3E69BC + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_7 0x3E69C0 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_8 0x3E69C4 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_9 0x3E69C8 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_10 0x3E69CC + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_11 0x3E69D0 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_12 0x3E69D4 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_13 0x3E69D8 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_14 
0x3E69DC + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_15 0x3E69E0 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_0 0x3E69E4 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_1 0x3E69E8 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_2 0x3E69EC + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_3 0x3E69F0 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_4 0x3E69F4 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_5 0x3E69F8 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_6 0x3E69FC + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_7 0x3E6A00 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_8 0x3E6A04 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_9 0x3E6A08 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_10 0x3E6A0C + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_11 0x3E6A10 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_12 0x3E6A14 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_13 0x3E6A18 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_14 0x3E6A1C + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_15 0x3E6A20 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_HIT_AW 0x3E6A64 + +#define mmNIF_RTR_CTRL_6_RANGE_SEC_HIT_AR 0x3E6A68 + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_HIT_AW 0x3E6A6C + +#define mmNIF_RTR_CTRL_6_RANGE_PRIV_HIT_AR 0x3E6A70 + +#define mmNIF_RTR_CTRL_6_RGL_CFG 0x3E6B64 + +#define mmNIF_RTR_CTRL_6_RGL_SHIFT 0x3E6B68 + +#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_0 0x3E6B6C + +#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_1 0x3E6B70 + +#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_2 0x3E6B74 + +#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_3 0x3E6B78 + +#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_4 0x3E6B7C + +#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_5 0x3E6B80 + +#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_6 0x3E6B84 + +#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_7 0x3E6B88 + +#define mmNIF_RTR_CTRL_6_RGL_TOKEN_0 0x3E6BAC + +#define mmNIF_RTR_CTRL_6_RGL_TOKEN_1 0x3E6BB0 + +#define mmNIF_RTR_CTRL_6_RGL_TOKEN_2 0x3E6BB4 + +#define mmNIF_RTR_CTRL_6_RGL_TOKEN_3 0x3E6BB8 + +#define mmNIF_RTR_CTRL_6_RGL_TOKEN_4 0x3E6BBC + +#define mmNIF_RTR_CTRL_6_RGL_TOKEN_5 0x3E6BC0 + +#define mmNIF_RTR_CTRL_6_RGL_TOKEN_6 0x3E6BC4 + +#define mmNIF_RTR_CTRL_6_RGL_TOKEN_7 0x3E6BC8 + +#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_0 0x3E6BEC + +#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_1 0x3E6BF0 + +#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_2 0x3E6BF4 + +#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_3 0x3E6BF8 + +#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_4 0x3E6BFC + +#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_5 0x3E6C00 + +#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_6 0x3E6C04 + +#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_7 0x3E6C08 + +#define mmNIF_RTR_CTRL_6_RGL_WDT 0x3E6C2C + +#define mmNIF_RTR_CTRL_6_E2E_AR_HBM0_CH0_CTR_WRAP 0x3E6C30 + +#define mmNIF_RTR_CTRL_6_E2E_AR_HBM0_CH1_CTR_WRAP 0x3E6C34 + +#define mmNIF_RTR_CTRL_6_E2E_AR_HBM1_CH0_CTR_WRAP 0x3E6C38 + +#define mmNIF_RTR_CTRL_6_E2E_AR_HBM1_CH1_CTR_WRAP 0x3E6C3C + +#define mmNIF_RTR_CTRL_6_E2E_AR_HBM2_CH0_CTR_WRAP 0x3E6C40 + +#define mmNIF_RTR_CTRL_6_E2E_AR_HBM2_CH1_CTR_WRAP 0x3E6C44 + +#define mmNIF_RTR_CTRL_6_E2E_AR_HBM3_CH0_CTR_WRAP 0x3E6C48 + +#define mmNIF_RTR_CTRL_6_E2E_AR_HBM3_CH1_CTR_WRAP 0x3E6C4C + +#define mmNIF_RTR_CTRL_6_E2E_AR_HBM0_CH0_CTR_CNT 0x3E6C50 + +#define mmNIF_RTR_CTRL_6_E2E_AR_HBM0_CH1_CTR_CNT 0x3E6C54 + +#define mmNIF_RTR_CTRL_6_E2E_AR_HBM1_CH0_CTR_CNT 0x3E6C58 + +#define mmNIF_RTR_CTRL_6_E2E_AR_HBM1_CH1_CTR_CNT 0x3E6C5C + +#define mmNIF_RTR_CTRL_6_E2E_AR_HBM2_CH0_CTR_CNT 0x3E6C60 + +#define mmNIF_RTR_CTRL_6_E2E_AR_HBM2_CH1_CTR_CNT 0x3E6C64 + 
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM3_CH0_CTR_CNT 0x3E6C68
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM3_CH1_CTR_CNT 0x3E6C6C
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM0_CH0_CTR_WRAP 0x3E6C70
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM0_CH1_CTR_WRAP 0x3E6C74
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM1_CH0_CTR_WRAP 0x3E6C78
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM1_CH1_CTR_WRAP 0x3E6C7C
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM2_CH0_CTR_WRAP 0x3E6C80
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM2_CH1_CTR_WRAP 0x3E6C84
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM3_CH0_CTR_WRAP 0x3E6C88
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM3_CH1_CTR_WRAP 0x3E6C8C
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM0_CH0_CTR_CNT 0x3E6C90
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM0_CH1_CTR_CNT 0x3E6C94
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM1_CH0_CTR_CNT 0x3E6C98
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM1_CH1_CTR_CNT 0x3E6C9C
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM2_CH0_CTR_CNT 0x3E6CA0
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM2_CH1_CTR_CNT 0x3E6CA4
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM3_CH0_CTR_CNT 0x3E6CA8
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM3_CH1_CTR_CNT 0x3E6CAC
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_0 0x3E6CB0
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_1 0x3E6CB4
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_2 0x3E6CB8
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_3 0x3E6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_6_REGS_H_ */
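The NIF_RTR_CTRL blocks are stamped-out copies of the same RTR_CTRL prototype, and the two instances in this excerpt sit exactly 0x10000 apart (mmNIF_RTR_CTRL_6_PERM_SEL is 0x3E6108; mmNIF_RTR_CTRL_7_PERM_SEL below is 0x3F6108). A minimal sketch of rebasing an instance-6 address onto another instance; the helper name is illustrative, the kernel u32 type is assumed available, and extending the 0x10000 stride to instances outside this excerpt is an assumption:

	/*
	 * Rebase an instance-6 RTR_CTRL register onto instance n.
	 * Assumes the 0x10000 per-instance stride observed between
	 * instances 6 and 7 also holds for the other instances.
	 */
	static inline u32 nif_rtr_ctrl_reg(u32 instance, u32 instance6_reg)
	{
		return instance6_reg + (instance - 6) * 0x10000;
	}

	/* nif_rtr_ctrl_reg(7, mmNIF_RTR_CTRL_6_PERM_SEL) == mmNIF_RTR_CTRL_7_PERM_SEL */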
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h
new file mode 100644
index 000000000..3a6a34ba2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_7_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_7_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_7 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_7_PERM_SEL 0x3F6108
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_0 0x3F6114
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_1 0x3F6118
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_2 0x3F611C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_3 0x3F6120
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_4 0x3F6124
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_5 0x3F6128
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_6 0x3F612C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_7 0x3F6130
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_8 0x3F6134
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_9 0x3F6138
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_10 0x3F613C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_11 0x3F6140
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_12 0x3F6144
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_13 0x3F6148
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_14 0x3F614C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_15 0x3F6150
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_16 0x3F6154
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_17 0x3F6158
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_18 0x3F615C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_19 0x3F6160
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_20 0x3F6164
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_21 0x3F6168
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_22 0x3F616C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_23 0x3F6170
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_24 0x3F6174
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_25 0x3F6178
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_26 0x3F617C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_27 0x3F6180
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_0 0x3F6184
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_1 0x3F6188
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_2 0x3F618C
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_3 0x3F6190
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_4 0x3F6194
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_5 0x3F6198
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_6 0x3F619C
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_7 0x3F61A0
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_8 0x3F61A4
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_9 0x3F61A8
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_10 0x3F61AC
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_11 0x3F61B0
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_12 0x3F61B4
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_13 0x3F61B8
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_14 0x3F61BC
+
+#define mmNIF_RTR_CTRL_7_SCRAM_SRAM_EN 0x3F626C
+
+#define mmNIF_RTR_CTRL_7_RL_HBM_EN 0x3F6274
+
+#define mmNIF_RTR_CTRL_7_RL_HBM_SAT 0x3F6278
+
+#define mmNIF_RTR_CTRL_7_RL_HBM_RST 0x3F627C
+
+#define mmNIF_RTR_CTRL_7_RL_HBM_TIMEOUT 0x3F6280
+
+#define mmNIF_RTR_CTRL_7_SCRAM_HBM_EN 0x3F6284
+
+#define mmNIF_RTR_CTRL_7_RL_PCI_EN 0x3F6288
+
+#define mmNIF_RTR_CTRL_7_RL_PCI_SAT 0x3F628C
+
+#define mmNIF_RTR_CTRL_7_RL_PCI_RST 0x3F6290
+
+#define mmNIF_RTR_CTRL_7_RL_PCI_TIMEOUT 0x3F6294
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_EN 0x3F629C
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_SAT 0x3F62A0
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_RST 0x3F62A4
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_TIMEOUT 0x3F62AC
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_RED 0x3F62B4
+
+#define mmNIF_RTR_CTRL_7_E2E_HBM_EN 0x3F62EC
+
+#define mmNIF_RTR_CTRL_7_E2E_PCI_EN 0x3F62F0
+
+#define mmNIF_RTR_CTRL_7_E2E_HBM_WR_SIZE 0x3F62F4
+
+#define
mmNIF_RTR_CTRL_7_E2E_PCI_WR_SIZE 0x3F62F8 + +#define mmNIF_RTR_CTRL_7_E2E_AW_PCI_CTR_SET_EN 0x3F6404 + +#define mmNIF_RTR_CTRL_7_E2E_AW_PCI_CTR_SET 0x3F6408 + +#define mmNIF_RTR_CTRL_7_E2E_AW_PCI_CTR_WRAP 0x3F640C + +#define mmNIF_RTR_CTRL_7_E2E_AW_PCI_CTR_CNT 0x3F6410 + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM_CTR_SET_EN 0x3F6414 + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM_CTR_SET 0x3F6418 + +#define mmNIF_RTR_CTRL_7_E2E_HBM_RD_SIZE 0x3F641C + +#define mmNIF_RTR_CTRL_7_E2E_PCI_RD_SIZE 0x3F6420 + +#define mmNIF_RTR_CTRL_7_E2E_AR_PCI_CTR_SET_EN 0x3F6424 + +#define mmNIF_RTR_CTRL_7_E2E_AR_PCI_CTR_SET 0x3F6428 + +#define mmNIF_RTR_CTRL_7_E2E_AR_PCI_CTR_WRAP 0x3F642C + +#define mmNIF_RTR_CTRL_7_E2E_AR_PCI_CTR_CNT 0x3F6430 + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM_CTR_SET_EN 0x3F6434 + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM_CTR_SET 0x3F6438 + +#define mmNIF_RTR_CTRL_7_NL_HBM_SEL_0 0x3F6450 + +#define mmNIF_RTR_CTRL_7_NL_HBM_SEL_1 0x3F6454 + +#define mmNIF_RTR_CTRL_7_NON_LIN_EN 0x3F6480 + +#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_0 0x3F6500 + +#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_1 0x3F6504 + +#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_2 0x3F6508 + +#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_3 0x3F650C + +#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_4 0x3F6510 + +#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_0 0x3F6514 + +#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_1 0x3F6520 + +#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_2 0x3F6524 + +#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_3 0x3F6528 + +#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_4 0x3F652C + +#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_5 0x3F6530 + +#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_6 0x3F6534 + +#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_7 0x3F6538 + +#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_8 0x3F653C + +#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_9 0x3F6540 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_0 0x3F6550 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_1 0x3F6554 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_2 0x3F6558 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_3 0x3F655C + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_4 0x3F6560 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_5 0x3F6564 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_6 0x3F6568 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_7 0x3F656C + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_8 0x3F6570 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_9 0x3F6574 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_10 0x3F6578 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_11 0x3F657C + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_12 0x3F6580 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_13 0x3F6584 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_14 0x3F6588 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_15 0x3F658C + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_16 0x3F6590 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_17 0x3F6594 + +#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_18 0x3F6598 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0 0x3F65E4 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_1 0x3F65E8 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_2 0x3F65EC + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_3 0x3F65F0 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_4 0x3F65F4 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_5 0x3F65F8 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_6 0x3F65FC + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_7 0x3F6600 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_8 0x3F6604 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_9 0x3F6608 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_10 0x3F660C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_11 0x3F6610 + +#define 
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_12 0x3F6614 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_13 0x3F6618 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_14 0x3F661C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_15 0x3F6620 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0 0x3F6624 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_1 0x3F6628 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_2 0x3F662C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_3 0x3F6630 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_4 0x3F6634 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_5 0x3F6638 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_6 0x3F663C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_7 0x3F6640 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_8 0x3F6644 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_9 0x3F6648 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_10 0x3F664C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_11 0x3F6650 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_12 0x3F6654 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_13 0x3F6658 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_14 0x3F665C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_15 0x3F6660 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0 0x3F6664 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_1 0x3F6668 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_2 0x3F666C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_3 0x3F6670 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_4 0x3F6674 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_5 0x3F6678 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_6 0x3F667C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_7 0x3F6680 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_8 0x3F6684 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_9 0x3F6688 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_10 0x3F668C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_11 0x3F6690 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_12 0x3F6694 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_13 0x3F6698 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_14 0x3F669C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_15 0x3F66A0 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0 0x3F66A4 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_1 0x3F66A8 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_2 0x3F66AC + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_3 0x3F66B0 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_4 0x3F66B4 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_5 0x3F66B8 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_6 0x3F66BC + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_7 0x3F66C0 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_8 0x3F66C4 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_9 0x3F66C8 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_10 0x3F66CC + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_11 0x3F66D0 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_12 0x3F66D4 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_13 0x3F66D8 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_14 0x3F66DC + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_15 0x3F66E0 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_0 0x3F66E4 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_1 0x3F66E8 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_2 0x3F66EC + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_3 0x3F66F0 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_4 0x3F66F4 + +#define 
mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_5 0x3F66F8 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_6 0x3F66FC + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_7 0x3F6700 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_8 0x3F6704 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_9 0x3F6708 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_10 0x3F670C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_11 0x3F6710 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_12 0x3F6714 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_13 0x3F6718 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_14 0x3F671C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_15 0x3F6720 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_0 0x3F6724 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_1 0x3F6728 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_2 0x3F672C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_3 0x3F6730 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_4 0x3F6734 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_5 0x3F6738 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_6 0x3F673C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_7 0x3F6740 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_8 0x3F6744 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_9 0x3F6748 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_10 0x3F674C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_11 0x3F6750 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_12 0x3F6754 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_13 0x3F6758 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_14 0x3F675C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_15 0x3F6760 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_0 0x3F6764 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_1 0x3F6768 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_2 0x3F676C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_3 0x3F6770 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_4 0x3F6774 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_5 0x3F6778 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_6 0x3F677C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_7 0x3F6780 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_8 0x3F6784 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_9 0x3F6788 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_10 0x3F678C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_11 0x3F6790 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_12 0x3F6794 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_13 0x3F6798 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_14 0x3F679C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_15 0x3F67A0 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_0 0x3F67A4 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_1 0x3F67A8 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_2 0x3F67AC + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_3 0x3F67B0 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_4 0x3F67B4 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_5 0x3F67B8 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_6 0x3F67BC + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_7 0x3F67C0 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_8 0x3F67C4 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_9 0x3F67C8 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_10 0x3F67CC + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_11 0x3F67D0 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_12 0x3F67D4 + +#define 
mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_13 0x3F67D8 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_14 0x3F67DC + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_15 0x3F67E0 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0 0x3F6824 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_1 0x3F6828 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_2 0x3F682C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_3 0x3F6830 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_4 0x3F6834 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_5 0x3F6838 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_6 0x3F683C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_7 0x3F6840 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_8 0x3F6844 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_9 0x3F6848 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_10 0x3F684C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_11 0x3F6850 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_12 0x3F6854 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_13 0x3F6858 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_14 0x3F685C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_15 0x3F6860 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0 0x3F6864 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_1 0x3F6868 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_2 0x3F686C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_3 0x3F6870 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_4 0x3F6874 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_5 0x3F6878 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_6 0x3F687C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_7 0x3F6880 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_8 0x3F6884 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_9 0x3F6888 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_10 0x3F688C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_11 0x3F6890 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_12 0x3F6894 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_13 0x3F6898 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_14 0x3F689C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_15 0x3F68A0 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0 0x3F68A4 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_1 0x3F68A8 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_2 0x3F68AC + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_3 0x3F68B0 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_4 0x3F68B4 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_5 0x3F68B8 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_6 0x3F68BC + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_7 0x3F68C0 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_8 0x3F68C4 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_9 0x3F68C8 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_10 0x3F68CC + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_11 0x3F68D0 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_12 0x3F68D4 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_13 0x3F68D8 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_14 0x3F68DC + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_15 0x3F68E0 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_0 0x3F68E4 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_1 0x3F68E8 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_2 0x3F68EC + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_3 0x3F68F0 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_4 0x3F68F4 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_5 0x3F68F8 + +#define 
mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_6 0x3F68FC + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_7 0x3F6900 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_8 0x3F6904 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_9 0x3F6908 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_10 0x3F690C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_11 0x3F6910 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_12 0x3F6914 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_13 0x3F6918 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_14 0x3F691C + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_15 0x3F6920 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_0 0x3F6924 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_1 0x3F6928 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_2 0x3F692C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_3 0x3F6930 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_4 0x3F6934 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_5 0x3F6938 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_6 0x3F693C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_7 0x3F6940 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_8 0x3F6944 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_9 0x3F6948 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_10 0x3F694C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_11 0x3F6950 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_12 0x3F6954 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_13 0x3F6958 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_14 0x3F695C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_15 0x3F6960 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_0 0x3F6964 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_1 0x3F6968 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_2 0x3F696C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_3 0x3F6970 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_4 0x3F6974 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_5 0x3F6978 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_6 0x3F697C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_7 0x3F6980 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_8 0x3F6984 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_9 0x3F6988 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_10 0x3F698C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_11 0x3F6990 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_12 0x3F6994 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_13 0x3F6998 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_14 0x3F699C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_15 0x3F69A0 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_0 0x3F69A4 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_1 0x3F69A8 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_2 0x3F69AC + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_3 0x3F69B0 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_4 0x3F69B4 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_5 0x3F69B8 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_6 0x3F69BC + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_7 0x3F69C0 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_8 0x3F69C4 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_9 0x3F69C8 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_10 0x3F69CC + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_11 0x3F69D0 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_12 0x3F69D4 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_13 0x3F69D8 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_14 
0x3F69DC + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_15 0x3F69E0 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_0 0x3F69E4 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_1 0x3F69E8 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_2 0x3F69EC + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_3 0x3F69F0 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_4 0x3F69F4 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_5 0x3F69F8 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_6 0x3F69FC + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_7 0x3F6A00 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_8 0x3F6A04 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_9 0x3F6A08 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_10 0x3F6A0C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_11 0x3F6A10 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_12 0x3F6A14 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_13 0x3F6A18 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_14 0x3F6A1C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_15 0x3F6A20 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AW 0x3F6A64 + +#define mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AR 0x3F6A68 + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_HIT_AW 0x3F6A6C + +#define mmNIF_RTR_CTRL_7_RANGE_PRIV_HIT_AR 0x3F6A70 + +#define mmNIF_RTR_CTRL_7_RGL_CFG 0x3F6B64 + +#define mmNIF_RTR_CTRL_7_RGL_SHIFT 0x3F6B68 + +#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_0 0x3F6B6C + +#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_1 0x3F6B70 + +#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_2 0x3F6B74 + +#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_3 0x3F6B78 + +#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_4 0x3F6B7C + +#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_5 0x3F6B80 + +#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_6 0x3F6B84 + +#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_7 0x3F6B88 + +#define mmNIF_RTR_CTRL_7_RGL_TOKEN_0 0x3F6BAC + +#define mmNIF_RTR_CTRL_7_RGL_TOKEN_1 0x3F6BB0 + +#define mmNIF_RTR_CTRL_7_RGL_TOKEN_2 0x3F6BB4 + +#define mmNIF_RTR_CTRL_7_RGL_TOKEN_3 0x3F6BB8 + +#define mmNIF_RTR_CTRL_7_RGL_TOKEN_4 0x3F6BBC + +#define mmNIF_RTR_CTRL_7_RGL_TOKEN_5 0x3F6BC0 + +#define mmNIF_RTR_CTRL_7_RGL_TOKEN_6 0x3F6BC4 + +#define mmNIF_RTR_CTRL_7_RGL_TOKEN_7 0x3F6BC8 + +#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_0 0x3F6BEC + +#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_1 0x3F6BF0 + +#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_2 0x3F6BF4 + +#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_3 0x3F6BF8 + +#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_4 0x3F6BFC + +#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_5 0x3F6C00 + +#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_6 0x3F6C04 + +#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_7 0x3F6C08 + +#define mmNIF_RTR_CTRL_7_RGL_WDT 0x3F6C2C + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM0_CH0_CTR_WRAP 0x3F6C30 + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM0_CH1_CTR_WRAP 0x3F6C34 + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM1_CH0_CTR_WRAP 0x3F6C38 + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM1_CH1_CTR_WRAP 0x3F6C3C + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM2_CH0_CTR_WRAP 0x3F6C40 + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM2_CH1_CTR_WRAP 0x3F6C44 + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM3_CH0_CTR_WRAP 0x3F6C48 + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM3_CH1_CTR_WRAP 0x3F6C4C + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM0_CH0_CTR_CNT 0x3F6C50 + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM0_CH1_CTR_CNT 0x3F6C54 + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM1_CH0_CTR_CNT 0x3F6C58 + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM1_CH1_CTR_CNT 0x3F6C5C + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM2_CH0_CTR_CNT 0x3F6C60 + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM2_CH1_CTR_CNT 0x3F6C64 + 
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM3_CH0_CTR_CNT 0x3F6C68 + +#define mmNIF_RTR_CTRL_7_E2E_AR_HBM3_CH1_CTR_CNT 0x3F6C6C + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM0_CH0_CTR_WRAP 0x3F6C70 + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM0_CH1_CTR_WRAP 0x3F6C74 + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM1_CH0_CTR_WRAP 0x3F6C78 + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM1_CH1_CTR_WRAP 0x3F6C7C + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM2_CH0_CTR_WRAP 0x3F6C80 + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM2_CH1_CTR_WRAP 0x3F6C84 + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM3_CH0_CTR_WRAP 0x3F6C88 + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM3_CH1_CTR_WRAP 0x3F6C8C + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM0_CH0_CTR_CNT 0x3F6C90 + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM0_CH1_CTR_CNT 0x3F6C94 + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM1_CH0_CTR_CNT 0x3F6C98 + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM1_CH1_CTR_CNT 0x3F6C9C + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM2_CH0_CTR_CNT 0x3F6CA0 + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM2_CH1_CTR_CNT 0x3F6CA4 + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM3_CH0_CTR_CNT 0x3F6CA8 + +#define mmNIF_RTR_CTRL_7_E2E_AW_HBM3_CH1_CTR_CNT 0x3F6CAC + +#define mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_0 0x3F6CB0 + +#define mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_1 0x3F6CB4 + +#define mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_2 0x3F6CB8 + +#define mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_3 0x3F6CBC + +#endif /* ASIC_REG_NIF_RTR_CTRL_7_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h new file mode 100644 index 000000000..2585c70f5 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PSOC_CPU_PLL_REGS_H_ +#define ASIC_REG_PSOC_CPU_PLL_REGS_H_ + +/* + ***************************************** + * PSOC_CPU_PLL (Prototype: PLL) + ***************************************** + */ + +#define mmPSOC_CPU_PLL_NR 0xC70100 + +#define mmPSOC_CPU_PLL_NF 0xC70104 + +#define mmPSOC_CPU_PLL_OD 0xC70108 + +#define mmPSOC_CPU_PLL_NB 0xC7010C + +#define mmPSOC_CPU_PLL_CFG 0xC70110 + +#define mmPSOC_CPU_PLL_LOSE_MASK 0xC70120 + +#define mmPSOC_CPU_PLL_LOCK_INTR 0xC70128 + +#define mmPSOC_CPU_PLL_LOCK_BYPASS 0xC7012C + +#define mmPSOC_CPU_PLL_DATA_CHNG 0xC70130 + +#define mmPSOC_CPU_PLL_RST 0xC70134 + +#define mmPSOC_CPU_PLL_SLIP_WD_CNTR 0xC70150 + +#define mmPSOC_CPU_PLL_DIV_FACTOR_0 0xC70200 + +#define mmPSOC_CPU_PLL_DIV_FACTOR_1 0xC70204 + +#define mmPSOC_CPU_PLL_DIV_FACTOR_2 0xC70208 + +#define mmPSOC_CPU_PLL_DIV_FACTOR_3 0xC7020C + +#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_0 0xC70220 + +#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_1 0xC70224 + +#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_2 0xC70228 + +#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_3 0xC7022C + +#define mmPSOC_CPU_PLL_DIV_SEL_0 0xC70280 + +#define mmPSOC_CPU_PLL_DIV_SEL_1 0xC70284 + +#define mmPSOC_CPU_PLL_DIV_SEL_2 0xC70288 + +#define mmPSOC_CPU_PLL_DIV_SEL_3 0xC7028C + +#define mmPSOC_CPU_PLL_DIV_EN_0 0xC702A0 + +#define mmPSOC_CPU_PLL_DIV_EN_1 0xC702A4 + +#define mmPSOC_CPU_PLL_DIV_EN_2 0xC702A8 + +#define mmPSOC_CPU_PLL_DIV_EN_3 0xC702AC + +#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_0 0xC702C0 + +#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_1 0xC702C4 + +#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_2 0xC702C8 + +#define 
mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_3 0xC702CC
+
+#define mmPSOC_CPU_PLL_CLK_GATER 0xC70300
+
+#define mmPSOC_CPU_PLL_CLK_RLX_0 0xC70310
+
+#define mmPSOC_CPU_PLL_CLK_RLX_1 0xC70314
+
+#define mmPSOC_CPU_PLL_CLK_RLX_2 0xC70318
+
+#define mmPSOC_CPU_PLL_CLK_RLX_3 0xC7031C
+
+#define mmPSOC_CPU_PLL_REF_CNTR_PERIOD 0xC70400
+
+#define mmPSOC_CPU_PLL_REF_LOW_THRESHOLD 0xC70410
+
+#define mmPSOC_CPU_PLL_REF_HIGH_THRESHOLD 0xC70420
+
+#define mmPSOC_CPU_PLL_PLL_NOT_STABLE 0xC70430
+
+#define mmPSOC_CPU_PLL_FREQ_CALC_EN 0xC70440
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_CFG 0xC70500
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_0 0xC70510
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_1 0xC70514
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_2 0xC70518
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_3 0xC7051C
+
+#endif /* ASIC_REG_PSOC_CPU_PLL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h
new file mode 100644
index 000000000..b7c33e025
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_ETR_REGS_H_
+#define ASIC_REG_PSOC_ETR_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_ETR (Prototype: ETR)
+ *****************************************
+ */
+
+#define mmPSOC_ETR_RSZ 0x2C43004
+
+#define mmPSOC_ETR_STS 0x2C4300C
+
+#define mmPSOC_ETR_RRD 0x2C43010
+
+#define mmPSOC_ETR_RRP 0x2C43014
+
+#define mmPSOC_ETR_RWP 0x2C43018
+
+#define mmPSOC_ETR_TRG 0x2C4301C
+
+#define mmPSOC_ETR_CTL 0x2C43020
+
+#define mmPSOC_ETR_RWD 0x2C43024
+
+#define mmPSOC_ETR_MODE 0x2C43028
+
+#define mmPSOC_ETR_LBUFLEVEL 0x2C4302C
+
+#define mmPSOC_ETR_CBUFLEVEL 0x2C43030
+
+#define mmPSOC_ETR_BUFWM 0x2C43034
+
+#define mmPSOC_ETR_RRPHI 0x2C43038
+
+#define mmPSOC_ETR_RWPHI 0x2C4303C
+
+#define mmPSOC_ETR_AXICTL 0x2C43110
+
+#define mmPSOC_ETR_DBALO 0x2C43118
+
+#define mmPSOC_ETR_DBAHI 0x2C4311C
+
+#define mmPSOC_ETR_FFSR 0x2C43300
+
+#define mmPSOC_ETR_FFCR 0x2C43304
+
+#define mmPSOC_ETR_PSCR 0x2C43308
+
+#define mmPSOC_ETR_ITMISCOP0 0x2C43EE0
+
+#define mmPSOC_ETR_ITTRFLIN 0x2C43EE8
+
+#define mmPSOC_ETR_ITATBDATA0 0x2C43EEC
+
+#define mmPSOC_ETR_ITATBCTR2 0x2C43EF0
+
+#define mmPSOC_ETR_ITATBCTR1 0x2C43EF4
+
+#define mmPSOC_ETR_ITATBCTR0 0x2C43EF8
+
+#define mmPSOC_ETR_ITCTRL 0x2C43F00
+
+#define mmPSOC_ETR_CLAIMSET 0x2C43FA0
+
+#define mmPSOC_ETR_CLAIMCLR 0x2C43FA4
+
+#define mmPSOC_ETR_LAR 0x2C43FB0
+
+#define mmPSOC_ETR_LSR 0x2C43FB4
+
+#define mmPSOC_ETR_AUTHSTATUS 0x2C43FB8
+
+#define mmPSOC_ETR_DEVID 0x2C43FC8
+
+#define mmPSOC_ETR_DEVTYPE 0x2C43FCC
+
+#define mmPSOC_ETR_PERIPHID4 0x2C43FD0
+
+#define mmPSOC_ETR_PERIPHID5 0x2C43FD4
+
+#define mmPSOC_ETR_PERIPHID6 0x2C43FD8
+
+#define mmPSOC_ETR_PERIPHID7 0x2C43FDC
+
+#define mmPSOC_ETR_PERIPHID0 0x2C43FE0
+
+#define mmPSOC_ETR_PERIPHID1 0x2C43FE4
+
+#define mmPSOC_ETR_PERIPHID2 0x2C43FE8
+
+#define mmPSOC_ETR_PERIPHID3 0x2C43FEC
+
+#define mmPSOC_ETR_COMPID0 0x2C43FF0
+
+#define mmPSOC_ETR_COMPID1 0x2C43FF4
+
+#define mmPSOC_ETR_COMPID2 0x2C43FF8
+
+#define mmPSOC_ETR_COMPID3 0x2C43FFC
+
+#endif /* ASIC_REG_PSOC_ETR_REGS_H_ */
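The PSOC_ETR register names above mirror the usual CoreSight TMC/ETR layout, in which the circular trace-buffer write pointer is split across mmPSOC_ETR_RWP (low 32 bits) and mmPSOC_ETR_RWPHI (high bits). A minimal sketch of combining the pair into one 64-bit offset; it assumes an RREG32() MMIO read helper such as the one the habanalabs driver defines, and the function name is illustrative, not part of the generated header:

	/* Combine the split low/high halves of the ETR buffer write pointer. */
	static inline u64 psoc_etr_read_rwp(void)
	{
		u64 lo = RREG32(mmPSOC_ETR_RWP);
		u64 hi = RREG32(mmPSOC_ETR_RWPHI);

		return (hi << 32) | lo;
	}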
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h
new file mode 100644
index 000000000..6703e678e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h
@@ -0,0 +1,502 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
+#define ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
+
+/*
+ *****************************************
+ * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF)
+ *****************************************
+ */
+
+/* PSOC_GLOBAL_CONF_NON_RST_FLOPS */
+#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_PCI_FW_FSM */
+#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BTM_FSM */
+#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_SW_BTM_FSM */
+#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM */
+#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPI_MEM_EN */
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PRSTN */
+#define PSOC_GLOBAL_CONF_PRSTN_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_EN */
+#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_PRSTN_INTR */
+#define PSOC_GLOBAL_CONF_PCIE_PRSTN_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_PRSTN_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SPI_IMG_STS */
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_MASK 0x1
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_SHIFT 1
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_MASK 0x2
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_SHIFT 2
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_MASK 0x4
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_SHIFT 3
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_MASK 0x8
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_FSM */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_SHIFT 1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_MASK 0x2
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_SHIFT 2
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_MASK 0x4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_SHIFT 3
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_MASK 0x8
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_MASK 0x20
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_SHIFT 6
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_MASK 0x40
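/*
 * Editorial aside, not part of the generated file: every field in this
 * header is described by a _SHIFT/_MASK pair in which the mask is already
 * shifted into place, so a field is read as (reg & MASK) >> SHIFT. For
 * the secondary SPI image status bit defined above, for example:
 *
 *	sec = (sts & PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_MASK) >>
 *			PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_SHIFT;
 */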
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_SHIFT 7 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_MASK 0x80 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_SHIFT 8 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_MASK 0x100 + +/* PSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD */ +#define PSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD_DONE_SHIFT 0 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD_DONE_MASK 0x1 + +/* PSOC_GLOBAL_CONF_PHY_STABLE */ +#define PSOC_GLOBAL_CONF_PHY_STABLE_PRSTN_SHIFT 0 +#define PSOC_GLOBAL_CONF_PHY_STABLE_PRSTN_MASK 0x1 + +/* PSOC_GLOBAL_CONF_PRSTN_OVR */ +#define PSOC_GLOBAL_CONF_PRSTN_OVR_EN_SHIFT 0 +#define PSOC_GLOBAL_CONF_PRSTN_OVR_EN_MASK 0x1 +#define PSOC_GLOBAL_CONF_PRSTN_OVR_VAL_SHIFT 4 +#define PSOC_GLOBAL_CONF_PRSTN_OVR_VAL_MASK 0x10 + +/* PSOC_GLOBAL_CONF_ETR_FLUSH */ +#define PSOC_GLOBAL_CONF_ETR_FLUSH_MASK_SHIFT 0 +#define PSOC_GLOBAL_CONF_ETR_FLUSH_MASK_MASK 0x1 + +/* PSOC_GLOBAL_CONF_COLD_RST_FLOPS */ +#define PSOC_GLOBAL_CONF_COLD_RST_FLOPS_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_COLD_RST_FLOPS_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_DIS_RAZWI_ERR */ +#define PSOC_GLOBAL_CONF_DIS_RAZWI_ERR_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_DIS_RAZWI_ERR_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_PCIE_PHY_RST_N */ +#define PSOC_GLOBAL_CONF_PCIE_PHY_RST_N_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_PCIE_PHY_RST_N_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_RAZWI */ +#define PSOC_GLOBAL_CONF_RAZWI_INTR_SHIFT 0 +#define PSOC_GLOBAL_CONF_RAZWI_INTR_MASK 0x1 +#define PSOC_GLOBAL_CONF_RAZWI_MASK_SHIFT 4 +#define PSOC_GLOBAL_CONF_RAZWI_MASK_MASK 0x10 + +/* PSOC_GLOBAL_CONF_PROT */ +#define PSOC_GLOBAL_CONF_PROT_AR_SHIFT 0 +#define PSOC_GLOBAL_CONF_PROT_AR_MASK 0x7 +#define PSOC_GLOBAL_CONF_PROT_AW_SHIFT 4 +#define PSOC_GLOBAL_CONF_PROT_AW_MASK 0x70 + +/* PSOC_GLOBAL_CONF_ADC */ +#define PSOC_GLOBAL_CONF_ADC_INTR_SHIFT 0 +#define PSOC_GLOBAL_CONF_ADC_INTR_MASK 0x1 +#define PSOC_GLOBAL_CONF_ADC_MASK_SHIFT 4 +#define PSOC_GLOBAL_CONF_ADC_MASK_MASK 0x10 + +/* PSOC_GLOBAL_CONF_BOOT_SEQ_TO */ +#define PSOC_GLOBAL_CONF_BOOT_SEQ_TO_MASK_SHIFT 0 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_TO_MASK_MASK 0x1 + +/* PSOC_GLOBAL_CONF_SCRATCHPAD */ +#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_SHIFT 0 +#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_SEMAPHORE */ +#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_SHIFT 0 +#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_CPU_BOOT_STATUS */ +#define PSOC_GLOBAL_CONF_CPU_BOOT_STATUS_CNTR_SHIFT 0 +#define PSOC_GLOBAL_CONF_CPU_BOOT_STATUS_CNTR_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_KMD_MSG_TO_CPU */ +#define PSOC_GLOBAL_CONF_KMD_MSG_TO_CPU_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_KMD_MSG_TO_CPU_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_SPL_SOURCE */ +#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_MASK 0x7 + +/* PSOC_GLOBAL_CONF_I2C_MSTR1_DBG */ +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_SHIFT 0 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_MASK 0x1 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_SHIFT 1 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_MASK 0x2 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_SHIFT 2 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_MASK 0x4 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_SHIFT 3 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_MASK 0x8 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_SHIFT 4 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_MASK 0x10 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_SHIFT 5 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_MASK 0x20 +#define 
PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_SHIFT 6 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_MASK 0x40 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_SHIFT 7 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_MASK 0x80 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_SHIFT 8 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_MASK 0x100 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_SHIFT 9 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_MASK 0x200 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_SHIFT 10 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_MASK 0x7C00 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_SHIFT 15 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_MASK 0x78000 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_SHIFT 19 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_MASK 0x80000 + +/* PSOC_GLOBAL_CONF_I2C_SLV */ +#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_SHIFT 0 +#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_MASK 0x1 + +/* PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK */ +#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_SHIFT 0 +#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_MASK 0x1 + +/* PSOC_GLOBAL_CONF_TRACE_ADDR */ +#define PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_SHIFT 0 +#define PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK 0x3FF + +/* PSOC_GLOBAL_CONF_ARUSER */ +#define PSOC_GLOBAL_CONF_ARUSER_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_ARUSER_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_AWUSER */ +#define PSOC_GLOBAL_CONF_AWUSER_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_AWUSER_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_TRACE_AWUSER */ +#define PSOC_GLOBAL_CONF_TRACE_AWUSER_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_TRACE_AWUSER_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_TRACE_ARUSER */ +#define PSOC_GLOBAL_CONF_TRACE_ARUSER_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_TRACE_ARUSER_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_BTL_STS */ +#define PSOC_GLOBAL_CONF_BTL_STS_DONE_SHIFT 0 +#define PSOC_GLOBAL_CONF_BTL_STS_DONE_MASK 0x1 +#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_SHIFT 4 +#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_MASK 0x10 +#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_SHIFT 8 +#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_MASK 0xF00 + +/* PSOC_GLOBAL_CONF_TIMEOUT_INTR */ +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_SHIFT 0 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_MASK 0x1 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_SHIFT 1 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_MASK 0x2 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_SHIFT 2 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_MASK 0x4 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_SHIFT 3 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_MASK 0x8 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_SHIFT 4 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_MASK 0x10 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_SHIFT 5 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_MASK 0x20 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_SHIFT 6 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_MASK 0x40 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_SHIFT 7 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_MASK 0x80 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_5_SHIFT 8 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_5_MASK 0x100 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_6_SHIFT 9 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_6_MASK 0x200 + +/* PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR */ +#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_PERIPH_INTR */ +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_SHIFT 0 +#define 
PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_MASK 0x1 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_SHIFT 1 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_MASK 0x2 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_SHIFT 2 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_MASK 0x4 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_SHIFT 3 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_MASK 0x8 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_SHIFT 4 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_MASK 0x10 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_SHIFT 5 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_MASK 0x20 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_SHIFT 6 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_MASK 0x40 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_SHIFT 7 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_MASK 0x80 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_SHIFT 12 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_MASK 0x1000 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_SHIFT 13 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_MASK 0x2000 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_SHIFT 16 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_MASK 0x10000 + +/* PSOC_GLOBAL_CONF_COMB_PERIPH_INTR */ +#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_AXI_ERR_INTR */ +#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_TARGETID */ +#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_SHIFT 1 +#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_MASK 0xFFE +#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_SHIFT 16 +#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_MASK 0xFFF0000 +#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_SHIFT 28 +#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_MASK 0xF0000000 + +/* PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE */ +#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_BOOT_STRAP_PINS */ +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_SHIFT 0 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_MASK 0x1 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_SHIFT 1 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_MASK 0x2 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_REPAIR_CFG_SHIFT 2 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_REPAIR_CFG_MASK 0xC +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_SHIFT 4 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_MASK 0x10 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_SHIFT 5 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_MASK 0x20 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_SHIFT 6 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_MASK 0x40 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_SHIFT 7 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_MASK 0x80 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_DUMP_SEL_SHIFT 8 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_DUMP_SEL_MASK 0x1FFF00 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_GRAD_RST_SHIFT 22 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_GRAD_RST_MASK 0x400000 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_DUMP_DIS_SHIFT 23 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_DUMP_DIS_MASK 0x800000 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SHIFT 24 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_MASK 0x1F000000 + +/* PSOC_GLOBAL_CONF_MEM_REPAIR_DIV */ +#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_EN_SHIFT 0 +#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_EN_MASK 0x1 +#define 
PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_VAL_SHIFT 8 +#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_VAL_MASK 0xFF00 + +/* PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL */ +#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_SHIFT 0 +#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_MASK 0x1 +#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_SHIFT 1 +#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_MASK 0x2 + +/* PSOC_GLOBAL_CONF_MEM_REPAIR_STS */ +#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_MASK 0x1 +#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_FAIL_SHIFT 4 +#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_FAIL_MASK 0x10 + +/* PSOC_GLOBAL_CONF_OUTSTANT_TRANS */ +#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_SHIFT 0 +#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_MASK 0x1 +#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_SHIFT 1 +#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_MASK 0x2 + +/* PSOC_GLOBAL_CONF_MASK_REQ */ +#define PSOC_GLOBAL_CONF_MASK_REQ_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_MASK_REQ_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_WD_RST_CFG_L */ +#define PSOC_GLOBAL_CONF_WD_RST_CFG_L_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_L_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_WD_RST_CFG_H */ +#define PSOC_GLOBAL_CONF_WD_RST_CFG_H_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_H_VAL_MASK 0x3FFFFF + +/* PSOC_GLOBAL_CONF_MNL_RST_CFG_L */ +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_L_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_L_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_MNL_RST_CFG_H */ +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_H_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_H_VAL_MASK 0x3FFFFF + +/* PSOC_GLOBAL_CONF_PRSTN_RST_CFG_L */ +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_L_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_L_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_PRSTN_RST_CFG_H */ +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_H_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_H_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L */ +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H */ +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H_VAL_MASK 0x3FFFFF + +/* PSOC_GLOBAL_CONF_SW_ALL_RST */ +#define PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_SOFT_RST */ +#define PSOC_GLOBAL_CONF_SOFT_RST_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_SOFT_RST_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_SOFT_RST_CFG_L */ +#define PSOC_GLOBAL_CONF_SOFT_RST_CFG_L_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_SOFT_RST_CFG_L_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_SOFT_RST_CFG_H */ +#define PSOC_GLOBAL_CONF_SOFT_RST_CFG_H_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_SOFT_RST_CFG_H_VAL_MASK 0x3FFFFF + +/* PSOC_GLOBAL_CONF_UNIT_RST_N */ +#define PSOC_GLOBAL_CONF_UNIT_RST_N_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_UNIT_RST_N_L */ +#define PSOC_GLOBAL_CONF_UNIT_RST_N_L_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_L_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_UNIT_RST_N_H */ +#define PSOC_GLOBAL_CONF_UNIT_RST_N_H_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_H_VAL_MASK 0x3FFFFF + +/* PSOC_GLOBAL_CONF_BTL_IMG */ +#define PSOC_GLOBAL_CONF_BTL_IMG_SEL_SHIFT 0 +#define PSOC_GLOBAL_CONF_BTL_IMG_SEL_MASK 0x1 + +/* PSOC_GLOBAL_CONF_PRSTN_MASK */ +#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_WD_MASK */ +#define 
PSOC_GLOBAL_CONF_WD_MASK_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_WD_MASK_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_RST_SRC */ +#define PSOC_GLOBAL_CONF_RST_SRC_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_RST_SRC_VAL_MASK 0xF + +/* PSOC_GLOBAL_CONF_BOOT_STATE */ +#define PSOC_GLOBAL_CONF_BOOT_STATE_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_BOOT_STATE_VAL_MASK 0x1 + +/* PSOC_GLOBAL_CONF_PAD_1V8_CFG */ +#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_MASK 0x7F + +/* PSOC_GLOBAL_CONF_PAD_3V3_CFG */ +#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_MASK 0x7F + +/* PSOC_GLOBAL_CONF_PAD_1V8_INPUT */ +#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_SHIFT 0 +#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_MASK 0x7 + +/* PSOC_GLOBAL_CONF_BNK3V3_MS */ +#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_MASK 0x3 + +/* PSOC_GLOBAL_CONF_ADC_CLK_FREQ */ +#define PSOC_GLOBAL_CONF_ADC_CLK_FREQ_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_ADC_CLK_FREQ_VAL_MASK 0xFF + +/* PSOC_GLOBAL_CONF_ADC_DELAY_FROM_START */ +#define PSOC_GLOBAL_CONF_ADC_DELAY_FROM_START_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_ADC_DELAY_FROM_START_VAL_MASK 0xFF + +/* PSOC_GLOBAL_CONF_ADC_DATA_SAMPLES */ +#define PSOC_GLOBAL_CONF_ADC_DATA_SAMPLES_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_ADC_DATA_SAMPLES_VAL_MASK 0x1F + +/* PSOC_GLOBAL_CONF_ADC_TPH_CS */ +#define PSOC_GLOBAL_CONF_ADC_TPH_CS_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_ADC_TPH_CS_VAL_MASK 0xFF + +/* PSOC_GLOBAL_CONF_ADC_LSB_NMSB */ +#define PSOC_GLOBAL_CONF_ADC_LSB_NMSB_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_ADC_LSB_NMSB_VAL_MASK 0x1 + +/* PSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES */ +#define PSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES_VAL_MASK 0x1 + +/* PSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE */ +#define PSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE_VAL_MASK 0x1 + +/* PSOC_GLOBAL_CONF_ADC_CFG_DATA */ +#define PSOC_GLOBAL_CONF_ADC_CFG_DATA_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_ADC_CFG_DATA_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_ADC_TDV_CSDO */ +#define PSOC_GLOBAL_CONF_ADC_TDV_CSDO_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_ADC_TDV_CSDO_VAL_MASK 0xFF + +/* PSOC_GLOBAL_CONF_ADC_TSU_CSCK */ +#define PSOC_GLOBAL_CONF_ADC_TSU_CSCK_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_ADC_TSU_CSCK_VAL_MASK 0xFF + +/* PSOC_GLOBAL_CONF_PAD_DEFAULT */ +#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_MASK 0xF + +/* PSOC_GLOBAL_CONF_PAD_SEL */ +#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_MASK 0x3 + +/* PSOC_GLOBAL_CONF_RST_CTRL */ +#define PSOC_GLOBAL_CONF_RST_CTRL_SEL_SHIFT 0 +#define PSOC_GLOBAL_CONF_RST_CTRL_SEL_MASK 0xFF + +#endif /* ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h new file mode 100644 index 000000000..1b5cfcc1d --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h @@ -0,0 +1,1062 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ +#define ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ + +/* + ***************************************** + * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF) + ***************************************** + */ + +#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0 0xC4B000 + +#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_1 0xC4B004 + +#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_2 0xC4B008 + +#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_3 0xC4B00C + +#define mmPSOC_GLOBAL_CONF_PCI_FW_FSM 0xC4B020 + +#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START 0xC4B024 + +#define mmPSOC_GLOBAL_CONF_BTM_FSM 0xC4B028 + +#define mmPSOC_GLOBAL_CONF_SW_BTM_FSM 0xC4B030 + +#define mmPSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM 0xC4B034 + +#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT 0xC4B038 + +#define mmPSOC_GLOBAL_CONF_SPI_MEM_EN 0xC4B040 + +#define mmPSOC_GLOBAL_CONF_PRSTN 0xC4B044 + +#define mmPSOC_GLOBAL_CONF_PCIE_EN 0xC4B048 + +#define mmPSOC_GLOBAL_CONF_PCIE_PRSTN_INTR 0xC4B04C + +#define mmPSOC_GLOBAL_CONF_SPI_IMG_STS 0xC4B050 + +#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_FSM 0xC4B054 + +#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD 0xC4B058 + +#define mmPSOC_GLOBAL_CONF_PHY_STABLE 0xC4B060 + +#define mmPSOC_GLOBAL_CONF_PRSTN_OVR 0xC4B064 + +#define mmPSOC_GLOBAL_CONF_ETR_FLUSH 0xC4B068 + +#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_0 0xC4B070 + +#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_1 0xC4B074 + +#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_2 0xC4B078 + +#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_3 0xC4B07C + +#define mmPSOC_GLOBAL_CONF_DIS_RAZWI_ERR 0xC4B080 + +#define mmPSOC_GLOBAL_CONF_PCIE_PHY_RST_N 0xC4B084 + +#define mmPSOC_GLOBAL_CONF_RAZWI 0xC4B088 + +#define mmPSOC_GLOBAL_CONF_PROT 0xC4B090 + +#define mmPSOC_GLOBAL_CONF_ADC 0xC4B094 + +#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_TO 0xC4B098 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_0 0xC4B100 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_1 0xC4B104 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_2 0xC4B108 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_3 0xC4B10C + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_4 0xC4B110 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_5 0xC4B114 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_6 0xC4B118 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_7 0xC4B11C + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_8 0xC4B120 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_9 0xC4B124 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_10 0xC4B128 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_11 0xC4B12C + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_12 0xC4B130 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_13 0xC4B134 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_14 0xC4B138 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_15 0xC4B13C + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_16 0xC4B140 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_17 0xC4B144 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_18 0xC4B148 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_19 0xC4B14C + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_20 0xC4B150 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_21 0xC4B154 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 0xC4B158 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 0xC4B15C + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 0xC4B160 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_25 0xC4B164 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_26 0xC4B168 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_27 0xC4B16C + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_28 0xC4B170 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_29 0xC4B174 + +#define 
mmPSOC_GLOBAL_CONF_SCRATCHPAD_30 0xC4B178 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_31 0xC4B17C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_0 0xC4B200 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_1 0xC4B204 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_2 0xC4B208 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_3 0xC4B20C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_4 0xC4B210 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_5 0xC4B214 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_6 0xC4B218 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_7 0xC4B21C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_8 0xC4B220 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_9 0xC4B224 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_10 0xC4B228 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_11 0xC4B22C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_12 0xC4B230 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_13 0xC4B234 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_14 0xC4B238 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_15 0xC4B23C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_16 0xC4B240 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_17 0xC4B244 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_18 0xC4B248 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_19 0xC4B24C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_20 0xC4B250 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_21 0xC4B254 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_22 0xC4B258 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_23 0xC4B25C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_24 0xC4B260 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_25 0xC4B264 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_26 0xC4B268 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_27 0xC4B26C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_28 0xC4B270 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_29 0xC4B274 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_30 0xC4B278 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_31 0xC4B27C + +#define mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS 0xC4B300 + +#define mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU 0xC4B304 + +#define mmPSOC_GLOBAL_CONF_SPL_SOURCE 0xC4B308 + +#define mmPSOC_GLOBAL_CONF_I2C_MSTR1_DBG 0xC4B30C + +#define mmPSOC_GLOBAL_CONF_I2C_SLV 0xC4B310 + +#define mmPSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK 0xC4B314 + +#define mmPSOC_GLOBAL_CONF_TRACE_ADDR 0xC4B320 + +#define mmPSOC_GLOBAL_CONF_ARUSER 0xC4B330 + +#define mmPSOC_GLOBAL_CONF_AWUSER 0xC4B334 + +#define mmPSOC_GLOBAL_CONF_TRACE_AWUSER 0xC4B338 + +#define mmPSOC_GLOBAL_CONF_TRACE_ARUSER 0xC4B33C + +#define mmPSOC_GLOBAL_CONF_BTL_STS 0xC4B340 + +#define mmPSOC_GLOBAL_CONF_TIMEOUT_INTR 0xC4B350 + +#define mmPSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR 0xC4B354 + +#define mmPSOC_GLOBAL_CONF_PERIPH_INTR 0xC4B358 + +#define mmPSOC_GLOBAL_CONF_COMB_PERIPH_INTR 0xC4B35C + +#define mmPSOC_GLOBAL_CONF_AXI_ERR_INTR 0xC4B360 + +#define mmPSOC_GLOBAL_CONF_TARGETID 0xC4B400 + +#define mmPSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE 0xC4B420 + +#define mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS 0xC4B430 + +#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_DIV 0xC4B44C + +#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_CTRL 0xC4B450 + +#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_STS 0xC4B454 + +#define mmPSOC_GLOBAL_CONF_OUTSTANT_TRANS 0xC4B458 + +#define mmPSOC_GLOBAL_CONF_MASK_REQ 0xC4B45C + +#define mmPSOC_GLOBAL_CONF_WD_RST_CFG_L 0xC4B460 + +#define mmPSOC_GLOBAL_CONF_WD_RST_CFG_H 0xC4B464 + +#define mmPSOC_GLOBAL_CONF_MNL_RST_CFG_L 0xC4B470 + +#define mmPSOC_GLOBAL_CONF_MNL_RST_CFG_H 0xC4B474 + +#define mmPSOC_GLOBAL_CONF_PRSTN_RST_CFG_L 0xC4B480 + +#define mmPSOC_GLOBAL_CONF_PRSTN_RST_CFG_H 0xC4B484 + +#define mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L 0xC4B490 + +#define mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H 0xC4B494 + +#define mmPSOC_GLOBAL_CONF_SW_ALL_RST 0xC4B498 + +#define 
mmPSOC_GLOBAL_CONF_SOFT_RST 0xC4B4A0 + +#define mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L 0xC4B4A4 + +#define mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H 0xC4B4A8 + +#define mmPSOC_GLOBAL_CONF_UNIT_RST_N 0xC4B4B0 + +#define mmPSOC_GLOBAL_CONF_UNIT_RST_N_L 0xC4B4B4 + +#define mmPSOC_GLOBAL_CONF_UNIT_RST_N_H 0xC4B4B8 + +#define mmPSOC_GLOBAL_CONF_BTL_IMG 0xC4B4E0 + +#define mmPSOC_GLOBAL_CONF_PRSTN_MASK 0xC4B4E4 + +#define mmPSOC_GLOBAL_CONF_WD_MASK 0xC4B4E8 + +#define mmPSOC_GLOBAL_CONF_RST_SRC 0xC4B4F0 + +#define mmPSOC_GLOBAL_CONF_BOOT_STATE 0xC4B4F4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_0 0xC4B500 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_1 0xC4B504 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_2 0xC4B508 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_3 0xC4B50C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_4 0xC4B510 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_5 0xC4B514 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_6 0xC4B518 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_7 0xC4B51C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_8 0xC4B520 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_9 0xC4B524 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_10 0xC4B528 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_11 0xC4B52C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_12 0xC4B530 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_13 0xC4B534 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_14 0xC4B538 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_15 0xC4B53C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_16 0xC4B540 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_17 0xC4B544 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_18 0xC4B548 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_19 0xC4B54C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_20 0xC4B550 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_21 0xC4B554 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_22 0xC4B558 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_23 0xC4B55C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_24 0xC4B560 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_25 0xC4B564 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_26 0xC4B568 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_27 0xC4B56C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_28 0xC4B570 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_29 0xC4B574 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_30 0xC4B578 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_31 0xC4B57C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_32 0xC4B580 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_33 0xC4B584 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_34 0xC4B588 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_35 0xC4B58C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_36 0xC4B590 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_37 0xC4B594 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_38 0xC4B598 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_39 0xC4B59C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_40 0xC4B5A0 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_41 0xC4B5A4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_42 0xC4B5A8 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_43 0xC4B5AC + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_44 0xC4B5B0 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_45 0xC4B5B4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_46 0xC4B5B8 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_47 0xC4B5BC + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_48 0xC4B5C0 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_49 0xC4B5C4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_50 0xC4B5C8 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_51 0xC4B5CC + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_52 0xC4B5D0 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_53 0xC4B5D4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_54 0xC4B5D8 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_55 0xC4B5DC 
+ +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_56 0xC4B5E0 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_57 0xC4B5E4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_58 0xC4B5E8 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_59 0xC4B5EC + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_60 0xC4B5F0 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_61 0xC4B5F4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_62 0xC4B5F8 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_63 0xC4B5FC + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_64 0xC4B600 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_65 0xC4B604 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_66 0xC4B608 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_67 0xC4B60C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_68 0xC4B610 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_69 0xC4B614 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_70 0xC4B618 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_71 0xC4B61C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_72 0xC4B620 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_73 0xC4B624 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_74 0xC4B628 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_75 0xC4B62C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_76 0xC4B630 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_77 0xC4B634 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_78 0xC4B638 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_79 0xC4B63C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_80 0xC4B640 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_81 0xC4B644 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_82 0xC4B648 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_83 0xC4B64C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_84 0xC4B650 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_85 0xC4B654 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_86 0xC4B658 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_87 0xC4B65C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_88 0xC4B660 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_89 0xC4B664 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_0 0xC4B690 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_1 0xC4B694 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_2 0xC4B698 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_3 0xC4B69C + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_4 0xC4B6A0 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_5 0xC4B6A4 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_6 0xC4B6A8 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_7 0xC4B6AC + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_8 0xC4B6B0 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_9 0xC4B6B4 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_10 0xC4B6B8 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_11 0xC4B6BC + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_0 0xC4B6C0 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_1 0xC4B6C4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_2 0xC4B6C8 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_3 0xC4B6CC + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_4 0xC4B6D0 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_5 0xC4B6D4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_6 0xC4B6D8 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_7 0xC4B6DC + +#define mmPSOC_GLOBAL_CONF_BNK3V3_MS 0xC4B710 + +#define mmPSOC_GLOBAL_CONF_ADC_CLK_FREQ 0xC4B720 + +#define mmPSOC_GLOBAL_CONF_ADC_DELAY_FROM_START 0xC4B724 + +#define mmPSOC_GLOBAL_CONF_ADC_DATA_SAMPLES 0xC4B728 + +#define mmPSOC_GLOBAL_CONF_ADC_TPH_CS 0xC4B72C + +#define mmPSOC_GLOBAL_CONF_ADC_LSB_NMSB 0xC4B730 + +#define mmPSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES 0xC4B734 + +#define mmPSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE 0xC4B738 + +#define mmPSOC_GLOBAL_CONF_ADC_CFG_DATA 0xC4B73C + +#define mmPSOC_GLOBAL_CONF_ADC_TDV_CSDO 0xC4B740 + +#define mmPSOC_GLOBAL_CONF_ADC_TSU_CSCK 0xC4B744 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_0 0xC4B800 
+ +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_1 0xC4B804 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_2 0xC4B808 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_3 0xC4B80C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_4 0xC4B810 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_5 0xC4B814 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_6 0xC4B818 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_7 0xC4B81C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_8 0xC4B820 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_9 0xC4B824 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_10 0xC4B828 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_11 0xC4B82C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_12 0xC4B830 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_13 0xC4B834 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_14 0xC4B838 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_15 0xC4B83C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_16 0xC4B840 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_17 0xC4B844 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_18 0xC4B848 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_19 0xC4B84C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_20 0xC4B850 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_21 0xC4B854 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_22 0xC4B858 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_23 0xC4B85C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_24 0xC4B860 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_25 0xC4B864 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_26 0xC4B868 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_27 0xC4B86C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_28 0xC4B870 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_29 0xC4B874 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_30 0xC4B878 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_31 0xC4B87C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_32 0xC4B880 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_33 0xC4B884 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_34 0xC4B888 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_35 0xC4B88C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_36 0xC4B890 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_37 0xC4B894 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_38 0xC4B898 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_39 0xC4B89C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_40 0xC4B8A0 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_41 0xC4B8A4 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_42 0xC4B8A8 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_43 0xC4B8AC + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_44 0xC4B8B0 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_45 0xC4B8B4 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_46 0xC4B8B8 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_47 0xC4B8BC + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_48 0xC4B8C0 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_49 0xC4B8C4 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_50 0xC4B8C8 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_51 0xC4B8CC + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_52 0xC4B8D0 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_53 0xC4B8D4 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_54 0xC4B8D8 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_55 0xC4B8DC + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_56 0xC4B8E0 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_57 0xC4B8E4 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_58 0xC4B8E8 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_59 0xC4B8EC + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_60 0xC4B8F0 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_61 0xC4B8F4 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_62 0xC4B8F8 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_63 0xC4B8FC + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_64 0xC4B900 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_65 0xC4B904 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_66 0xC4B908 
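(Editorial aside, not part of the generated header or the diff: indexed registers such as PAD_DEFAULT_0..PAD_DEFAULT_102 sit at a fixed 4-byte stride from their first entry (0xC4B800, 0xC4B804, ...), so any index's offset can be computed instead of spelled out. A minimal sketch under that assumption, checked against the PAD_DEFAULT_102 value listed below:)

	#include <stdint.h>
	#include <assert.h>

	#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_0 0xC4B800 /* from the table above */

	/* Offset of PAD_DEFAULT_i; the table shows a 4-byte stride for i = 0..102. */
	static inline uint32_t pad_default_off(unsigned int i)
	{
		return mmPSOC_GLOBAL_CONF_PAD_DEFAULT_0 + 4u * i;
	}

	int main(void)
	{
		assert(pad_default_off(102) == 0xC4B998); /* matches mmPSOC_GLOBAL_CONF_PAD_DEFAULT_102 */
		return 0;
	}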
+ +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_67 0xC4B90C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_68 0xC4B910 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_69 0xC4B914 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_70 0xC4B918 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_71 0xC4B91C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_72 0xC4B920 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_73 0xC4B924 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_74 0xC4B928 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_75 0xC4B92C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_76 0xC4B930 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_77 0xC4B934 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_78 0xC4B938 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_79 0xC4B93C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_80 0xC4B940 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_81 0xC4B944 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_82 0xC4B948 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_83 0xC4B94C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_84 0xC4B950 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_85 0xC4B954 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_86 0xC4B958 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_87 0xC4B95C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_88 0xC4B960 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_89 0xC4B964 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_90 0xC4B968 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_91 0xC4B96C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_92 0xC4B970 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_93 0xC4B974 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_94 0xC4B978 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_95 0xC4B97C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_96 0xC4B980 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_97 0xC4B984 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_98 0xC4B988 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_99 0xC4B98C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_100 0xC4B990 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_101 0xC4B994 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_102 0xC4B998 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_0 0xC4BA00 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_1 0xC4BA04 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_2 0xC4BA08 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_3 0xC4BA0C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_4 0xC4BA10 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_5 0xC4BA14 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_6 0xC4BA18 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_7 0xC4BA1C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_8 0xC4BA20 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_9 0xC4BA24 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_10 0xC4BA28 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_11 0xC4BA2C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_12 0xC4BA30 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_13 0xC4BA34 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_14 0xC4BA38 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_15 0xC4BA3C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_16 0xC4BA40 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_17 0xC4BA44 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_18 0xC4BA48 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_19 0xC4BA4C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_20 0xC4BA50 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_21 0xC4BA54 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_22 0xC4BA58 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_23 0xC4BA5C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_24 0xC4BA60 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_25 0xC4BA64 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_26 0xC4BA68 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_27 0xC4BA6C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_28 0xC4BA70 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_29 0xC4BA74 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_30 0xC4BA78 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_31 0xC4BA7C + +#define 
mmPSOC_GLOBAL_CONF_PAD_SEL_32 0xC4BA80 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_33 0xC4BA84 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_34 0xC4BA88 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_35 0xC4BA8C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_36 0xC4BA90 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_37 0xC4BA94 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_38 0xC4BA98 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_39 0xC4BA9C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_40 0xC4BAA0 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_41 0xC4BAA4 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_42 0xC4BAA8 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_43 0xC4BAAC + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_44 0xC4BAB0 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_45 0xC4BAB4 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_46 0xC4BAB8 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_47 0xC4BABC + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_48 0xC4BAC0 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_49 0xC4BAC4 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_50 0xC4BAC8 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_51 0xC4BACC + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_52 0xC4BAD0 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_53 0xC4BAD4 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_54 0xC4BAD8 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_55 0xC4BADC + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_56 0xC4BAE0 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_57 0xC4BAE4 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_58 0xC4BAE8 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_59 0xC4BAEC + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_60 0xC4BAF0 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_61 0xC4BAF4 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_62 0xC4BAF8 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_63 0xC4BAFC + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_64 0xC4BB00 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_65 0xC4BB04 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_66 0xC4BB08 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_67 0xC4BB0C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_68 0xC4BB10 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_69 0xC4BB14 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_70 0xC4BB18 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_71 0xC4BB1C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_72 0xC4BB20 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_73 0xC4BB24 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_74 0xC4BB28 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_75 0xC4BB2C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_76 0xC4BB30 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_77 0xC4BB34 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_78 0xC4BB38 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_79 0xC4BB3C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_80 0xC4BB40 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_81 0xC4BB44 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_82 0xC4BB48 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_83 0xC4BB4C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_84 0xC4BB50 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_85 0xC4BB54 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_86 0xC4BB58 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_87 0xC4BB5C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_88 0xC4BB60 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_89 0xC4BB64 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_90 0xC4BB68 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_91 0xC4BB6C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_92 0xC4BB70 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_93 0xC4BB74 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_94 0xC4BB78 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_95 0xC4BB7C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_96 0xC4BB80 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_97 0xC4BB84 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_98 0xC4BB88 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_99 0xC4BB8C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_100 0xC4BB90 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_101 0xC4BB94 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_102 0xC4BB98 + +#define 
mmPSOC_GLOBAL_CONF_RST_CTRL_0 0xC4BC00 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_1 0xC4BC04 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_2 0xC4BC08 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_3 0xC4BC0C + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_4 0xC4BC10 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_5 0xC4BC14 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_6 0xC4BC18 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_7 0xC4BC1C + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_8 0xC4BC20 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_9 0xC4BC24 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_10 0xC4BC28 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_11 0xC4BC2C + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_12 0xC4BC30 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_13 0xC4BC34 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_14 0xC4BC38 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_15 0xC4BC3C + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_16 0xC4BC40 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_17 0xC4BC44 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_18 0xC4BC48 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_19 0xC4BC4C + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_20 0xC4BC50 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_21 0xC4BC54 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_22 0xC4BC58 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_23 0xC4BC5C + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_24 0xC4BC60 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_25 0xC4BC64 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_26 0xC4BC68 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_27 0xC4BC6C + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_28 0xC4BC70 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_29 0xC4BC74 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_30 0xC4BC78 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_31 0xC4BC7C + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_32 0xC4BC80 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_33 0xC4BC84 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_34 0xC4BC88 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_35 0xC4BC8C + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_36 0xC4BC90 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_37 0xC4BC94 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_38 0xC4BC98 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_39 0xC4BC9C + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_40 0xC4BCA0 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_41 0xC4BCA4 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_42 0xC4BCA8 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_43 0xC4BCAC + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_44 0xC4BCB0 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_45 0xC4BCB4 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_46 0xC4BCB8 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_47 0xC4BCBC + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_48 0xC4BCC0 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_49 0xC4BCC4 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_50 0xC4BCC8 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_51 0xC4BCCC + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_52 0xC4BCD0 + +#define mmPSOC_GLOBAL_CONF_RST_CTRL_53 0xC4BCD4 + +#endif /* ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h new file mode 100644 index 000000000..687e2255c --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PSOC_HBM_PLL_REGS_H_ +#define ASIC_REG_PSOC_HBM_PLL_REGS_H_ + +/* + ***************************************** + * PSOC_HBM_PLL (Prototype: PLL) + ***************************************** + */ + +#define mmPSOC_HBM_PLL_NR 0xC74100 + +#define mmPSOC_HBM_PLL_NF 0xC74104 + +#define mmPSOC_HBM_PLL_OD 0xC74108 + +#define mmPSOC_HBM_PLL_NB 0xC7410C + +#define mmPSOC_HBM_PLL_CFG 0xC74110 + +#define mmPSOC_HBM_PLL_LOSE_MASK 0xC74120 + +#define mmPSOC_HBM_PLL_LOCK_INTR 0xC74128 + +#define mmPSOC_HBM_PLL_LOCK_BYPASS 0xC7412C + +#define mmPSOC_HBM_PLL_DATA_CHNG 0xC74130 + +#define mmPSOC_HBM_PLL_RST 0xC74134 + +#define mmPSOC_HBM_PLL_SLIP_WD_CNTR 0xC74150 + +#define mmPSOC_HBM_PLL_DIV_FACTOR_0 0xC74200 + +#define mmPSOC_HBM_PLL_DIV_FACTOR_1 0xC74204 + +#define mmPSOC_HBM_PLL_DIV_FACTOR_2 0xC74208 + +#define mmPSOC_HBM_PLL_DIV_FACTOR_3 0xC7420C + +#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_0 0xC74220 + +#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_1 0xC74224 + +#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_2 0xC74228 + +#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_3 0xC7422C + +#define mmPSOC_HBM_PLL_DIV_SEL_0 0xC74280 + +#define mmPSOC_HBM_PLL_DIV_SEL_1 0xC74284 + +#define mmPSOC_HBM_PLL_DIV_SEL_2 0xC74288 + +#define mmPSOC_HBM_PLL_DIV_SEL_3 0xC7428C + +#define mmPSOC_HBM_PLL_DIV_EN_0 0xC742A0 + +#define mmPSOC_HBM_PLL_DIV_EN_1 0xC742A4 + +#define mmPSOC_HBM_PLL_DIV_EN_2 0xC742A8 + +#define mmPSOC_HBM_PLL_DIV_EN_3 0xC742AC + +#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_0 0xC742C0 + +#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_1 0xC742C4 + +#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_2 0xC742C8 + +#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_3 0xC742CC + +#define mmPSOC_HBM_PLL_CLK_GATER 0xC74300 + +#define mmPSOC_HBM_PLL_CLK_RLX_0 0xC74310 + +#define mmPSOC_HBM_PLL_CLK_RLX_1 0xC74314 + +#define mmPSOC_HBM_PLL_CLK_RLX_2 0xC74318 + +#define mmPSOC_HBM_PLL_CLK_RLX_3 0xC7431C + +#define mmPSOC_HBM_PLL_REF_CNTR_PERIOD 0xC74400 + +#define mmPSOC_HBM_PLL_REF_LOW_THRESHOLD 0xC74410 + +#define mmPSOC_HBM_PLL_REF_HIGH_THRESHOLD 0xC74420 + +#define mmPSOC_HBM_PLL_PLL_NOT_STABLE 0xC74430 + +#define mmPSOC_HBM_PLL_FREQ_CALC_EN 0xC74440 + +#define mmPSOC_HBM_PLL_RLX_BITMAP_CFG 0xC74500 + +#define mmPSOC_HBM_PLL_RLX_BITMAP_0 0xC74510 + +#define mmPSOC_HBM_PLL_RLX_BITMAP_1 0xC74514 + +#define mmPSOC_HBM_PLL_RLX_BITMAP_2 0xC74518 + +#define mmPSOC_HBM_PLL_RLX_BITMAP_3 0xC7451C + +#endif /* ASIC_REG_PSOC_HBM_PLL_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h new file mode 100644 index 000000000..3dc9bb454 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PSOC_PCI_PLL_REGS_H_ +#define ASIC_REG_PSOC_PCI_PLL_REGS_H_ + +/* + ***************************************** + * PSOC_PCI_PLL (Prototype: PLL) + ***************************************** + */ + +#define mmPSOC_PCI_PLL_NR 0xC72100 + +#define mmPSOC_PCI_PLL_NF 0xC72104 + +#define mmPSOC_PCI_PLL_OD 0xC72108 + +#define mmPSOC_PCI_PLL_NB 0xC7210C + +#define mmPSOC_PCI_PLL_CFG 0xC72110 + +#define mmPSOC_PCI_PLL_LOSE_MASK 0xC72120 + +#define mmPSOC_PCI_PLL_LOCK_INTR 0xC72128 + +#define mmPSOC_PCI_PLL_LOCK_BYPASS 0xC7212C + +#define mmPSOC_PCI_PLL_DATA_CHNG 0xC72130 + +#define mmPSOC_PCI_PLL_RST 0xC72134 + +#define mmPSOC_PCI_PLL_SLIP_WD_CNTR 0xC72150 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_0 0xC72200 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_1 0xC72204 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_2 0xC72208 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_3 0xC7220C + +#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_0 0xC72220 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_1 0xC72224 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_2 0xC72228 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_3 0xC7222C + +#define mmPSOC_PCI_PLL_DIV_SEL_0 0xC72280 + +#define mmPSOC_PCI_PLL_DIV_SEL_1 0xC72284 + +#define mmPSOC_PCI_PLL_DIV_SEL_2 0xC72288 + +#define mmPSOC_PCI_PLL_DIV_SEL_3 0xC7228C + +#define mmPSOC_PCI_PLL_DIV_EN_0 0xC722A0 + +#define mmPSOC_PCI_PLL_DIV_EN_1 0xC722A4 + +#define mmPSOC_PCI_PLL_DIV_EN_2 0xC722A8 + +#define mmPSOC_PCI_PLL_DIV_EN_3 0xC722AC + +#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_0 0xC722C0 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_1 0xC722C4 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_2 0xC722C8 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_3 0xC722CC + +#define mmPSOC_PCI_PLL_CLK_GATER 0xC72300 + +#define mmPSOC_PCI_PLL_CLK_RLX_0 0xC72310 + +#define mmPSOC_PCI_PLL_CLK_RLX_1 0xC72314 + +#define mmPSOC_PCI_PLL_CLK_RLX_2 0xC72318 + +#define mmPSOC_PCI_PLL_CLK_RLX_3 0xC7231C + +#define mmPSOC_PCI_PLL_REF_CNTR_PERIOD 0xC72400 + +#define mmPSOC_PCI_PLL_REF_LOW_THRESHOLD 0xC72410 + +#define mmPSOC_PCI_PLL_REF_HIGH_THRESHOLD 0xC72420 + +#define mmPSOC_PCI_PLL_PLL_NOT_STABLE 0xC72430 + +#define mmPSOC_PCI_PLL_FREQ_CALC_EN 0xC72440 + +#define mmPSOC_PCI_PLL_RLX_BITMAP_CFG 0xC72500 + +#define mmPSOC_PCI_PLL_RLX_BITMAP_0 0xC72510 + +#define mmPSOC_PCI_PLL_RLX_BITMAP_1 0xC72514 + +#define mmPSOC_PCI_PLL_RLX_BITMAP_2 0xC72518 + +#define mmPSOC_PCI_PLL_RLX_BITMAP_3 0xC7251C + +#endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h new file mode 100644 index 000000000..9ce24597d --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PSOC_TIMESTAMP_REGS_H_ +#define ASIC_REG_PSOC_TIMESTAMP_REGS_H_ + +/* + ***************************************** + * PSOC_TIMESTAMP (Prototype: TIMESTAMP) + ***************************************** + */ + +#define mmPSOC_TIMESTAMP_CNTCR 0xC49000 + +#define mmPSOC_TIMESTAMP_CNTSR 0xC49004 + +#define mmPSOC_TIMESTAMP_CNTCVL 0xC49008 + +#define mmPSOC_TIMESTAMP_CNTCVU 0xC4900C + +#define mmPSOC_TIMESTAMP_CNTFID0 0xC49020 + +#define mmPSOC_TIMESTAMP_PIDR4 0xC49FD0 + +#define mmPSOC_TIMESTAMP_PIDR5 0xC49FD4 + +#define mmPSOC_TIMESTAMP_PIDR6 0xC49FD8 + +#define mmPSOC_TIMESTAMP_PIDR7 0xC49FDC + +#define mmPSOC_TIMESTAMP_PIDR0 0xC49FE0 + +#define mmPSOC_TIMESTAMP_PIDR1 0xC49FE4 + +#define mmPSOC_TIMESTAMP_PIDR2 0xC49FE8 + +#define mmPSOC_TIMESTAMP_PIDR3 0xC49FEC + +#define mmPSOC_TIMESTAMP_CIDR0 0xC49FF0 + +#define mmPSOC_TIMESTAMP_CIDR1 0xC49FF4 + +#define mmPSOC_TIMESTAMP_CIDR2 0xC49FF8 + +#define mmPSOC_TIMESTAMP_CIDR3 0xC49FFC + +#endif /* ASIC_REG_PSOC_TIMESTAMP_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h new file mode 100644 index 000000000..ddf824392 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h @@ -0,0 +1,896 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SIF_RTR_CTRL_0_REGS_H_ +#define ASIC_REG_SIF_RTR_CTRL_0_REGS_H_ + +/* + ***************************************** + * SIF_RTR_CTRL_0 (Prototype: RTR_CTRL) + ***************************************** + */ + +#define mmSIF_RTR_CTRL_0_PERM_SEL 0x306108 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_0 0x306114 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_1 0x306118 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_2 0x30611C + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_3 0x306120 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_4 0x306124 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_5 0x306128 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_6 0x30612C + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_7 0x306130 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_8 0x306134 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_9 0x306138 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_10 0x30613C + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_11 0x306140 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_12 0x306144 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_13 0x306148 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_14 0x30614C + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_15 0x306150 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_16 0x306154 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_17 0x306158 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_18 0x30615C + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_19 0x306160 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_20 0x306164 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_21 0x306168 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_22 0x30616C + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_23 0x306170 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_24 0x306174 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_25 0x306178 + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_26 0x30617C + +#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_27 0x306180 + +#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_0 0x306184 + +#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_1 0x306188 + 
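(Editorial aside, not part of the generated headers or the diff: the PSOC_HBM_PLL and PSOC_PCI_PLL files above are both instances of the same "Prototype: PLL" block, so their registers share identical sub-offsets (NR at +0x100, NF at +0x104, and so on) relative to per-instance bases. The base values 0xC74000 and 0xC72000 below are inferred from the listed addresses, not stated in the files; a minimal base-plus-offset sketch under that reading:)

	#include <stdint.h>
	#include <stdio.h>

	/* Block bases inferred from the PLL register tables above. */
	#define PSOC_HBM_PLL_BASE 0xC74000u
	#define PSOC_PCI_PLL_BASE 0xC72000u

	/* Sub-offsets common to the PLL prototype. */
	#define PLL_NR_OFF 0x100u
	#define PLL_NF_OFF 0x104u

	int main(void)
	{
		/* Recomputes two of the absolute addresses listed above. */
		printf("HBM NR: 0x%X\n", PSOC_HBM_PLL_BASE + PLL_NR_OFF); /* 0xC74100 */
		printf("PCI NF: 0x%X\n", PSOC_PCI_PLL_BASE + PLL_NF_OFF); /* 0xC72104 */
		return 0;
	}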
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_2 0x30618C + +#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_3 0x306190 + +#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_4 0x306194 + +#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_5 0x306198 + +#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_6 0x30619C + +#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_7 0x3061A0 + +#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_8 0x3061A4 + +#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_9 0x3061A8 + +#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_10 0x3061AC + +#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_11 0x3061B0 + +#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_12 0x3061B4 + +#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_13 0x3061B8 + +#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_14 0x3061BC + +#define mmSIF_RTR_CTRL_0_SCRAM_SRAM_EN 0x30626C + +#define mmSIF_RTR_CTRL_0_RL_HBM_EN 0x306274 + +#define mmSIF_RTR_CTRL_0_RL_HBM_SAT 0x306278 + +#define mmSIF_RTR_CTRL_0_RL_HBM_RST 0x30627C + +#define mmSIF_RTR_CTRL_0_RL_HBM_TIMEOUT 0x306280 + +#define mmSIF_RTR_CTRL_0_SCRAM_HBM_EN 0x306284 + +#define mmSIF_RTR_CTRL_0_RL_PCI_EN 0x306288 + +#define mmSIF_RTR_CTRL_0_RL_PCI_SAT 0x30628C + +#define mmSIF_RTR_CTRL_0_RL_PCI_RST 0x306290 + +#define mmSIF_RTR_CTRL_0_RL_PCI_TIMEOUT 0x306294 + +#define mmSIF_RTR_CTRL_0_RL_SRAM_EN 0x30629C + +#define mmSIF_RTR_CTRL_0_RL_SRAM_SAT 0x3062A0 + +#define mmSIF_RTR_CTRL_0_RL_SRAM_RST 0x3062A4 + +#define mmSIF_RTR_CTRL_0_RL_SRAM_TIMEOUT 0x3062AC + +#define mmSIF_RTR_CTRL_0_RL_SRAM_RED 0x3062B4 + +#define mmSIF_RTR_CTRL_0_E2E_HBM_EN 0x3062EC + +#define mmSIF_RTR_CTRL_0_E2E_PCI_EN 0x3062F0 + +#define mmSIF_RTR_CTRL_0_E2E_HBM_WR_SIZE 0x3062F4 + +#define mmSIF_RTR_CTRL_0_E2E_PCI_WR_SIZE 0x3062F8 + +#define mmSIF_RTR_CTRL_0_E2E_AW_PCI_CTR_SET_EN 0x306404 + +#define mmSIF_RTR_CTRL_0_E2E_AW_PCI_CTR_SET 0x306408 + +#define mmSIF_RTR_CTRL_0_E2E_AW_PCI_CTR_WRAP 0x30640C + +#define mmSIF_RTR_CTRL_0_E2E_AW_PCI_CTR_CNT 0x306410 + +#define mmSIF_RTR_CTRL_0_E2E_AW_HBM_CTR_SET_EN 0x306414 + +#define mmSIF_RTR_CTRL_0_E2E_AW_HBM_CTR_SET 0x306418 + +#define mmSIF_RTR_CTRL_0_E2E_HBM_RD_SIZE 0x30641C + +#define mmSIF_RTR_CTRL_0_E2E_PCI_RD_SIZE 0x306420 + +#define mmSIF_RTR_CTRL_0_E2E_AR_PCI_CTR_SET_EN 0x306424 + +#define mmSIF_RTR_CTRL_0_E2E_AR_PCI_CTR_SET 0x306428 + +#define mmSIF_RTR_CTRL_0_E2E_AR_PCI_CTR_WRAP 0x30642C + +#define mmSIF_RTR_CTRL_0_E2E_AR_PCI_CTR_CNT 0x306430 + +#define mmSIF_RTR_CTRL_0_E2E_AR_HBM_CTR_SET_EN 0x306434 + +#define mmSIF_RTR_CTRL_0_E2E_AR_HBM_CTR_SET 0x306438 + +#define mmSIF_RTR_CTRL_0_NL_HBM_SEL_0 0x306450 + +#define mmSIF_RTR_CTRL_0_NL_HBM_SEL_1 0x306454 + +#define mmSIF_RTR_CTRL_0_NON_LIN_EN 0x306480 + +#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_0 0x306500 + +#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_1 0x306504 + +#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_2 0x306508 + +#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_3 0x30650C + +#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_4 0x306510 + +#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_0 0x306514 + +#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_1 0x306520 + +#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_2 0x306524 + +#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_3 0x306528 + +#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_4 0x30652C + +#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_5 0x306530 + +#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_6 0x306534 + +#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_7 0x306538 + +#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_8 0x30653C + +#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_9 0x306540 + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_0 0x306550 + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_1 0x306554 + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_2 0x306558 + +#define 
mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_3 0x30655C + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_4 0x306560 + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_5 0x306564 + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_6 0x306568 + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_7 0x30656C + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_8 0x306570 + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_9 0x306574 + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_10 0x306578 + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_11 0x30657C + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_12 0x306580 + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_13 0x306584 + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_14 0x306588 + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_15 0x30658C + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_16 0x306590 + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_17 0x306594 + +#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_18 0x306598 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0 0x3065E4 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_1 0x3065E8 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_2 0x3065EC + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_3 0x3065F0 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_4 0x3065F4 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_5 0x3065F8 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_6 0x3065FC + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_7 0x306600 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_8 0x306604 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_9 0x306608 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_10 0x30660C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_11 0x306610 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_12 0x306614 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_13 0x306618 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_14 0x30661C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_15 0x306620 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0 0x306624 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_1 0x306628 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_2 0x30662C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_3 0x306630 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_4 0x306634 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_5 0x306638 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_6 0x30663C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_7 0x306640 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_8 0x306644 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_9 0x306648 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_10 0x30664C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_11 0x306650 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_12 0x306654 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_13 0x306658 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_14 0x30665C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_15 0x306660 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0 0x306664 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_1 0x306668 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_2 0x30666C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_3 0x306670 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_4 0x306674 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_5 0x306678 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_6 0x30667C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_7 0x306680 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_8 0x306684 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_9 0x306688 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_10 0x30668C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_11 
0x306690 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_12 0x306694 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_13 0x306698 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_14 0x30669C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_15 0x3066A0 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0 0x3066A4 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_1 0x3066A8 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_2 0x3066AC + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_3 0x3066B0 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_4 0x3066B4 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_5 0x3066B8 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_6 0x3066BC + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_7 0x3066C0 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_8 0x3066C4 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_9 0x3066C8 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_10 0x3066CC + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_11 0x3066D0 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_12 0x3066D4 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_13 0x3066D8 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_14 0x3066DC + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_15 0x3066E0 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_0 0x3066E4 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_1 0x3066E8 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_2 0x3066EC + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_3 0x3066F0 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_4 0x3066F4 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_5 0x3066F8 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_6 0x3066FC + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_7 0x306700 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_8 0x306704 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_9 0x306708 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_10 0x30670C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_11 0x306710 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_12 0x306714 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_13 0x306718 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_14 0x30671C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_15 0x306720 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_0 0x306724 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_1 0x306728 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_2 0x30672C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_3 0x306730 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_4 0x306734 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_5 0x306738 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_6 0x30673C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_7 0x306740 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_8 0x306744 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_9 0x306748 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_10 0x30674C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_11 0x306750 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_12 0x306754 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_13 0x306758 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_14 0x30675C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_15 0x306760 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_0 0x306764 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_1 0x306768 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_2 0x30676C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_3 0x306770 + +#define 
mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_4 0x306774 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_5 0x306778 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_6 0x30677C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_7 0x306780 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_8 0x306784 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_9 0x306788 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_10 0x30678C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_11 0x306790 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_12 0x306794 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_13 0x306798 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_14 0x30679C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_15 0x3067A0 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_0 0x3067A4 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_1 0x3067A8 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_2 0x3067AC + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_3 0x3067B0 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_4 0x3067B4 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_5 0x3067B8 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_6 0x3067BC + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_7 0x3067C0 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_8 0x3067C4 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_9 0x3067C8 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_10 0x3067CC + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_11 0x3067D0 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_12 0x3067D4 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_13 0x3067D8 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_14 0x3067DC + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_15 0x3067E0 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_0 0x306824 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_1 0x306828 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_2 0x30682C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_3 0x306830 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_4 0x306834 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_5 0x306838 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_6 0x30683C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_7 0x306840 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_8 0x306844 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_9 0x306848 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_10 0x30684C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_11 0x306850 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_12 0x306854 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_13 0x306858 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_14 0x30685C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_15 0x306860 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_0 0x306864 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_1 0x306868 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_2 0x30686C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_3 0x306870 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_4 0x306874 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_5 0x306878 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_6 0x30687C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_7 0x306880 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_8 0x306884 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_9 0x306888 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_10 0x30688C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_11 0x306890 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_12 0x306894 + +#define 
mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_13 0x306898 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_14 0x30689C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_15 0x3068A0 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_0 0x3068A4 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_1 0x3068A8 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_2 0x3068AC + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_3 0x3068B0 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_4 0x3068B4 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_5 0x3068B8 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_6 0x3068BC + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_7 0x3068C0 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_8 0x3068C4 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_9 0x3068C8 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_10 0x3068CC + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_11 0x3068D0 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_12 0x3068D4 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_13 0x3068D8 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_14 0x3068DC + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_15 0x3068E0 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_0 0x3068E4 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_1 0x3068E8 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_2 0x3068EC + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_3 0x3068F0 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_4 0x3068F4 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_5 0x3068F8 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_6 0x3068FC + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_7 0x306900 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_8 0x306904 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_9 0x306908 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_10 0x30690C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_11 0x306910 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_12 0x306914 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_13 0x306918 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_14 0x30691C + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_15 0x306920 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_0 0x306924 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_1 0x306928 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_2 0x30692C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_3 0x306930 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_4 0x306934 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_5 0x306938 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_6 0x30693C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_7 0x306940 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_8 0x306944 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_9 0x306948 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_10 0x30694C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_11 0x306950 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_12 0x306954 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_13 0x306958 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_14 0x30695C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_15 0x306960 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_0 0x306964 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_1 0x306968 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_2 0x30696C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_3 0x306970 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_4 0x306974 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_5 0x306978 + +#define 
mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_6 0x30697C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_7 0x306980 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_8 0x306984 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_9 0x306988 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_10 0x30698C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_11 0x306990 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_12 0x306994 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_13 0x306998 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_14 0x30699C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_15 0x3069A0 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_0 0x3069A4 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_1 0x3069A8 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_2 0x3069AC + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_3 0x3069B0 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_4 0x3069B4 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_5 0x3069B8 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_6 0x3069BC + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_7 0x3069C0 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_8 0x3069C4 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_9 0x3069C8 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_10 0x3069CC + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_11 0x3069D0 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_12 0x3069D4 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_13 0x3069D8 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_14 0x3069DC + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_15 0x3069E0 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_0 0x3069E4 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_1 0x3069E8 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_2 0x3069EC + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_3 0x3069F0 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_4 0x3069F4 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_5 0x3069F8 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_6 0x3069FC + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_7 0x306A00 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_8 0x306A04 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_9 0x306A08 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_10 0x306A0C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_11 0x306A10 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_12 0x306A14 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_13 0x306A18 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_14 0x306A1C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_15 0x306A20 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_HIT_AW 0x306A64 + +#define mmSIF_RTR_CTRL_0_RANGE_SEC_HIT_AR 0x306A68 + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_HIT_AW 0x306A6C + +#define mmSIF_RTR_CTRL_0_RANGE_PRIV_HIT_AR 0x306A70 + +#define mmSIF_RTR_CTRL_0_RGL_CFG 0x306B64 + +#define mmSIF_RTR_CTRL_0_RGL_SHIFT 0x306B68 + +#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_0 0x306B6C + +#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_1 0x306B70 + +#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_2 0x306B74 + +#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_3 0x306B78 + +#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_4 0x306B7C + +#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_5 0x306B80 + +#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_6 0x306B84 + +#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_7 0x306B88 + +#define mmSIF_RTR_CTRL_0_RGL_TOKEN_0 0x306BAC + +#define mmSIF_RTR_CTRL_0_RGL_TOKEN_1 0x306BB0 + +#define mmSIF_RTR_CTRL_0_RGL_TOKEN_2 0x306BB4 + +#define 
mmSIF_RTR_CTRL_0_RGL_TOKEN_3 0x306BB8
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_4 0x306BBC
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_5 0x306BC0
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_6 0x306BC4
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_7 0x306BC8
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_0 0x306BEC
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_1 0x306BF0
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_2 0x306BF4
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_3 0x306BF8
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_4 0x306BFC
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_5 0x306C00
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_6 0x306C04
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_7 0x306C08
+
+#define mmSIF_RTR_CTRL_0_RGL_WDT 0x306C2C
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM0_CH0_CTR_WRAP 0x306C30
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM0_CH1_CTR_WRAP 0x306C34
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM1_CH0_CTR_WRAP 0x306C38
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM1_CH1_CTR_WRAP 0x306C3C
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM2_CH0_CTR_WRAP 0x306C40
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM2_CH1_CTR_WRAP 0x306C44
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM3_CH0_CTR_WRAP 0x306C48
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM3_CH1_CTR_WRAP 0x306C4C
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM0_CH0_CTR_CNT 0x306C50
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM0_CH1_CTR_CNT 0x306C54
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM1_CH0_CTR_CNT 0x306C58
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM1_CH1_CTR_CNT 0x306C5C
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM2_CH0_CTR_CNT 0x306C60
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM2_CH1_CTR_CNT 0x306C64
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM3_CH0_CTR_CNT 0x306C68
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM3_CH1_CTR_CNT 0x306C6C
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM0_CH0_CTR_WRAP 0x306C70
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM0_CH1_CTR_WRAP 0x306C74
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM1_CH0_CTR_WRAP 0x306C78
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM1_CH1_CTR_WRAP 0x306C7C
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM2_CH0_CTR_WRAP 0x306C80
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM2_CH1_CTR_WRAP 0x306C84
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM3_CH0_CTR_WRAP 0x306C88
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM3_CH1_CTR_WRAP 0x306C8C
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM0_CH0_CTR_CNT 0x306C90
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM0_CH1_CTR_CNT 0x306C94
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM1_CH0_CTR_CNT 0x306C98
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM1_CH1_CTR_CNT 0x306C9C
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM2_CH0_CTR_CNT 0x306CA0
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM2_CH1_CTR_CNT 0x306CA4
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM3_CH0_CTR_CNT 0x306CA8
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM3_CH1_CTR_CNT 0x306CAC
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_0 0x306CB0
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_1 0x306CB4
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_2 0x306CB8
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_3 0x306CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_0_REGS_H_ */
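Note: the sif_rtr_ctrl_*_regs.h headers added by this patch are identical except for the instance number in the register names and a fixed 0x10000 stride in the offsets (0x306xxx for SIF_RTR_CTRL_0, 0x316xxx for SIF_RTR_CTRL_1, 0x326xxx for SIF_RTR_CTRL_2). A minimal sketch of deriving a per-instance register offset from the instance-0 offset, assuming the stride holds for every instance; the helper name and stride macro below are illustrative, not part of the driver:

#include <stdint.h>

/* Assumed from the offsets in these headers: banks repeat every 0x10000. */
#define SIF_RTR_CTRL_INSTANCE_STRIDE 0x10000u

/* Map an instance-0 register offset to the same register in another instance. */
static inline uint32_t sif_rtr_ctrl_reg(uint32_t ctrl0_reg, unsigned int instance)
{
	return ctrl0_reg + instance * SIF_RTR_CTRL_INSTANCE_STRIDE;
}

/*
 * Example: sif_rtr_ctrl_reg(0x306C2C, 1) == 0x316C2C, i.e.
 * mmSIF_RTR_CTRL_0_RGL_WDT maps to mmSIF_RTR_CTRL_1_RGL_WDT.
 */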
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h
new file mode 100644
index 000000000..c6d517dbb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_1_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_1_REGS_H_
+
+/*
+ *****************************************
+ *   SIF_RTR_CTRL_1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_1_PERM_SEL 0x316108
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_0 0x316114
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_1 0x316118
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_2 0x31611C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_3 0x316120
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_4 0x316124
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_5 0x316128
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_6 0x31612C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_7 0x316130
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_8 0x316134
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_9 0x316138
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_10 0x31613C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_11 0x316140
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_12 0x316144
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_13 0x316148
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_14 0x31614C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_15 0x316150
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_16 0x316154
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_17 0x316158
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_18 0x31615C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_19 0x316160
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_20 0x316164
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_21 0x316168
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_22 0x31616C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_23 0x316170
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_24 0x316174
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_25 0x316178
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_26 0x31617C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_27 0x316180
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_0 0x316184
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_1 0x316188
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_2 0x31618C
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_3 0x316190
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_4 0x316194
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_5 0x316198
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_6 0x31619C
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_7 0x3161A0
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_8 0x3161A4
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_9 0x3161A8
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_10 0x3161AC
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_11 0x3161B0
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_12 0x3161B4
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_13 0x3161B8
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_14 0x3161BC
+
+#define mmSIF_RTR_CTRL_1_SCRAM_SRAM_EN 0x31626C
+
+#define mmSIF_RTR_CTRL_1_RL_HBM_EN 0x316274
+
+#define mmSIF_RTR_CTRL_1_RL_HBM_SAT 0x316278
+
+#define mmSIF_RTR_CTRL_1_RL_HBM_RST 0x31627C
+
+#define mmSIF_RTR_CTRL_1_RL_HBM_TIMEOUT 0x316280
+
+#define mmSIF_RTR_CTRL_1_SCRAM_HBM_EN 0x316284
+
+#define mmSIF_RTR_CTRL_1_RL_PCI_EN 0x316288
+
+#define mmSIF_RTR_CTRL_1_RL_PCI_SAT 0x31628C
+
+#define mmSIF_RTR_CTRL_1_RL_PCI_RST 0x316290
+
+#define mmSIF_RTR_CTRL_1_RL_PCI_TIMEOUT 0x316294
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_EN 0x31629C
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_SAT 0x3162A0
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_RST 0x3162A4
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_TIMEOUT 0x3162AC
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_RED 0x3162B4
+
+#define mmSIF_RTR_CTRL_1_E2E_HBM_EN 0x3162EC
+
+#define mmSIF_RTR_CTRL_1_E2E_PCI_EN 0x3162F0
+
+#define mmSIF_RTR_CTRL_1_E2E_HBM_WR_SIZE 0x3162F4
+
+#define 
mmSIF_RTR_CTRL_1_E2E_PCI_WR_SIZE 0x3162F8 + +#define mmSIF_RTR_CTRL_1_E2E_AW_PCI_CTR_SET_EN 0x316404 + +#define mmSIF_RTR_CTRL_1_E2E_AW_PCI_CTR_SET 0x316408 + +#define mmSIF_RTR_CTRL_1_E2E_AW_PCI_CTR_WRAP 0x31640C + +#define mmSIF_RTR_CTRL_1_E2E_AW_PCI_CTR_CNT 0x316410 + +#define mmSIF_RTR_CTRL_1_E2E_AW_HBM_CTR_SET_EN 0x316414 + +#define mmSIF_RTR_CTRL_1_E2E_AW_HBM_CTR_SET 0x316418 + +#define mmSIF_RTR_CTRL_1_E2E_HBM_RD_SIZE 0x31641C + +#define mmSIF_RTR_CTRL_1_E2E_PCI_RD_SIZE 0x316420 + +#define mmSIF_RTR_CTRL_1_E2E_AR_PCI_CTR_SET_EN 0x316424 + +#define mmSIF_RTR_CTRL_1_E2E_AR_PCI_CTR_SET 0x316428 + +#define mmSIF_RTR_CTRL_1_E2E_AR_PCI_CTR_WRAP 0x31642C + +#define mmSIF_RTR_CTRL_1_E2E_AR_PCI_CTR_CNT 0x316430 + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM_CTR_SET_EN 0x316434 + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM_CTR_SET 0x316438 + +#define mmSIF_RTR_CTRL_1_NL_HBM_SEL_0 0x316450 + +#define mmSIF_RTR_CTRL_1_NL_HBM_SEL_1 0x316454 + +#define mmSIF_RTR_CTRL_1_NON_LIN_EN 0x316480 + +#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_0 0x316500 + +#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_1 0x316504 + +#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_2 0x316508 + +#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_3 0x31650C + +#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_4 0x316510 + +#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_0 0x316514 + +#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_1 0x316520 + +#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_2 0x316524 + +#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_3 0x316528 + +#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_4 0x31652C + +#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_5 0x316530 + +#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_6 0x316534 + +#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_7 0x316538 + +#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_8 0x31653C + +#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_9 0x316540 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_0 0x316550 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_1 0x316554 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_2 0x316558 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_3 0x31655C + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_4 0x316560 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_5 0x316564 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_6 0x316568 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_7 0x31656C + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_8 0x316570 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_9 0x316574 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_10 0x316578 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_11 0x31657C + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_12 0x316580 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_13 0x316584 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_14 0x316588 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_15 0x31658C + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_16 0x316590 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_17 0x316594 + +#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_18 0x316598 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_0 0x3165E4 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_1 0x3165E8 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_2 0x3165EC + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_3 0x3165F0 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_4 0x3165F4 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_5 0x3165F8 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_6 0x3165FC + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_7 0x316600 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_8 0x316604 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_9 0x316608 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_10 0x31660C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_11 0x316610 + +#define 
mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_12 0x316614 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_13 0x316618 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_14 0x31661C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_15 0x316620 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_0 0x316624 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_1 0x316628 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_2 0x31662C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_3 0x316630 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_4 0x316634 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_5 0x316638 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_6 0x31663C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_7 0x316640 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_8 0x316644 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_9 0x316648 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_10 0x31664C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_11 0x316650 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_12 0x316654 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_13 0x316658 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_14 0x31665C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_15 0x316660 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_0 0x316664 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_1 0x316668 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_2 0x31666C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_3 0x316670 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_4 0x316674 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_5 0x316678 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_6 0x31667C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_7 0x316680 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_8 0x316684 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_9 0x316688 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_10 0x31668C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_11 0x316690 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_12 0x316694 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_13 0x316698 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_14 0x31669C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_15 0x3166A0 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_0 0x3166A4 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_1 0x3166A8 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_2 0x3166AC + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_3 0x3166B0 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_4 0x3166B4 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_5 0x3166B8 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_6 0x3166BC + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_7 0x3166C0 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_8 0x3166C4 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_9 0x3166C8 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_10 0x3166CC + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_11 0x3166D0 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_12 0x3166D4 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_13 0x3166D8 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_14 0x3166DC + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_15 0x3166E0 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_0 0x3166E4 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_1 0x3166E8 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_2 0x3166EC + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_3 0x3166F0 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_4 0x3166F4 + +#define 
mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_5 0x3166F8 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_6 0x3166FC + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_7 0x316700 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_8 0x316704 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_9 0x316708 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_10 0x31670C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_11 0x316710 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_12 0x316714 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_13 0x316718 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_14 0x31671C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_15 0x316720 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_0 0x316724 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_1 0x316728 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_2 0x31672C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_3 0x316730 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_4 0x316734 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_5 0x316738 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_6 0x31673C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_7 0x316740 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_8 0x316744 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_9 0x316748 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_10 0x31674C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_11 0x316750 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_12 0x316754 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_13 0x316758 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_14 0x31675C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_15 0x316760 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_0 0x316764 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_1 0x316768 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_2 0x31676C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_3 0x316770 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_4 0x316774 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_5 0x316778 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_6 0x31677C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_7 0x316780 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_8 0x316784 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_9 0x316788 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_10 0x31678C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_11 0x316790 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_12 0x316794 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_13 0x316798 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_14 0x31679C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_15 0x3167A0 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_0 0x3167A4 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_1 0x3167A8 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_2 0x3167AC + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_3 0x3167B0 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_4 0x3167B4 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_5 0x3167B8 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_6 0x3167BC + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_7 0x3167C0 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_8 0x3167C4 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_9 0x3167C8 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_10 0x3167CC + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_11 0x3167D0 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_12 0x3167D4 + +#define 
mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_13 0x3167D8 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_14 0x3167DC + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_15 0x3167E0 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_0 0x316824 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_1 0x316828 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_2 0x31682C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_3 0x316830 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_4 0x316834 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_5 0x316838 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_6 0x31683C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_7 0x316840 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_8 0x316844 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_9 0x316848 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_10 0x31684C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_11 0x316850 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_12 0x316854 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_13 0x316858 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_14 0x31685C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_15 0x316860 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_0 0x316864 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_1 0x316868 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_2 0x31686C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_3 0x316870 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_4 0x316874 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_5 0x316878 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_6 0x31687C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_7 0x316880 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_8 0x316884 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_9 0x316888 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_10 0x31688C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_11 0x316890 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_12 0x316894 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_13 0x316898 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_14 0x31689C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_15 0x3168A0 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_0 0x3168A4 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_1 0x3168A8 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_2 0x3168AC + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_3 0x3168B0 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_4 0x3168B4 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_5 0x3168B8 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_6 0x3168BC + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_7 0x3168C0 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_8 0x3168C4 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_9 0x3168C8 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_10 0x3168CC + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_11 0x3168D0 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_12 0x3168D4 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_13 0x3168D8 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_14 0x3168DC + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_15 0x3168E0 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_0 0x3168E4 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_1 0x3168E8 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_2 0x3168EC + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_3 0x3168F0 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_4 0x3168F4 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_5 0x3168F8 + +#define 
mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_6 0x3168FC + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_7 0x316900 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_8 0x316904 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_9 0x316908 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_10 0x31690C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_11 0x316910 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_12 0x316914 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_13 0x316918 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_14 0x31691C + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_15 0x316920 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_0 0x316924 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_1 0x316928 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_2 0x31692C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_3 0x316930 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_4 0x316934 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_5 0x316938 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_6 0x31693C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_7 0x316940 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_8 0x316944 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_9 0x316948 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_10 0x31694C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_11 0x316950 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_12 0x316954 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_13 0x316958 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_14 0x31695C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_15 0x316960 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_0 0x316964 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_1 0x316968 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_2 0x31696C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_3 0x316970 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_4 0x316974 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_5 0x316978 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_6 0x31697C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_7 0x316980 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_8 0x316984 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_9 0x316988 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_10 0x31698C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_11 0x316990 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_12 0x316994 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_13 0x316998 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_14 0x31699C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_15 0x3169A0 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_0 0x3169A4 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_1 0x3169A8 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_2 0x3169AC + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_3 0x3169B0 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_4 0x3169B4 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_5 0x3169B8 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_6 0x3169BC + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_7 0x3169C0 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_8 0x3169C4 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_9 0x3169C8 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_10 0x3169CC + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_11 0x3169D0 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_12 0x3169D4 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_13 0x3169D8 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_14 
0x3169DC + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_15 0x3169E0 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_0 0x3169E4 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_1 0x3169E8 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_2 0x3169EC + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_3 0x3169F0 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_4 0x3169F4 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_5 0x3169F8 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_6 0x3169FC + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_7 0x316A00 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_8 0x316A04 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_9 0x316A08 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_10 0x316A0C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_11 0x316A10 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_12 0x316A14 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_13 0x316A18 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_14 0x316A1C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_15 0x316A20 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_HIT_AW 0x316A64 + +#define mmSIF_RTR_CTRL_1_RANGE_SEC_HIT_AR 0x316A68 + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_HIT_AW 0x316A6C + +#define mmSIF_RTR_CTRL_1_RANGE_PRIV_HIT_AR 0x316A70 + +#define mmSIF_RTR_CTRL_1_RGL_CFG 0x316B64 + +#define mmSIF_RTR_CTRL_1_RGL_SHIFT 0x316B68 + +#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_0 0x316B6C + +#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_1 0x316B70 + +#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_2 0x316B74 + +#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_3 0x316B78 + +#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_4 0x316B7C + +#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_5 0x316B80 + +#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_6 0x316B84 + +#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_7 0x316B88 + +#define mmSIF_RTR_CTRL_1_RGL_TOKEN_0 0x316BAC + +#define mmSIF_RTR_CTRL_1_RGL_TOKEN_1 0x316BB0 + +#define mmSIF_RTR_CTRL_1_RGL_TOKEN_2 0x316BB4 + +#define mmSIF_RTR_CTRL_1_RGL_TOKEN_3 0x316BB8 + +#define mmSIF_RTR_CTRL_1_RGL_TOKEN_4 0x316BBC + +#define mmSIF_RTR_CTRL_1_RGL_TOKEN_5 0x316BC0 + +#define mmSIF_RTR_CTRL_1_RGL_TOKEN_6 0x316BC4 + +#define mmSIF_RTR_CTRL_1_RGL_TOKEN_7 0x316BC8 + +#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_0 0x316BEC + +#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_1 0x316BF0 + +#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_2 0x316BF4 + +#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_3 0x316BF8 + +#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_4 0x316BFC + +#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_5 0x316C00 + +#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_6 0x316C04 + +#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_7 0x316C08 + +#define mmSIF_RTR_CTRL_1_RGL_WDT 0x316C2C + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM0_CH0_CTR_WRAP 0x316C30 + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM0_CH1_CTR_WRAP 0x316C34 + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM1_CH0_CTR_WRAP 0x316C38 + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM1_CH1_CTR_WRAP 0x316C3C + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM2_CH0_CTR_WRAP 0x316C40 + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM2_CH1_CTR_WRAP 0x316C44 + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM3_CH0_CTR_WRAP 0x316C48 + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM3_CH1_CTR_WRAP 0x316C4C + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM0_CH0_CTR_CNT 0x316C50 + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM0_CH1_CTR_CNT 0x316C54 + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM1_CH0_CTR_CNT 0x316C58 + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM1_CH1_CTR_CNT 0x316C5C + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM2_CH0_CTR_CNT 0x316C60 + +#define mmSIF_RTR_CTRL_1_E2E_AR_HBM2_CH1_CTR_CNT 0x316C64 + 
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM3_CH0_CTR_CNT 0x316C68
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM3_CH1_CTR_CNT 0x316C6C
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM0_CH0_CTR_WRAP 0x316C70
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM0_CH1_CTR_WRAP 0x316C74
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM1_CH0_CTR_WRAP 0x316C78
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM1_CH1_CTR_WRAP 0x316C7C
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM2_CH0_CTR_WRAP 0x316C80
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM2_CH1_CTR_WRAP 0x316C84
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM3_CH0_CTR_WRAP 0x316C88
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM3_CH1_CTR_WRAP 0x316C8C
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM0_CH0_CTR_CNT 0x316C90
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM0_CH1_CTR_CNT 0x316C94
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM1_CH0_CTR_CNT 0x316C98
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM1_CH1_CTR_CNT 0x316C9C
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM2_CH0_CTR_CNT 0x316CA0
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM2_CH1_CTR_CNT 0x316CA4
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM3_CH0_CTR_CNT 0x316CA8
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM3_CH1_CTR_CNT 0x316CAC
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_0 0x316CB0
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_1 0x316CB4
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_2 0x316CB8
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_3 0x316CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_1_REGS_H_ */
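Note: the E2E HBM counter registers above form contiguous 4-byte arrays ordered HBM0..HBM3 with CH0/CH1 interleaved: eight AR CTR_WRAP registers (0x316C30..0x316C4C) followed by the eight matching AR CTR_CNT registers (0x316C50..0x316C6C), and likewise for AW. A sketch of walking the AR counters of SIF_RTR_CTRL_1 by index, where read32() is a placeholder for the platform's MMIO read accessor and is assumed, not part of the driver:

#include <stdint.h>

#define NUM_HBM 4
#define NUM_CH  2

/* Assumed MMIO read helper; stands in for the driver's real accessor. */
extern uint32_t read32(uint32_t reg);

static void dump_sif1_e2e_ar_counters(void)
{
	/* Base offsets taken from the defines above. */
	const uint32_t wrap_base = 0x316C30; /* ..._E2E_AR_HBM0_CH0_CTR_WRAP */
	const uint32_t cnt_base  = 0x316C50; /* ..._E2E_AR_HBM0_CH0_CTR_CNT  */

	for (unsigned int hbm = 0; hbm < NUM_HBM; hbm++) {
		for (unsigned int ch = 0; ch < NUM_CH; ch++) {
			uint32_t idx  = hbm * NUM_CH + ch;
			uint32_t wrap = read32(wrap_base + 4 * idx);
			uint32_t cnt  = read32(cnt_base + 4 * idx);
			/* ... consume wrap/cnt per HBM channel ... */
			(void)wrap;
			(void)cnt;
		}
	}
}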
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h
new file mode 100644
index 000000000..330e5b42d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_2_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_2_REGS_H_
+
+/*
+ *****************************************
+ *   SIF_RTR_CTRL_2 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_2_PERM_SEL 0x326108
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_0 0x326114
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_1 0x326118
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_2 0x32611C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_3 0x326120
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_4 0x326124
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_5 0x326128
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_6 0x32612C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_7 0x326130
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_8 0x326134
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_9 0x326138
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_10 0x32613C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_11 0x326140
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_12 0x326144
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_13 0x326148
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_14 0x32614C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_15 0x326150
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_16 0x326154
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_17 0x326158
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_18 0x32615C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_19 0x326160
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_20 0x326164
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_21 0x326168
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_22 0x32616C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_23 0x326170
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_24 0x326174
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_25 0x326178
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_26 0x32617C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_27 0x326180
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_0 0x326184
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_1 0x326188
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_2 0x32618C
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_3 0x326190
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_4 0x326194
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_5 0x326198
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_6 0x32619C
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_7 0x3261A0
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_8 0x3261A4
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_9 0x3261A8
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_10 0x3261AC
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_11 0x3261B0
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_12 0x3261B4
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_13 0x3261B8
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_14 0x3261BC
+
+#define mmSIF_RTR_CTRL_2_SCRAM_SRAM_EN 0x32626C
+
+#define mmSIF_RTR_CTRL_2_RL_HBM_EN 0x326274
+
+#define mmSIF_RTR_CTRL_2_RL_HBM_SAT 0x326278
+
+#define mmSIF_RTR_CTRL_2_RL_HBM_RST 0x32627C
+
+#define mmSIF_RTR_CTRL_2_RL_HBM_TIMEOUT 0x326280
+
+#define mmSIF_RTR_CTRL_2_SCRAM_HBM_EN 0x326284
+
+#define mmSIF_RTR_CTRL_2_RL_PCI_EN 0x326288
+
+#define mmSIF_RTR_CTRL_2_RL_PCI_SAT 0x32628C
+
+#define mmSIF_RTR_CTRL_2_RL_PCI_RST 0x326290
+
+#define mmSIF_RTR_CTRL_2_RL_PCI_TIMEOUT 0x326294
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_EN 0x32629C
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_SAT 0x3262A0
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_RST 0x3262A4
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_TIMEOUT 0x3262AC
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_RED 0x3262B4
+
+#define mmSIF_RTR_CTRL_2_E2E_HBM_EN 0x3262EC
+
+#define mmSIF_RTR_CTRL_2_E2E_PCI_EN 0x3262F0
+
+#define mmSIF_RTR_CTRL_2_E2E_HBM_WR_SIZE 0x3262F4
+
+#define 
mmSIF_RTR_CTRL_2_E2E_PCI_WR_SIZE 0x3262F8 + +#define mmSIF_RTR_CTRL_2_E2E_AW_PCI_CTR_SET_EN 0x326404 + +#define mmSIF_RTR_CTRL_2_E2E_AW_PCI_CTR_SET 0x326408 + +#define mmSIF_RTR_CTRL_2_E2E_AW_PCI_CTR_WRAP 0x32640C + +#define mmSIF_RTR_CTRL_2_E2E_AW_PCI_CTR_CNT 0x326410 + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM_CTR_SET_EN 0x326414 + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM_CTR_SET 0x326418 + +#define mmSIF_RTR_CTRL_2_E2E_HBM_RD_SIZE 0x32641C + +#define mmSIF_RTR_CTRL_2_E2E_PCI_RD_SIZE 0x326420 + +#define mmSIF_RTR_CTRL_2_E2E_AR_PCI_CTR_SET_EN 0x326424 + +#define mmSIF_RTR_CTRL_2_E2E_AR_PCI_CTR_SET 0x326428 + +#define mmSIF_RTR_CTRL_2_E2E_AR_PCI_CTR_WRAP 0x32642C + +#define mmSIF_RTR_CTRL_2_E2E_AR_PCI_CTR_CNT 0x326430 + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM_CTR_SET_EN 0x326434 + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM_CTR_SET 0x326438 + +#define mmSIF_RTR_CTRL_2_NL_HBM_SEL_0 0x326450 + +#define mmSIF_RTR_CTRL_2_NL_HBM_SEL_1 0x326454 + +#define mmSIF_RTR_CTRL_2_NON_LIN_EN 0x326480 + +#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_0 0x326500 + +#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_1 0x326504 + +#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_2 0x326508 + +#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_3 0x32650C + +#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_4 0x326510 + +#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_0 0x326514 + +#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_1 0x326520 + +#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_2 0x326524 + +#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_3 0x326528 + +#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_4 0x32652C + +#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_5 0x326530 + +#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_6 0x326534 + +#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_7 0x326538 + +#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_8 0x32653C + +#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_9 0x326540 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_0 0x326550 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_1 0x326554 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_2 0x326558 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_3 0x32655C + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_4 0x326560 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_5 0x326564 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_6 0x326568 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_7 0x32656C + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_8 0x326570 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_9 0x326574 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_10 0x326578 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_11 0x32657C + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_12 0x326580 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_13 0x326584 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_14 0x326588 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_15 0x32658C + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_16 0x326590 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_17 0x326594 + +#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_18 0x326598 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0 0x3265E4 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_1 0x3265E8 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_2 0x3265EC + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_3 0x3265F0 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_4 0x3265F4 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_5 0x3265F8 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_6 0x3265FC + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_7 0x326600 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_8 0x326604 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_9 0x326608 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_10 0x32660C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_11 0x326610 + +#define 
mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_12 0x326614 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_13 0x326618 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_14 0x32661C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_15 0x326620 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0 0x326624 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_1 0x326628 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_2 0x32662C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_3 0x326630 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_4 0x326634 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_5 0x326638 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_6 0x32663C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_7 0x326640 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_8 0x326644 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_9 0x326648 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_10 0x32664C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_11 0x326650 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_12 0x326654 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_13 0x326658 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_14 0x32665C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_15 0x326660 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0 0x326664 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_1 0x326668 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_2 0x32666C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_3 0x326670 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_4 0x326674 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_5 0x326678 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_6 0x32667C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_7 0x326680 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_8 0x326684 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_9 0x326688 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_10 0x32668C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_11 0x326690 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_12 0x326694 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_13 0x326698 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_14 0x32669C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_15 0x3266A0 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0 0x3266A4 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_1 0x3266A8 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_2 0x3266AC + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_3 0x3266B0 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_4 0x3266B4 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_5 0x3266B8 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_6 0x3266BC + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_7 0x3266C0 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_8 0x3266C4 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_9 0x3266C8 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_10 0x3266CC + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_11 0x3266D0 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_12 0x3266D4 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_13 0x3266D8 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_14 0x3266DC + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_15 0x3266E0 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_0 0x3266E4 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_1 0x3266E8 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_2 0x3266EC + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_3 0x3266F0 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_4 0x3266F4 + +#define 
mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_5 0x3266F8 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_6 0x3266FC + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_7 0x326700 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_8 0x326704 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_9 0x326708 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_10 0x32670C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_11 0x326710 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_12 0x326714 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_13 0x326718 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_14 0x32671C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_15 0x326720 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_0 0x326724 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_1 0x326728 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_2 0x32672C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_3 0x326730 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_4 0x326734 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_5 0x326738 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_6 0x32673C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_7 0x326740 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_8 0x326744 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_9 0x326748 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_10 0x32674C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_11 0x326750 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_12 0x326754 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_13 0x326758 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_14 0x32675C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_15 0x326760 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_0 0x326764 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_1 0x326768 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_2 0x32676C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_3 0x326770 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_4 0x326774 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_5 0x326778 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_6 0x32677C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_7 0x326780 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_8 0x326784 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_9 0x326788 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_10 0x32678C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_11 0x326790 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_12 0x326794 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_13 0x326798 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_14 0x32679C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_15 0x3267A0 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_0 0x3267A4 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_1 0x3267A8 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_2 0x3267AC + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_3 0x3267B0 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_4 0x3267B4 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_5 0x3267B8 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_6 0x3267BC + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_7 0x3267C0 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_8 0x3267C4 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_9 0x3267C8 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_10 0x3267CC + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_11 0x3267D0 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_12 0x3267D4 + +#define 
mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_13 0x3267D8 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_14 0x3267DC + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_15 0x3267E0 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_0 0x326824 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_1 0x326828 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_2 0x32682C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_3 0x326830 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_4 0x326834 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_5 0x326838 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_6 0x32683C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_7 0x326840 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_8 0x326844 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_9 0x326848 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_10 0x32684C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_11 0x326850 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_12 0x326854 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_13 0x326858 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_14 0x32685C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_15 0x326860 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_0 0x326864 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_1 0x326868 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_2 0x32686C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_3 0x326870 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_4 0x326874 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_5 0x326878 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_6 0x32687C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_7 0x326880 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_8 0x326884 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_9 0x326888 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_10 0x32688C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_11 0x326890 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_12 0x326894 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_13 0x326898 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_14 0x32689C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_15 0x3268A0 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_0 0x3268A4 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_1 0x3268A8 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_2 0x3268AC + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_3 0x3268B0 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_4 0x3268B4 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_5 0x3268B8 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_6 0x3268BC + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_7 0x3268C0 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_8 0x3268C4 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_9 0x3268C8 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_10 0x3268CC + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_11 0x3268D0 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_12 0x3268D4 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_13 0x3268D8 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_14 0x3268DC + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_15 0x3268E0 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_0 0x3268E4 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_1 0x3268E8 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_2 0x3268EC + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_3 0x3268F0 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_4 0x3268F4 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_5 0x3268F8 + +#define 
mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_6 0x3268FC + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_7 0x326900 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_8 0x326904 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_9 0x326908 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_10 0x32690C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_11 0x326910 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_12 0x326914 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_13 0x326918 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_14 0x32691C + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_15 0x326920 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_0 0x326924 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_1 0x326928 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_2 0x32692C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_3 0x326930 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_4 0x326934 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_5 0x326938 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_6 0x32693C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_7 0x326940 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_8 0x326944 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_9 0x326948 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_10 0x32694C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_11 0x326950 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_12 0x326954 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_13 0x326958 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_14 0x32695C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_15 0x326960 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_0 0x326964 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_1 0x326968 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_2 0x32696C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_3 0x326970 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_4 0x326974 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_5 0x326978 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_6 0x32697C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_7 0x326980 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_8 0x326984 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_9 0x326988 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_10 0x32698C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_11 0x326990 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_12 0x326994 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_13 0x326998 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_14 0x32699C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_15 0x3269A0 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_0 0x3269A4 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_1 0x3269A8 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_2 0x3269AC + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_3 0x3269B0 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_4 0x3269B4 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_5 0x3269B8 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_6 0x3269BC + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_7 0x3269C0 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_8 0x3269C4 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_9 0x3269C8 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_10 0x3269CC + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_11 0x3269D0 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_12 0x3269D4 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_13 0x3269D8 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_14 
0x3269DC + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_15 0x3269E0 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_0 0x3269E4 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_1 0x3269E8 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_2 0x3269EC + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_3 0x3269F0 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_4 0x3269F4 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_5 0x3269F8 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_6 0x3269FC + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_7 0x326A00 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_8 0x326A04 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_9 0x326A08 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_10 0x326A0C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_11 0x326A10 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_12 0x326A14 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_13 0x326A18 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_14 0x326A1C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_15 0x326A20 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_HIT_AW 0x326A64 + +#define mmSIF_RTR_CTRL_2_RANGE_SEC_HIT_AR 0x326A68 + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_HIT_AW 0x326A6C + +#define mmSIF_RTR_CTRL_2_RANGE_PRIV_HIT_AR 0x326A70 + +#define mmSIF_RTR_CTRL_2_RGL_CFG 0x326B64 + +#define mmSIF_RTR_CTRL_2_RGL_SHIFT 0x326B68 + +#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_0 0x326B6C + +#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_1 0x326B70 + +#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_2 0x326B74 + +#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_3 0x326B78 + +#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_4 0x326B7C + +#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_5 0x326B80 + +#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_6 0x326B84 + +#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_7 0x326B88 + +#define mmSIF_RTR_CTRL_2_RGL_TOKEN_0 0x326BAC + +#define mmSIF_RTR_CTRL_2_RGL_TOKEN_1 0x326BB0 + +#define mmSIF_RTR_CTRL_2_RGL_TOKEN_2 0x326BB4 + +#define mmSIF_RTR_CTRL_2_RGL_TOKEN_3 0x326BB8 + +#define mmSIF_RTR_CTRL_2_RGL_TOKEN_4 0x326BBC + +#define mmSIF_RTR_CTRL_2_RGL_TOKEN_5 0x326BC0 + +#define mmSIF_RTR_CTRL_2_RGL_TOKEN_6 0x326BC4 + +#define mmSIF_RTR_CTRL_2_RGL_TOKEN_7 0x326BC8 + +#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_0 0x326BEC + +#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_1 0x326BF0 + +#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_2 0x326BF4 + +#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_3 0x326BF8 + +#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_4 0x326BFC + +#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_5 0x326C00 + +#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_6 0x326C04 + +#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_7 0x326C08 + +#define mmSIF_RTR_CTRL_2_RGL_WDT 0x326C2C + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM0_CH0_CTR_WRAP 0x326C30 + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM0_CH1_CTR_WRAP 0x326C34 + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM1_CH0_CTR_WRAP 0x326C38 + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM1_CH1_CTR_WRAP 0x326C3C + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM2_CH0_CTR_WRAP 0x326C40 + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM2_CH1_CTR_WRAP 0x326C44 + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM3_CH0_CTR_WRAP 0x326C48 + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM3_CH1_CTR_WRAP 0x326C4C + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM0_CH0_CTR_CNT 0x326C50 + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM0_CH1_CTR_CNT 0x326C54 + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM1_CH0_CTR_CNT 0x326C58 + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM1_CH1_CTR_CNT 0x326C5C + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM2_CH0_CTR_CNT 0x326C60 + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM2_CH1_CTR_CNT 0x326C64 + 
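The RANGE_SEC/RANGE_PRIV base and mask registers listed above form 16-entry arrays (suffixes _0 through _15) spaced 4 bytes apart, so any entry can be derived from entry 0. A minimal, self-contained C sketch of that indexing follows; only the _AW_0 define and the 4-byte stride are taken from the header itself, and the helper name reg_array_entry is a hypothetical illustration, not part of the driver.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0 0x326664

/* Address of entry i in one of the 16-entry register arrays above. */
static uint32_t reg_array_entry(uint32_t entry0, unsigned int i)
{
        return entry0 + i * 4;
}

int main(void)
{
        /* Prints 0x326694, i.e. mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_12. */
        printf("0x%" PRIX32 "\n",
               reg_array_entry(mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0, 12));
        return 0;
}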
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM3_CH0_CTR_CNT 0x326C68 + +#define mmSIF_RTR_CTRL_2_E2E_AR_HBM3_CH1_CTR_CNT 0x326C6C + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM0_CH0_CTR_WRAP 0x326C70 + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM0_CH1_CTR_WRAP 0x326C74 + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM1_CH0_CTR_WRAP 0x326C78 + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM1_CH1_CTR_WRAP 0x326C7C + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM2_CH0_CTR_WRAP 0x326C80 + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM2_CH1_CTR_WRAP 0x326C84 + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM3_CH0_CTR_WRAP 0x326C88 + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM3_CH1_CTR_WRAP 0x326C8C + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM0_CH0_CTR_CNT 0x326C90 + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM0_CH1_CTR_CNT 0x326C94 + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM1_CH0_CTR_CNT 0x326C98 + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM1_CH1_CTR_CNT 0x326C9C + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM2_CH0_CTR_CNT 0x326CA0 + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM2_CH1_CTR_CNT 0x326CA4 + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM3_CH0_CTR_CNT 0x326CA8 + +#define mmSIF_RTR_CTRL_2_E2E_AW_HBM3_CH1_CTR_CNT 0x326CAC + +#define mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_0 0x326CB0 + +#define mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_1 0x326CB4 + +#define mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_2 0x326CB8 + +#define mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_3 0x326CBC + +#endif /* ASIC_REG_SIF_RTR_CTRL_2_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h new file mode 100644 index 000000000..d749f1968 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h @@ -0,0 +1,896 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SIF_RTR_CTRL_3_REGS_H_ +#define ASIC_REG_SIF_RTR_CTRL_3_REGS_H_ + +/* + ***************************************** + * SIF_RTR_CTRL_3 (Prototype: RTR_CTRL) + ***************************************** + */ + +#define mmSIF_RTR_CTRL_3_PERM_SEL 0x336108 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_0 0x336114 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_1 0x336118 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_2 0x33611C + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_3 0x336120 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_4 0x336124 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_5 0x336128 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_6 0x33612C + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_7 0x336130 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_8 0x336134 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_9 0x336138 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_10 0x33613C + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_11 0x336140 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_12 0x336144 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_13 0x336148 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_14 0x33614C + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_15 0x336150 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_16 0x336154 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_17 0x336158 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_18 0x33615C + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_19 0x336160 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_20 0x336164 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_21 0x336168 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_22 0x33616C + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_23 0x336170 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_24 0x336174 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_25 0x336178 + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_26 0x33617C + +#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_27 0x336180 + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_0 0x336184 + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_1 0x336188 + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_2 0x33618C + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_3 0x336190 + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_4 0x336194 + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_5 0x336198 + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_6 0x33619C + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_7 0x3361A0 + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_8 0x3361A4 + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_9 0x3361A8 + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_10 0x3361AC + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_11 0x3361B0 + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_12 0x3361B4 + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_13 0x3361B8 + +#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_14 0x3361BC + +#define mmSIF_RTR_CTRL_3_SCRAM_SRAM_EN 0x33626C + +#define mmSIF_RTR_CTRL_3_RL_HBM_EN 0x336274 + +#define mmSIF_RTR_CTRL_3_RL_HBM_SAT 0x336278 + +#define mmSIF_RTR_CTRL_3_RL_HBM_RST 0x33627C + +#define mmSIF_RTR_CTRL_3_RL_HBM_TIMEOUT 0x336280 + +#define mmSIF_RTR_CTRL_3_SCRAM_HBM_EN 0x336284 + +#define mmSIF_RTR_CTRL_3_RL_PCI_EN 0x336288 + +#define mmSIF_RTR_CTRL_3_RL_PCI_SAT 0x33628C + +#define mmSIF_RTR_CTRL_3_RL_PCI_RST 0x336290 + +#define mmSIF_RTR_CTRL_3_RL_PCI_TIMEOUT 0x336294 + +#define mmSIF_RTR_CTRL_3_RL_SRAM_EN 0x33629C + +#define mmSIF_RTR_CTRL_3_RL_SRAM_SAT 0x3362A0 + +#define mmSIF_RTR_CTRL_3_RL_SRAM_RST 0x3362A4 + +#define mmSIF_RTR_CTRL_3_RL_SRAM_TIMEOUT 0x3362AC + +#define mmSIF_RTR_CTRL_3_RL_SRAM_RED 0x3362B4 + +#define mmSIF_RTR_CTRL_3_E2E_HBM_EN 0x3362EC + +#define mmSIF_RTR_CTRL_3_E2E_PCI_EN 0x3362F0 + +#define mmSIF_RTR_CTRL_3_E2E_HBM_WR_SIZE 0x3362F4 + +#define 
mmSIF_RTR_CTRL_3_E2E_PCI_WR_SIZE 0x3362F8 + +#define mmSIF_RTR_CTRL_3_E2E_AW_PCI_CTR_SET_EN 0x336404 + +#define mmSIF_RTR_CTRL_3_E2E_AW_PCI_CTR_SET 0x336408 + +#define mmSIF_RTR_CTRL_3_E2E_AW_PCI_CTR_WRAP 0x33640C + +#define mmSIF_RTR_CTRL_3_E2E_AW_PCI_CTR_CNT 0x336410 + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM_CTR_SET_EN 0x336414 + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM_CTR_SET 0x336418 + +#define mmSIF_RTR_CTRL_3_E2E_HBM_RD_SIZE 0x33641C + +#define mmSIF_RTR_CTRL_3_E2E_PCI_RD_SIZE 0x336420 + +#define mmSIF_RTR_CTRL_3_E2E_AR_PCI_CTR_SET_EN 0x336424 + +#define mmSIF_RTR_CTRL_3_E2E_AR_PCI_CTR_SET 0x336428 + +#define mmSIF_RTR_CTRL_3_E2E_AR_PCI_CTR_WRAP 0x33642C + +#define mmSIF_RTR_CTRL_3_E2E_AR_PCI_CTR_CNT 0x336430 + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM_CTR_SET_EN 0x336434 + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM_CTR_SET 0x336438 + +#define mmSIF_RTR_CTRL_3_NL_HBM_SEL_0 0x336450 + +#define mmSIF_RTR_CTRL_3_NL_HBM_SEL_1 0x336454 + +#define mmSIF_RTR_CTRL_3_NON_LIN_EN 0x336480 + +#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_0 0x336500 + +#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_1 0x336504 + +#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_2 0x336508 + +#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_3 0x33650C + +#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_4 0x336510 + +#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_0 0x336514 + +#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_1 0x336520 + +#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_2 0x336524 + +#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_3 0x336528 + +#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_4 0x33652C + +#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_5 0x336530 + +#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_6 0x336534 + +#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_7 0x336538 + +#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_8 0x33653C + +#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_9 0x336540 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_0 0x336550 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_1 0x336554 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_2 0x336558 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_3 0x33655C + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_4 0x336560 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_5 0x336564 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_6 0x336568 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_7 0x33656C + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_8 0x336570 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_9 0x336574 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_10 0x336578 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_11 0x33657C + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_12 0x336580 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_13 0x336584 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_14 0x336588 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_15 0x33658C + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_16 0x336590 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_17 0x336594 + +#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_18 0x336598 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_0 0x3365E4 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_1 0x3365E8 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_2 0x3365EC + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_3 0x3365F0 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_4 0x3365F4 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_5 0x3365F8 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_6 0x3365FC + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_7 0x336600 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_8 0x336604 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_9 0x336608 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_10 0x33660C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_11 0x336610 + +#define 
mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_12 0x336614 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_13 0x336618 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_14 0x33661C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_15 0x336620 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_0 0x336624 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_1 0x336628 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_2 0x33662C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_3 0x336630 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_4 0x336634 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_5 0x336638 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_6 0x33663C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_7 0x336640 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_8 0x336644 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_9 0x336648 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_10 0x33664C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_11 0x336650 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_12 0x336654 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_13 0x336658 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_14 0x33665C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_15 0x336660 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_0 0x336664 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_1 0x336668 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_2 0x33666C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_3 0x336670 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_4 0x336674 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_5 0x336678 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_6 0x33667C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_7 0x336680 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_8 0x336684 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_9 0x336688 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_10 0x33668C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_11 0x336690 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_12 0x336694 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_13 0x336698 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_14 0x33669C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_15 0x3366A0 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_0 0x3366A4 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_1 0x3366A8 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_2 0x3366AC + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_3 0x3366B0 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_4 0x3366B4 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_5 0x3366B8 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_6 0x3366BC + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_7 0x3366C0 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_8 0x3366C4 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_9 0x3366C8 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_10 0x3366CC + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_11 0x3366D0 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_12 0x3366D4 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_13 0x3366D8 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_14 0x3366DC + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_15 0x3366E0 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_0 0x3366E4 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_1 0x3366E8 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_2 0x3366EC + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_3 0x3366F0 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_4 0x3366F4 + +#define 
mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_5 0x3366F8 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_6 0x3366FC + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_7 0x336700 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_8 0x336704 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_9 0x336708 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_10 0x33670C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_11 0x336710 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_12 0x336714 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_13 0x336718 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_14 0x33671C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_15 0x336720 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_0 0x336724 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_1 0x336728 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_2 0x33672C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_3 0x336730 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_4 0x336734 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_5 0x336738 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_6 0x33673C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_7 0x336740 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_8 0x336744 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_9 0x336748 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_10 0x33674C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_11 0x336750 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_12 0x336754 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_13 0x336758 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_14 0x33675C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_15 0x336760 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_0 0x336764 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_1 0x336768 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_2 0x33676C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_3 0x336770 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_4 0x336774 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_5 0x336778 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_6 0x33677C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_7 0x336780 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_8 0x336784 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_9 0x336788 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_10 0x33678C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_11 0x336790 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_12 0x336794 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_13 0x336798 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_14 0x33679C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_15 0x3367A0 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_0 0x3367A4 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_1 0x3367A8 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_2 0x3367AC + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_3 0x3367B0 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_4 0x3367B4 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_5 0x3367B8 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_6 0x3367BC + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_7 0x3367C0 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_8 0x3367C4 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_9 0x3367C8 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_10 0x3367CC + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_11 0x3367D0 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_12 0x3367D4 + +#define 
mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_13 0x3367D8 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_14 0x3367DC + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_15 0x3367E0 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_0 0x336824 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_1 0x336828 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_2 0x33682C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_3 0x336830 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_4 0x336834 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_5 0x336838 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_6 0x33683C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_7 0x336840 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_8 0x336844 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_9 0x336848 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_10 0x33684C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_11 0x336850 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_12 0x336854 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_13 0x336858 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_14 0x33685C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_15 0x336860 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_0 0x336864 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_1 0x336868 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_2 0x33686C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_3 0x336870 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_4 0x336874 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_5 0x336878 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_6 0x33687C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_7 0x336880 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_8 0x336884 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_9 0x336888 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_10 0x33688C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_11 0x336890 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_12 0x336894 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_13 0x336898 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_14 0x33689C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_15 0x3368A0 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_0 0x3368A4 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_1 0x3368A8 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_2 0x3368AC + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_3 0x3368B0 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_4 0x3368B4 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_5 0x3368B8 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_6 0x3368BC + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_7 0x3368C0 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_8 0x3368C4 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_9 0x3368C8 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_10 0x3368CC + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_11 0x3368D0 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_12 0x3368D4 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_13 0x3368D8 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_14 0x3368DC + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_15 0x3368E0 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_0 0x3368E4 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_1 0x3368E8 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_2 0x3368EC + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_3 0x3368F0 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_4 0x3368F4 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_5 0x3368F8 + +#define 
mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_6 0x3368FC + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_7 0x336900 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_8 0x336904 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_9 0x336908 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_10 0x33690C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_11 0x336910 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_12 0x336914 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_13 0x336918 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_14 0x33691C + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_15 0x336920 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_0 0x336924 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_1 0x336928 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_2 0x33692C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_3 0x336930 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_4 0x336934 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_5 0x336938 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_6 0x33693C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_7 0x336940 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_8 0x336944 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_9 0x336948 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_10 0x33694C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_11 0x336950 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_12 0x336954 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_13 0x336958 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_14 0x33695C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_15 0x336960 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_0 0x336964 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_1 0x336968 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_2 0x33696C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_3 0x336970 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_4 0x336974 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_5 0x336978 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_6 0x33697C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_7 0x336980 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_8 0x336984 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_9 0x336988 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_10 0x33698C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_11 0x336990 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_12 0x336994 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_13 0x336998 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_14 0x33699C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_15 0x3369A0 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_0 0x3369A4 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_1 0x3369A8 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_2 0x3369AC + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_3 0x3369B0 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_4 0x3369B4 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_5 0x3369B8 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_6 0x3369BC + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_7 0x3369C0 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_8 0x3369C4 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_9 0x3369C8 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_10 0x3369CC + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_11 0x3369D0 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_12 0x3369D4 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_13 0x3369D8 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_14 
0x3369DC + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_15 0x3369E0 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_0 0x3369E4 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_1 0x3369E8 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_2 0x3369EC + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_3 0x3369F0 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_4 0x3369F4 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_5 0x3369F8 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_6 0x3369FC + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_7 0x336A00 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_8 0x336A04 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_9 0x336A08 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_10 0x336A0C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_11 0x336A10 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_12 0x336A14 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_13 0x336A18 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_14 0x336A1C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_15 0x336A20 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_HIT_AW 0x336A64 + +#define mmSIF_RTR_CTRL_3_RANGE_SEC_HIT_AR 0x336A68 + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_HIT_AW 0x336A6C + +#define mmSIF_RTR_CTRL_3_RANGE_PRIV_HIT_AR 0x336A70 + +#define mmSIF_RTR_CTRL_3_RGL_CFG 0x336B64 + +#define mmSIF_RTR_CTRL_3_RGL_SHIFT 0x336B68 + +#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_0 0x336B6C + +#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_1 0x336B70 + +#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_2 0x336B74 + +#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_3 0x336B78 + +#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_4 0x336B7C + +#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_5 0x336B80 + +#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_6 0x336B84 + +#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_7 0x336B88 + +#define mmSIF_RTR_CTRL_3_RGL_TOKEN_0 0x336BAC + +#define mmSIF_RTR_CTRL_3_RGL_TOKEN_1 0x336BB0 + +#define mmSIF_RTR_CTRL_3_RGL_TOKEN_2 0x336BB4 + +#define mmSIF_RTR_CTRL_3_RGL_TOKEN_3 0x336BB8 + +#define mmSIF_RTR_CTRL_3_RGL_TOKEN_4 0x336BBC + +#define mmSIF_RTR_CTRL_3_RGL_TOKEN_5 0x336BC0 + +#define mmSIF_RTR_CTRL_3_RGL_TOKEN_6 0x336BC4 + +#define mmSIF_RTR_CTRL_3_RGL_TOKEN_7 0x336BC8 + +#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_0 0x336BEC + +#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_1 0x336BF0 + +#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_2 0x336BF4 + +#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_3 0x336BF8 + +#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_4 0x336BFC + +#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_5 0x336C00 + +#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_6 0x336C04 + +#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_7 0x336C08 + +#define mmSIF_RTR_CTRL_3_RGL_WDT 0x336C2C + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM0_CH0_CTR_WRAP 0x336C30 + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM0_CH1_CTR_WRAP 0x336C34 + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM1_CH0_CTR_WRAP 0x336C38 + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM1_CH1_CTR_WRAP 0x336C3C + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM2_CH0_CTR_WRAP 0x336C40 + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM2_CH1_CTR_WRAP 0x336C44 + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM3_CH0_CTR_WRAP 0x336C48 + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM3_CH1_CTR_WRAP 0x336C4C + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM0_CH0_CTR_CNT 0x336C50 + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM0_CH1_CTR_CNT 0x336C54 + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM1_CH0_CTR_CNT 0x336C58 + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM1_CH1_CTR_CNT 0x336C5C + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM2_CH0_CTR_CNT 0x336C60 + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM2_CH1_CTR_CNT 0x336C64 + 
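Comparing the sif_rtr_ctrl_2 and sif_rtr_ctrl_3 headers above (and sif_rtr_ctrl_4 below), each SIF_RTR_CTRL instance is the same RTR_CTRL prototype mapped 0x10000 apart: PERM_SEL sits at 0x326108, 0x336108 and 0x346108 for instances 2, 3 and 4. A short C sketch of that per-instance addressing follows; the 0x306000 base for instance 0 is inferred from the pattern and not stated in this diff, and the names ending in _DEMO are hypothetical.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SIF_RTR_CTRL_BASE_DEMO   0x306000u /* inferred instance-0 base */
#define SIF_RTR_CTRL_STRIDE_DEMO 0x10000u  /* ctrl_3 minus ctrl_2 offsets */
#define RTR_CTRL_PERM_SEL_OFS    0x108u    /* offset within each block */

/* Address of a prototype register in SIF_RTR_CTRL instance n. */
static uint32_t sif_rtr_ctrl_reg(unsigned int n, uint32_t offset)
{
        return SIF_RTR_CTRL_BASE_DEMO + n * SIF_RTR_CTRL_STRIDE_DEMO + offset;
}

int main(void)
{
        /* Prints 0x336108, matching mmSIF_RTR_CTRL_3_PERM_SEL above. */
        printf("0x%" PRIX32 "\n", sif_rtr_ctrl_reg(3, RTR_CTRL_PERM_SEL_OFS));
        return 0;
}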
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM3_CH0_CTR_CNT 0x336C68 + +#define mmSIF_RTR_CTRL_3_E2E_AR_HBM3_CH1_CTR_CNT 0x336C6C + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM0_CH0_CTR_WRAP 0x336C70 + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM0_CH1_CTR_WRAP 0x336C74 + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM1_CH0_CTR_WRAP 0x336C78 + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM1_CH1_CTR_WRAP 0x336C7C + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM2_CH0_CTR_WRAP 0x336C80 + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM2_CH1_CTR_WRAP 0x336C84 + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM3_CH0_CTR_WRAP 0x336C88 + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM3_CH1_CTR_WRAP 0x336C8C + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM0_CH0_CTR_CNT 0x336C90 + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM0_CH1_CTR_CNT 0x336C94 + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM1_CH0_CTR_CNT 0x336C98 + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM1_CH1_CTR_CNT 0x336C9C + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM2_CH0_CTR_CNT 0x336CA0 + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM2_CH1_CTR_CNT 0x336CA4 + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM3_CH0_CTR_CNT 0x336CA8 + +#define mmSIF_RTR_CTRL_3_E2E_AW_HBM3_CH1_CTR_CNT 0x336CAC + +#define mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_0 0x336CB0 + +#define mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_1 0x336CB4 + +#define mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_2 0x336CB8 + +#define mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_3 0x336CBC + +#endif /* ASIC_REG_SIF_RTR_CTRL_3_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h new file mode 100644 index 000000000..ad48773c4 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h @@ -0,0 +1,896 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SIF_RTR_CTRL_4_REGS_H_ +#define ASIC_REG_SIF_RTR_CTRL_4_REGS_H_ + +/* + ***************************************** + * SIF_RTR_CTRL_4 (Prototype: RTR_CTRL) + ***************************************** + */ + +#define mmSIF_RTR_CTRL_4_PERM_SEL 0x346108 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_0 0x346114 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_1 0x346118 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_2 0x34611C + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_3 0x346120 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_4 0x346124 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_5 0x346128 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_6 0x34612C + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_7 0x346130 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_8 0x346134 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_9 0x346138 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_10 0x34613C + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_11 0x346140 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_12 0x346144 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_13 0x346148 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_14 0x34614C + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_15 0x346150 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_16 0x346154 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_17 0x346158 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_18 0x34615C + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_19 0x346160 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_20 0x346164 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_21 0x346168 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_22 0x34616C + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_23 0x346170 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_24 0x346174 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_25 0x346178 + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_26 0x34617C + +#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_27 0x346180 + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_0 0x346184 + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_1 0x346188 + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_2 0x34618C + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_3 0x346190 + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_4 0x346194 + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_5 0x346198 + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_6 0x34619C + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_7 0x3461A0 + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_8 0x3461A4 + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_9 0x3461A8 + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_10 0x3461AC + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_11 0x3461B0 + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_12 0x3461B4 + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_13 0x3461B8 + +#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_14 0x3461BC + +#define mmSIF_RTR_CTRL_4_SCRAM_SRAM_EN 0x34626C + +#define mmSIF_RTR_CTRL_4_RL_HBM_EN 0x346274 + +#define mmSIF_RTR_CTRL_4_RL_HBM_SAT 0x346278 + +#define mmSIF_RTR_CTRL_4_RL_HBM_RST 0x34627C + +#define mmSIF_RTR_CTRL_4_RL_HBM_TIMEOUT 0x346280 + +#define mmSIF_RTR_CTRL_4_SCRAM_HBM_EN 0x346284 + +#define mmSIF_RTR_CTRL_4_RL_PCI_EN 0x346288 + +#define mmSIF_RTR_CTRL_4_RL_PCI_SAT 0x34628C + +#define mmSIF_RTR_CTRL_4_RL_PCI_RST 0x346290 + +#define mmSIF_RTR_CTRL_4_RL_PCI_TIMEOUT 0x346294 + +#define mmSIF_RTR_CTRL_4_RL_SRAM_EN 0x34629C + +#define mmSIF_RTR_CTRL_4_RL_SRAM_SAT 0x3462A0 + +#define mmSIF_RTR_CTRL_4_RL_SRAM_RST 0x3462A4 + +#define mmSIF_RTR_CTRL_4_RL_SRAM_TIMEOUT 0x3462AC + +#define mmSIF_RTR_CTRL_4_RL_SRAM_RED 0x3462B4 + +#define mmSIF_RTR_CTRL_4_E2E_HBM_EN 0x3462EC + +#define mmSIF_RTR_CTRL_4_E2E_PCI_EN 0x3462F0 + +#define mmSIF_RTR_CTRL_4_E2E_HBM_WR_SIZE 0x3462F4 + +#define 
mmSIF_RTR_CTRL_4_E2E_PCI_WR_SIZE 0x3462F8 + +#define mmSIF_RTR_CTRL_4_E2E_AW_PCI_CTR_SET_EN 0x346404 + +#define mmSIF_RTR_CTRL_4_E2E_AW_PCI_CTR_SET 0x346408 + +#define mmSIF_RTR_CTRL_4_E2E_AW_PCI_CTR_WRAP 0x34640C + +#define mmSIF_RTR_CTRL_4_E2E_AW_PCI_CTR_CNT 0x346410 + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM_CTR_SET_EN 0x346414 + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM_CTR_SET 0x346418 + +#define mmSIF_RTR_CTRL_4_E2E_HBM_RD_SIZE 0x34641C + +#define mmSIF_RTR_CTRL_4_E2E_PCI_RD_SIZE 0x346420 + +#define mmSIF_RTR_CTRL_4_E2E_AR_PCI_CTR_SET_EN 0x346424 + +#define mmSIF_RTR_CTRL_4_E2E_AR_PCI_CTR_SET 0x346428 + +#define mmSIF_RTR_CTRL_4_E2E_AR_PCI_CTR_WRAP 0x34642C + +#define mmSIF_RTR_CTRL_4_E2E_AR_PCI_CTR_CNT 0x346430 + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM_CTR_SET_EN 0x346434 + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM_CTR_SET 0x346438 + +#define mmSIF_RTR_CTRL_4_NL_HBM_SEL_0 0x346450 + +#define mmSIF_RTR_CTRL_4_NL_HBM_SEL_1 0x346454 + +#define mmSIF_RTR_CTRL_4_NON_LIN_EN 0x346480 + +#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_0 0x346500 + +#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_1 0x346504 + +#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_2 0x346508 + +#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_3 0x34650C + +#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_4 0x346510 + +#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_0 0x346514 + +#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_1 0x346520 + +#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_2 0x346524 + +#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_3 0x346528 + +#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_4 0x34652C + +#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_5 0x346530 + +#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_6 0x346534 + +#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_7 0x346538 + +#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_8 0x34653C + +#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_9 0x346540 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_0 0x346550 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_1 0x346554 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_2 0x346558 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_3 0x34655C + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_4 0x346560 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_5 0x346564 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_6 0x346568 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_7 0x34656C + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_8 0x346570 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_9 0x346574 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_10 0x346578 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_11 0x34657C + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_12 0x346580 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_13 0x346584 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_14 0x346588 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_15 0x34658C + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_16 0x346590 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_17 0x346594 + +#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_18 0x346598 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0 0x3465E4 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_1 0x3465E8 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_2 0x3465EC + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_3 0x3465F0 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_4 0x3465F4 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_5 0x3465F8 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_6 0x3465FC + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_7 0x346600 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_8 0x346604 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_9 0x346608 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_10 0x34660C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_11 0x346610 + +#define 
mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_12 0x346614 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_13 0x346618 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_14 0x34661C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_15 0x346620 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_0 0x346624 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_1 0x346628 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_2 0x34662C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_3 0x346630 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_4 0x346634 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_5 0x346638 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_6 0x34663C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_7 0x346640 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_8 0x346644 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_9 0x346648 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_10 0x34664C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_11 0x346650 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_12 0x346654 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_13 0x346658 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_14 0x34665C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_15 0x346660 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_0 0x346664 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_1 0x346668 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_2 0x34666C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_3 0x346670 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_4 0x346674 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_5 0x346678 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_6 0x34667C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_7 0x346680 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_8 0x346684 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_9 0x346688 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_10 0x34668C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_11 0x346690 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_12 0x346694 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_13 0x346698 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_14 0x34669C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_15 0x3466A0 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_0 0x3466A4 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_1 0x3466A8 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_2 0x3466AC + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_3 0x3466B0 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_4 0x3466B4 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_5 0x3466B8 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_6 0x3466BC + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_7 0x3466C0 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_8 0x3466C4 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_9 0x3466C8 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_10 0x3466CC + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_11 0x3466D0 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_12 0x3466D4 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_13 0x3466D8 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_14 0x3466DC + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_15 0x3466E0 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_0 0x3466E4 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_1 0x3466E8 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_2 0x3466EC + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_3 0x3466F0 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_4 0x3466F4 + +#define 
mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_5 0x3466F8 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_6 0x3466FC + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_7 0x346700 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_8 0x346704 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_9 0x346708 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_10 0x34670C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_11 0x346710 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_12 0x346714 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_13 0x346718 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_14 0x34671C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_15 0x346720 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_0 0x346724 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_1 0x346728 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_2 0x34672C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_3 0x346730 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_4 0x346734 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_5 0x346738 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_6 0x34673C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_7 0x346740 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_8 0x346744 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_9 0x346748 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_10 0x34674C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_11 0x346750 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_12 0x346754 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_13 0x346758 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_14 0x34675C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_15 0x346760 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_0 0x346764 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_1 0x346768 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_2 0x34676C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_3 0x346770 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_4 0x346774 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_5 0x346778 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_6 0x34677C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_7 0x346780 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_8 0x346784 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_9 0x346788 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_10 0x34678C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_11 0x346790 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_12 0x346794 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_13 0x346798 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_14 0x34679C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_15 0x3467A0 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_0 0x3467A4 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_1 0x3467A8 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_2 0x3467AC + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_3 0x3467B0 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_4 0x3467B4 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_5 0x3467B8 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_6 0x3467BC + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_7 0x3467C0 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_8 0x3467C4 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_9 0x3467C8 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_10 0x3467CC + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_11 0x3467D0 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_12 0x3467D4 + +#define 
mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_13 0x3467D8 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_14 0x3467DC + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_15 0x3467E0 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_0 0x346824 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_1 0x346828 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_2 0x34682C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_3 0x346830 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_4 0x346834 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_5 0x346838 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_6 0x34683C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_7 0x346840 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_8 0x346844 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_9 0x346848 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_10 0x34684C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_11 0x346850 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_12 0x346854 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_13 0x346858 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_14 0x34685C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_15 0x346860 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_0 0x346864 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_1 0x346868 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_2 0x34686C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_3 0x346870 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_4 0x346874 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_5 0x346878 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_6 0x34687C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_7 0x346880 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_8 0x346884 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_9 0x346888 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_10 0x34688C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_11 0x346890 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_12 0x346894 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_13 0x346898 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_14 0x34689C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_15 0x3468A0 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_0 0x3468A4 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_1 0x3468A8 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_2 0x3468AC + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_3 0x3468B0 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_4 0x3468B4 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_5 0x3468B8 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_6 0x3468BC + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_7 0x3468C0 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_8 0x3468C4 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_9 0x3468C8 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_10 0x3468CC + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_11 0x3468D0 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_12 0x3468D4 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_13 0x3468D8 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_14 0x3468DC + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_15 0x3468E0 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_0 0x3468E4 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_1 0x3468E8 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_2 0x3468EC + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_3 0x3468F0 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_4 0x3468F4 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_5 0x3468F8 + +#define 
mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_6 0x3468FC + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_7 0x346900 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_8 0x346904 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_9 0x346908 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_10 0x34690C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_11 0x346910 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_12 0x346914 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_13 0x346918 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_14 0x34691C + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_15 0x346920 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_0 0x346924 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_1 0x346928 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_2 0x34692C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_3 0x346930 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_4 0x346934 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_5 0x346938 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_6 0x34693C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_7 0x346940 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_8 0x346944 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_9 0x346948 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_10 0x34694C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_11 0x346950 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_12 0x346954 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_13 0x346958 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_14 0x34695C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_15 0x346960 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_0 0x346964 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_1 0x346968 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_2 0x34696C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_3 0x346970 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_4 0x346974 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_5 0x346978 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_6 0x34697C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_7 0x346980 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_8 0x346984 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_9 0x346988 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_10 0x34698C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_11 0x346990 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_12 0x346994 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_13 0x346998 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_14 0x34699C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_15 0x3469A0 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_0 0x3469A4 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_1 0x3469A8 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_2 0x3469AC + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_3 0x3469B0 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_4 0x3469B4 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_5 0x3469B8 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_6 0x3469BC + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_7 0x3469C0 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_8 0x3469C4 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_9 0x3469C8 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_10 0x3469CC + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_11 0x3469D0 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_12 0x3469D4 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_13 0x3469D8 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_14 
0x3469DC + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_15 0x3469E0 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_0 0x3469E4 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_1 0x3469E8 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_2 0x3469EC + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_3 0x3469F0 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_4 0x3469F4 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_5 0x3469F8 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_6 0x3469FC + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_7 0x346A00 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_8 0x346A04 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_9 0x346A08 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_10 0x346A0C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_11 0x346A10 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_12 0x346A14 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_13 0x346A18 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_14 0x346A1C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_15 0x346A20 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_HIT_AW 0x346A64 + +#define mmSIF_RTR_CTRL_4_RANGE_SEC_HIT_AR 0x346A68 + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_HIT_AW 0x346A6C + +#define mmSIF_RTR_CTRL_4_RANGE_PRIV_HIT_AR 0x346A70 + +#define mmSIF_RTR_CTRL_4_RGL_CFG 0x346B64 + +#define mmSIF_RTR_CTRL_4_RGL_SHIFT 0x346B68 + +#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_0 0x346B6C + +#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_1 0x346B70 + +#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_2 0x346B74 + +#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_3 0x346B78 + +#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_4 0x346B7C + +#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_5 0x346B80 + +#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_6 0x346B84 + +#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_7 0x346B88 + +#define mmSIF_RTR_CTRL_4_RGL_TOKEN_0 0x346BAC + +#define mmSIF_RTR_CTRL_4_RGL_TOKEN_1 0x346BB0 + +#define mmSIF_RTR_CTRL_4_RGL_TOKEN_2 0x346BB4 + +#define mmSIF_RTR_CTRL_4_RGL_TOKEN_3 0x346BB8 + +#define mmSIF_RTR_CTRL_4_RGL_TOKEN_4 0x346BBC + +#define mmSIF_RTR_CTRL_4_RGL_TOKEN_5 0x346BC0 + +#define mmSIF_RTR_CTRL_4_RGL_TOKEN_6 0x346BC4 + +#define mmSIF_RTR_CTRL_4_RGL_TOKEN_7 0x346BC8 + +#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_0 0x346BEC + +#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_1 0x346BF0 + +#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_2 0x346BF4 + +#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_3 0x346BF8 + +#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_4 0x346BFC + +#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_5 0x346C00 + +#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_6 0x346C04 + +#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_7 0x346C08 + +#define mmSIF_RTR_CTRL_4_RGL_WDT 0x346C2C + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM0_CH0_CTR_WRAP 0x346C30 + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM0_CH1_CTR_WRAP 0x346C34 + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM1_CH0_CTR_WRAP 0x346C38 + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM1_CH1_CTR_WRAP 0x346C3C + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM2_CH0_CTR_WRAP 0x346C40 + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM2_CH1_CTR_WRAP 0x346C44 + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM3_CH0_CTR_WRAP 0x346C48 + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM3_CH1_CTR_WRAP 0x346C4C + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM0_CH0_CTR_CNT 0x346C50 + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM0_CH1_CTR_CNT 0x346C54 + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM1_CH0_CTR_CNT 0x346C58 + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM1_CH1_CTR_CNT 0x346C5C + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM2_CH0_CTR_CNT 0x346C60 + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM2_CH1_CTR_CNT 0x346C64 + 
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM3_CH0_CTR_CNT 0x346C68 + +#define mmSIF_RTR_CTRL_4_E2E_AR_HBM3_CH1_CTR_CNT 0x346C6C + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM0_CH0_CTR_WRAP 0x346C70 + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM0_CH1_CTR_WRAP 0x346C74 + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM1_CH0_CTR_WRAP 0x346C78 + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM1_CH1_CTR_WRAP 0x346C7C + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM2_CH0_CTR_WRAP 0x346C80 + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM2_CH1_CTR_WRAP 0x346C84 + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM3_CH0_CTR_WRAP 0x346C88 + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM3_CH1_CTR_WRAP 0x346C8C + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM0_CH0_CTR_CNT 0x346C90 + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM0_CH1_CTR_CNT 0x346C94 + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM1_CH0_CTR_CNT 0x346C98 + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM1_CH1_CTR_CNT 0x346C9C + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM2_CH0_CTR_CNT 0x346CA0 + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM2_CH1_CTR_CNT 0x346CA4 + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM3_CH0_CTR_CNT 0x346CA8 + +#define mmSIF_RTR_CTRL_4_E2E_AW_HBM3_CH1_CTR_CNT 0x346CAC + +#define mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_0 0x346CB0 + +#define mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_1 0x346CB4 + +#define mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_2 0x346CB8 + +#define mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_3 0x346CBC + +#endif /* ASIC_REG_SIF_RTR_CTRL_4_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h new file mode 100644 index 000000000..6c27850ca --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h @@ -0,0 +1,896 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SIF_RTR_CTRL_5_REGS_H_ +#define ASIC_REG_SIF_RTR_CTRL_5_REGS_H_ + +/* + ***************************************** + * SIF_RTR_CTRL_5 (Prototype: RTR_CTRL) + ***************************************** + */ + +#define mmSIF_RTR_CTRL_5_PERM_SEL 0x356108 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_0 0x356114 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_1 0x356118 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_2 0x35611C + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_3 0x356120 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_4 0x356124 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_5 0x356128 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_6 0x35612C + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_7 0x356130 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_8 0x356134 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_9 0x356138 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_10 0x35613C + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_11 0x356140 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_12 0x356144 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_13 0x356148 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_14 0x35614C + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_15 0x356150 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_16 0x356154 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_17 0x356158 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_18 0x35615C + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_19 0x356160 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_20 0x356164 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_21 0x356168 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_22 0x35616C + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_23 0x356170 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_24 0x356174 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_25 0x356178 + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_26 0x35617C + +#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_27 0x356180 + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_0 0x356184 + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_1 0x356188 + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_2 0x35618C + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_3 0x356190 + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_4 0x356194 + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_5 0x356198 + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_6 0x35619C + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_7 0x3561A0 + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_8 0x3561A4 + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_9 0x3561A8 + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_10 0x3561AC + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_11 0x3561B0 + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_12 0x3561B4 + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_13 0x3561B8 + +#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_14 0x3561BC + +#define mmSIF_RTR_CTRL_5_SCRAM_SRAM_EN 0x35626C + +#define mmSIF_RTR_CTRL_5_RL_HBM_EN 0x356274 + +#define mmSIF_RTR_CTRL_5_RL_HBM_SAT 0x356278 + +#define mmSIF_RTR_CTRL_5_RL_HBM_RST 0x35627C + +#define mmSIF_RTR_CTRL_5_RL_HBM_TIMEOUT 0x356280 + +#define mmSIF_RTR_CTRL_5_SCRAM_HBM_EN 0x356284 + +#define mmSIF_RTR_CTRL_5_RL_PCI_EN 0x356288 + +#define mmSIF_RTR_CTRL_5_RL_PCI_SAT 0x35628C + +#define mmSIF_RTR_CTRL_5_RL_PCI_RST 0x356290 + +#define mmSIF_RTR_CTRL_5_RL_PCI_TIMEOUT 0x356294 + +#define mmSIF_RTR_CTRL_5_RL_SRAM_EN 0x35629C + +#define mmSIF_RTR_CTRL_5_RL_SRAM_SAT 0x3562A0 + +#define mmSIF_RTR_CTRL_5_RL_SRAM_RST 0x3562A4 + +#define mmSIF_RTR_CTRL_5_RL_SRAM_TIMEOUT 0x3562AC + +#define mmSIF_RTR_CTRL_5_RL_SRAM_RED 0x3562B4 + +#define mmSIF_RTR_CTRL_5_E2E_HBM_EN 0x3562EC + +#define mmSIF_RTR_CTRL_5_E2E_PCI_EN 0x3562F0 + +#define mmSIF_RTR_CTRL_5_E2E_HBM_WR_SIZE 0x3562F4 + +#define 
mmSIF_RTR_CTRL_5_E2E_PCI_WR_SIZE 0x3562F8 + +#define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_SET_EN 0x356404 + +#define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_SET 0x356408 + +#define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_WRAP 0x35640C + +#define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_CNT 0x356410 + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM_CTR_SET_EN 0x356414 + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM_CTR_SET 0x356418 + +#define mmSIF_RTR_CTRL_5_E2E_HBM_RD_SIZE 0x35641C + +#define mmSIF_RTR_CTRL_5_E2E_PCI_RD_SIZE 0x356420 + +#define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_SET_EN 0x356424 + +#define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_SET 0x356428 + +#define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_WRAP 0x35642C + +#define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_CNT 0x356430 + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM_CTR_SET_EN 0x356434 + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM_CTR_SET 0x356438 + +#define mmSIF_RTR_CTRL_5_NL_HBM_SEL_0 0x356450 + +#define mmSIF_RTR_CTRL_5_NL_HBM_SEL_1 0x356454 + +#define mmSIF_RTR_CTRL_5_NON_LIN_EN 0x356480 + +#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_0 0x356500 + +#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_1 0x356504 + +#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_2 0x356508 + +#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_3 0x35650C + +#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_4 0x356510 + +#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_0 0x356514 + +#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_1 0x356520 + +#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_2 0x356524 + +#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_3 0x356528 + +#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_4 0x35652C + +#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_5 0x356530 + +#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_6 0x356534 + +#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_7 0x356538 + +#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_8 0x35653C + +#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_9 0x356540 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_0 0x356550 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_1 0x356554 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_2 0x356558 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_3 0x35655C + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_4 0x356560 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_5 0x356564 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_6 0x356568 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_7 0x35656C + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_8 0x356570 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_9 0x356574 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_10 0x356578 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_11 0x35657C + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_12 0x356580 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_13 0x356584 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_14 0x356588 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_15 0x35658C + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_16 0x356590 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_17 0x356594 + +#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_18 0x356598 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0 0x3565E4 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_1 0x3565E8 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_2 0x3565EC + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_3 0x3565F0 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_4 0x3565F4 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_5 0x3565F8 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_6 0x3565FC + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_7 0x356600 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_8 0x356604 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_9 0x356608 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_10 0x35660C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_11 0x356610 + +#define 
mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_12 0x356614 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_13 0x356618 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_14 0x35661C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_15 0x356620 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0 0x356624 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_1 0x356628 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_2 0x35662C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_3 0x356630 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_4 0x356634 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_5 0x356638 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_6 0x35663C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_7 0x356640 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_8 0x356644 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_9 0x356648 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_10 0x35664C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_11 0x356650 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_12 0x356654 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_13 0x356658 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_14 0x35665C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_15 0x356660 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0 0x356664 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_1 0x356668 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_2 0x35666C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_3 0x356670 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_4 0x356674 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_5 0x356678 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_6 0x35667C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_7 0x356680 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_8 0x356684 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_9 0x356688 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_10 0x35668C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_11 0x356690 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_12 0x356694 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_13 0x356698 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_14 0x35669C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_15 0x3566A0 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0 0x3566A4 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_1 0x3566A8 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_2 0x3566AC + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_3 0x3566B0 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_4 0x3566B4 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_5 0x3566B8 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_6 0x3566BC + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_7 0x3566C0 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_8 0x3566C4 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_9 0x3566C8 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_10 0x3566CC + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_11 0x3566D0 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_12 0x3566D4 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_13 0x3566D8 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_14 0x3566DC + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_15 0x3566E0 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_0 0x3566E4 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_1 0x3566E8 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_2 0x3566EC + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_3 0x3566F0 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_4 0x3566F4 + +#define 
mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_5 0x3566F8 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_6 0x3566FC + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_7 0x356700 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_8 0x356704 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_9 0x356708 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_10 0x35670C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_11 0x356710 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_12 0x356714 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_13 0x356718 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_14 0x35671C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_15 0x356720 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_0 0x356724 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_1 0x356728 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_2 0x35672C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_3 0x356730 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_4 0x356734 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_5 0x356738 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_6 0x35673C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_7 0x356740 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_8 0x356744 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_9 0x356748 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_10 0x35674C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_11 0x356750 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_12 0x356754 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_13 0x356758 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_14 0x35675C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_15 0x356760 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_0 0x356764 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_1 0x356768 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_2 0x35676C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_3 0x356770 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_4 0x356774 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_5 0x356778 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_6 0x35677C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_7 0x356780 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_8 0x356784 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_9 0x356788 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_10 0x35678C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_11 0x356790 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_12 0x356794 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_13 0x356798 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_14 0x35679C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_15 0x3567A0 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_0 0x3567A4 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_1 0x3567A8 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_2 0x3567AC + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_3 0x3567B0 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_4 0x3567B4 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_5 0x3567B8 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_6 0x3567BC + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_7 0x3567C0 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_8 0x3567C4 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_9 0x3567C8 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_10 0x3567CC + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_11 0x3567D0 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_12 0x3567D4 + +#define 
mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_13 0x3567D8 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_14 0x3567DC + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_15 0x3567E0 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0 0x356824 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_1 0x356828 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_2 0x35682C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_3 0x356830 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_4 0x356834 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_5 0x356838 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_6 0x35683C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_7 0x356840 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_8 0x356844 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_9 0x356848 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_10 0x35684C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_11 0x356850 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_12 0x356854 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_13 0x356858 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_14 0x35685C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_15 0x356860 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0 0x356864 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_1 0x356868 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_2 0x35686C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_3 0x356870 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_4 0x356874 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_5 0x356878 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_6 0x35687C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_7 0x356880 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_8 0x356884 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_9 0x356888 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_10 0x35688C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_11 0x356890 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_12 0x356894 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_13 0x356898 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_14 0x35689C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_15 0x3568A0 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0 0x3568A4 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_1 0x3568A8 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_2 0x3568AC + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_3 0x3568B0 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_4 0x3568B4 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_5 0x3568B8 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_6 0x3568BC + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_7 0x3568C0 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_8 0x3568C4 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_9 0x3568C8 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_10 0x3568CC + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_11 0x3568D0 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_12 0x3568D4 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_13 0x3568D8 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_14 0x3568DC + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_15 0x3568E0 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0 0x3568E4 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_1 0x3568E8 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_2 0x3568EC + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_3 0x3568F0 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_4 0x3568F4 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_5 0x3568F8 + +#define 
mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_6 0x3568FC + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_7 0x356900 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_8 0x356904 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_9 0x356908 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_10 0x35690C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_11 0x356910 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_12 0x356914 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_13 0x356918 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_14 0x35691C + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_15 0x356920 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_0 0x356924 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_1 0x356928 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_2 0x35692C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_3 0x356930 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_4 0x356934 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_5 0x356938 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_6 0x35693C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_7 0x356940 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_8 0x356944 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_9 0x356948 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_10 0x35694C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_11 0x356950 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_12 0x356954 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_13 0x356958 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_14 0x35695C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_15 0x356960 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_0 0x356964 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_1 0x356968 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_2 0x35696C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_3 0x356970 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_4 0x356974 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_5 0x356978 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_6 0x35697C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_7 0x356980 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_8 0x356984 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_9 0x356988 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_10 0x35698C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_11 0x356990 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_12 0x356994 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_13 0x356998 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_14 0x35699C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_15 0x3569A0 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_0 0x3569A4 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_1 0x3569A8 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_2 0x3569AC + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_3 0x3569B0 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_4 0x3569B4 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_5 0x3569B8 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_6 0x3569BC + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_7 0x3569C0 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_8 0x3569C4 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_9 0x3569C8 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_10 0x3569CC + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_11 0x3569D0 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_12 0x3569D4 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_13 0x3569D8 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_14 
0x3569DC + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_15 0x3569E0 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_0 0x3569E4 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_1 0x3569E8 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_2 0x3569EC + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_3 0x3569F0 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_4 0x3569F4 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_5 0x3569F8 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_6 0x3569FC + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_7 0x356A00 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_8 0x356A04 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_9 0x356A08 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_10 0x356A0C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_11 0x356A10 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_12 0x356A14 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_13 0x356A18 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_14 0x356A1C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_15 0x356A20 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AW 0x356A64 + +#define mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AR 0x356A68 + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_HIT_AW 0x356A6C + +#define mmSIF_RTR_CTRL_5_RANGE_PRIV_HIT_AR 0x356A70 + +#define mmSIF_RTR_CTRL_5_RGL_CFG 0x356B64 + +#define mmSIF_RTR_CTRL_5_RGL_SHIFT 0x356B68 + +#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_0 0x356B6C + +#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_1 0x356B70 + +#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_2 0x356B74 + +#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_3 0x356B78 + +#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_4 0x356B7C + +#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_5 0x356B80 + +#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_6 0x356B84 + +#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_7 0x356B88 + +#define mmSIF_RTR_CTRL_5_RGL_TOKEN_0 0x356BAC + +#define mmSIF_RTR_CTRL_5_RGL_TOKEN_1 0x356BB0 + +#define mmSIF_RTR_CTRL_5_RGL_TOKEN_2 0x356BB4 + +#define mmSIF_RTR_CTRL_5_RGL_TOKEN_3 0x356BB8 + +#define mmSIF_RTR_CTRL_5_RGL_TOKEN_4 0x356BBC + +#define mmSIF_RTR_CTRL_5_RGL_TOKEN_5 0x356BC0 + +#define mmSIF_RTR_CTRL_5_RGL_TOKEN_6 0x356BC4 + +#define mmSIF_RTR_CTRL_5_RGL_TOKEN_7 0x356BC8 + +#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_0 0x356BEC + +#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_1 0x356BF0 + +#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_2 0x356BF4 + +#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_3 0x356BF8 + +#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_4 0x356BFC + +#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_5 0x356C00 + +#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_6 0x356C04 + +#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_7 0x356C08 + +#define mmSIF_RTR_CTRL_5_RGL_WDT 0x356C2C + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH0_CTR_WRAP 0x356C30 + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH1_CTR_WRAP 0x356C34 + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH0_CTR_WRAP 0x356C38 + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH1_CTR_WRAP 0x356C3C + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH0_CTR_WRAP 0x356C40 + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH1_CTR_WRAP 0x356C44 + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH0_CTR_WRAP 0x356C48 + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH1_CTR_WRAP 0x356C4C + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH0_CTR_CNT 0x356C50 + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH1_CTR_CNT 0x356C54 + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH0_CTR_CNT 0x356C58 + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH1_CTR_CNT 0x356C5C + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH0_CTR_CNT 0x356C60 + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH1_CTR_CNT 0x356C64 + 
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH0_CTR_CNT 0x356C68 + +#define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH1_CTR_CNT 0x356C6C + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH0_CTR_WRAP 0x356C70 + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH1_CTR_WRAP 0x356C74 + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH0_CTR_WRAP 0x356C78 + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH1_CTR_WRAP 0x356C7C + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH0_CTR_WRAP 0x356C80 + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH1_CTR_WRAP 0x356C84 + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH0_CTR_WRAP 0x356C88 + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH1_CTR_WRAP 0x356C8C + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH0_CTR_CNT 0x356C90 + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH1_CTR_CNT 0x356C94 + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH0_CTR_CNT 0x356C98 + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH1_CTR_CNT 0x356C9C + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH0_CTR_CNT 0x356CA0 + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH1_CTR_CNT 0x356CA4 + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH0_CTR_CNT 0x356CA8 + +#define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH1_CTR_CNT 0x356CAC + +#define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_0 0x356CB0 + +#define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_1 0x356CB4 + +#define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_2 0x356CB8 + +#define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_3 0x356CBC + +#endif /* ASIC_REG_SIF_RTR_CTRL_5_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h new file mode 100644 index 000000000..a9ea89aa6 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h @@ -0,0 +1,896 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SIF_RTR_CTRL_6_REGS_H_ +#define ASIC_REG_SIF_RTR_CTRL_6_REGS_H_ + +/* + ***************************************** + * SIF_RTR_CTRL_6 (Prototype: RTR_CTRL) + ***************************************** + */ + +#define mmSIF_RTR_CTRL_6_PERM_SEL 0x366108 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_0 0x366114 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_1 0x366118 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_2 0x36611C + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_3 0x366120 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_4 0x366124 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_5 0x366128 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_6 0x36612C + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_7 0x366130 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_8 0x366134 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_9 0x366138 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_10 0x36613C + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_11 0x366140 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_12 0x366144 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_13 0x366148 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_14 0x36614C + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_15 0x366150 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_16 0x366154 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_17 0x366158 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_18 0x36615C + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_19 0x366160 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_20 0x366164 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_21 0x366168 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_22 0x36616C + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_23 0x366170 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_24 0x366174 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_25 0x366178 + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_26 0x36617C + +#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_27 0x366180 + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_0 0x366184 + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_1 0x366188 + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_2 0x36618C + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_3 0x366190 + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_4 0x366194 + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_5 0x366198 + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_6 0x36619C + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_7 0x3661A0 + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_8 0x3661A4 + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_9 0x3661A8 + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_10 0x3661AC + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_11 0x3661B0 + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_12 0x3661B4 + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_13 0x3661B8 + +#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_14 0x3661BC + +#define mmSIF_RTR_CTRL_6_SCRAM_SRAM_EN 0x36626C + +#define mmSIF_RTR_CTRL_6_RL_HBM_EN 0x366274 + +#define mmSIF_RTR_CTRL_6_RL_HBM_SAT 0x366278 + +#define mmSIF_RTR_CTRL_6_RL_HBM_RST 0x36627C + +#define mmSIF_RTR_CTRL_6_RL_HBM_TIMEOUT 0x366280 + +#define mmSIF_RTR_CTRL_6_SCRAM_HBM_EN 0x366284 + +#define mmSIF_RTR_CTRL_6_RL_PCI_EN 0x366288 + +#define mmSIF_RTR_CTRL_6_RL_PCI_SAT 0x36628C + +#define mmSIF_RTR_CTRL_6_RL_PCI_RST 0x366290 + +#define mmSIF_RTR_CTRL_6_RL_PCI_TIMEOUT 0x366294 + +#define mmSIF_RTR_CTRL_6_RL_SRAM_EN 0x36629C + +#define mmSIF_RTR_CTRL_6_RL_SRAM_SAT 0x3662A0 + +#define mmSIF_RTR_CTRL_6_RL_SRAM_RST 0x3662A4 + +#define mmSIF_RTR_CTRL_6_RL_SRAM_TIMEOUT 0x3662AC + +#define mmSIF_RTR_CTRL_6_RL_SRAM_RED 0x3662B4 + +#define mmSIF_RTR_CTRL_6_E2E_HBM_EN 0x3662EC + +#define mmSIF_RTR_CTRL_6_E2E_PCI_EN 0x3662F0 + +#define mmSIF_RTR_CTRL_6_E2E_HBM_WR_SIZE 0x3662F4 + +#define 
mmSIF_RTR_CTRL_6_E2E_PCI_WR_SIZE 0x3662F8 + +#define mmSIF_RTR_CTRL_6_E2E_AW_PCI_CTR_SET_EN 0x366404 + +#define mmSIF_RTR_CTRL_6_E2E_AW_PCI_CTR_SET 0x366408 + +#define mmSIF_RTR_CTRL_6_E2E_AW_PCI_CTR_WRAP 0x36640C + +#define mmSIF_RTR_CTRL_6_E2E_AW_PCI_CTR_CNT 0x366410 + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM_CTR_SET_EN 0x366414 + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM_CTR_SET 0x366418 + +#define mmSIF_RTR_CTRL_6_E2E_HBM_RD_SIZE 0x36641C + +#define mmSIF_RTR_CTRL_6_E2E_PCI_RD_SIZE 0x366420 + +#define mmSIF_RTR_CTRL_6_E2E_AR_PCI_CTR_SET_EN 0x366424 + +#define mmSIF_RTR_CTRL_6_E2E_AR_PCI_CTR_SET 0x366428 + +#define mmSIF_RTR_CTRL_6_E2E_AR_PCI_CTR_WRAP 0x36642C + +#define mmSIF_RTR_CTRL_6_E2E_AR_PCI_CTR_CNT 0x366430 + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM_CTR_SET_EN 0x366434 + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM_CTR_SET 0x366438 + +#define mmSIF_RTR_CTRL_6_NL_HBM_SEL_0 0x366450 + +#define mmSIF_RTR_CTRL_6_NL_HBM_SEL_1 0x366454 + +#define mmSIF_RTR_CTRL_6_NON_LIN_EN 0x366480 + +#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_0 0x366500 + +#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_1 0x366504 + +#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_2 0x366508 + +#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_3 0x36650C + +#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_4 0x366510 + +#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_0 0x366514 + +#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_1 0x366520 + +#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_2 0x366524 + +#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_3 0x366528 + +#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_4 0x36652C + +#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_5 0x366530 + +#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_6 0x366534 + +#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_7 0x366538 + +#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_8 0x36653C + +#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_9 0x366540 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_0 0x366550 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_1 0x366554 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_2 0x366558 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_3 0x36655C + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_4 0x366560 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_5 0x366564 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_6 0x366568 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_7 0x36656C + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_8 0x366570 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_9 0x366574 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_10 0x366578 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_11 0x36657C + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_12 0x366580 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_13 0x366584 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_14 0x366588 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_15 0x36658C + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_16 0x366590 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_17 0x366594 + +#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_18 0x366598 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_0 0x3665E4 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_1 0x3665E8 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_2 0x3665EC + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_3 0x3665F0 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_4 0x3665F4 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_5 0x3665F8 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_6 0x3665FC + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_7 0x366600 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_8 0x366604 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_9 0x366608 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_10 0x36660C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_11 0x366610 + +#define 
mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_12 0x366614 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_13 0x366618 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_14 0x36661C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_15 0x366620 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_0 0x366624 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_1 0x366628 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_2 0x36662C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_3 0x366630 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_4 0x366634 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_5 0x366638 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_6 0x36663C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_7 0x366640 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_8 0x366644 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_9 0x366648 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_10 0x36664C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_11 0x366650 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_12 0x366654 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_13 0x366658 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_14 0x36665C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_15 0x366660 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_0 0x366664 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_1 0x366668 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_2 0x36666C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_3 0x366670 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_4 0x366674 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_5 0x366678 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_6 0x36667C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_7 0x366680 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_8 0x366684 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_9 0x366688 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_10 0x36668C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_11 0x366690 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_12 0x366694 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_13 0x366698 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_14 0x36669C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_15 0x3666A0 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_0 0x3666A4 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_1 0x3666A8 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_2 0x3666AC + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_3 0x3666B0 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_4 0x3666B4 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_5 0x3666B8 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_6 0x3666BC + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_7 0x3666C0 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_8 0x3666C4 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_9 0x3666C8 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_10 0x3666CC + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_11 0x3666D0 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_12 0x3666D4 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_13 0x3666D8 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_14 0x3666DC + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_15 0x3666E0 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_0 0x3666E4 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_1 0x3666E8 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_2 0x3666EC + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_3 0x3666F0 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_4 0x3666F4 + +#define 
mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_5 0x3666F8 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_6 0x3666FC + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_7 0x366700 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_8 0x366704 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_9 0x366708 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_10 0x36670C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_11 0x366710 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_12 0x366714 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_13 0x366718 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_14 0x36671C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_15 0x366720 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_0 0x366724 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_1 0x366728 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_2 0x36672C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_3 0x366730 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_4 0x366734 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_5 0x366738 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_6 0x36673C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_7 0x366740 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_8 0x366744 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_9 0x366748 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_10 0x36674C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_11 0x366750 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_12 0x366754 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_13 0x366758 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_14 0x36675C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_15 0x366760 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_0 0x366764 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_1 0x366768 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_2 0x36676C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_3 0x366770 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_4 0x366774 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_5 0x366778 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_6 0x36677C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_7 0x366780 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_8 0x366784 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_9 0x366788 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_10 0x36678C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_11 0x366790 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_12 0x366794 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_13 0x366798 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_14 0x36679C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_15 0x3667A0 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_0 0x3667A4 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_1 0x3667A8 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_2 0x3667AC + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_3 0x3667B0 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_4 0x3667B4 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_5 0x3667B8 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_6 0x3667BC + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_7 0x3667C0 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_8 0x3667C4 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_9 0x3667C8 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_10 0x3667CC + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_11 0x3667D0 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_12 0x3667D4 + +#define 
mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_13 0x3667D8 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_14 0x3667DC + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_15 0x3667E0 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_0 0x366824 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_1 0x366828 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_2 0x36682C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_3 0x366830 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_4 0x366834 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_5 0x366838 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_6 0x36683C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_7 0x366840 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_8 0x366844 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_9 0x366848 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_10 0x36684C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_11 0x366850 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_12 0x366854 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_13 0x366858 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_14 0x36685C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_15 0x366860 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_0 0x366864 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_1 0x366868 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_2 0x36686C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_3 0x366870 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_4 0x366874 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_5 0x366878 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_6 0x36687C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_7 0x366880 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_8 0x366884 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_9 0x366888 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_10 0x36688C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_11 0x366890 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_12 0x366894 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_13 0x366898 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_14 0x36689C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_15 0x3668A0 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_0 0x3668A4 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_1 0x3668A8 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_2 0x3668AC + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_3 0x3668B0 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_4 0x3668B4 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_5 0x3668B8 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_6 0x3668BC + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_7 0x3668C0 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_8 0x3668C4 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_9 0x3668C8 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_10 0x3668CC + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_11 0x3668D0 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_12 0x3668D4 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_13 0x3668D8 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_14 0x3668DC + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_15 0x3668E0 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_0 0x3668E4 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_1 0x3668E8 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_2 0x3668EC + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_3 0x3668F0 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_4 0x3668F4 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_5 0x3668F8 + +#define 
mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_6 0x3668FC + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_7 0x366900 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_8 0x366904 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_9 0x366908 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_10 0x36690C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_11 0x366910 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_12 0x366914 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_13 0x366918 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_14 0x36691C + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_15 0x366920 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_0 0x366924 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_1 0x366928 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_2 0x36692C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_3 0x366930 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_4 0x366934 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_5 0x366938 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_6 0x36693C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_7 0x366940 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_8 0x366944 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_9 0x366948 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_10 0x36694C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_11 0x366950 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_12 0x366954 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_13 0x366958 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_14 0x36695C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_15 0x366960 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_0 0x366964 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_1 0x366968 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_2 0x36696C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_3 0x366970 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_4 0x366974 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_5 0x366978 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_6 0x36697C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_7 0x366980 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_8 0x366984 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_9 0x366988 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_10 0x36698C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_11 0x366990 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_12 0x366994 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_13 0x366998 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_14 0x36699C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_15 0x3669A0 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_0 0x3669A4 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_1 0x3669A8 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_2 0x3669AC + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_3 0x3669B0 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_4 0x3669B4 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_5 0x3669B8 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_6 0x3669BC + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_7 0x3669C0 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_8 0x3669C4 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_9 0x3669C8 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_10 0x3669CC + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_11 0x3669D0 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_12 0x3669D4 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_13 0x3669D8 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_14 
0x3669DC + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_15 0x3669E0 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_0 0x3669E4 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_1 0x3669E8 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_2 0x3669EC + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_3 0x3669F0 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_4 0x3669F4 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_5 0x3669F8 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_6 0x3669FC + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_7 0x366A00 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_8 0x366A04 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_9 0x366A08 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_10 0x366A0C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_11 0x366A10 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_12 0x366A14 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_13 0x366A18 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_14 0x366A1C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_15 0x366A20 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_HIT_AW 0x366A64 + +#define mmSIF_RTR_CTRL_6_RANGE_SEC_HIT_AR 0x366A68 + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_HIT_AW 0x366A6C + +#define mmSIF_RTR_CTRL_6_RANGE_PRIV_HIT_AR 0x366A70 + +#define mmSIF_RTR_CTRL_6_RGL_CFG 0x366B64 + +#define mmSIF_RTR_CTRL_6_RGL_SHIFT 0x366B68 + +#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_0 0x366B6C + +#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_1 0x366B70 + +#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_2 0x366B74 + +#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_3 0x366B78 + +#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_4 0x366B7C + +#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_5 0x366B80 + +#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_6 0x366B84 + +#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_7 0x366B88 + +#define mmSIF_RTR_CTRL_6_RGL_TOKEN_0 0x366BAC + +#define mmSIF_RTR_CTRL_6_RGL_TOKEN_1 0x366BB0 + +#define mmSIF_RTR_CTRL_6_RGL_TOKEN_2 0x366BB4 + +#define mmSIF_RTR_CTRL_6_RGL_TOKEN_3 0x366BB8 + +#define mmSIF_RTR_CTRL_6_RGL_TOKEN_4 0x366BBC + +#define mmSIF_RTR_CTRL_6_RGL_TOKEN_5 0x366BC0 + +#define mmSIF_RTR_CTRL_6_RGL_TOKEN_6 0x366BC4 + +#define mmSIF_RTR_CTRL_6_RGL_TOKEN_7 0x366BC8 + +#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_0 0x366BEC + +#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_1 0x366BF0 + +#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_2 0x366BF4 + +#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_3 0x366BF8 + +#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_4 0x366BFC + +#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_5 0x366C00 + +#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_6 0x366C04 + +#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_7 0x366C08 + +#define mmSIF_RTR_CTRL_6_RGL_WDT 0x366C2C + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM0_CH0_CTR_WRAP 0x366C30 + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM0_CH1_CTR_WRAP 0x366C34 + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM1_CH0_CTR_WRAP 0x366C38 + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM1_CH1_CTR_WRAP 0x366C3C + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM2_CH0_CTR_WRAP 0x366C40 + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM2_CH1_CTR_WRAP 0x366C44 + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM3_CH0_CTR_WRAP 0x366C48 + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM3_CH1_CTR_WRAP 0x366C4C + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM0_CH0_CTR_CNT 0x366C50 + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM0_CH1_CTR_CNT 0x366C54 + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM1_CH0_CTR_CNT 0x366C58 + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM1_CH1_CTR_CNT 0x366C5C + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM2_CH0_CTR_CNT 0x366C60 + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM2_CH1_CTR_CNT 0x366C64 + 
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM3_CH0_CTR_CNT 0x366C68 + +#define mmSIF_RTR_CTRL_6_E2E_AR_HBM3_CH1_CTR_CNT 0x366C6C + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM0_CH0_CTR_WRAP 0x366C70 + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM0_CH1_CTR_WRAP 0x366C74 + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM1_CH0_CTR_WRAP 0x366C78 + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM1_CH1_CTR_WRAP 0x366C7C + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM2_CH0_CTR_WRAP 0x366C80 + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM2_CH1_CTR_WRAP 0x366C84 + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM3_CH0_CTR_WRAP 0x366C88 + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM3_CH1_CTR_WRAP 0x366C8C + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM0_CH0_CTR_CNT 0x366C90 + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM0_CH1_CTR_CNT 0x366C94 + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM1_CH0_CTR_CNT 0x366C98 + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM1_CH1_CTR_CNT 0x366C9C + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM2_CH0_CTR_CNT 0x366CA0 + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM2_CH1_CTR_CNT 0x366CA4 + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM3_CH0_CTR_CNT 0x366CA8 + +#define mmSIF_RTR_CTRL_6_E2E_AW_HBM3_CH1_CTR_CNT 0x366CAC + +#define mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_0 0x366CB0 + +#define mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_1 0x366CB4 + +#define mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_2 0x366CB8 + +#define mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_3 0x366CBC + +#endif /* ASIC_REG_SIF_RTR_CTRL_6_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h new file mode 100644 index 000000000..a37772c53 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h @@ -0,0 +1,896 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SIF_RTR_CTRL_7_REGS_H_ +#define ASIC_REG_SIF_RTR_CTRL_7_REGS_H_ + +/* + ***************************************** + * SIF_RTR_CTRL_7 (Prototype: RTR_CTRL) + ***************************************** + */ + +#define mmSIF_RTR_CTRL_7_PERM_SEL 0x376108 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_0 0x376114 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_1 0x376118 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_2 0x37611C + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_3 0x376120 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_4 0x376124 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_5 0x376128 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_6 0x37612C + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_7 0x376130 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_8 0x376134 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_9 0x376138 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_10 0x37613C + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_11 0x376140 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_12 0x376144 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_13 0x376148 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_14 0x37614C + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_15 0x376150 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_16 0x376154 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_17 0x376158 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_18 0x37615C + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_19 0x376160 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_20 0x376164 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_21 0x376168 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_22 0x37616C + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_23 0x376170 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_24 0x376174 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_25 0x376178 + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_26 0x37617C + +#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_27 0x376180 + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_0 0x376184 + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_1 0x376188 + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_2 0x37618C + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_3 0x376190 + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_4 0x376194 + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_5 0x376198 + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_6 0x37619C + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_7 0x3761A0 + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_8 0x3761A4 + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_9 0x3761A8 + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_10 0x3761AC + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_11 0x3761B0 + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_12 0x3761B4 + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_13 0x3761B8 + +#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_14 0x3761BC + +#define mmSIF_RTR_CTRL_7_SCRAM_SRAM_EN 0x37626C + +#define mmSIF_RTR_CTRL_7_RL_HBM_EN 0x376274 + +#define mmSIF_RTR_CTRL_7_RL_HBM_SAT 0x376278 + +#define mmSIF_RTR_CTRL_7_RL_HBM_RST 0x37627C + +#define mmSIF_RTR_CTRL_7_RL_HBM_TIMEOUT 0x376280 + +#define mmSIF_RTR_CTRL_7_SCRAM_HBM_EN 0x376284 + +#define mmSIF_RTR_CTRL_7_RL_PCI_EN 0x376288 + +#define mmSIF_RTR_CTRL_7_RL_PCI_SAT 0x37628C + +#define mmSIF_RTR_CTRL_7_RL_PCI_RST 0x376290 + +#define mmSIF_RTR_CTRL_7_RL_PCI_TIMEOUT 0x376294 + +#define mmSIF_RTR_CTRL_7_RL_SRAM_EN 0x37629C + +#define mmSIF_RTR_CTRL_7_RL_SRAM_SAT 0x3762A0 + +#define mmSIF_RTR_CTRL_7_RL_SRAM_RST 0x3762A4 + +#define mmSIF_RTR_CTRL_7_RL_SRAM_TIMEOUT 0x3762AC + +#define mmSIF_RTR_CTRL_7_RL_SRAM_RED 0x3762B4 + +#define mmSIF_RTR_CTRL_7_E2E_HBM_EN 0x3762EC + +#define mmSIF_RTR_CTRL_7_E2E_PCI_EN 0x3762F0 + +#define mmSIF_RTR_CTRL_7_E2E_HBM_WR_SIZE 0x3762F4 + +#define 
mmSIF_RTR_CTRL_7_E2E_PCI_WR_SIZE 0x3762F8 + +#define mmSIF_RTR_CTRL_7_E2E_AW_PCI_CTR_SET_EN 0x376404 + +#define mmSIF_RTR_CTRL_7_E2E_AW_PCI_CTR_SET 0x376408 + +#define mmSIF_RTR_CTRL_7_E2E_AW_PCI_CTR_WRAP 0x37640C + +#define mmSIF_RTR_CTRL_7_E2E_AW_PCI_CTR_CNT 0x376410 + +#define mmSIF_RTR_CTRL_7_E2E_AW_HBM_CTR_SET_EN 0x376414 + +#define mmSIF_RTR_CTRL_7_E2E_AW_HBM_CTR_SET 0x376418 + +#define mmSIF_RTR_CTRL_7_E2E_HBM_RD_SIZE 0x37641C + +#define mmSIF_RTR_CTRL_7_E2E_PCI_RD_SIZE 0x376420 + +#define mmSIF_RTR_CTRL_7_E2E_AR_PCI_CTR_SET_EN 0x376424 + +#define mmSIF_RTR_CTRL_7_E2E_AR_PCI_CTR_SET 0x376428 + +#define mmSIF_RTR_CTRL_7_E2E_AR_PCI_CTR_WRAP 0x37642C + +#define mmSIF_RTR_CTRL_7_E2E_AR_PCI_CTR_CNT 0x376430 + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM_CTR_SET_EN 0x376434 + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM_CTR_SET 0x376438 + +#define mmSIF_RTR_CTRL_7_NL_HBM_SEL_0 0x376450 + +#define mmSIF_RTR_CTRL_7_NL_HBM_SEL_1 0x376454 + +#define mmSIF_RTR_CTRL_7_NON_LIN_EN 0x376480 + +#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_0 0x376500 + +#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_1 0x376504 + +#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_2 0x376508 + +#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_3 0x37650C + +#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_4 0x376510 + +#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_0 0x376514 + +#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_1 0x376520 + +#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_2 0x376524 + +#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_3 0x376528 + +#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_4 0x37652C + +#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_5 0x376530 + +#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_6 0x376534 + +#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_7 0x376538 + +#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_8 0x37653C + +#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_9 0x376540 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_0 0x376550 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_1 0x376554 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_2 0x376558 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_3 0x37655C + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_4 0x376560 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_5 0x376564 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_6 0x376568 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_7 0x37656C + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_8 0x376570 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_9 0x376574 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_10 0x376578 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_11 0x37657C + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_12 0x376580 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_13 0x376584 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_14 0x376588 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_15 0x37658C + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_16 0x376590 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_17 0x376594 + +#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_18 0x376598 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0 0x3765E4 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_1 0x3765E8 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_2 0x3765EC + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_3 0x3765F0 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_4 0x3765F4 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_5 0x3765F8 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_6 0x3765FC + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_7 0x376600 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_8 0x376604 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_9 0x376608 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_10 0x37660C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_11 0x376610 + +#define 
mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_12 0x376614 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_13 0x376618 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_14 0x37661C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_15 0x376620 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0 0x376624 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_1 0x376628 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_2 0x37662C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_3 0x376630 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_4 0x376634 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_5 0x376638 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_6 0x37663C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_7 0x376640 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_8 0x376644 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_9 0x376648 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_10 0x37664C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_11 0x376650 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_12 0x376654 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_13 0x376658 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_14 0x37665C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_15 0x376660 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0 0x376664 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_1 0x376668 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_2 0x37666C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_3 0x376670 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_4 0x376674 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_5 0x376678 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_6 0x37667C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_7 0x376680 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_8 0x376684 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_9 0x376688 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_10 0x37668C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_11 0x376690 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_12 0x376694 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_13 0x376698 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_14 0x37669C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_15 0x3766A0 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0 0x3766A4 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_1 0x3766A8 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_2 0x3766AC + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_3 0x3766B0 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_4 0x3766B4 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_5 0x3766B8 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_6 0x3766BC + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_7 0x3766C0 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_8 0x3766C4 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_9 0x3766C8 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_10 0x3766CC + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_11 0x3766D0 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_12 0x3766D4 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_13 0x3766D8 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_14 0x3766DC + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_15 0x3766E0 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_0 0x3766E4 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_1 0x3766E8 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_2 0x3766EC + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_3 0x3766F0 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_4 0x3766F4 + +#define 
mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_5 0x3766F8 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_6 0x3766FC + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_7 0x376700 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_8 0x376704 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_9 0x376708 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_10 0x37670C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_11 0x376710 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_12 0x376714 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_13 0x376718 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_14 0x37671C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_15 0x376720 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_0 0x376724 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_1 0x376728 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_2 0x37672C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_3 0x376730 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_4 0x376734 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_5 0x376738 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_6 0x37673C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_7 0x376740 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_8 0x376744 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_9 0x376748 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_10 0x37674C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_11 0x376750 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_12 0x376754 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_13 0x376758 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_14 0x37675C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_15 0x376760 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_0 0x376764 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_1 0x376768 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_2 0x37676C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_3 0x376770 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_4 0x376774 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_5 0x376778 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_6 0x37677C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_7 0x376780 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_8 0x376784 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_9 0x376788 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_10 0x37678C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_11 0x376790 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_12 0x376794 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_13 0x376798 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_14 0x37679C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_15 0x3767A0 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_0 0x3767A4 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_1 0x3767A8 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_2 0x3767AC + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_3 0x3767B0 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_4 0x3767B4 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_5 0x3767B8 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_6 0x3767BC + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_7 0x3767C0 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_8 0x3767C4 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_9 0x3767C8 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_10 0x3767CC + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_11 0x3767D0 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_12 0x3767D4 + +#define 
mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_13 0x3767D8 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_14 0x3767DC + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_15 0x3767E0 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0 0x376824 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_1 0x376828 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_2 0x37682C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_3 0x376830 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_4 0x376834 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_5 0x376838 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_6 0x37683C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_7 0x376840 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_8 0x376844 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_9 0x376848 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_10 0x37684C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_11 0x376850 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_12 0x376854 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_13 0x376858 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_14 0x37685C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_15 0x376860 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0 0x376864 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_1 0x376868 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_2 0x37686C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_3 0x376870 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_4 0x376874 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_5 0x376878 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_6 0x37687C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_7 0x376880 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_8 0x376884 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_9 0x376888 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_10 0x37688C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_11 0x376890 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_12 0x376894 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_13 0x376898 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_14 0x37689C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_15 0x3768A0 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0 0x3768A4 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_1 0x3768A8 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_2 0x3768AC + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_3 0x3768B0 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_4 0x3768B4 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_5 0x3768B8 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_6 0x3768BC + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_7 0x3768C0 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_8 0x3768C4 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_9 0x3768C8 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_10 0x3768CC + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_11 0x3768D0 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_12 0x3768D4 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_13 0x3768D8 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_14 0x3768DC + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_15 0x3768E0 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_0 0x3768E4 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_1 0x3768E8 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_2 0x3768EC + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_3 0x3768F0 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_4 0x3768F4 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_5 0x3768F8 + +#define 
mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_6 0x3768FC + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_7 0x376900 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_8 0x376904 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_9 0x376908 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_10 0x37690C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_11 0x376910 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_12 0x376914 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_13 0x376918 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_14 0x37691C + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_15 0x376920 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_0 0x376924 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_1 0x376928 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_2 0x37692C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_3 0x376930 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_4 0x376934 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_5 0x376938 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_6 0x37693C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_7 0x376940 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_8 0x376944 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_9 0x376948 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_10 0x37694C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_11 0x376950 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_12 0x376954 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_13 0x376958 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_14 0x37695C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_15 0x376960 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_0 0x376964 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_1 0x376968 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_2 0x37696C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_3 0x376970 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_4 0x376974 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_5 0x376978 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_6 0x37697C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_7 0x376980 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_8 0x376984 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_9 0x376988 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_10 0x37698C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_11 0x376990 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_12 0x376994 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_13 0x376998 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_14 0x37699C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_15 0x3769A0 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_0 0x3769A4 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_1 0x3769A8 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_2 0x3769AC + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_3 0x3769B0 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_4 0x3769B4 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_5 0x3769B8 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_6 0x3769BC + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_7 0x3769C0 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_8 0x3769C4 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_9 0x3769C8 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_10 0x3769CC + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_11 0x3769D0 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_12 0x3769D4 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_13 0x3769D8 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_14 
0x3769DC + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_15 0x3769E0 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_0 0x3769E4 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_1 0x3769E8 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_2 0x3769EC + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_3 0x3769F0 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_4 0x3769F4 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_5 0x3769F8 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_6 0x3769FC + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_7 0x376A00 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_8 0x376A04 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_9 0x376A08 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_10 0x376A0C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_11 0x376A10 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_12 0x376A14 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_13 0x376A18 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_14 0x376A1C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_15 0x376A20 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_HIT_AW 0x376A64 + +#define mmSIF_RTR_CTRL_7_RANGE_SEC_HIT_AR 0x376A68 + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_HIT_AW 0x376A6C + +#define mmSIF_RTR_CTRL_7_RANGE_PRIV_HIT_AR 0x376A70 + +#define mmSIF_RTR_CTRL_7_RGL_CFG 0x376B64 + +#define mmSIF_RTR_CTRL_7_RGL_SHIFT 0x376B68 + +#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_0 0x376B6C + +#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_1 0x376B70 + +#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_2 0x376B74 + +#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_3 0x376B78 + +#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_4 0x376B7C + +#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_5 0x376B80 + +#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_6 0x376B84 + +#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_7 0x376B88 + +#define mmSIF_RTR_CTRL_7_RGL_TOKEN_0 0x376BAC + +#define mmSIF_RTR_CTRL_7_RGL_TOKEN_1 0x376BB0 + +#define mmSIF_RTR_CTRL_7_RGL_TOKEN_2 0x376BB4 + +#define mmSIF_RTR_CTRL_7_RGL_TOKEN_3 0x376BB8 + +#define mmSIF_RTR_CTRL_7_RGL_TOKEN_4 0x376BBC + +#define mmSIF_RTR_CTRL_7_RGL_TOKEN_5 0x376BC0 + +#define mmSIF_RTR_CTRL_7_RGL_TOKEN_6 0x376BC4 + +#define mmSIF_RTR_CTRL_7_RGL_TOKEN_7 0x376BC8 + +#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_0 0x376BEC + +#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_1 0x376BF0 + +#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_2 0x376BF4 + +#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_3 0x376BF8 + +#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_4 0x376BFC + +#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_5 0x376C00 + +#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_6 0x376C04 + +#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_7 0x376C08 + +#define mmSIF_RTR_CTRL_7_RGL_WDT 0x376C2C + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM0_CH0_CTR_WRAP 0x376C30 + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM0_CH1_CTR_WRAP 0x376C34 + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM1_CH0_CTR_WRAP 0x376C38 + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM1_CH1_CTR_WRAP 0x376C3C + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM2_CH0_CTR_WRAP 0x376C40 + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM2_CH1_CTR_WRAP 0x376C44 + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM3_CH0_CTR_WRAP 0x376C48 + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM3_CH1_CTR_WRAP 0x376C4C + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM0_CH0_CTR_CNT 0x376C50 + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM0_CH1_CTR_CNT 0x376C54 + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM1_CH0_CTR_CNT 0x376C58 + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM1_CH1_CTR_CNT 0x376C5C + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM2_CH0_CTR_CNT 0x376C60 + +#define mmSIF_RTR_CTRL_7_E2E_AR_HBM2_CH1_CTR_CNT 0x376C64 + 
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM3_CH0_CTR_CNT 0x376C68
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM3_CH1_CTR_CNT 0x376C6C
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM0_CH0_CTR_WRAP 0x376C70
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM0_CH1_CTR_WRAP 0x376C74
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM1_CH0_CTR_WRAP 0x376C78
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM1_CH1_CTR_WRAP 0x376C7C
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM2_CH0_CTR_WRAP 0x376C80
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM2_CH1_CTR_WRAP 0x376C84
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM3_CH0_CTR_WRAP 0x376C88
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM3_CH1_CTR_WRAP 0x376C8C
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM0_CH0_CTR_CNT 0x376C90
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM0_CH1_CTR_CNT 0x376C94
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM1_CH0_CTR_CNT 0x376C98
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM1_CH1_CTR_CNT 0x376C9C
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM2_CH0_CTR_CNT 0x376CA0
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM2_CH1_CTR_CNT 0x376CA4
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM3_CH0_CTR_CNT 0x376CA8
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM3_CH1_CTR_CNT 0x376CAC
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_0 0x376CB0
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_1 0x376CB4
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_2 0x376CB8
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_3 0x376CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_7_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h
new file mode 100644
index 000000000..07d2a9000
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_STLB_REGS_H_
+#define ASIC_REG_STLB_REGS_H_
+
+/*
+ *****************************************
+ *   STLB (Prototype: STLB)
+ *****************************************
+ */
+
+#define mmSTLB_CACHE_INV 0xC12010
+
+#define mmSTLB_CACHE_INV_BASE_39_8 0xC12014
+
+#define mmSTLB_CACHE_INV_BASE_49_40 0xC12018
+
+#define mmSTLB_STLB_FEATURE_EN 0xC1201C
+
+#define mmSTLB_STLB_AXI_CACHE 0xC12020
+
+#define mmSTLB_HOP_CONFIGURATION 0xC12024
+
+#define mmSTLB_LINK_LIST_LOOKUP_MASK_49_32 0xC12028
+
+#define mmSTLB_LINK_LIST_LOOKUP_MASK_31_0 0xC1202C
+
+#define mmSTLB_LINK_LIST 0xC12030
+
+#define mmSTLB_INV_ALL_START 0xC12034
+
+#define mmSTLB_INV_ALL_SET 0xC12038
+
+#define mmSTLB_INV_PS 0xC1203C
+
+#define mmSTLB_INV_CONSUMER_INDEX 0xC12040
+
+#define mmSTLB_INV_HIT_COUNT 0xC12044
+
+#define mmSTLB_INV_SET 0xC12048
+
+#define mmSTLB_SRAM_INIT 0xC1204C
+
+#define mmSTLB_MEM_CACHE_INVALIDATION 0xC12050
+
+#define mmSTLB_MEM_CACHE_INV_STATUS 0xC12054
+
+#define mmSTLB_MEM_CACHE_BASE_38_7 0xC12058
+
+#define mmSTLB_MEM_CACHE_BASE_49_39 0xC1205C
+
+#define mmSTLB_MEM_CACHE_CONFIG 0xC12060
+
+#define mmSTLB_SET_THRESHOLD_HOP4 0xC12064
+
+#define mmSTLB_SET_THRESHOLD_HOP3 0xC12068
+
+#define mmSTLB_SET_THRESHOLD_HOP2 0xC1206C
+
+#define mmSTLB_SET_THRESHOLD_HOP1 0xC12070
+
+#define mmSTLB_SET_THRESHOLD_HOP0 0xC12074
+
+#define mmSTLB_MULTI_HIT_INTERRUPT_CLR 0xC12078
+
+#define mmSTLB_MULTI_HIT_INTERRUPT_MASK 0xC1207C
+
+#define mmSTLB_MEM_L0_CACHE_CFG 0xC12080
+
+#define mmSTLB_MEM_READ_ARPROT 0xC12084
+
+#endif /* ASIC_REG_STLB_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h
b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h new file mode 100644 index 000000000..8f67c11c8 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h @@ -0,0 +1,2578 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_CFG_MASKS_H_ +#define ASIC_REG_TPC0_CFG_MASKS_H_ + +/* + ***************************************** + * TPC0_CFG (Prototype: TPC) + ***************************************** + */ + +/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_MASK 
0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0 +#define 
TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define 
TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define 
TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE */ 
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE */ 
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE */ 
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_8_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_8_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_8_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_8_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_8_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_8_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_8_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_8_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 
*/ +#define TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_9_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_9_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_9_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_9_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_9_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_9_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_9_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_9_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_10_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_10_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_10_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define 
TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_10_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_10_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_10_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_10_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_10_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_11_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_11_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_11_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define 
TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_11_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_11_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_11_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_11_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_11_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_12_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_12_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_12_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_12_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* 
TPC0_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_12_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_12_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_12_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_12_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_13_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_13_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_13_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_13_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_13_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE_V_SHIFT 0 +#define 
TPC0_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_13_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_13_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_13_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_14_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_14_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_14_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_14_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_14_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_14_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_14_DIM_3_SIZE */ +#define 
TPC0_CFG_KERNEL_TENSOR_14_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_14_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_15_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_15_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_15_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_KERNEL_TENSOR_15_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_15_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_15_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_15_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_15_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_4_SIZE_V_MASK 0xFFFFFFFF + 
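+
+/*
+ * Usage sketch (illustrative only, assuming a 32-bit register value "val"
+ * read back from this block's TENSOR_CONFIG register): every field above
+ * is described by a _SHIFT/_MASK pair, so a field is extracted by masking
+ * first and shifting second, e.g.
+ *
+ *	u32 last_dim = (val & TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_LAST_DIM_MASK)
+ *			>> TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_LAST_DIM_SHIFT;
+ */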
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE */ +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_RSV_SHIFT 16 +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_RSV_MASK 0x1FFF0000 +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 29 +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0xE0000000 + +/* TPC0_CFG_KERNEL_SYNC_OBJECT_ADDR */ +#define TPC0_CFG_KERNEL_SYNC_OBJECT_ADDR_V_SHIFT 0 +#define TPC0_CFG_KERNEL_SYNC_OBJECT_ADDR_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW */ +#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH */ +#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_0 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_0 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_1 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_1 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_2 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_2 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_3 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_3 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_4 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_4 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_KERNEL_CONFIG */ +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_SHIFT 0 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_MASK 0x1 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 2 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0xFC +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_RD_RATE_LIMIT_RST_TOKEN_SHIFT 8 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_RD_RATE_LIMIT_RST_TOKEN_MASK 0xFF00 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_WR_RATE_LIMIT_RST_TOKEN_SHIFT 16 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_WR_RATE_LIMIT_RST_TOKEN_MASK 0xFF0000 + +/* TPC0_CFG_KERNEL_KERNEL_ID */ +#define TPC0_CFG_KERNEL_KERNEL_ID_V_SHIFT 0 +#define TPC0_CFG_KERNEL_KERNEL_ID_V_MASK 0xFFFF + +/* TPC0_CFG_KERNEL_SRF */ +#define TPC0_CFG_KERNEL_SRF_V_SHIFT 0 +#define TPC0_CFG_KERNEL_SRF_V_MASK 0xFFFFFFFF + +/* 
TPC0_CFG_ROUND_CSR */ +#define TPC0_CFG_ROUND_CSR_MODE_SHIFT 0 +#define TPC0_CFG_ROUND_CSR_MODE_MASK 0x7 + +/* TPC0_CFG_PROT */ +#define TPC0_CFG_PROT_AWPROT_SHIFT 0 +#define TPC0_CFG_PROT_AWPROT_MASK 0x7 +#define TPC0_CFG_PROT_ARPROT_SHIFT 3 +#define TPC0_CFG_PROT_ARPROT_MASK 0x38 + +/* TPC0_CFG_SEMAPHORE */ +#define TPC0_CFG_SEMAPHORE_V_SHIFT 0 +#define TPC0_CFG_SEMAPHORE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_VFLAGS */ +#define TPC0_CFG_VFLAGS_V_SHIFT 0 +#define TPC0_CFG_VFLAGS_V_MASK 0xF + +/* TPC0_CFG_SFLAGS */ +#define TPC0_CFG_SFLAGS_V_SHIFT 0 +#define TPC0_CFG_SFLAGS_V_MASK 0xF + +/* TPC0_CFG_LFSR_POLYNOM */ +#define TPC0_CFG_LFSR_POLYNOM_V_SHIFT 0 +#define TPC0_CFG_LFSR_POLYNOM_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_STATUS */ +#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_SHIFT 1 +#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK 0x2 +#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_SHIFT 2 +#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK 0x4 +#define TPC0_CFG_STATUS_IQ_EMPTY_SHIFT 3 +#define TPC0_CFG_STATUS_IQ_EMPTY_MASK 0x8 +#define TPC0_CFG_STATUS_SB_EMPTY_SHIFT 5 +#define TPC0_CFG_STATUS_SB_EMPTY_MASK 0x20 +#define TPC0_CFG_STATUS_QM_IDLE_SHIFT 6 +#define TPC0_CFG_STATUS_QM_IDLE_MASK 0x40 +#define TPC0_CFG_STATUS_QM_RDY_SHIFT 7 +#define TPC0_CFG_STATUS_QM_RDY_MASK 0x80 + +/* TPC0_CFG_CFG_BASE_ADDRESS_HIGH */ +#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_CFG_SUBTRACT_VALUE */ +#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_SHIFT 0 +#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_SM_BASE_ADDRESS_HIGH */ +#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_TPC_CMD */ +#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT 0 +#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_MASK 0x1 +#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_SHIFT 1 +#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_MASK 0x2 +#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_SHIFT 2 +#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_MASK 0x4 +#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_SHIFT 3 +#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_MASK 0x8 +#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT 4 +#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_MASK 0x10 +#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_SHIFT 5 +#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_MASK 0x20 +#define TPC0_CFG_TPC_CMD_QMAN_STOP_SHIFT 6 +#define TPC0_CFG_TPC_CMD_QMAN_STOP_MASK 0x40 + +/* TPC0_CFG_TPC_EXECUTE */ +#define TPC0_CFG_TPC_EXECUTE_V_SHIFT 0 +#define TPC0_CFG_TPC_EXECUTE_V_MASK 0x1 + +/* TPC0_CFG_TPC_STALL */ +#define TPC0_CFG_TPC_STALL_V_SHIFT 0 +#define TPC0_CFG_TPC_STALL_V_MASK 0x1 + +/* TPC0_CFG_ICACHE_BASE_ADDERESS_LOW */ +#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_SHIFT 0 +#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH */ +#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_RD_RATE_LIMIT */ +#define TPC0_CFG_RD_RATE_LIMIT_ENABLE_SHIFT 0 +#define TPC0_CFG_RD_RATE_LIMIT_ENABLE_MASK 0x1 +#define TPC0_CFG_RD_RATE_LIMIT_SATURATION_SHIFT 1 +#define TPC0_CFG_RD_RATE_LIMIT_SATURATION_MASK 0x1FE +#define TPC0_CFG_RD_RATE_LIMIT_TIMEOUT_SHIFT 9 +#define TPC0_CFG_RD_RATE_LIMIT_TIMEOUT_MASK 0x1FE00 + +/* TPC0_CFG_WR_RATE_LIMIT */ +#define TPC0_CFG_WR_RATE_LIMIT_ENABLE_SHIFT 0 +#define TPC0_CFG_WR_RATE_LIMIT_ENABLE_MASK 0x1 +#define TPC0_CFG_WR_RATE_LIMIT_SATURATION_SHIFT 1 +#define 
TPC0_CFG_WR_RATE_LIMIT_SATURATION_MASK 0x1FE +#define TPC0_CFG_WR_RATE_LIMIT_TIMEOUT_SHIFT 9 +#define TPC0_CFG_WR_RATE_LIMIT_TIMEOUT_MASK 0x1FE00 + +/* TPC0_CFG_MSS_CONFIG */ +#define TPC0_CFG_MSS_CONFIG_AWCACHE_SHIFT 0 +#define TPC0_CFG_MSS_CONFIG_AWCACHE_MASK 0xF +#define TPC0_CFG_MSS_CONFIG_ARCACHE_SHIFT 4 +#define TPC0_CFG_MSS_CONFIG_ARCACHE_MASK 0xF0 +#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_SHIFT 8 +#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_MASK 0x300 +#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_SHIFT 10 +#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_MASK 0x400 +#define TPC0_CFG_MSS_CONFIG_DCACHE_PREFETCH_DIS_SHIFT 11 +#define TPC0_CFG_MSS_CONFIG_DCACHE_PREFETCH_DIS_MASK 0x800 + +/* TPC0_CFG_TPC_INTR_CAUSE */ +#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_SHIFT 0 +#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK 0xFFFFF + +/* TPC0_CFG_TPC_INTR_MASK */ +#define TPC0_CFG_TPC_INTR_MASK_MASK_SHIFT 0 +#define TPC0_CFG_TPC_INTR_MASK_MASK_MASK 0xFFFFF + +/* TPC0_CFG_WQ_CREDITS */ +#define TPC0_CFG_WQ_CREDITS_ST_G_SHIFT 0 +#define TPC0_CFG_WQ_CREDITS_ST_G_MASK 0xF +#define TPC0_CFG_WQ_CREDITS_KERNEL_FIFO_SHIFT 4 +#define TPC0_CFG_WQ_CREDITS_KERNEL_FIFO_MASK 0x70 + +/* TPC0_CFG_ARUSER_LO */ +#define TPC0_CFG_ARUSER_LO_V_SHIFT 0 +#define TPC0_CFG_ARUSER_LO_V_MASK 0x7FF + +/* TPC0_CFG_ARUSER_HI */ +#define TPC0_CFG_ARUSER_HI_V_SHIFT 11 +#define TPC0_CFG_ARUSER_HI_V_MASK 0x1800 +#define TPC0_CFG_ARUSER_HI_RSRV_SHIFT 13 +#define TPC0_CFG_ARUSER_HI_RSRV_MASK 0xFFFFE000 + +/* TPC0_CFG_AWUSER_LO */ +#define TPC0_CFG_AWUSER_LO_V_SHIFT 0 +#define TPC0_CFG_AWUSER_LO_V_MASK 0x7FF + +/* TPC0_CFG_AWUSER_HI */ +#define TPC0_CFG_AWUSER_HI_V_SHIFT 11 +#define TPC0_CFG_AWUSER_HI_V_MASK 0x1800 +#define TPC0_CFG_AWUSER_HI_RSRV_SHIFT 13 +#define TPC0_CFG_AWUSER_HI_RSRV_MASK 0xFFFFE000 + +/* TPC0_CFG_OPCODE_EXEC */ +#define TPC0_CFG_OPCODE_EXEC_SPU_OP_SHIFT 0 +#define TPC0_CFG_OPCODE_EXEC_SPU_OP_MASK 0x7F +#define TPC0_CFG_OPCODE_EXEC_SPU_EN_SHIFT 7 +#define TPC0_CFG_OPCODE_EXEC_SPU_EN_MASK 0x80 +#define TPC0_CFG_OPCODE_EXEC_VPU_OP_SHIFT 8 +#define TPC0_CFG_OPCODE_EXEC_VPU_OP_MASK 0x7F00 +#define TPC0_CFG_OPCODE_EXEC_VPU_EN_SHIFT 15 +#define TPC0_CFG_OPCODE_EXEC_VPU_EN_MASK 0x8000 +#define TPC0_CFG_OPCODE_EXEC_LD_OP_SHIFT 16 +#define TPC0_CFG_OPCODE_EXEC_LD_OP_MASK 0x7F0000 +#define TPC0_CFG_OPCODE_EXEC_LD_EN_SHIFT 23 +#define TPC0_CFG_OPCODE_EXEC_LD_EN_MASK 0x800000 +#define TPC0_CFG_OPCODE_EXEC_ST_OP_SHIFT 24 +#define TPC0_CFG_OPCODE_EXEC_ST_OP_MASK 0x7F000000 +#define TPC0_CFG_OPCODE_EXEC_ST_EN_SHIFT 31 +#define TPC0_CFG_OPCODE_EXEC_ST_EN_MASK 0x80000000 + +/* TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO */ +#define TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO_V_SHIFT 0 +#define TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI */ +#define TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI_V_SHIFT 0 +#define TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO */ +#define TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO_V_SHIFT 0 +#define TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI */ +#define TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI_V_SHIFT 0 +#define TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO */ +#define TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO_V_SHIFT 0 +#define TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI */ +#define TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI_V_SHIFT 0 +#define TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO 
*/ +#define TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO_V_SHIFT 0 +#define TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI */ +#define TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI_V_SHIFT 0 +#define TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_TSB_CFG_MAX_SIZE */ +#define TPC0_CFG_TSB_CFG_MAX_SIZE_DATA_SHIFT 0 +#define TPC0_CFG_TSB_CFG_MAX_SIZE_DATA_MASK 0xFFFF +#define TPC0_CFG_TSB_CFG_MAX_SIZE_MD_SHIFT 16 +#define TPC0_CFG_TSB_CFG_MAX_SIZE_MD_MASK 0xFFFF0000 + +/* TPC0_CFG_TSB_CFG */ +#define TPC0_CFG_TSB_CFG_FORCE_MISS_SHIFT 0 +#define TPC0_CFG_TSB_CFG_FORCE_MISS_MASK 0x1 +#define TPC0_CFG_TSB_CFG_MAX_OS_SHIFT 1 +#define TPC0_CFG_TSB_CFG_MAX_OS_MASK 0x1FFFE + +/* TPC0_CFG_DBGMEM_ADD */ +#define TPC0_CFG_DBGMEM_ADD_V_SHIFT 0 +#define TPC0_CFG_DBGMEM_ADD_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_DBGMEM_DATA_WR */ +#define TPC0_CFG_DBGMEM_DATA_WR_V_SHIFT 0 +#define TPC0_CFG_DBGMEM_DATA_WR_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_DBGMEM_DATA_RD */ +#define TPC0_CFG_DBGMEM_DATA_RD_V_SHIFT 0 +#define TPC0_CFG_DBGMEM_DATA_RD_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_DBGMEM_CTRL */ +#define TPC0_CFG_DBGMEM_CTRL_WR_NRD_SHIFT 0 +#define TPC0_CFG_DBGMEM_CTRL_WR_NRD_MASK 0x1 + +/* TPC0_CFG_DBGMEM_RC */ +#define TPC0_CFG_DBGMEM_RC_VALID_SHIFT 0 +#define TPC0_CFG_DBGMEM_RC_VALID_MASK 0x1 + +/* TPC0_CFG_TSB_INFLIGHT_CNTR */ +#define TPC0_CFG_TSB_INFLIGHT_CNTR_V_SHIFT 0 +#define TPC0_CFG_TSB_INFLIGHT_CNTR_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_WQ_INFLIGHT_CNTR */ +#define TPC0_CFG_WQ_INFLIGHT_CNTR_HBW_SHIFT 0 +#define TPC0_CFG_WQ_INFLIGHT_CNTR_HBW_MASK 0xFFFF +#define TPC0_CFG_WQ_INFLIGHT_CNTR_LBW_SHIFT 16 +#define TPC0_CFG_WQ_INFLIGHT_CNTR_LBW_MASK 0xF0000 + +/* TPC0_CFG_WQ_LBW_TOTAL_CNTR */ +#define TPC0_CFG_WQ_LBW_TOTAL_CNTR_V_SHIFT 0 +#define TPC0_CFG_WQ_LBW_TOTAL_CNTR_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_WQ_HBW_TOTAL_CNTR */ +#define TPC0_CFG_WQ_HBW_TOTAL_CNTR_V_SHIFT 0 +#define TPC0_CFG_WQ_HBW_TOTAL_CNTR_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_IRQ_OCCOUPY_CNTR */ +#define TPC0_CFG_IRQ_OCCOUPY_CNTR_V_SHIFT 0 +#define TPC0_CFG_IRQ_OCCOUPY_CNTR_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_FUNC_MBIST_CNTRL */ +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT 0 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_MASK 0x1 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_SHIFT 1 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK 0x2 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_SHIFT 2 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK 0x4 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_SHIFT 16 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_MASK 0x3FF0000 + +/* TPC0_CFG_FUNC_MBIST_PAT */ +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_SHIFT 0 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_MASK 0x3 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_SHIFT 2 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_MASK 0xC +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_SHIFT 4 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_MASK 0x30 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_SHIFT 6 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_MASK 0xC0 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_SHIFT 8 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_MASK 0x300 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_SHIFT 10 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_MASK 0xC00 + +/* TPC0_CFG_FUNC_MBIST_MEM */ +#define TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_SHIFT 0 +#define TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_MASK 0x7FF +#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_SHIFT 12 +#define 
TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_MASK 0x7000 +#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_SHIFT 16 +#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_MASK 0x7FF0000 +#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_SHIFT 28 +#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_MASK 0x70000000 + +/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_PADDING_VALUE */ +#define 
TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define 
TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF + 
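+
+/*
+ * Usage sketch (illustrative only; "data_type" and "last_dim" are made-up
+ * placeholder values, not hardware defaults): a TENSOR_CONFIG word is
+ * composed by shifting each field into place and masking it to its width:
+ *
+ *	u32 cfg = ((data_type << TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT)
+ *			& TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK)
+ *		| ((last_dim << TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT)
+ *			& TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK);
+ */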
+/* TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0 
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* 
TPC0_CFG_QM_TENSOR_6_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_SET_MASK 
0x80000 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_8_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_8_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_8_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_8_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_8_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_8_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_8_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_8_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_8_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_8_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_8_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_8_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_8_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_8_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_8_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_8_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_8_DIM_1_SIZE_V_SHIFT 0 +#define 
TPC0_CFG_QM_TENSOR_8_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_8_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_8_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_8_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_8_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_8_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_8_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_8_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_8_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_8_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_8_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_8_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_8_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_8_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_8_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_8_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_8_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_8_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_8_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_8_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_8_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_8_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_9_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_9_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_9_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_9_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_9_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_9_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_9_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_9_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_9_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_9_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_9_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_9_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_9_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_9_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_9_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_9_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_9_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_9_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_9_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_9_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_9_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_9_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_9_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_9_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_9_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_9_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_9_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_9_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_9_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_9_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_9_DIM_3_STRIDE */ 
+#define TPC0_CFG_QM_TENSOR_9_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_9_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_9_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_9_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_9_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_9_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_9_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_9_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_10_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_10_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_10_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_10_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_10_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_10_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_10_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_10_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_10_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_10_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_10_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_10_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_10_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_10_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_10_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_10_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_10_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_10_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_10_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_10_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_10_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_10_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_10_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_10_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_10_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_10_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_10_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_10_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_10_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_10_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_10_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_10_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_10_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_10_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_10_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_10_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_10_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_10_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_10_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_11_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_11_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_11_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_11_BASE_ADDR_HIGH */ +#define 
TPC0_CFG_QM_TENSOR_11_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_11_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_11_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_11_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_11_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_11_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_11_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_11_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_11_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_11_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_11_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_11_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_11_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_11_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_11_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_11_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_11_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_11_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_11_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_11_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_11_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_11_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_11_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_11_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_11_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_11_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_11_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_11_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_11_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_11_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_11_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_11_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_11_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_11_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_11_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_12_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_12_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_12_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_12_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_12_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_12_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_12_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_12_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_12_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define 
TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_12_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_12_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_12_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_12_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_12_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_12_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_12_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_12_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_12_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_12_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_12_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_12_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_12_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_12_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_12_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_12_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_12_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_12_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_12_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_12_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_12_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_12_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_12_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_12_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_12_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_12_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_12_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_12_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_12_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_12_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_13_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_13_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_13_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_13_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_13_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_13_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_13_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_13_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_13_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_13_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_13_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_13_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_13_DIM_0_STRIDE */ +#define 
TPC0_CFG_QM_TENSOR_13_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_13_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_13_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_13_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_13_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_13_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_13_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_13_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_13_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_13_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_13_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_13_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_13_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_13_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_13_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_13_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_13_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_13_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_13_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_13_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_13_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_13_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_13_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_13_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_13_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_13_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_14_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_14_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_14_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_14_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_14_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_14_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_14_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_14_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_14_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_14_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_14_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_14_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_14_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_14_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_14_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_14_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_14_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_14_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_14_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_14_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_14_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_14_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_14_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_14_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_14_DIM_2_STRIDE */ +#define 
TPC0_CFG_QM_TENSOR_14_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_14_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_14_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_14_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_14_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_14_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_14_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_14_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_14_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_14_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_14_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_14_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_14_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_14_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_15_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_15_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_15_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_15_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_15_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_15_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_15_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_15_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_15_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_DATA_TYPE_MASK 0x7 +#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 +#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_SET_SHIFT 19 +#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_SET_MASK 0x80000 +#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_RESERV_SHIFT 20 +#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000 +#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_OP_SHIFT 21 +#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_OP_MASK 0x600000 + +/* TPC0_CFG_QM_TENSOR_15_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_15_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_15_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_15_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_15_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_15_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_15_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_15_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_15_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_15_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_15_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_15_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_15_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_15_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_15_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_15_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_15_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_15_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_15_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_15_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_15_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_15_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_15_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_15_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_15_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_15_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_15_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_15_DIM_4_STRIDE */ +#define 
TPC0_CFG_QM_TENSOR_15_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_15_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_SYNC_OBJECT_MESSAGE */ +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_RSV_SHIFT 16 +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_RSV_MASK 0x1FFF0000 +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 29 +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0xE0000000 + +/* TPC0_CFG_QM_SYNC_OBJECT_ADDR */ +#define TPC0_CFG_QM_SYNC_OBJECT_ADDR_V_SHIFT 0 +#define TPC0_CFG_QM_SYNC_OBJECT_ADDR_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW */ +#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH */ +#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_0 */ +#define TPC0_CFG_QM_TID_BASE_DIM_0_V_SHIFT 0 +#define TPC0_CFG_QM_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_0 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_1 */ +#define TPC0_CFG_QM_TID_BASE_DIM_1_V_SHIFT 0 +#define TPC0_CFG_QM_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_1 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_2 */ +#define TPC0_CFG_QM_TID_BASE_DIM_2_V_SHIFT 0 +#define TPC0_CFG_QM_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_2 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_3 */ +#define TPC0_CFG_QM_TID_BASE_DIM_3_V_SHIFT 0 +#define TPC0_CFG_QM_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_3 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_4 */ +#define TPC0_CFG_QM_TID_BASE_DIM_4_V_SHIFT 0 +#define TPC0_CFG_QM_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_4 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_KERNEL_CONFIG */ +#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_SHIFT 0 +#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_MASK 0x1 +#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1 +#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2 +#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 2 +#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0xFC +#define TPC0_CFG_QM_KERNEL_CONFIG_RD_RATE_LIMIT_RST_TOKEN_SHIFT 8 +#define TPC0_CFG_QM_KERNEL_CONFIG_RD_RATE_LIMIT_RST_TOKEN_MASK 0xFF00 +#define TPC0_CFG_QM_KERNEL_CONFIG_WR_RATE_LIMIT_RST_TOKEN_SHIFT 16 +#define TPC0_CFG_QM_KERNEL_CONFIG_WR_RATE_LIMIT_RST_TOKEN_MASK 0xFF0000 + +/* TPC0_CFG_QM_KERNEL_ID */ +#define TPC0_CFG_QM_KERNEL_ID_V_SHIFT 0 +#define TPC0_CFG_QM_KERNEL_ID_V_MASK 0xFFFF + +/* TPC0_CFG_QM_SRF */ +#define TPC0_CFG_QM_SRF_V_SHIFT 0 +#define TPC0_CFG_QM_SRF_V_MASK 0xFFFFFFFF + +#endif /* ASIC_REG_TPC0_CFG_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h new file mode 100644 index 000000000..b82a90626 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h @@ -0,0 
+1,1226 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_CFG_REGS_H_ +#define ASIC_REG_TPC0_CFG_REGS_H_ + +/* + ***************************************** + * TPC0_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE06400 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE06404 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE06408 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE0640C + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE06410 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE06414 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE06418 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE0641C + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE06420 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE06424 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE06428 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE0642C + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE06430 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE06434 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE06438 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE0643C + +#define mmTPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE06440 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE06444 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE06448 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE0644C + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE06450 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE06454 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE06458 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE0645C + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE06460 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE06464 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE06468 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE0646C + +#define mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE06470 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE06474 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE06478 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE0647C + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE06480 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE06484 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE06488 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE0648C + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE06490 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE06494 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE06498 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE0649C + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE064A0 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE064A4 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE064A8 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE064AC + +#define mmTPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE064B0 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE064B4 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE064B8 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE064BC + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE064C0 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE064C4 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE064C8 + +#define 
mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE064CC + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE064D0 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE064D4 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE064D8 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE064DC + +#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE064E0 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE064E4 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE064E8 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE064EC + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE064F0 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE064F4 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE064F8 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE064FC + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE06500 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE06504 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE06508 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE0650C + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE06510 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE06514 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE06518 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE0651C + +#define mmTPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE06520 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE06524 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE06528 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE0652C + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE06530 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE06534 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE06538 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE0653C + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE06540 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE06544 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE06548 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE0654C + +#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE06550 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE06554 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE06558 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE0655C + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE06560 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE06564 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE06568 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE0656C + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE06570 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE06574 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE06578 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE0657C + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE06580 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE06584 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE06588 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE0658C + +#define mmTPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE06590 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE06594 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE06598 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE0659C + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE065A0 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE065A4 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE065A8 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE065AC + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE065B0 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE065B4 + +#define 
mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE065B8 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE065BC + +#define mmTPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xE065C0 + +#define mmTPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xE065C4 + +#define mmTPC0_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xE065C8 + +#define mmTPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xE065CC + +#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xE065D0 + +#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xE065D4 + +#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xE065D8 + +#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xE065DC + +#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xE065E0 + +#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xE065E4 + +#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xE065E8 + +#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xE065EC + +#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xE065F0 + +#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xE065F4 + +#define mmTPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xE065F8 + +#define mmTPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xE065FC + +#define mmTPC0_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xE06600 + +#define mmTPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xE06604 + +#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xE06608 + +#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xE0660C + +#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xE06610 + +#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xE06614 + +#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xE06618 + +#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xE0661C + +#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xE06620 + +#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xE06624 + +#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xE06628 + +#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xE0662C + +#define mmTPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xE06630 + +#define mmTPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xE06634 + +#define mmTPC0_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xE06638 + +#define mmTPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xE0663C + +#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xE06640 + +#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xE06644 + +#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xE06648 + +#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xE0664C + +#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xE06650 + +#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xE06654 + +#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xE06658 + +#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xE0665C + +#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xE06660 + +#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xE06664 + +#define mmTPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xE06668 + +#define mmTPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xE0666C + +#define mmTPC0_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xE06670 + +#define mmTPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xE06674 + +#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xE06678 + +#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xE0667C + +#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xE06680 + +#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xE06684 + +#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xE06688 + +#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xE0668C + +#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xE06690 + +#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xE06694 + +#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xE06698 + +#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xE0669C + +#define mmTPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xE066A0 + +#define 
mmTPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xE066A4 + +#define mmTPC0_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xE066A8 + +#define mmTPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xE066AC + +#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xE066B0 + +#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xE066B4 + +#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xE066B8 + +#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xE066BC + +#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xE066C0 + +#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xE066C4 + +#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xE066C8 + +#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xE066CC + +#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xE066D0 + +#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xE066D4 + +#define mmTPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xE066D8 + +#define mmTPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xE066DC + +#define mmTPC0_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xE066E0 + +#define mmTPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xE066E4 + +#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xE066E8 + +#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xE066EC + +#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xE066F0 + +#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xE066F4 + +#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xE066F8 + +#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xE066FC + +#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xE06700 + +#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xE06704 + +#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xE06708 + +#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xE0670C + +#define mmTPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xE06710 + +#define mmTPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xE06714 + +#define mmTPC0_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xE06718 + +#define mmTPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xE0671C + +#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xE06720 + +#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xE06724 + +#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xE06728 + +#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xE0672C + +#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xE06730 + +#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xE06734 + +#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xE06738 + +#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xE0673C + +#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xE06740 + +#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xE06744 + +#define mmTPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xE06748 + +#define mmTPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xE0674C + +#define mmTPC0_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xE06750 + +#define mmTPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xE06754 + +#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xE06758 + +#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xE0675C + +#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xE06760 + +#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xE06764 + +#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xE06768 + +#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xE0676C + +#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xE06770 + +#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xE06774 + +#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xE06778 + +#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xE0677C + +#define mmTPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE06780 + +#define mmTPC0_CFG_KERNEL_SYNC_OBJECT_ADDR 0xE06784 + +#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE06788 + +#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 
0xE0678C + +#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_0 0xE06790 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_0 0xE06794 + +#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_1 0xE06798 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_1 0xE0679C + +#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_2 0xE067A0 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_2 0xE067A4 + +#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_3 0xE067A8 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_3 0xE067AC + +#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_4 0xE067B0 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_4 0xE067B4 + +#define mmTPC0_CFG_KERNEL_KERNEL_CONFIG 0xE067B8 + +#define mmTPC0_CFG_KERNEL_KERNEL_ID 0xE067BC + +#define mmTPC0_CFG_KERNEL_SRF_0 0xE067C0 + +#define mmTPC0_CFG_KERNEL_SRF_1 0xE067C4 + +#define mmTPC0_CFG_KERNEL_SRF_2 0xE067C8 + +#define mmTPC0_CFG_KERNEL_SRF_3 0xE067CC + +#define mmTPC0_CFG_KERNEL_SRF_4 0xE067D0 + +#define mmTPC0_CFG_KERNEL_SRF_5 0xE067D4 + +#define mmTPC0_CFG_KERNEL_SRF_6 0xE067D8 + +#define mmTPC0_CFG_KERNEL_SRF_7 0xE067DC + +#define mmTPC0_CFG_KERNEL_SRF_8 0xE067E0 + +#define mmTPC0_CFG_KERNEL_SRF_9 0xE067E4 + +#define mmTPC0_CFG_KERNEL_SRF_10 0xE067E8 + +#define mmTPC0_CFG_KERNEL_SRF_11 0xE067EC + +#define mmTPC0_CFG_KERNEL_SRF_12 0xE067F0 + +#define mmTPC0_CFG_KERNEL_SRF_13 0xE067F4 + +#define mmTPC0_CFG_KERNEL_SRF_14 0xE067F8 + +#define mmTPC0_CFG_KERNEL_SRF_15 0xE067FC + +#define mmTPC0_CFG_KERNEL_SRF_16 0xE06800 + +#define mmTPC0_CFG_KERNEL_SRF_17 0xE06804 + +#define mmTPC0_CFG_KERNEL_SRF_18 0xE06808 + +#define mmTPC0_CFG_KERNEL_SRF_19 0xE0680C + +#define mmTPC0_CFG_KERNEL_SRF_20 0xE06810 + +#define mmTPC0_CFG_KERNEL_SRF_21 0xE06814 + +#define mmTPC0_CFG_KERNEL_SRF_22 0xE06818 + +#define mmTPC0_CFG_KERNEL_SRF_23 0xE0681C + +#define mmTPC0_CFG_KERNEL_SRF_24 0xE06820 + +#define mmTPC0_CFG_KERNEL_SRF_25 0xE06824 + +#define mmTPC0_CFG_KERNEL_SRF_26 0xE06828 + +#define mmTPC0_CFG_KERNEL_SRF_27 0xE0682C + +#define mmTPC0_CFG_KERNEL_SRF_28 0xE06830 + +#define mmTPC0_CFG_KERNEL_SRF_29 0xE06834 + +#define mmTPC0_CFG_KERNEL_SRF_30 0xE06838 + +#define mmTPC0_CFG_KERNEL_SRF_31 0xE0683C + +#define mmTPC0_CFG_ROUND_CSR 0xE068FC + +#define mmTPC0_CFG_PROT 0xE06900 + +#define mmTPC0_CFG_SEMAPHORE 0xE06908 + +#define mmTPC0_CFG_VFLAGS 0xE0690C + +#define mmTPC0_CFG_SFLAGS 0xE06910 + +#define mmTPC0_CFG_LFSR_POLYNOM 0xE06918 + +#define mmTPC0_CFG_STATUS 0xE0691C + +#define mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH 0xE06920 + +#define mmTPC0_CFG_CFG_SUBTRACT_VALUE 0xE06924 + +#define mmTPC0_CFG_SM_BASE_ADDRESS_HIGH 0xE0692C + +#define mmTPC0_CFG_TPC_CMD 0xE06930 + +#define mmTPC0_CFG_TPC_EXECUTE 0xE06938 + +#define mmTPC0_CFG_TPC_STALL 0xE0693C + +#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_LOW 0xE06940 + +#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE06944 + +#define mmTPC0_CFG_RD_RATE_LIMIT 0xE06948 + +#define mmTPC0_CFG_WR_RATE_LIMIT 0xE06950 + +#define mmTPC0_CFG_MSS_CONFIG 0xE06954 + +#define mmTPC0_CFG_TPC_INTR_CAUSE 0xE06958 + +#define mmTPC0_CFG_TPC_INTR_MASK 0xE0695C + +#define mmTPC0_CFG_WQ_CREDITS 0xE06960 + +#define mmTPC0_CFG_ARUSER_LO 0xE06964 + +#define mmTPC0_CFG_ARUSER_HI 0xE06968 + +#define mmTPC0_CFG_AWUSER_LO 0xE0696C + +#define mmTPC0_CFG_AWUSER_HI 0xE06970 + +#define mmTPC0_CFG_OPCODE_EXEC 0xE06974 + +#define mmTPC0_CFG_LUT_FUNC32_BASE_ADDR_LO 0xE06978 + +#define mmTPC0_CFG_LUT_FUNC32_BASE_ADDR_HI 0xE0697C + +#define mmTPC0_CFG_LUT_FUNC64_BASE_ADDR_LO 0xE06980 + +#define mmTPC0_CFG_LUT_FUNC64_BASE_ADDR_HI 0xE06984 + +#define mmTPC0_CFG_LUT_FUNC128_BASE_ADDR_LO 0xE06988 + +#define mmTPC0_CFG_LUT_FUNC128_BASE_ADDR_HI 0xE0698C + +#define 
mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_LO 0xE06990 + +#define mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_HI 0xE06994 + +#define mmTPC0_CFG_TSB_CFG_MAX_SIZE 0xE06998 + +#define mmTPC0_CFG_TSB_CFG 0xE0699C + +#define mmTPC0_CFG_DBGMEM_ADD 0xE069A0 + +#define mmTPC0_CFG_DBGMEM_DATA_WR 0xE069A4 + +#define mmTPC0_CFG_DBGMEM_DATA_RD 0xE069A8 + +#define mmTPC0_CFG_DBGMEM_CTRL 0xE069AC + +#define mmTPC0_CFG_DBGMEM_RC 0xE069B0 + +#define mmTPC0_CFG_TSB_INFLIGHT_CNTR 0xE069B4 + +#define mmTPC0_CFG_WQ_INFLIGHT_CNTR 0xE069B8 + +#define mmTPC0_CFG_WQ_LBW_TOTAL_CNTR 0xE069BC + +#define mmTPC0_CFG_WQ_HBW_TOTAL_CNTR 0xE069C0 + +#define mmTPC0_CFG_IRQ_OCCOUPY_CNTR 0xE069C4 + +#define mmTPC0_CFG_FUNC_MBIST_CNTRL 0xE069D0 + +#define mmTPC0_CFG_FUNC_MBIST_PAT 0xE069D4 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_0 0xE069D8 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_1 0xE069DC + +#define mmTPC0_CFG_FUNC_MBIST_MEM_2 0xE069E0 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_3 0xE069E4 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_4 0xE069E8 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_5 0xE069EC + +#define mmTPC0_CFG_FUNC_MBIST_MEM_6 0xE069F0 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_7 0xE069F4 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_8 0xE069F8 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_9 0xE069FC + +#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE06A00 + +#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE06A04 + +#define mmTPC0_CFG_QM_TENSOR_0_PADDING_VALUE 0xE06A08 + +#define mmTPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE06A0C + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE06A10 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE06A14 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE06A18 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE06A1C + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE06A20 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE06A24 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE06A28 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE06A2C + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE06A30 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE06A34 + +#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE06A38 + +#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE06A3C + +#define mmTPC0_CFG_QM_TENSOR_1_PADDING_VALUE 0xE06A40 + +#define mmTPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE06A44 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE06A48 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE06A4C + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE06A50 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE06A54 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE06A58 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE06A5C + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE06A60 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE06A64 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE06A68 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE06A6C + +#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE06A70 + +#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE06A74 + +#define mmTPC0_CFG_QM_TENSOR_2_PADDING_VALUE 0xE06A78 + +#define mmTPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE06A7C + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE06A80 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE06A84 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE06A88 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE06A8C + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE06A90 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE06A94 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE06A98 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE06A9C + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE06AA0 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE 
0xE06AA4 + +#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE06AA8 + +#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE06AAC + +#define mmTPC0_CFG_QM_TENSOR_3_PADDING_VALUE 0xE06AB0 + +#define mmTPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE06AB4 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE06AB8 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE06ABC + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE06AC0 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE06AC4 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE06AC8 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE06ACC + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE06AD0 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE06AD4 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE06AD8 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE06ADC + +#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE06AE0 + +#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE06AE4 + +#define mmTPC0_CFG_QM_TENSOR_4_PADDING_VALUE 0xE06AE8 + +#define mmTPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE06AEC + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE06AF0 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE06AF4 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE06AF8 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE06AFC + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE06B00 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE06B04 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE06B08 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE06B0C + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE06B10 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE06B14 + +#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE06B18 + +#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE06B1C + +#define mmTPC0_CFG_QM_TENSOR_5_PADDING_VALUE 0xE06B20 + +#define mmTPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE06B24 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE06B28 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE06B2C + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE06B30 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE06B34 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE06B38 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE06B3C + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE06B40 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE06B44 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE06B48 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE06B4C + +#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE06B50 + +#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE06B54 + +#define mmTPC0_CFG_QM_TENSOR_6_PADDING_VALUE 0xE06B58 + +#define mmTPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE06B5C + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE06B60 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE06B64 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE06B68 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE06B6C + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE06B70 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE06B74 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE06B78 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE06B7C + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE06B80 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE06B84 + +#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE06B88 + +#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE06B8C + +#define mmTPC0_CFG_QM_TENSOR_7_PADDING_VALUE 0xE06B90 + +#define mmTPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE06B94 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE06B98 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE06B9C + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE06BA0 + +#define 
mmTPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE06BA4 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE06BA8 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE06BAC + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE06BB0 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE06BB4 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE06BB8 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE06BBC + +#define mmTPC0_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xE06BC0 + +#define mmTPC0_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xE06BC4 + +#define mmTPC0_CFG_QM_TENSOR_8_PADDING_VALUE 0xE06BC8 + +#define mmTPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xE06BCC + +#define mmTPC0_CFG_QM_TENSOR_8_DIM_0_SIZE 0xE06BD0 + +#define mmTPC0_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xE06BD4 + +#define mmTPC0_CFG_QM_TENSOR_8_DIM_1_SIZE 0xE06BD8 + +#define mmTPC0_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xE06BDC + +#define mmTPC0_CFG_QM_TENSOR_8_DIM_2_SIZE 0xE06BE0 + +#define mmTPC0_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xE06BE4 + +#define mmTPC0_CFG_QM_TENSOR_8_DIM_3_SIZE 0xE06BE8 + +#define mmTPC0_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xE06BEC + +#define mmTPC0_CFG_QM_TENSOR_8_DIM_4_SIZE 0xE06BF0 + +#define mmTPC0_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xE06BF4 + +#define mmTPC0_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xE06BF8 + +#define mmTPC0_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xE06BFC + +#define mmTPC0_CFG_QM_TENSOR_9_PADDING_VALUE 0xE06C00 + +#define mmTPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xE06C04 + +#define mmTPC0_CFG_QM_TENSOR_9_DIM_0_SIZE 0xE06C08 + +#define mmTPC0_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xE06C0C + +#define mmTPC0_CFG_QM_TENSOR_9_DIM_1_SIZE 0xE06C10 + +#define mmTPC0_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xE06C14 + +#define mmTPC0_CFG_QM_TENSOR_9_DIM_2_SIZE 0xE06C18 + +#define mmTPC0_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xE06C1C + +#define mmTPC0_CFG_QM_TENSOR_9_DIM_3_SIZE 0xE06C20 + +#define mmTPC0_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xE06C24 + +#define mmTPC0_CFG_QM_TENSOR_9_DIM_4_SIZE 0xE06C28 + +#define mmTPC0_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xE06C2C + +#define mmTPC0_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xE06C30 + +#define mmTPC0_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xE06C34 + +#define mmTPC0_CFG_QM_TENSOR_10_PADDING_VALUE 0xE06C38 + +#define mmTPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xE06C3C + +#define mmTPC0_CFG_QM_TENSOR_10_DIM_0_SIZE 0xE06C40 + +#define mmTPC0_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xE06C44 + +#define mmTPC0_CFG_QM_TENSOR_10_DIM_1_SIZE 0xE06C48 + +#define mmTPC0_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xE06C4C + +#define mmTPC0_CFG_QM_TENSOR_10_DIM_2_SIZE 0xE06C50 + +#define mmTPC0_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xE06C54 + +#define mmTPC0_CFG_QM_TENSOR_10_DIM_3_SIZE 0xE06C58 + +#define mmTPC0_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xE06C5C + +#define mmTPC0_CFG_QM_TENSOR_10_DIM_4_SIZE 0xE06C60 + +#define mmTPC0_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xE06C64 + +#define mmTPC0_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xE06C68 + +#define mmTPC0_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xE06C6C + +#define mmTPC0_CFG_QM_TENSOR_11_PADDING_VALUE 0xE06C70 + +#define mmTPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xE06C74 + +#define mmTPC0_CFG_QM_TENSOR_11_DIM_0_SIZE 0xE06C78 + +#define mmTPC0_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xE06C7C + +#define mmTPC0_CFG_QM_TENSOR_11_DIM_1_SIZE 0xE06C80 + +#define mmTPC0_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xE06C84 + +#define mmTPC0_CFG_QM_TENSOR_11_DIM_2_SIZE 0xE06C88 + +#define mmTPC0_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xE06C8C + +#define mmTPC0_CFG_QM_TENSOR_11_DIM_3_SIZE 0xE06C90 + +#define mmTPC0_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xE06C94 + +#define mmTPC0_CFG_QM_TENSOR_11_DIM_4_SIZE 0xE06C98 + +#define mmTPC0_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xE06C9C + +#define 
mmTPC0_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xE06CA0 + +#define mmTPC0_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xE06CA4 + +#define mmTPC0_CFG_QM_TENSOR_12_PADDING_VALUE 0xE06CA8 + +#define mmTPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xE06CAC + +#define mmTPC0_CFG_QM_TENSOR_12_DIM_0_SIZE 0xE06CB0 + +#define mmTPC0_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xE06CB4 + +#define mmTPC0_CFG_QM_TENSOR_12_DIM_1_SIZE 0xE06CB8 + +#define mmTPC0_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xE06CBC + +#define mmTPC0_CFG_QM_TENSOR_12_DIM_2_SIZE 0xE06CC0 + +#define mmTPC0_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xE06CC4 + +#define mmTPC0_CFG_QM_TENSOR_12_DIM_3_SIZE 0xE06CC8 + +#define mmTPC0_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xE06CCC + +#define mmTPC0_CFG_QM_TENSOR_12_DIM_4_SIZE 0xE06CD0 + +#define mmTPC0_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xE06CD4 + +#define mmTPC0_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xE06CD8 + +#define mmTPC0_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xE06CDC + +#define mmTPC0_CFG_QM_TENSOR_13_PADDING_VALUE 0xE06CE0 + +#define mmTPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xE06CE4 + +#define mmTPC0_CFG_QM_TENSOR_13_DIM_0_SIZE 0xE06CE8 + +#define mmTPC0_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xE06CEC + +#define mmTPC0_CFG_QM_TENSOR_13_DIM_1_SIZE 0xE06CF0 + +#define mmTPC0_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xE06CF4 + +#define mmTPC0_CFG_QM_TENSOR_13_DIM_2_SIZE 0xE06CF8 + +#define mmTPC0_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xE06CFC + +#define mmTPC0_CFG_QM_TENSOR_13_DIM_3_SIZE 0xE06D00 + +#define mmTPC0_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xE06D04 + +#define mmTPC0_CFG_QM_TENSOR_13_DIM_4_SIZE 0xE06D08 + +#define mmTPC0_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xE06D0C + +#define mmTPC0_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xE06D10 + +#define mmTPC0_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xE06D14 + +#define mmTPC0_CFG_QM_TENSOR_14_PADDING_VALUE 0xE06D18 + +#define mmTPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xE06D1C + +#define mmTPC0_CFG_QM_TENSOR_14_DIM_0_SIZE 0xE06D20 + +#define mmTPC0_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xE06D24 + +#define mmTPC0_CFG_QM_TENSOR_14_DIM_1_SIZE 0xE06D28 + +#define mmTPC0_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xE06D2C + +#define mmTPC0_CFG_QM_TENSOR_14_DIM_2_SIZE 0xE06D30 + +#define mmTPC0_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xE06D34 + +#define mmTPC0_CFG_QM_TENSOR_14_DIM_3_SIZE 0xE06D38 + +#define mmTPC0_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xE06D3C + +#define mmTPC0_CFG_QM_TENSOR_14_DIM_4_SIZE 0xE06D40 + +#define mmTPC0_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xE06D44 + +#define mmTPC0_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xE06D48 + +#define mmTPC0_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xE06D4C + +#define mmTPC0_CFG_QM_TENSOR_15_PADDING_VALUE 0xE06D50 + +#define mmTPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xE06D54 + +#define mmTPC0_CFG_QM_TENSOR_15_DIM_0_SIZE 0xE06D58 + +#define mmTPC0_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xE06D5C + +#define mmTPC0_CFG_QM_TENSOR_15_DIM_1_SIZE 0xE06D60 + +#define mmTPC0_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xE06D64 + +#define mmTPC0_CFG_QM_TENSOR_15_DIM_2_SIZE 0xE06D68 + +#define mmTPC0_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xE06D6C + +#define mmTPC0_CFG_QM_TENSOR_15_DIM_3_SIZE 0xE06D70 + +#define mmTPC0_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xE06D74 + +#define mmTPC0_CFG_QM_TENSOR_15_DIM_4_SIZE 0xE06D78 + +#define mmTPC0_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xE06D7C + +#define mmTPC0_CFG_QM_SYNC_OBJECT_MESSAGE 0xE06D80 + +#define mmTPC0_CFG_QM_SYNC_OBJECT_ADDR 0xE06D84 + +#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE06D88 + +#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE06D8C + +#define mmTPC0_CFG_QM_TID_BASE_DIM_0 0xE06D90 + +#define mmTPC0_CFG_QM_TID_SIZE_DIM_0 0xE06D94 + +#define mmTPC0_CFG_QM_TID_BASE_DIM_1 0xE06D98 + +#define 
mmTPC0_CFG_QM_TID_SIZE_DIM_1 0xE06D9C + +#define mmTPC0_CFG_QM_TID_BASE_DIM_2 0xE06DA0 + +#define mmTPC0_CFG_QM_TID_SIZE_DIM_2 0xE06DA4 + +#define mmTPC0_CFG_QM_TID_BASE_DIM_3 0xE06DA8 + +#define mmTPC0_CFG_QM_TID_SIZE_DIM_3 0xE06DAC + +#define mmTPC0_CFG_QM_TID_BASE_DIM_4 0xE06DB0 + +#define mmTPC0_CFG_QM_TID_SIZE_DIM_4 0xE06DB4 + +#define mmTPC0_CFG_QM_KERNEL_CONFIG 0xE06DB8 + +#define mmTPC0_CFG_QM_KERNEL_ID 0xE06DBC + +#define mmTPC0_CFG_QM_SRF_0 0xE06DC0 + +#define mmTPC0_CFG_QM_SRF_1 0xE06DC4 + +#define mmTPC0_CFG_QM_SRF_2 0xE06DC8 + +#define mmTPC0_CFG_QM_SRF_3 0xE06DCC + +#define mmTPC0_CFG_QM_SRF_4 0xE06DD0 + +#define mmTPC0_CFG_QM_SRF_5 0xE06DD4 + +#define mmTPC0_CFG_QM_SRF_6 0xE06DD8 + +#define mmTPC0_CFG_QM_SRF_7 0xE06DDC + +#define mmTPC0_CFG_QM_SRF_8 0xE06DE0 + +#define mmTPC0_CFG_QM_SRF_9 0xE06DE4 + +#define mmTPC0_CFG_QM_SRF_10 0xE06DE8 + +#define mmTPC0_CFG_QM_SRF_11 0xE06DEC + +#define mmTPC0_CFG_QM_SRF_12 0xE06DF0 + +#define mmTPC0_CFG_QM_SRF_13 0xE06DF4 + +#define mmTPC0_CFG_QM_SRF_14 0xE06DF8 + +#define mmTPC0_CFG_QM_SRF_15 0xE06DFC + +#define mmTPC0_CFG_QM_SRF_16 0xE06E00 + +#define mmTPC0_CFG_QM_SRF_17 0xE06E04 + +#define mmTPC0_CFG_QM_SRF_18 0xE06E08 + +#define mmTPC0_CFG_QM_SRF_19 0xE06E0C + +#define mmTPC0_CFG_QM_SRF_20 0xE06E10 + +#define mmTPC0_CFG_QM_SRF_21 0xE06E14 + +#define mmTPC0_CFG_QM_SRF_22 0xE06E18 + +#define mmTPC0_CFG_QM_SRF_23 0xE06E1C + +#define mmTPC0_CFG_QM_SRF_24 0xE06E20 + +#define mmTPC0_CFG_QM_SRF_25 0xE06E24 + +#define mmTPC0_CFG_QM_SRF_26 0xE06E28 + +#define mmTPC0_CFG_QM_SRF_27 0xE06E2C + +#define mmTPC0_CFG_QM_SRF_28 0xE06E30 + +#define mmTPC0_CFG_QM_SRF_29 0xE06E34 + +#define mmTPC0_CFG_QM_SRF_30 0xE06E38 + +#define mmTPC0_CFG_QM_SRF_31 0xE06E3C + +#endif /* ASIC_REG_TPC0_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h new file mode 100644 index 000000000..8e71532c6 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h @@ -0,0 +1,800 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_QM_MASKS_H_ +#define ASIC_REG_TPC0_QM_MASKS_H_ + +/* + ***************************************** + * TPC0_QM (Prototype: QMAN) + ***************************************** + */ + +/* TPC0_QM_GLBL_CFG0 */ +#define TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT 0 +#define TPC0_QM_GLBL_CFG0_PQF_EN_MASK 0xF +#define TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT 4 +#define TPC0_QM_GLBL_CFG0_CQF_EN_MASK 0x1F0 +#define TPC0_QM_GLBL_CFG0_CP_EN_SHIFT 9 +#define TPC0_QM_GLBL_CFG0_CP_EN_MASK 0x3E00 + +/* TPC0_QM_GLBL_CFG1 */ +#define TPC0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0 +#define TPC0_QM_GLBL_CFG1_PQF_STOP_MASK 0xF +#define TPC0_QM_GLBL_CFG1_CQF_STOP_SHIFT 4 +#define TPC0_QM_GLBL_CFG1_CQF_STOP_MASK 0x1F0 +#define TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT 9 +#define TPC0_QM_GLBL_CFG1_CP_STOP_MASK 0x3E00 +#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 16 +#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0xF0000 +#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 20 +#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x1F00000 +#define TPC0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 25 +#define TPC0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x3E000000 + +/* TPC0_QM_GLBL_PROT */ +#define TPC0_QM_GLBL_PROT_PQF_SHIFT 0 +#define TPC0_QM_GLBL_PROT_PQF_MASK 0xF +#define TPC0_QM_GLBL_PROT_CQF_SHIFT 4 +#define TPC0_QM_GLBL_PROT_CQF_MASK 0x1F0 +#define TPC0_QM_GLBL_PROT_CP_SHIFT 9 +#define TPC0_QM_GLBL_PROT_CP_MASK 0x3E00 +#define TPC0_QM_GLBL_PROT_ERR_SHIFT 14 +#define TPC0_QM_GLBL_PROT_ERR_MASK 0x4000 +#define TPC0_QM_GLBL_PROT_ARB_SHIFT 15 +#define TPC0_QM_GLBL_PROT_ARB_MASK 0x8000 + +/* TPC0_QM_GLBL_ERR_CFG */ +#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 0 +#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0xF +#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4 +#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x1F0 +#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 9 +#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x3E00 +#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 16 +#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0xF0000 +#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 20 +#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x1F00000 +#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 25 +#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x3E000000 +#define TPC0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT 31 +#define TPC0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK 0x80000000 + +/* TPC0_QM_GLBL_SECURE_PROPS */ +#define TPC0_QM_GLBL_SECURE_PROPS_0_ASID_SHIFT 0 +#define TPC0_QM_GLBL_SECURE_PROPS_0_ASID_MASK 0x3FF +#define TPC0_QM_GLBL_SECURE_PROPS_1_ASID_SHIFT 0 +#define TPC0_QM_GLBL_SECURE_PROPS_1_ASID_MASK 0x3FF +#define TPC0_QM_GLBL_SECURE_PROPS_2_ASID_SHIFT 0 +#define TPC0_QM_GLBL_SECURE_PROPS_2_ASID_MASK 0x3FF +#define TPC0_QM_GLBL_SECURE_PROPS_3_ASID_SHIFT 0 +#define TPC0_QM_GLBL_SECURE_PROPS_3_ASID_MASK 0x3FF +#define TPC0_QM_GLBL_SECURE_PROPS_4_ASID_SHIFT 0 +#define TPC0_QM_GLBL_SECURE_PROPS_4_ASID_MASK 0x3FF +#define TPC0_QM_GLBL_SECURE_PROPS_0_MMBP_SHIFT 10 +#define TPC0_QM_GLBL_SECURE_PROPS_0_MMBP_MASK 0x400 +#define TPC0_QM_GLBL_SECURE_PROPS_1_MMBP_SHIFT 10 +#define TPC0_QM_GLBL_SECURE_PROPS_1_MMBP_MASK 0x400 +#define TPC0_QM_GLBL_SECURE_PROPS_2_MMBP_SHIFT 10 +#define TPC0_QM_GLBL_SECURE_PROPS_2_MMBP_MASK 0x400 +#define TPC0_QM_GLBL_SECURE_PROPS_3_MMBP_SHIFT 10 +#define TPC0_QM_GLBL_SECURE_PROPS_3_MMBP_MASK 0x400 +#define TPC0_QM_GLBL_SECURE_PROPS_4_MMBP_SHIFT 10 +#define TPC0_QM_GLBL_SECURE_PROPS_4_MMBP_MASK 0x400 + +/* 
TPC0_QM_GLBL_NON_SECURE_PROPS */ +#define TPC0_QM_GLBL_NON_SECURE_PROPS_0_ASID_SHIFT 0 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK 0x3FF +#define TPC0_QM_GLBL_NON_SECURE_PROPS_1_ASID_SHIFT 0 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_1_ASID_MASK 0x3FF +#define TPC0_QM_GLBL_NON_SECURE_PROPS_2_ASID_SHIFT 0 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_2_ASID_MASK 0x3FF +#define TPC0_QM_GLBL_NON_SECURE_PROPS_3_ASID_SHIFT 0 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_3_ASID_MASK 0x3FF +#define TPC0_QM_GLBL_NON_SECURE_PROPS_4_ASID_SHIFT 0 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_4_ASID_MASK 0x3FF +#define TPC0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_SHIFT 10 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_MASK 0x400 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_SHIFT 10 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_MASK 0x400 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_SHIFT 10 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_MASK 0x400 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_SHIFT 10 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_MASK 0x400 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_SHIFT 10 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_MASK 0x400 + +/* TPC0_QM_GLBL_STS0 */ +#define TPC0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0 +#define TPC0_QM_GLBL_STS0_PQF_IDLE_MASK 0xF +#define TPC0_QM_GLBL_STS0_CQF_IDLE_SHIFT 4 +#define TPC0_QM_GLBL_STS0_CQF_IDLE_MASK 0x1F0 +#define TPC0_QM_GLBL_STS0_CP_IDLE_SHIFT 9 +#define TPC0_QM_GLBL_STS0_CP_IDLE_MASK 0x3E00 +#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 16 +#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0xF0000 +#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 20 +#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x1F00000 +#define TPC0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 25 +#define TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x3E000000 +#define TPC0_QM_GLBL_STS0_ARB_IS_STOP_SHIFT 31 +#define TPC0_QM_GLBL_STS0_ARB_IS_STOP_MASK 0x80000000 + +/* TPC0_QM_GLBL_STS1 */ +#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0 +#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1 +#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1 +#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2 +#define TPC0_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2 +#define TPC0_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4 +#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3 +#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8 +#define TPC0_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4 +#define TPC0_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10 +#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5 +#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20 +#define TPC0_QM_GLBL_STS1_CP_WREG_ERR_SHIFT 6 +#define TPC0_QM_GLBL_STS1_CP_WREG_ERR_MASK 0x40 +#define TPC0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_SHIFT 8 +#define TPC0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_MASK 0x100 +#define TPC0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_SHIFT 9 +#define TPC0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_MASK 0x200 +#define TPC0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_SHIFT 10 +#define TPC0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_MASK 0x400 +#define TPC0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_SHIFT 11 +#define TPC0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_MASK 0x800 +#define TPC0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_SHIFT 12 +#define TPC0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define TPC0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_SHIFT 13 +#define TPC0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define TPC0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_SHIFT 14 +#define TPC0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define TPC0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_SHIFT 15 +#define TPC0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* TPC0_QM_GLBL_STS1_4 */ +#define TPC0_QM_GLBL_STS1_4_CQF_RD_ERR_SHIFT 1 +#define TPC0_QM_GLBL_STS1_4_CQF_RD_ERR_MASK 
0x2 +#define TPC0_QM_GLBL_STS1_4_CP_RD_ERR_SHIFT 2 +#define TPC0_QM_GLBL_STS1_4_CP_RD_ERR_MASK 0x4 +#define TPC0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_SHIFT 3 +#define TPC0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_MASK 0x8 +#define TPC0_QM_GLBL_STS1_4_CP_STOP_OP_SHIFT 4 +#define TPC0_QM_GLBL_STS1_4_CP_STOP_OP_MASK 0x10 +#define TPC0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_SHIFT 5 +#define TPC0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_MASK 0x20 +#define TPC0_QM_GLBL_STS1_4_CP_WREG_ERR_SHIFT 6 +#define TPC0_QM_GLBL_STS1_4_CP_WREG_ERR_MASK 0x40 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_SHIFT 8 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_MASK 0x100 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_SHIFT 9 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_MASK 0x200 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_SHIFT 10 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_MASK 0x400 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_SHIFT 11 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_MASK 0x800 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_SHIFT 12 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_SHIFT 13 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_SHIFT 14 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_SHIFT 15 +#define TPC0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* TPC0_QM_GLBL_MSG_EN */ +#define TPC0_QM_GLBL_MSG_EN_PQF_RD_ERR_SHIFT 0 +#define TPC0_QM_GLBL_MSG_EN_PQF_RD_ERR_MASK 0x1 +#define TPC0_QM_GLBL_MSG_EN_CQF_RD_ERR_SHIFT 1 +#define TPC0_QM_GLBL_MSG_EN_CQF_RD_ERR_MASK 0x2 +#define TPC0_QM_GLBL_MSG_EN_CP_RD_ERR_SHIFT 2 +#define TPC0_QM_GLBL_MSG_EN_CP_RD_ERR_MASK 0x4 +#define TPC0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_SHIFT 3 +#define TPC0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_MASK 0x8 +#define TPC0_QM_GLBL_MSG_EN_CP_STOP_OP_SHIFT 4 +#define TPC0_QM_GLBL_MSG_EN_CP_STOP_OP_MASK 0x10 +#define TPC0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_SHIFT 5 +#define TPC0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_MASK 0x20 +#define TPC0_QM_GLBL_MSG_EN_CP_WREG_ERR_SHIFT 6 +#define TPC0_QM_GLBL_MSG_EN_CP_WREG_ERR_MASK 0x40 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_SHIFT 8 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_MASK 0x100 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_SHIFT 9 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_MASK 0x200 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_SHIFT 10 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_MASK 0x400 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_SHIFT 11 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_MASK 0x800 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_SHIFT 12 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_SHIFT 13 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_SHIFT 14 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_SHIFT 15 +#define TPC0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* TPC0_QM_GLBL_MSG_EN_4 */ +#define TPC0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_SHIFT 1 +#define TPC0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_MASK 0x2 +#define TPC0_QM_GLBL_MSG_EN_4_CP_RD_ERR_SHIFT 2 +#define TPC0_QM_GLBL_MSG_EN_4_CP_RD_ERR_MASK 0x4 +#define TPC0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_SHIFT 3 +#define TPC0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_MASK 0x8 +#define TPC0_QM_GLBL_MSG_EN_4_CP_STOP_OP_SHIFT 4 +#define TPC0_QM_GLBL_MSG_EN_4_CP_STOP_OP_MASK 0x10 +#define 
TPC0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_SHIFT 5 +#define TPC0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_MASK 0x20 +#define TPC0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_SHIFT 6 +#define TPC0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_MASK 0x40 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_SHIFT 8 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_MASK 0x100 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_SHIFT 9 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_MASK 0x200 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_SHIFT 10 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_MASK 0x400 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_SHIFT 11 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_MASK 0x800 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_SHIFT 12 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_SHIFT 13 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_SHIFT 14 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_SHIFT 15 +#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* TPC0_QM_PQ_BASE_LO */ +#define TPC0_QM_PQ_BASE_LO_VAL_SHIFT 0 +#define TPC0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_BASE_HI */ +#define TPC0_QM_PQ_BASE_HI_VAL_SHIFT 0 +#define TPC0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_SIZE */ +#define TPC0_QM_PQ_SIZE_VAL_SHIFT 0 +#define TPC0_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_PI */ +#define TPC0_QM_PQ_PI_VAL_SHIFT 0 +#define TPC0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_CI */ +#define TPC0_QM_PQ_CI_VAL_SHIFT 0 +#define TPC0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_CFG0 */ +#define TPC0_QM_PQ_CFG0_RESERVED_SHIFT 0 +#define TPC0_QM_PQ_CFG0_RESERVED_MASK 0x1 + +/* TPC0_QM_PQ_CFG1 */ +#define TPC0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0 +#define TPC0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* TPC0_QM_PQ_ARUSER_31_11 */ +#define TPC0_QM_PQ_ARUSER_31_11_VAL_SHIFT 0 +#define TPC0_QM_PQ_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* TPC0_QM_PQ_STS0 */ +#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0 +#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF +#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16 +#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000 + +/* TPC0_QM_PQ_STS1 */ +#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0 +#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF +#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30 +#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000 +#define TPC0_QM_PQ_STS1_PQ_BUSY_SHIFT 31 +#define TPC0_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000 + +/* TPC0_QM_CQ_CFG0 */ +#define TPC0_QM_CQ_CFG0_RESERVED_SHIFT 0 +#define TPC0_QM_CQ_CFG0_RESERVED_MASK 0x1 + +/* TPC0_QM_CQ_CFG1 */ +#define TPC0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0 +#define TPC0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_ARUSER_31_11 */ +#define TPC0_QM_CQ_ARUSER_31_11_VAL_SHIFT 0 +#define TPC0_QM_CQ_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* TPC0_QM_CQ_STS0 */ +#define TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0 +#define TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF +#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16 +#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_STS1 */ +#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0 +#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF +#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30 +#define 
TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000 +#define TPC0_QM_CQ_STS1_CQ_BUSY_SHIFT 31 +#define TPC0_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000 + +/* TPC0_QM_CQ_PTR_LO_0 */ +#define TPC0_QM_CQ_PTR_LO_0_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_LO_0_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_PTR_HI_0 */ +#define TPC0_QM_CQ_PTR_HI_0_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_HI_0_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_TSIZE_0 */ +#define TPC0_QM_CQ_TSIZE_0_VAL_SHIFT 0 +#define TPC0_QM_CQ_TSIZE_0_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_CTL_0 */ +#define TPC0_QM_CQ_CTL_0_RPT_SHIFT 0 +#define TPC0_QM_CQ_CTL_0_RPT_MASK 0xFFFF +#define TPC0_QM_CQ_CTL_0_CTL_SHIFT 16 +#define TPC0_QM_CQ_CTL_0_CTL_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_PTR_LO_1 */ +#define TPC0_QM_CQ_PTR_LO_1_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_LO_1_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_PTR_HI_1 */ +#define TPC0_QM_CQ_PTR_HI_1_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_HI_1_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_TSIZE_1 */ +#define TPC0_QM_CQ_TSIZE_1_VAL_SHIFT 0 +#define TPC0_QM_CQ_TSIZE_1_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_CTL_1 */ +#define TPC0_QM_CQ_CTL_1_RPT_SHIFT 0 +#define TPC0_QM_CQ_CTL_1_RPT_MASK 0xFFFF +#define TPC0_QM_CQ_CTL_1_CTL_SHIFT 16 +#define TPC0_QM_CQ_CTL_1_CTL_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_PTR_LO_2 */ +#define TPC0_QM_CQ_PTR_LO_2_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_LO_2_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_PTR_HI_2 */ +#define TPC0_QM_CQ_PTR_HI_2_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_HI_2_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_TSIZE_2 */ +#define TPC0_QM_CQ_TSIZE_2_VAL_SHIFT 0 +#define TPC0_QM_CQ_TSIZE_2_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_CTL_2 */ +#define TPC0_QM_CQ_CTL_2_RPT_SHIFT 0 +#define TPC0_QM_CQ_CTL_2_RPT_MASK 0xFFFF +#define TPC0_QM_CQ_CTL_2_CTL_SHIFT 16 +#define TPC0_QM_CQ_CTL_2_CTL_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_PTR_LO_3 */ +#define TPC0_QM_CQ_PTR_LO_3_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_LO_3_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_PTR_HI_3 */ +#define TPC0_QM_CQ_PTR_HI_3_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_HI_3_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_TSIZE_3 */ +#define TPC0_QM_CQ_TSIZE_3_VAL_SHIFT 0 +#define TPC0_QM_CQ_TSIZE_3_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_CTL_3 */ +#define TPC0_QM_CQ_CTL_3_RPT_SHIFT 0 +#define TPC0_QM_CQ_CTL_3_RPT_MASK 0xFFFF +#define TPC0_QM_CQ_CTL_3_CTL_SHIFT 16 +#define TPC0_QM_CQ_CTL_3_CTL_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_PTR_LO_4 */ +#define TPC0_QM_CQ_PTR_LO_4_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_LO_4_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_PTR_HI_4 */ +#define TPC0_QM_CQ_PTR_HI_4_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_HI_4_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_TSIZE_4 */ +#define TPC0_QM_CQ_TSIZE_4_VAL_SHIFT 0 +#define TPC0_QM_CQ_TSIZE_4_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_CTL_4 */ +#define TPC0_QM_CQ_CTL_4_RPT_SHIFT 0 +#define TPC0_QM_CQ_CTL_4_RPT_MASK 0xFFFF +#define TPC0_QM_CQ_CTL_4_CTL_SHIFT 16 +#define TPC0_QM_CQ_CTL_4_CTL_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_PTR_LO_STS */ +#define TPC0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_PTR_HI_STS */ +#define TPC0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_TSIZE_STS */ +#define TPC0_QM_CQ_TSIZE_STS_VAL_SHIFT 0 +#define TPC0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_CTL_STS */ +#define TPC0_QM_CQ_CTL_STS_RPT_SHIFT 0 +#define TPC0_QM_CQ_CTL_STS_RPT_MASK 0xFFFF +#define TPC0_QM_CQ_CTL_STS_CTL_SHIFT 16 +#define TPC0_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_IFIFO_CNT */ +#define TPC0_QM_CQ_IFIFO_CNT_VAL_SHIFT 0 +#define TPC0_QM_CQ_IFIFO_CNT_VAL_MASK 0x3 + +/* 
TPC0_QM_CP_MSG_BASE0_ADDR_LO */ +#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE0_ADDR_HI */ +#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE1_ADDR_LO */ +#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE1_ADDR_HI */ +#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE2_ADDR_LO */ +#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE2_ADDR_HI */ +#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE3_ADDR_LO */ +#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE3_ADDR_HI */ +#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_TSIZE_OFFSET */ +#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */ +#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET */ +#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_FENCE0_RDATA */ +#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF + +/* TPC0_QM_CP_FENCE1_RDATA */ +#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF + +/* TPC0_QM_CP_FENCE2_RDATA */ +#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF + +/* TPC0_QM_CP_FENCE3_RDATA */ +#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF + +/* TPC0_QM_CP_FENCE0_CNT */ +#define TPC0_QM_CP_FENCE0_CNT_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE0_CNT_VAL_MASK 0x3FFF + +/* TPC0_QM_CP_FENCE1_CNT */ +#define TPC0_QM_CP_FENCE1_CNT_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE1_CNT_VAL_MASK 0x3FFF + +/* TPC0_QM_CP_FENCE2_CNT */ +#define TPC0_QM_CP_FENCE2_CNT_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE2_CNT_VAL_MASK 0x3FFF + +/* TPC0_QM_CP_FENCE3_CNT */ +#define TPC0_QM_CP_FENCE3_CNT_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE3_CNT_VAL_MASK 0x3FFF + +/* TPC0_QM_CP_STS */ +#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0 +#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF +#define TPC0_QM_CP_STS_ERDY_SHIFT 16 +#define TPC0_QM_CP_STS_ERDY_MASK 0x10000 +#define TPC0_QM_CP_STS_RRDY_SHIFT 17 +#define TPC0_QM_CP_STS_RRDY_MASK 0x20000 +#define TPC0_QM_CP_STS_MRDY_SHIFT 18 +#define TPC0_QM_CP_STS_MRDY_MASK 0x40000 +#define TPC0_QM_CP_STS_SW_STOP_SHIFT 19 +#define TPC0_QM_CP_STS_SW_STOP_MASK 0x80000 +#define TPC0_QM_CP_STS_FENCE_ID_SHIFT 20 +#define TPC0_QM_CP_STS_FENCE_ID_MASK 0x300000 +#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22 +#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000 + +/* TPC0_QM_CP_CURRENT_INST_LO */ +#define TPC0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_CURRENT_INST_HI */ +#define TPC0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0 +#define 
TPC0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_BARRIER_CFG */ +#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0 +#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF +#define TPC0_QM_CP_BARRIER_CFG_RBGUARD_SHIFT 16 +#define TPC0_QM_CP_BARRIER_CFG_RBGUARD_MASK 0xF0000 + +/* TPC0_QM_CP_DBG_0 */ +#define TPC0_QM_CP_DBG_0_CS_SHIFT 0 +#define TPC0_QM_CP_DBG_0_CS_MASK 0xF +#define TPC0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_SHIFT 4 +#define TPC0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_MASK 0x10 +#define TPC0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_SHIFT 5 +#define TPC0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_MASK 0x20 +#define TPC0_QM_CP_DBG_0_MREB_STALL_SHIFT 6 +#define TPC0_QM_CP_DBG_0_MREB_STALL_MASK 0x40 +#define TPC0_QM_CP_DBG_0_STALL_SHIFT 7 +#define TPC0_QM_CP_DBG_0_STALL_MASK 0x80 + +/* TPC0_QM_CP_ARUSER_31_11 */ +#define TPC0_QM_CP_ARUSER_31_11_VAL_SHIFT 0 +#define TPC0_QM_CP_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* TPC0_QM_CP_AWUSER_31_11 */ +#define TPC0_QM_CP_AWUSER_31_11_VAL_SHIFT 0 +#define TPC0_QM_CP_AWUSER_31_11_VAL_MASK 0x1FFFFF + +/* TPC0_QM_ARB_CFG_0 */ +#define TPC0_QM_ARB_CFG_0_TYPE_SHIFT 0 +#define TPC0_QM_ARB_CFG_0_TYPE_MASK 0x1 +#define TPC0_QM_ARB_CFG_0_IS_MASTER_SHIFT 4 +#define TPC0_QM_ARB_CFG_0_IS_MASTER_MASK 0x10 +#define TPC0_QM_ARB_CFG_0_EN_SHIFT 8 +#define TPC0_QM_ARB_CFG_0_EN_MASK 0x100 +#define TPC0_QM_ARB_CFG_0_MASK_SHIFT 12 +#define TPC0_QM_ARB_CFG_0_MASK_MASK 0xF000 +#define TPC0_QM_ARB_CFG_0_MST_MSG_NOSTALL_SHIFT 16 +#define TPC0_QM_ARB_CFG_0_MST_MSG_NOSTALL_MASK 0x10000 + +/* TPC0_QM_ARB_CHOISE_Q_PUSH */ +#define TPC0_QM_ARB_CHOISE_Q_PUSH_VAL_SHIFT 0 +#define TPC0_QM_ARB_CHOISE_Q_PUSH_VAL_MASK 0x3 + +/* TPC0_QM_ARB_WRR_WEIGHT */ +#define TPC0_QM_ARB_WRR_WEIGHT_VAL_SHIFT 0 +#define TPC0_QM_ARB_WRR_WEIGHT_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_ARB_CFG_1 */ +#define TPC0_QM_ARB_CFG_1_CLR_SHIFT 0 +#define TPC0_QM_ARB_CFG_1_CLR_MASK 0x1 + +/* TPC0_QM_ARB_MST_AVAIL_CRED */ +#define TPC0_QM_ARB_MST_AVAIL_CRED_VAL_SHIFT 0 +#define TPC0_QM_ARB_MST_AVAIL_CRED_VAL_MASK 0x7F + +/* TPC0_QM_ARB_MST_CRED_INC */ +#define TPC0_QM_ARB_MST_CRED_INC_VAL_SHIFT 0 +#define TPC0_QM_ARB_MST_CRED_INC_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_ARB_MST_CHOISE_PUSH_OFST */ +#define TPC0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_SHIFT 0 +#define TPC0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST */ +#define TPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_SHIFT 0 +#define TPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_ARB_MST_SLAVE_EN */ +#define TPC0_QM_ARB_MST_SLAVE_EN_VAL_SHIFT 0 +#define TPC0_QM_ARB_MST_SLAVE_EN_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_ARB_MST_QUIET_PER */ +#define TPC0_QM_ARB_MST_QUIET_PER_VAL_SHIFT 0 +#define TPC0_QM_ARB_MST_QUIET_PER_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_ARB_SLV_CHOISE_WDT */ +#define TPC0_QM_ARB_SLV_CHOISE_WDT_VAL_SHIFT 0 +#define TPC0_QM_ARB_SLV_CHOISE_WDT_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_ARB_SLV_ID */ +#define TPC0_QM_ARB_SLV_ID_VAL_SHIFT 0 +#define TPC0_QM_ARB_SLV_ID_VAL_MASK 0x1F + +/* TPC0_QM_ARB_MSG_MAX_INFLIGHT */ +#define TPC0_QM_ARB_MSG_MAX_INFLIGHT_VAL_SHIFT 0 +#define TPC0_QM_ARB_MSG_MAX_INFLIGHT_VAL_MASK 0x3F + +/* TPC0_QM_ARB_MSG_AWUSER_31_11 */ +#define TPC0_QM_ARB_MSG_AWUSER_31_11_VAL_SHIFT 0 +#define TPC0_QM_ARB_MSG_AWUSER_31_11_VAL_MASK 0x1FFFFF + +/* TPC0_QM_ARB_MSG_AWUSER_SEC_PROP */ +#define TPC0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_SHIFT 0 +#define TPC0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_MASK 0x3FF +#define TPC0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_SHIFT 10 +#define TPC0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_MASK 0x400 + +/* 
TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP */ +#define TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_SHIFT 0 +#define TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_MASK 0x3FF +#define TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_SHIFT 10 +#define TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_MASK 0x400 + +/* TPC0_QM_ARB_BASE_LO */ +#define TPC0_QM_ARB_BASE_LO_VAL_SHIFT 0 +#define TPC0_QM_ARB_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_ARB_BASE_HI */ +#define TPC0_QM_ARB_BASE_HI_VAL_SHIFT 0 +#define TPC0_QM_ARB_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_ARB_STATE_STS */ +#define TPC0_QM_ARB_STATE_STS_VAL_SHIFT 0 +#define TPC0_QM_ARB_STATE_STS_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_ARB_CHOISE_FULLNESS_STS */ +#define TPC0_QM_ARB_CHOISE_FULLNESS_STS_VAL_SHIFT 0 +#define TPC0_QM_ARB_CHOISE_FULLNESS_STS_VAL_MASK 0x7F + +/* TPC0_QM_ARB_MSG_STS */ +#define TPC0_QM_ARB_MSG_STS_FULL_SHIFT 0 +#define TPC0_QM_ARB_MSG_STS_FULL_MASK 0x1 +#define TPC0_QM_ARB_MSG_STS_NO_INFLIGHT_SHIFT 1 +#define TPC0_QM_ARB_MSG_STS_NO_INFLIGHT_MASK 0x2 + +/* TPC0_QM_ARB_SLV_CHOISE_Q_HEAD */ +#define TPC0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_SHIFT 0 +#define TPC0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_MASK 0x3 + +/* TPC0_QM_ARB_ERR_CAUSE */ +#define TPC0_QM_ARB_ERR_CAUSE_CHOISE_OVF_SHIFT 0 +#define TPC0_QM_ARB_ERR_CAUSE_CHOISE_OVF_MASK 0x1 +#define TPC0_QM_ARB_ERR_CAUSE_CHOISE_WDT_SHIFT 1 +#define TPC0_QM_ARB_ERR_CAUSE_CHOISE_WDT_MASK 0x2 +#define TPC0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_SHIFT 2 +#define TPC0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_MASK 0x4 + +/* TPC0_QM_ARB_ERR_MSG_EN */ +#define TPC0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_SHIFT 0 +#define TPC0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1 +#define TPC0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_SHIFT 1 +#define TPC0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2 +#define TPC0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_SHIFT 2 +#define TPC0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4 + +/* TPC0_QM_ARB_ERR_STS_DRP */ +#define TPC0_QM_ARB_ERR_STS_DRP_VAL_SHIFT 0 +#define TPC0_QM_ARB_ERR_STS_DRP_VAL_MASK 0x3 + +/* TPC0_QM_ARB_MST_CRED_STS */ +#define TPC0_QM_ARB_MST_CRED_STS_VAL_SHIFT 0 +#define TPC0_QM_ARB_MST_CRED_STS_VAL_MASK 0x7F + +/* TPC0_QM_CGM_CFG */ +#define TPC0_QM_CGM_CFG_IDLE_TH_SHIFT 0 +#define TPC0_QM_CGM_CFG_IDLE_TH_MASK 0xFFF +#define TPC0_QM_CGM_CFG_G2F_TH_SHIFT 16 +#define TPC0_QM_CGM_CFG_G2F_TH_MASK 0xFF0000 +#define TPC0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT 24 +#define TPC0_QM_CGM_CFG_CP_IDLE_MASK_MASK 0x1F000000 +#define TPC0_QM_CGM_CFG_EN_SHIFT 31 +#define TPC0_QM_CGM_CFG_EN_MASK 0x80000000 + +/* TPC0_QM_CGM_STS */ +#define TPC0_QM_CGM_STS_ST_SHIFT 0 +#define TPC0_QM_CGM_STS_ST_MASK 0x3 +#define TPC0_QM_CGM_STS_CG_SHIFT 4 +#define TPC0_QM_CGM_STS_CG_MASK 0x10 +#define TPC0_QM_CGM_STS_AGENT_IDLE_SHIFT 8 +#define TPC0_QM_CGM_STS_AGENT_IDLE_MASK 0x100 +#define TPC0_QM_CGM_STS_AXI_IDLE_SHIFT 9 +#define TPC0_QM_CGM_STS_AXI_IDLE_MASK 0x200 +#define TPC0_QM_CGM_STS_CP_IDLE_SHIFT 10 +#define TPC0_QM_CGM_STS_CP_IDLE_MASK 0x400 + +/* TPC0_QM_CGM_CFG1 */ +#define TPC0_QM_CGM_CFG1_MASK_TH_SHIFT 0 +#define TPC0_QM_CGM_CFG1_MASK_TH_MASK 0xFF + +/* TPC0_QM_LOCAL_RANGE_BASE */ +#define TPC0_QM_LOCAL_RANGE_BASE_VAL_SHIFT 0 +#define TPC0_QM_LOCAL_RANGE_BASE_VAL_MASK 0xFFFF + +/* TPC0_QM_LOCAL_RANGE_SIZE */ +#define TPC0_QM_LOCAL_RANGE_SIZE_VAL_SHIFT 0 +#define TPC0_QM_LOCAL_RANGE_SIZE_VAL_MASK 0xFFFF + +/* TPC0_QM_CSMR_STRICT_PRIO_CFG */ +#define TPC0_QM_CSMR_STRICT_PRIO_CFG_TYPE_SHIFT 0 +#define TPC0_QM_CSMR_STRICT_PRIO_CFG_TYPE_MASK 0x1 + +/* TPC0_QM_HBW_RD_RATE_LIM_CFG_1 */ +#define TPC0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0 +#define TPC0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF 
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_1_EN_SHIFT 31 +#define TPC0_QM_HBW_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000 + +/* TPC0_QM_LBW_WR_RATE_LIM_CFG_0 */ +#define TPC0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0 +#define TPC0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF +#define TPC0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_SHIFT 16 +#define TPC0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000 + +/* TPC0_QM_LBW_WR_RATE_LIM_CFG_1 */ +#define TPC0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0 +#define TPC0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF +#define TPC0_QM_LBW_WR_RATE_LIM_CFG_1_EN_SHIFT 31 +#define TPC0_QM_LBW_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000 + +/* TPC0_QM_HBW_RD_RATE_LIM_CFG_0 */ +#define TPC0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0 +#define TPC0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF +#define TPC0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_SHIFT 16 +#define TPC0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000 + +/* TPC0_QM_GLBL_AXCACHE */ +#define TPC0_QM_GLBL_AXCACHE_AR_SHIFT 0 +#define TPC0_QM_GLBL_AXCACHE_AR_MASK 0xF +#define TPC0_QM_GLBL_AXCACHE_AW_SHIFT 16 +#define TPC0_QM_GLBL_AXCACHE_AW_MASK 0xF0000 + +/* TPC0_QM_IND_GW_APB_CFG */ +#define TPC0_QM_IND_GW_APB_CFG_ADDR_SHIFT 0 +#define TPC0_QM_IND_GW_APB_CFG_ADDR_MASK 0x7FFFFFFF +#define TPC0_QM_IND_GW_APB_CFG_CMD_SHIFT 31 +#define TPC0_QM_IND_GW_APB_CFG_CMD_MASK 0x80000000 + +/* TPC0_QM_IND_GW_APB_WDATA */ +#define TPC0_QM_IND_GW_APB_WDATA_VAL_SHIFT 0 +#define TPC0_QM_IND_GW_APB_WDATA_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_IND_GW_APB_RDATA */ +#define TPC0_QM_IND_GW_APB_RDATA_VAL_SHIFT 0 +#define TPC0_QM_IND_GW_APB_RDATA_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_IND_GW_APB_STATUS */ +#define TPC0_QM_IND_GW_APB_STATUS_RDY_SHIFT 0 +#define TPC0_QM_IND_GW_APB_STATUS_RDY_MASK 0x1 +#define TPC0_QM_IND_GW_APB_STATUS_ERR_SHIFT 1 +#define TPC0_QM_IND_GW_APB_STATUS_ERR_MASK 0x2 + +/* TPC0_QM_GLBL_ERR_ADDR_LO */ +#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0 +#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_GLBL_ERR_ADDR_HI */ +#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0 +#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_GLBL_ERR_WDATA */ +#define TPC0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0 +#define TPC0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_GLBL_MEM_INIT_BUSY */ +#define TPC0_QM_GLBL_MEM_INIT_BUSY_RBUF_SHIFT 0 +#define TPC0_QM_GLBL_MEM_INIT_BUSY_RBUF_MASK 0xF + +#endif /* ASIC_REG_TPC0_QM_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h new file mode 100644 index 000000000..f9e310ab6 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_QM_REGS_H_ +#define ASIC_REG_TPC0_QM_REGS_H_ + +/* + ***************************************** + * TPC0_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC0_QM_GLBL_CFG0 0xE08000 + +#define mmTPC0_QM_GLBL_CFG1 0xE08004 + +#define mmTPC0_QM_GLBL_PROT 0xE08008 + +#define mmTPC0_QM_GLBL_ERR_CFG 0xE0800C + +#define mmTPC0_QM_GLBL_SECURE_PROPS_0 0xE08010 + +#define mmTPC0_QM_GLBL_SECURE_PROPS_1 0xE08014 + +#define mmTPC0_QM_GLBL_SECURE_PROPS_2 0xE08018 + +#define mmTPC0_QM_GLBL_SECURE_PROPS_3 0xE0801C + +#define mmTPC0_QM_GLBL_SECURE_PROPS_4 0xE08020 + +#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_0 0xE08024 + +#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_1 0xE08028 + +#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_2 0xE0802C + +#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_3 0xE08030 + +#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_4 0xE08034 + +#define mmTPC0_QM_GLBL_STS0 0xE08038 + +#define mmTPC0_QM_GLBL_STS1_0 0xE08040 + +#define mmTPC0_QM_GLBL_STS1_1 0xE08044 + +#define mmTPC0_QM_GLBL_STS1_2 0xE08048 + +#define mmTPC0_QM_GLBL_STS1_3 0xE0804C + +#define mmTPC0_QM_GLBL_STS1_4 0xE08050 + +#define mmTPC0_QM_GLBL_MSG_EN_0 0xE08054 + +#define mmTPC0_QM_GLBL_MSG_EN_1 0xE08058 + +#define mmTPC0_QM_GLBL_MSG_EN_2 0xE0805C + +#define mmTPC0_QM_GLBL_MSG_EN_3 0xE08060 + +#define mmTPC0_QM_GLBL_MSG_EN_4 0xE08068 + +#define mmTPC0_QM_PQ_BASE_LO_0 0xE08070 + +#define mmTPC0_QM_PQ_BASE_LO_1 0xE08074 + +#define mmTPC0_QM_PQ_BASE_LO_2 0xE08078 + +#define mmTPC0_QM_PQ_BASE_LO_3 0xE0807C + +#define mmTPC0_QM_PQ_BASE_HI_0 0xE08080 + +#define mmTPC0_QM_PQ_BASE_HI_1 0xE08084 + +#define mmTPC0_QM_PQ_BASE_HI_2 0xE08088 + +#define mmTPC0_QM_PQ_BASE_HI_3 0xE0808C + +#define mmTPC0_QM_PQ_SIZE_0 0xE08090 + +#define mmTPC0_QM_PQ_SIZE_1 0xE08094 + +#define mmTPC0_QM_PQ_SIZE_2 0xE08098 + +#define mmTPC0_QM_PQ_SIZE_3 0xE0809C + +#define mmTPC0_QM_PQ_PI_0 0xE080A0 + +#define mmTPC0_QM_PQ_PI_1 0xE080A4 + +#define mmTPC0_QM_PQ_PI_2 0xE080A8 + +#define mmTPC0_QM_PQ_PI_3 0xE080AC + +#define mmTPC0_QM_PQ_CI_0 0xE080B0 + +#define mmTPC0_QM_PQ_CI_1 0xE080B4 + +#define mmTPC0_QM_PQ_CI_2 0xE080B8 + +#define mmTPC0_QM_PQ_CI_3 0xE080BC + +#define mmTPC0_QM_PQ_CFG0_0 0xE080C0 + +#define mmTPC0_QM_PQ_CFG0_1 0xE080C4 + +#define mmTPC0_QM_PQ_CFG0_2 0xE080C8 + +#define mmTPC0_QM_PQ_CFG0_3 0xE080CC + +#define mmTPC0_QM_PQ_CFG1_0 0xE080D0 + +#define mmTPC0_QM_PQ_CFG1_1 0xE080D4 + +#define mmTPC0_QM_PQ_CFG1_2 0xE080D8 + +#define mmTPC0_QM_PQ_CFG1_3 0xE080DC + +#define mmTPC0_QM_PQ_ARUSER_31_11_0 0xE080E0 + +#define mmTPC0_QM_PQ_ARUSER_31_11_1 0xE080E4 + +#define mmTPC0_QM_PQ_ARUSER_31_11_2 0xE080E8 + +#define mmTPC0_QM_PQ_ARUSER_31_11_3 0xE080EC + +#define mmTPC0_QM_PQ_STS0_0 0xE080F0 + +#define mmTPC0_QM_PQ_STS0_1 0xE080F4 + +#define mmTPC0_QM_PQ_STS0_2 0xE080F8 + +#define mmTPC0_QM_PQ_STS0_3 0xE080FC + +#define mmTPC0_QM_PQ_STS1_0 0xE08100 + +#define mmTPC0_QM_PQ_STS1_1 0xE08104 + +#define mmTPC0_QM_PQ_STS1_2 0xE08108 + +#define mmTPC0_QM_PQ_STS1_3 0xE0810C + +#define mmTPC0_QM_CQ_CFG0_0 0xE08110 + +#define mmTPC0_QM_CQ_CFG0_1 0xE08114 + +#define mmTPC0_QM_CQ_CFG0_2 0xE08118 + +#define mmTPC0_QM_CQ_CFG0_3 0xE0811C + +#define mmTPC0_QM_CQ_CFG0_4 0xE08120 + +#define mmTPC0_QM_CQ_CFG1_0 0xE08124 + +#define mmTPC0_QM_CQ_CFG1_1 0xE08128 + +#define mmTPC0_QM_CQ_CFG1_2 0xE0812C + +#define mmTPC0_QM_CQ_CFG1_3 0xE08130 + +#define mmTPC0_QM_CQ_CFG1_4 0xE08134 + 
+#define mmTPC0_QM_CQ_ARUSER_31_11_0 0xE08138 + +#define mmTPC0_QM_CQ_ARUSER_31_11_1 0xE0813C + +#define mmTPC0_QM_CQ_ARUSER_31_11_2 0xE08140 + +#define mmTPC0_QM_CQ_ARUSER_31_11_3 0xE08144 + +#define mmTPC0_QM_CQ_ARUSER_31_11_4 0xE08148 + +#define mmTPC0_QM_CQ_STS0_0 0xE0814C + +#define mmTPC0_QM_CQ_STS0_1 0xE08150 + +#define mmTPC0_QM_CQ_STS0_2 0xE08154 + +#define mmTPC0_QM_CQ_STS0_3 0xE08158 + +#define mmTPC0_QM_CQ_STS0_4 0xE0815C + +#define mmTPC0_QM_CQ_STS1_0 0xE08160 + +#define mmTPC0_QM_CQ_STS1_1 0xE08164 + +#define mmTPC0_QM_CQ_STS1_2 0xE08168 + +#define mmTPC0_QM_CQ_STS1_3 0xE0816C + +#define mmTPC0_QM_CQ_STS1_4 0xE08170 + +#define mmTPC0_QM_CQ_PTR_LO_0 0xE08174 + +#define mmTPC0_QM_CQ_PTR_HI_0 0xE08178 + +#define mmTPC0_QM_CQ_TSIZE_0 0xE0817C + +#define mmTPC0_QM_CQ_CTL_0 0xE08180 + +#define mmTPC0_QM_CQ_PTR_LO_1 0xE08184 + +#define mmTPC0_QM_CQ_PTR_HI_1 0xE08188 + +#define mmTPC0_QM_CQ_TSIZE_1 0xE0818C + +#define mmTPC0_QM_CQ_CTL_1 0xE08190 + +#define mmTPC0_QM_CQ_PTR_LO_2 0xE08194 + +#define mmTPC0_QM_CQ_PTR_HI_2 0xE08198 + +#define mmTPC0_QM_CQ_TSIZE_2 0xE0819C + +#define mmTPC0_QM_CQ_CTL_2 0xE081A0 + +#define mmTPC0_QM_CQ_PTR_LO_3 0xE081A4 + +#define mmTPC0_QM_CQ_PTR_HI_3 0xE081A8 + +#define mmTPC0_QM_CQ_TSIZE_3 0xE081AC + +#define mmTPC0_QM_CQ_CTL_3 0xE081B0 + +#define mmTPC0_QM_CQ_PTR_LO_4 0xE081B4 + +#define mmTPC0_QM_CQ_PTR_HI_4 0xE081B8 + +#define mmTPC0_QM_CQ_TSIZE_4 0xE081BC + +#define mmTPC0_QM_CQ_CTL_4 0xE081C0 + +#define mmTPC0_QM_CQ_PTR_LO_STS_0 0xE081C4 + +#define mmTPC0_QM_CQ_PTR_LO_STS_1 0xE081C8 + +#define mmTPC0_QM_CQ_PTR_LO_STS_2 0xE081CC + +#define mmTPC0_QM_CQ_PTR_LO_STS_3 0xE081D0 + +#define mmTPC0_QM_CQ_PTR_LO_STS_4 0xE081D4 + +#define mmTPC0_QM_CQ_PTR_HI_STS_0 0xE081D8 + +#define mmTPC0_QM_CQ_PTR_HI_STS_1 0xE081DC + +#define mmTPC0_QM_CQ_PTR_HI_STS_2 0xE081E0 + +#define mmTPC0_QM_CQ_PTR_HI_STS_3 0xE081E4 + +#define mmTPC0_QM_CQ_PTR_HI_STS_4 0xE081E8 + +#define mmTPC0_QM_CQ_TSIZE_STS_0 0xE081EC + +#define mmTPC0_QM_CQ_TSIZE_STS_1 0xE081F0 + +#define mmTPC0_QM_CQ_TSIZE_STS_2 0xE081F4 + +#define mmTPC0_QM_CQ_TSIZE_STS_3 0xE081F8 + +#define mmTPC0_QM_CQ_TSIZE_STS_4 0xE081FC + +#define mmTPC0_QM_CQ_CTL_STS_0 0xE08200 + +#define mmTPC0_QM_CQ_CTL_STS_1 0xE08204 + +#define mmTPC0_QM_CQ_CTL_STS_2 0xE08208 + +#define mmTPC0_QM_CQ_CTL_STS_3 0xE0820C + +#define mmTPC0_QM_CQ_CTL_STS_4 0xE08210 + +#define mmTPC0_QM_CQ_IFIFO_CNT_0 0xE08214 + +#define mmTPC0_QM_CQ_IFIFO_CNT_1 0xE08218 + +#define mmTPC0_QM_CQ_IFIFO_CNT_2 0xE0821C + +#define mmTPC0_QM_CQ_IFIFO_CNT_3 0xE08220 + +#define mmTPC0_QM_CQ_IFIFO_CNT_4 0xE08224 + +#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 0xE08228 + +#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_1 0xE0822C + +#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_2 0xE08230 + +#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_3 0xE08234 + +#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_4 0xE08238 + +#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 0xE0823C + +#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_1 0xE08240 + +#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_2 0xE08244 + +#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_3 0xE08248 + +#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_4 0xE0824C + +#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 0xE08250 + +#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_1 0xE08254 + +#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_2 0xE08258 + +#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_3 0xE0825C + +#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_4 0xE08260 + +#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 0xE08264 + +#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_1 0xE08268 + +#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_2 0xE0826C + +#define 
mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_3 0xE08270 + +#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_4 0xE08274 + +#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_0 0xE08278 + +#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_1 0xE0827C + +#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 0xE08280 + +#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_3 0xE08284 + +#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_4 0xE08288 + +#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_0 0xE0828C + +#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_1 0xE08290 + +#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_2 0xE08294 + +#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_3 0xE08298 + +#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_4 0xE0829C + +#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_0 0xE082A0 + +#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_1 0xE082A4 + +#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_2 0xE082A8 + +#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_3 0xE082AC + +#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_4 0xE082B0 + +#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_0 0xE082B4 + +#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_1 0xE082B8 + +#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_2 0xE082BC + +#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_3 0xE082C0 + +#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_4 0xE082C4 + +#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 0xE082C8 + +#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_1 0xE082CC + +#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_2 0xE082D0 + +#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_3 0xE082D4 + +#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_4 0xE082D8 + +#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xE082E0 + +#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xE082E4 + +#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xE082E8 + +#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xE082EC + +#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xE082F0 + +#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xE082F4 + +#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xE082F8 + +#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xE082FC + +#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xE08300 + +#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xE08304 + +#define mmTPC0_QM_CP_FENCE0_RDATA_0 0xE08308 + +#define mmTPC0_QM_CP_FENCE0_RDATA_1 0xE0830C + +#define mmTPC0_QM_CP_FENCE0_RDATA_2 0xE08310 + +#define mmTPC0_QM_CP_FENCE0_RDATA_3 0xE08314 + +#define mmTPC0_QM_CP_FENCE0_RDATA_4 0xE08318 + +#define mmTPC0_QM_CP_FENCE1_RDATA_0 0xE0831C + +#define mmTPC0_QM_CP_FENCE1_RDATA_1 0xE08320 + +#define mmTPC0_QM_CP_FENCE1_RDATA_2 0xE08324 + +#define mmTPC0_QM_CP_FENCE1_RDATA_3 0xE08328 + +#define mmTPC0_QM_CP_FENCE1_RDATA_4 0xE0832C + +#define mmTPC0_QM_CP_FENCE2_RDATA_0 0xE08330 + +#define mmTPC0_QM_CP_FENCE2_RDATA_1 0xE08334 + +#define mmTPC0_QM_CP_FENCE2_RDATA_2 0xE08338 + +#define mmTPC0_QM_CP_FENCE2_RDATA_3 0xE0833C + +#define mmTPC0_QM_CP_FENCE2_RDATA_4 0xE08340 + +#define mmTPC0_QM_CP_FENCE3_RDATA_0 0xE08344 + +#define mmTPC0_QM_CP_FENCE3_RDATA_1 0xE08348 + +#define mmTPC0_QM_CP_FENCE3_RDATA_2 0xE0834C + +#define mmTPC0_QM_CP_FENCE3_RDATA_3 0xE08350 + +#define mmTPC0_QM_CP_FENCE3_RDATA_4 0xE08354 + +#define mmTPC0_QM_CP_FENCE0_CNT_0 0xE08358 + +#define mmTPC0_QM_CP_FENCE0_CNT_1 0xE0835C + +#define mmTPC0_QM_CP_FENCE0_CNT_2 0xE08360 + +#define mmTPC0_QM_CP_FENCE0_CNT_3 0xE08364 + +#define mmTPC0_QM_CP_FENCE0_CNT_4 0xE08368 + +#define mmTPC0_QM_CP_FENCE1_CNT_0 0xE0836C + +#define mmTPC0_QM_CP_FENCE1_CNT_1 0xE08370 + +#define mmTPC0_QM_CP_FENCE1_CNT_2 0xE08374 + +#define mmTPC0_QM_CP_FENCE1_CNT_3 0xE08378 + +#define mmTPC0_QM_CP_FENCE1_CNT_4 0xE0837C + +#define mmTPC0_QM_CP_FENCE2_CNT_0 0xE08380 + +#define mmTPC0_QM_CP_FENCE2_CNT_1 0xE08384 + +#define 
mmTPC0_QM_CP_FENCE2_CNT_2 0xE08388 + +#define mmTPC0_QM_CP_FENCE2_CNT_3 0xE0838C + +#define mmTPC0_QM_CP_FENCE2_CNT_4 0xE08390 + +#define mmTPC0_QM_CP_FENCE3_CNT_0 0xE08394 + +#define mmTPC0_QM_CP_FENCE3_CNT_1 0xE08398 + +#define mmTPC0_QM_CP_FENCE3_CNT_2 0xE0839C + +#define mmTPC0_QM_CP_FENCE3_CNT_3 0xE083A0 + +#define mmTPC0_QM_CP_FENCE3_CNT_4 0xE083A4 + +#define mmTPC0_QM_CP_STS_0 0xE083A8 + +#define mmTPC0_QM_CP_STS_1 0xE083AC + +#define mmTPC0_QM_CP_STS_2 0xE083B0 + +#define mmTPC0_QM_CP_STS_3 0xE083B4 + +#define mmTPC0_QM_CP_STS_4 0xE083B8 + +#define mmTPC0_QM_CP_CURRENT_INST_LO_0 0xE083BC + +#define mmTPC0_QM_CP_CURRENT_INST_LO_1 0xE083C0 + +#define mmTPC0_QM_CP_CURRENT_INST_LO_2 0xE083C4 + +#define mmTPC0_QM_CP_CURRENT_INST_LO_3 0xE083C8 + +#define mmTPC0_QM_CP_CURRENT_INST_LO_4 0xE083CC + +#define mmTPC0_QM_CP_CURRENT_INST_HI_0 0xE083D0 + +#define mmTPC0_QM_CP_CURRENT_INST_HI_1 0xE083D4 + +#define mmTPC0_QM_CP_CURRENT_INST_HI_2 0xE083D8 + +#define mmTPC0_QM_CP_CURRENT_INST_HI_3 0xE083DC + +#define mmTPC0_QM_CP_CURRENT_INST_HI_4 0xE083E0 + +#define mmTPC0_QM_CP_BARRIER_CFG_0 0xE083F4 + +#define mmTPC0_QM_CP_BARRIER_CFG_1 0xE083F8 + +#define mmTPC0_QM_CP_BARRIER_CFG_2 0xE083FC + +#define mmTPC0_QM_CP_BARRIER_CFG_3 0xE08400 + +#define mmTPC0_QM_CP_BARRIER_CFG_4 0xE08404 + +#define mmTPC0_QM_CP_DBG_0_0 0xE08408 + +#define mmTPC0_QM_CP_DBG_0_1 0xE0840C + +#define mmTPC0_QM_CP_DBG_0_2 0xE08410 + +#define mmTPC0_QM_CP_DBG_0_3 0xE08414 + +#define mmTPC0_QM_CP_DBG_0_4 0xE08418 + +#define mmTPC0_QM_CP_ARUSER_31_11_0 0xE0841C + +#define mmTPC0_QM_CP_ARUSER_31_11_1 0xE08420 + +#define mmTPC0_QM_CP_ARUSER_31_11_2 0xE08424 + +#define mmTPC0_QM_CP_ARUSER_31_11_3 0xE08428 + +#define mmTPC0_QM_CP_ARUSER_31_11_4 0xE0842C + +#define mmTPC0_QM_CP_AWUSER_31_11_0 0xE08430 + +#define mmTPC0_QM_CP_AWUSER_31_11_1 0xE08434 + +#define mmTPC0_QM_CP_AWUSER_31_11_2 0xE08438 + +#define mmTPC0_QM_CP_AWUSER_31_11_3 0xE0843C + +#define mmTPC0_QM_CP_AWUSER_31_11_4 0xE08440 + +#define mmTPC0_QM_ARB_CFG_0 0xE08A00 + +#define mmTPC0_QM_ARB_CHOISE_Q_PUSH 0xE08A04 + +#define mmTPC0_QM_ARB_WRR_WEIGHT_0 0xE08A08 + +#define mmTPC0_QM_ARB_WRR_WEIGHT_1 0xE08A0C + +#define mmTPC0_QM_ARB_WRR_WEIGHT_2 0xE08A10 + +#define mmTPC0_QM_ARB_WRR_WEIGHT_3 0xE08A14 + +#define mmTPC0_QM_ARB_CFG_1 0xE08A18 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_0 0xE08A20 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_1 0xE08A24 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_2 0xE08A28 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_3 0xE08A2C + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_4 0xE08A30 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_5 0xE08A34 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_6 0xE08A38 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_7 0xE08A3C + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_8 0xE08A40 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_9 0xE08A44 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_10 0xE08A48 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_11 0xE08A4C + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_12 0xE08A50 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_13 0xE08A54 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_14 0xE08A58 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_15 0xE08A5C + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_16 0xE08A60 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_17 0xE08A64 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_18 0xE08A68 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_19 0xE08A6C + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_20 0xE08A70 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_21 0xE08A74 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_22 0xE08A78 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_23 0xE08A7C + +#define 
mmTPC0_QM_ARB_MST_AVAIL_CRED_24 0xE08A80 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_25 0xE08A84 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_26 0xE08A88 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_27 0xE08A8C + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_28 0xE08A90 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_29 0xE08A94 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_30 0xE08A98 + +#define mmTPC0_QM_ARB_MST_AVAIL_CRED_31 0xE08A9C + +#define mmTPC0_QM_ARB_MST_CRED_INC 0xE08AA0 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xE08AA4 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xE08AA8 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xE08AAC + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xE08AB0 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xE08AB4 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xE08AB8 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xE08ABC + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xE08AC0 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xE08AC4 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xE08AC8 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xE08ACC + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xE08AD0 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xE08AD4 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xE08AD8 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xE08ADC + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xE08AE0 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xE08AE4 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xE08AE8 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xE08AEC + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xE08AF0 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xE08AF4 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xE08AF8 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xE08AFC + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xE08B00 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xE08B04 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xE08B08 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xE08B0C + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xE08B10 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xE08B14 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xE08B18 + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xE08B1C + +#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xE08B20 + +#define mmTPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xE08B28 + +#define mmTPC0_QM_ARB_MST_SLAVE_EN 0xE08B2C + +#define mmTPC0_QM_ARB_MST_QUIET_PER 0xE08B34 + +#define mmTPC0_QM_ARB_SLV_CHOISE_WDT 0xE08B38 + +#define mmTPC0_QM_ARB_SLV_ID 0xE08B3C + +#define mmTPC0_QM_ARB_MSG_MAX_INFLIGHT 0xE08B44 + +#define mmTPC0_QM_ARB_MSG_AWUSER_31_11 0xE08B48 + +#define mmTPC0_QM_ARB_MSG_AWUSER_SEC_PROP 0xE08B4C + +#define mmTPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xE08B50 + +#define mmTPC0_QM_ARB_BASE_LO 0xE08B54 + +#define mmTPC0_QM_ARB_BASE_HI 0xE08B58 + +#define mmTPC0_QM_ARB_STATE_STS 0xE08B80 + +#define mmTPC0_QM_ARB_CHOISE_FULLNESS_STS 0xE08B84 + +#define mmTPC0_QM_ARB_MSG_STS 0xE08B88 + +#define mmTPC0_QM_ARB_SLV_CHOISE_Q_HEAD 0xE08B8C + +#define mmTPC0_QM_ARB_ERR_CAUSE 0xE08B9C + +#define mmTPC0_QM_ARB_ERR_MSG_EN 0xE08BA0 + +#define mmTPC0_QM_ARB_ERR_STS_DRP 0xE08BA8 + +#define mmTPC0_QM_ARB_MST_CRED_STS_0 0xE08BB0 + +#define mmTPC0_QM_ARB_MST_CRED_STS_1 0xE08BB4 + +#define mmTPC0_QM_ARB_MST_CRED_STS_2 0xE08BB8 + +#define mmTPC0_QM_ARB_MST_CRED_STS_3 0xE08BBC + +#define mmTPC0_QM_ARB_MST_CRED_STS_4 0xE08BC0 + +#define mmTPC0_QM_ARB_MST_CRED_STS_5 0xE08BC4 + +#define mmTPC0_QM_ARB_MST_CRED_STS_6 0xE08BC8 + +#define mmTPC0_QM_ARB_MST_CRED_STS_7 
0xE08BCC + +#define mmTPC0_QM_ARB_MST_CRED_STS_8 0xE08BD0 + +#define mmTPC0_QM_ARB_MST_CRED_STS_9 0xE08BD4 + +#define mmTPC0_QM_ARB_MST_CRED_STS_10 0xE08BD8 + +#define mmTPC0_QM_ARB_MST_CRED_STS_11 0xE08BDC + +#define mmTPC0_QM_ARB_MST_CRED_STS_12 0xE08BE0 + +#define mmTPC0_QM_ARB_MST_CRED_STS_13 0xE08BE4 + +#define mmTPC0_QM_ARB_MST_CRED_STS_14 0xE08BE8 + +#define mmTPC0_QM_ARB_MST_CRED_STS_15 0xE08BEC + +#define mmTPC0_QM_ARB_MST_CRED_STS_16 0xE08BF0 + +#define mmTPC0_QM_ARB_MST_CRED_STS_17 0xE08BF4 + +#define mmTPC0_QM_ARB_MST_CRED_STS_18 0xE08BF8 + +#define mmTPC0_QM_ARB_MST_CRED_STS_19 0xE08BFC + +#define mmTPC0_QM_ARB_MST_CRED_STS_20 0xE08C00 + +#define mmTPC0_QM_ARB_MST_CRED_STS_21 0xE08C04 + +#define mmTPC0_QM_ARB_MST_CRED_STS_22 0xE08C08 + +#define mmTPC0_QM_ARB_MST_CRED_STS_23 0xE08C0C + +#define mmTPC0_QM_ARB_MST_CRED_STS_24 0xE08C10 + +#define mmTPC0_QM_ARB_MST_CRED_STS_25 0xE08C14 + +#define mmTPC0_QM_ARB_MST_CRED_STS_26 0xE08C18 + +#define mmTPC0_QM_ARB_MST_CRED_STS_27 0xE08C1C + +#define mmTPC0_QM_ARB_MST_CRED_STS_28 0xE08C20 + +#define mmTPC0_QM_ARB_MST_CRED_STS_29 0xE08C24 + +#define mmTPC0_QM_ARB_MST_CRED_STS_30 0xE08C28 + +#define mmTPC0_QM_ARB_MST_CRED_STS_31 0xE08C2C + +#define mmTPC0_QM_CGM_CFG 0xE08C70 + +#define mmTPC0_QM_CGM_STS 0xE08C74 + +#define mmTPC0_QM_CGM_CFG1 0xE08C78 + +#define mmTPC0_QM_LOCAL_RANGE_BASE 0xE08C80 + +#define mmTPC0_QM_LOCAL_RANGE_SIZE 0xE08C84 + +#define mmTPC0_QM_CSMR_STRICT_PRIO_CFG 0xE08C90 + +#define mmTPC0_QM_HBW_RD_RATE_LIM_CFG_1 0xE08C94 + +#define mmTPC0_QM_LBW_WR_RATE_LIM_CFG_0 0xE08C98 + +#define mmTPC0_QM_LBW_WR_RATE_LIM_CFG_1 0xE08C9C + +#define mmTPC0_QM_HBW_RD_RATE_LIM_CFG_0 0xE08CA0 + +#define mmTPC0_QM_GLBL_AXCACHE 0xE08CA4 + +#define mmTPC0_QM_IND_GW_APB_CFG 0xE08CB0 + +#define mmTPC0_QM_IND_GW_APB_WDATA 0xE08CB4 + +#define mmTPC0_QM_IND_GW_APB_RDATA 0xE08CB8 + +#define mmTPC0_QM_IND_GW_APB_STATUS 0xE08CBC + +#define mmTPC0_QM_GLBL_ERR_ADDR_LO 0xE08CD0 + +#define mmTPC0_QM_GLBL_ERR_ADDR_HI 0xE08CD4 + +#define mmTPC0_QM_GLBL_ERR_WDATA 0xE08CD8 + +#define mmTPC0_QM_GLBL_MEM_INIT_BUSY 0xE08D00 + +#endif /* ASIC_REG_TPC0_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h new file mode 100644 index 000000000..6736c476d --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h @@ -0,0 +1,1226 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC1_CFG_REGS_H_ +#define ASIC_REG_TPC1_CFG_REGS_H_ + +/* + ***************************************** + * TPC1_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE46400 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE46404 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE46408 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE4640C + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE46410 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE46414 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE46418 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE4641C + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE46420 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE46424 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE46428 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE4642C + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE46430 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE46434 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE46438 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE4643C + +#define mmTPC1_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE46440 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE46444 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE46448 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE4644C + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE46450 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE46454 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE46458 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE4645C + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE46460 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE46464 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE46468 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE4646C + +#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE46470 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE46474 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE46478 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE4647C + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE46480 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE46484 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE46488 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE4648C + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE46490 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE46494 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE46498 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE4649C + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE464A0 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE464A4 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE464A8 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE464AC + +#define mmTPC1_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE464B0 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE464B4 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE464B8 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE464BC + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE464C0 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE464C4 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE464C8 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE464CC + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE464D0 + +#define 
mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE464D4 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE464D8 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE464DC + +#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE464E0 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE464E4 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE464E8 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE464EC + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE464F0 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE464F4 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE464F8 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE464FC + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE46500 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE46504 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE46508 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE4650C + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE46510 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE46514 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE46518 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE4651C + +#define mmTPC1_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE46520 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE46524 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE46528 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE4652C + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE46530 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE46534 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE46538 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE4653C + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE46540 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE46544 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE46548 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE4654C + +#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE46550 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE46554 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE46558 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE4655C + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE46560 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE46564 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE46568 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE4656C + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE46570 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE46574 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE46578 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE4657C + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE46580 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE46584 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE46588 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE4658C + +#define mmTPC1_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE46590 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE46594 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE46598 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE4659C + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE465A0 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE465A4 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE465A8 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE465AC + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE465B0 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE465B4 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE465B8 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE465BC + +#define 
mmTPC1_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xE465C0 + +#define mmTPC1_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xE465C4 + +#define mmTPC1_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xE465C8 + +#define mmTPC1_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xE465CC + +#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xE465D0 + +#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xE465D4 + +#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xE465D8 + +#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xE465DC + +#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xE465E0 + +#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xE465E4 + +#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xE465E8 + +#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xE465EC + +#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xE465F0 + +#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xE465F4 + +#define mmTPC1_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xE465F8 + +#define mmTPC1_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xE465FC + +#define mmTPC1_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xE46600 + +#define mmTPC1_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xE46604 + +#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xE46608 + +#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xE4660C + +#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xE46610 + +#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xE46614 + +#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xE46618 + +#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xE4661C + +#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xE46620 + +#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xE46624 + +#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xE46628 + +#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xE4662C + +#define mmTPC1_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xE46630 + +#define mmTPC1_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xE46634 + +#define mmTPC1_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xE46638 + +#define mmTPC1_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xE4663C + +#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xE46640 + +#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xE46644 + +#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xE46648 + +#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xE4664C + +#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xE46650 + +#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xE46654 + +#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xE46658 + +#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xE4665C + +#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xE46660 + +#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xE46664 + +#define mmTPC1_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xE46668 + +#define mmTPC1_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xE4666C + +#define mmTPC1_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xE46670 + +#define mmTPC1_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xE46674 + +#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xE46678 + +#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xE4667C + +#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xE46680 + +#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xE46684 + +#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xE46688 + +#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xE4668C + +#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xE46690 + +#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xE46694 + +#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xE46698 + +#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xE4669C + +#define mmTPC1_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xE466A0 + +#define mmTPC1_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xE466A4 + +#define mmTPC1_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xE466A8 + +#define 
mmTPC1_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xE466AC + +#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xE466B0 + +#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xE466B4 + +#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xE466B8 + +#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xE466BC + +#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xE466C0 + +#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xE466C4 + +#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xE466C8 + +#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xE466CC + +#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xE466D0 + +#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xE466D4 + +#define mmTPC1_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xE466D8 + +#define mmTPC1_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xE466DC + +#define mmTPC1_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xE466E0 + +#define mmTPC1_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xE466E4 + +#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xE466E8 + +#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xE466EC + +#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xE466F0 + +#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xE466F4 + +#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xE466F8 + +#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xE466FC + +#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xE46700 + +#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xE46704 + +#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xE46708 + +#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xE4670C + +#define mmTPC1_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xE46710 + +#define mmTPC1_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xE46714 + +#define mmTPC1_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xE46718 + +#define mmTPC1_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xE4671C + +#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xE46720 + +#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xE46724 + +#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xE46728 + +#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xE4672C + +#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xE46730 + +#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xE46734 + +#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xE46738 + +#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xE4673C + +#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xE46740 + +#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xE46744 + +#define mmTPC1_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xE46748 + +#define mmTPC1_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xE4674C + +#define mmTPC1_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xE46750 + +#define mmTPC1_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xE46754 + +#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xE46758 + +#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xE4675C + +#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xE46760 + +#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xE46764 + +#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xE46768 + +#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xE4676C + +#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xE46770 + +#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xE46774 + +#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xE46778 + +#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xE4677C + +#define mmTPC1_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE46780 + +#define mmTPC1_CFG_KERNEL_SYNC_OBJECT_ADDR 0xE46784 + +#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE46788 + +#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE4678C + +#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_0 0xE46790 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_0 0xE46794 + +#define 
mmTPC1_CFG_KERNEL_TID_BASE_DIM_1 0xE46798 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_1 0xE4679C + +#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_2 0xE467A0 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_2 0xE467A4 + +#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_3 0xE467A8 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_3 0xE467AC + +#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_4 0xE467B0 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_4 0xE467B4 + +#define mmTPC1_CFG_KERNEL_KERNEL_CONFIG 0xE467B8 + +#define mmTPC1_CFG_KERNEL_KERNEL_ID 0xE467BC + +#define mmTPC1_CFG_KERNEL_SRF_0 0xE467C0 + +#define mmTPC1_CFG_KERNEL_SRF_1 0xE467C4 + +#define mmTPC1_CFG_KERNEL_SRF_2 0xE467C8 + +#define mmTPC1_CFG_KERNEL_SRF_3 0xE467CC + +#define mmTPC1_CFG_KERNEL_SRF_4 0xE467D0 + +#define mmTPC1_CFG_KERNEL_SRF_5 0xE467D4 + +#define mmTPC1_CFG_KERNEL_SRF_6 0xE467D8 + +#define mmTPC1_CFG_KERNEL_SRF_7 0xE467DC + +#define mmTPC1_CFG_KERNEL_SRF_8 0xE467E0 + +#define mmTPC1_CFG_KERNEL_SRF_9 0xE467E4 + +#define mmTPC1_CFG_KERNEL_SRF_10 0xE467E8 + +#define mmTPC1_CFG_KERNEL_SRF_11 0xE467EC + +#define mmTPC1_CFG_KERNEL_SRF_12 0xE467F0 + +#define mmTPC1_CFG_KERNEL_SRF_13 0xE467F4 + +#define mmTPC1_CFG_KERNEL_SRF_14 0xE467F8 + +#define mmTPC1_CFG_KERNEL_SRF_15 0xE467FC + +#define mmTPC1_CFG_KERNEL_SRF_16 0xE46800 + +#define mmTPC1_CFG_KERNEL_SRF_17 0xE46804 + +#define mmTPC1_CFG_KERNEL_SRF_18 0xE46808 + +#define mmTPC1_CFG_KERNEL_SRF_19 0xE4680C + +#define mmTPC1_CFG_KERNEL_SRF_20 0xE46810 + +#define mmTPC1_CFG_KERNEL_SRF_21 0xE46814 + +#define mmTPC1_CFG_KERNEL_SRF_22 0xE46818 + +#define mmTPC1_CFG_KERNEL_SRF_23 0xE4681C + +#define mmTPC1_CFG_KERNEL_SRF_24 0xE46820 + +#define mmTPC1_CFG_KERNEL_SRF_25 0xE46824 + +#define mmTPC1_CFG_KERNEL_SRF_26 0xE46828 + +#define mmTPC1_CFG_KERNEL_SRF_27 0xE4682C + +#define mmTPC1_CFG_KERNEL_SRF_28 0xE46830 + +#define mmTPC1_CFG_KERNEL_SRF_29 0xE46834 + +#define mmTPC1_CFG_KERNEL_SRF_30 0xE46838 + +#define mmTPC1_CFG_KERNEL_SRF_31 0xE4683C + +#define mmTPC1_CFG_ROUND_CSR 0xE468FC + +#define mmTPC1_CFG_PROT 0xE46900 + +#define mmTPC1_CFG_SEMAPHORE 0xE46908 + +#define mmTPC1_CFG_VFLAGS 0xE4690C + +#define mmTPC1_CFG_SFLAGS 0xE46910 + +#define mmTPC1_CFG_LFSR_POLYNOM 0xE46918 + +#define mmTPC1_CFG_STATUS 0xE4691C + +#define mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH 0xE46920 + +#define mmTPC1_CFG_CFG_SUBTRACT_VALUE 0xE46924 + +#define mmTPC1_CFG_SM_BASE_ADDRESS_HIGH 0xE4692C + +#define mmTPC1_CFG_TPC_CMD 0xE46930 + +#define mmTPC1_CFG_TPC_EXECUTE 0xE46938 + +#define mmTPC1_CFG_TPC_STALL 0xE4693C + +#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_LOW 0xE46940 + +#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE46944 + +#define mmTPC1_CFG_RD_RATE_LIMIT 0xE46948 + +#define mmTPC1_CFG_WR_RATE_LIMIT 0xE46950 + +#define mmTPC1_CFG_MSS_CONFIG 0xE46954 + +#define mmTPC1_CFG_TPC_INTR_CAUSE 0xE46958 + +#define mmTPC1_CFG_TPC_INTR_MASK 0xE4695C + +#define mmTPC1_CFG_WQ_CREDITS 0xE46960 + +#define mmTPC1_CFG_ARUSER_LO 0xE46964 + +#define mmTPC1_CFG_ARUSER_HI 0xE46968 + +#define mmTPC1_CFG_AWUSER_LO 0xE4696C + +#define mmTPC1_CFG_AWUSER_HI 0xE46970 + +#define mmTPC1_CFG_OPCODE_EXEC 0xE46974 + +#define mmTPC1_CFG_LUT_FUNC32_BASE_ADDR_LO 0xE46978 + +#define mmTPC1_CFG_LUT_FUNC32_BASE_ADDR_HI 0xE4697C + +#define mmTPC1_CFG_LUT_FUNC64_BASE_ADDR_LO 0xE46980 + +#define mmTPC1_CFG_LUT_FUNC64_BASE_ADDR_HI 0xE46984 + +#define mmTPC1_CFG_LUT_FUNC128_BASE_ADDR_LO 0xE46988 + +#define mmTPC1_CFG_LUT_FUNC128_BASE_ADDR_HI 0xE4698C + +#define mmTPC1_CFG_LUT_FUNC256_BASE_ADDR_LO 0xE46990 + +#define mmTPC1_CFG_LUT_FUNC256_BASE_ADDR_HI 0xE46994 + +#define 
mmTPC1_CFG_TSB_CFG_MAX_SIZE 0xE46998 + +#define mmTPC1_CFG_TSB_CFG 0xE4699C + +#define mmTPC1_CFG_DBGMEM_ADD 0xE469A0 + +#define mmTPC1_CFG_DBGMEM_DATA_WR 0xE469A4 + +#define mmTPC1_CFG_DBGMEM_DATA_RD 0xE469A8 + +#define mmTPC1_CFG_DBGMEM_CTRL 0xE469AC + +#define mmTPC1_CFG_DBGMEM_RC 0xE469B0 + +#define mmTPC1_CFG_TSB_INFLIGHT_CNTR 0xE469B4 + +#define mmTPC1_CFG_WQ_INFLIGHT_CNTR 0xE469B8 + +#define mmTPC1_CFG_WQ_LBW_TOTAL_CNTR 0xE469BC + +#define mmTPC1_CFG_WQ_HBW_TOTAL_CNTR 0xE469C0 + +#define mmTPC1_CFG_IRQ_OCCOUPY_CNTR 0xE469C4 + +#define mmTPC1_CFG_FUNC_MBIST_CNTRL 0xE469D0 + +#define mmTPC1_CFG_FUNC_MBIST_PAT 0xE469D4 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_0 0xE469D8 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_1 0xE469DC + +#define mmTPC1_CFG_FUNC_MBIST_MEM_2 0xE469E0 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_3 0xE469E4 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_4 0xE469E8 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_5 0xE469EC + +#define mmTPC1_CFG_FUNC_MBIST_MEM_6 0xE469F0 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_7 0xE469F4 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_8 0xE469F8 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_9 0xE469FC + +#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE46A00 + +#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE46A04 + +#define mmTPC1_CFG_QM_TENSOR_0_PADDING_VALUE 0xE46A08 + +#define mmTPC1_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE46A0C + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE46A10 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE46A14 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE46A18 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE46A1C + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE46A20 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE46A24 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE46A28 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE46A2C + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE46A30 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE46A34 + +#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE46A38 + +#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE46A3C + +#define mmTPC1_CFG_QM_TENSOR_1_PADDING_VALUE 0xE46A40 + +#define mmTPC1_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE46A44 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE46A48 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE46A4C + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE46A50 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE46A54 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE46A58 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE46A5C + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE46A60 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE46A64 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE46A68 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE46A6C + +#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE46A70 + +#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE46A74 + +#define mmTPC1_CFG_QM_TENSOR_2_PADDING_VALUE 0xE46A78 + +#define mmTPC1_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE46A7C + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE46A80 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE46A84 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE46A88 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE46A8C + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE46A90 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE46A94 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE46A98 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE46A9C + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE46AA0 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE46AA4 + +#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE46AA8 + +#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 
0xE46AAC + +#define mmTPC1_CFG_QM_TENSOR_3_PADDING_VALUE 0xE46AB0 + +#define mmTPC1_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE46AB4 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE46AB8 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE46ABC + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE46AC0 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE46AC4 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE46AC8 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE46ACC + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE46AD0 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE46AD4 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE46AD8 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE46ADC + +#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE46AE0 + +#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE46AE4 + +#define mmTPC1_CFG_QM_TENSOR_4_PADDING_VALUE 0xE46AE8 + +#define mmTPC1_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE46AEC + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE46AF0 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE46AF4 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE46AF8 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE46AFC + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE46B00 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE46B04 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE46B08 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE46B0C + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE46B10 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE46B14 + +#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE46B18 + +#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE46B1C + +#define mmTPC1_CFG_QM_TENSOR_5_PADDING_VALUE 0xE46B20 + +#define mmTPC1_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE46B24 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE46B28 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE46B2C + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE46B30 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE46B34 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE46B38 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE46B3C + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE46B40 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE46B44 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE46B48 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE46B4C + +#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE46B50 + +#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE46B54 + +#define mmTPC1_CFG_QM_TENSOR_6_PADDING_VALUE 0xE46B58 + +#define mmTPC1_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE46B5C + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE46B60 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE46B64 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE46B68 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE46B6C + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE46B70 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE46B74 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE46B78 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE46B7C + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE46B80 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE46B84 + +#define mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE46B88 + +#define mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE46B8C + +#define mmTPC1_CFG_QM_TENSOR_7_PADDING_VALUE 0xE46B90 + +#define mmTPC1_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE46B94 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE46B98 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE46B9C + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE46BA0 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE46BA4 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE46BA8 + +#define 
mmTPC1_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE46BAC + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE46BB0 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE46BB4 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE46BB8 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE46BBC + +#define mmTPC1_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xE46BC0 + +#define mmTPC1_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xE46BC4 + +#define mmTPC1_CFG_QM_TENSOR_8_PADDING_VALUE 0xE46BC8 + +#define mmTPC1_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xE46BCC + +#define mmTPC1_CFG_QM_TENSOR_8_DIM_0_SIZE 0xE46BD0 + +#define mmTPC1_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xE46BD4 + +#define mmTPC1_CFG_QM_TENSOR_8_DIM_1_SIZE 0xE46BD8 + +#define mmTPC1_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xE46BDC + +#define mmTPC1_CFG_QM_TENSOR_8_DIM_2_SIZE 0xE46BE0 + +#define mmTPC1_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xE46BE4 + +#define mmTPC1_CFG_QM_TENSOR_8_DIM_3_SIZE 0xE46BE8 + +#define mmTPC1_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xE46BEC + +#define mmTPC1_CFG_QM_TENSOR_8_DIM_4_SIZE 0xE46BF0 + +#define mmTPC1_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xE46BF4 + +#define mmTPC1_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xE46BF8 + +#define mmTPC1_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xE46BFC + +#define mmTPC1_CFG_QM_TENSOR_9_PADDING_VALUE 0xE46C00 + +#define mmTPC1_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xE46C04 + +#define mmTPC1_CFG_QM_TENSOR_9_DIM_0_SIZE 0xE46C08 + +#define mmTPC1_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xE46C0C + +#define mmTPC1_CFG_QM_TENSOR_9_DIM_1_SIZE 0xE46C10 + +#define mmTPC1_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xE46C14 + +#define mmTPC1_CFG_QM_TENSOR_9_DIM_2_SIZE 0xE46C18 + +#define mmTPC1_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xE46C1C + +#define mmTPC1_CFG_QM_TENSOR_9_DIM_3_SIZE 0xE46C20 + +#define mmTPC1_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xE46C24 + +#define mmTPC1_CFG_QM_TENSOR_9_DIM_4_SIZE 0xE46C28 + +#define mmTPC1_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xE46C2C + +#define mmTPC1_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xE46C30 + +#define mmTPC1_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xE46C34 + +#define mmTPC1_CFG_QM_TENSOR_10_PADDING_VALUE 0xE46C38 + +#define mmTPC1_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xE46C3C + +#define mmTPC1_CFG_QM_TENSOR_10_DIM_0_SIZE 0xE46C40 + +#define mmTPC1_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xE46C44 + +#define mmTPC1_CFG_QM_TENSOR_10_DIM_1_SIZE 0xE46C48 + +#define mmTPC1_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xE46C4C + +#define mmTPC1_CFG_QM_TENSOR_10_DIM_2_SIZE 0xE46C50 + +#define mmTPC1_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xE46C54 + +#define mmTPC1_CFG_QM_TENSOR_10_DIM_3_SIZE 0xE46C58 + +#define mmTPC1_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xE46C5C + +#define mmTPC1_CFG_QM_TENSOR_10_DIM_4_SIZE 0xE46C60 + +#define mmTPC1_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xE46C64 + +#define mmTPC1_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xE46C68 + +#define mmTPC1_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xE46C6C + +#define mmTPC1_CFG_QM_TENSOR_11_PADDING_VALUE 0xE46C70 + +#define mmTPC1_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xE46C74 + +#define mmTPC1_CFG_QM_TENSOR_11_DIM_0_SIZE 0xE46C78 + +#define mmTPC1_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xE46C7C + +#define mmTPC1_CFG_QM_TENSOR_11_DIM_1_SIZE 0xE46C80 + +#define mmTPC1_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xE46C84 + +#define mmTPC1_CFG_QM_TENSOR_11_DIM_2_SIZE 0xE46C88 + +#define mmTPC1_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xE46C8C + +#define mmTPC1_CFG_QM_TENSOR_11_DIM_3_SIZE 0xE46C90 + +#define mmTPC1_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xE46C94 + +#define mmTPC1_CFG_QM_TENSOR_11_DIM_4_SIZE 0xE46C98 + +#define mmTPC1_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xE46C9C + +#define mmTPC1_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xE46CA0 + +#define mmTPC1_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xE46CA4 + +#define 
mmTPC1_CFG_QM_TENSOR_12_PADDING_VALUE 0xE46CA8 + +#define mmTPC1_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xE46CAC + +#define mmTPC1_CFG_QM_TENSOR_12_DIM_0_SIZE 0xE46CB0 + +#define mmTPC1_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xE46CB4 + +#define mmTPC1_CFG_QM_TENSOR_12_DIM_1_SIZE 0xE46CB8 + +#define mmTPC1_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xE46CBC + +#define mmTPC1_CFG_QM_TENSOR_12_DIM_2_SIZE 0xE46CC0 + +#define mmTPC1_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xE46CC4 + +#define mmTPC1_CFG_QM_TENSOR_12_DIM_3_SIZE 0xE46CC8 + +#define mmTPC1_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xE46CCC + +#define mmTPC1_CFG_QM_TENSOR_12_DIM_4_SIZE 0xE46CD0 + +#define mmTPC1_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xE46CD4 + +#define mmTPC1_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xE46CD8 + +#define mmTPC1_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xE46CDC + +#define mmTPC1_CFG_QM_TENSOR_13_PADDING_VALUE 0xE46CE0 + +#define mmTPC1_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xE46CE4 + +#define mmTPC1_CFG_QM_TENSOR_13_DIM_0_SIZE 0xE46CE8 + +#define mmTPC1_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xE46CEC + +#define mmTPC1_CFG_QM_TENSOR_13_DIM_1_SIZE 0xE46CF0 + +#define mmTPC1_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xE46CF4 + +#define mmTPC1_CFG_QM_TENSOR_13_DIM_2_SIZE 0xE46CF8 + +#define mmTPC1_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xE46CFC + +#define mmTPC1_CFG_QM_TENSOR_13_DIM_3_SIZE 0xE46D00 + +#define mmTPC1_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xE46D04 + +#define mmTPC1_CFG_QM_TENSOR_13_DIM_4_SIZE 0xE46D08 + +#define mmTPC1_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xE46D0C + +#define mmTPC1_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xE46D10 + +#define mmTPC1_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xE46D14 + +#define mmTPC1_CFG_QM_TENSOR_14_PADDING_VALUE 0xE46D18 + +#define mmTPC1_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xE46D1C + +#define mmTPC1_CFG_QM_TENSOR_14_DIM_0_SIZE 0xE46D20 + +#define mmTPC1_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xE46D24 + +#define mmTPC1_CFG_QM_TENSOR_14_DIM_1_SIZE 0xE46D28 + +#define mmTPC1_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xE46D2C + +#define mmTPC1_CFG_QM_TENSOR_14_DIM_2_SIZE 0xE46D30 + +#define mmTPC1_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xE46D34 + +#define mmTPC1_CFG_QM_TENSOR_14_DIM_3_SIZE 0xE46D38 + +#define mmTPC1_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xE46D3C + +#define mmTPC1_CFG_QM_TENSOR_14_DIM_4_SIZE 0xE46D40 + +#define mmTPC1_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xE46D44 + +#define mmTPC1_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xE46D48 + +#define mmTPC1_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xE46D4C + +#define mmTPC1_CFG_QM_TENSOR_15_PADDING_VALUE 0xE46D50 + +#define mmTPC1_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xE46D54 + +#define mmTPC1_CFG_QM_TENSOR_15_DIM_0_SIZE 0xE46D58 + +#define mmTPC1_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xE46D5C + +#define mmTPC1_CFG_QM_TENSOR_15_DIM_1_SIZE 0xE46D60 + +#define mmTPC1_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xE46D64 + +#define mmTPC1_CFG_QM_TENSOR_15_DIM_2_SIZE 0xE46D68 + +#define mmTPC1_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xE46D6C + +#define mmTPC1_CFG_QM_TENSOR_15_DIM_3_SIZE 0xE46D70 + +#define mmTPC1_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xE46D74 + +#define mmTPC1_CFG_QM_TENSOR_15_DIM_4_SIZE 0xE46D78 + +#define mmTPC1_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xE46D7C + +#define mmTPC1_CFG_QM_SYNC_OBJECT_MESSAGE 0xE46D80 + +#define mmTPC1_CFG_QM_SYNC_OBJECT_ADDR 0xE46D84 + +#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE46D88 + +#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE46D8C + +#define mmTPC1_CFG_QM_TID_BASE_DIM_0 0xE46D90 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_0 0xE46D94 + +#define mmTPC1_CFG_QM_TID_BASE_DIM_1 0xE46D98 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_1 0xE46D9C + +#define mmTPC1_CFG_QM_TID_BASE_DIM_2 0xE46DA0 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_2 
0xE46DA4 + +#define mmTPC1_CFG_QM_TID_BASE_DIM_3 0xE46DA8 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_3 0xE46DAC + +#define mmTPC1_CFG_QM_TID_BASE_DIM_4 0xE46DB0 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_4 0xE46DB4 + +#define mmTPC1_CFG_QM_KERNEL_CONFIG 0xE46DB8 + +#define mmTPC1_CFG_QM_KERNEL_ID 0xE46DBC + +#define mmTPC1_CFG_QM_SRF_0 0xE46DC0 + +#define mmTPC1_CFG_QM_SRF_1 0xE46DC4 + +#define mmTPC1_CFG_QM_SRF_2 0xE46DC8 + +#define mmTPC1_CFG_QM_SRF_3 0xE46DCC + +#define mmTPC1_CFG_QM_SRF_4 0xE46DD0 + +#define mmTPC1_CFG_QM_SRF_5 0xE46DD4 + +#define mmTPC1_CFG_QM_SRF_6 0xE46DD8 + +#define mmTPC1_CFG_QM_SRF_7 0xE46DDC + +#define mmTPC1_CFG_QM_SRF_8 0xE46DE0 + +#define mmTPC1_CFG_QM_SRF_9 0xE46DE4 + +#define mmTPC1_CFG_QM_SRF_10 0xE46DE8 + +#define mmTPC1_CFG_QM_SRF_11 0xE46DEC + +#define mmTPC1_CFG_QM_SRF_12 0xE46DF0 + +#define mmTPC1_CFG_QM_SRF_13 0xE46DF4 + +#define mmTPC1_CFG_QM_SRF_14 0xE46DF8 + +#define mmTPC1_CFG_QM_SRF_15 0xE46DFC + +#define mmTPC1_CFG_QM_SRF_16 0xE46E00 + +#define mmTPC1_CFG_QM_SRF_17 0xE46E04 + +#define mmTPC1_CFG_QM_SRF_18 0xE46E08 + +#define mmTPC1_CFG_QM_SRF_19 0xE46E0C + +#define mmTPC1_CFG_QM_SRF_20 0xE46E10 + +#define mmTPC1_CFG_QM_SRF_21 0xE46E14 + +#define mmTPC1_CFG_QM_SRF_22 0xE46E18 + +#define mmTPC1_CFG_QM_SRF_23 0xE46E1C + +#define mmTPC1_CFG_QM_SRF_24 0xE46E20 + +#define mmTPC1_CFG_QM_SRF_25 0xE46E24 + +#define mmTPC1_CFG_QM_SRF_26 0xE46E28 + +#define mmTPC1_CFG_QM_SRF_27 0xE46E2C + +#define mmTPC1_CFG_QM_SRF_28 0xE46E30 + +#define mmTPC1_CFG_QM_SRF_29 0xE46E34 + +#define mmTPC1_CFG_QM_SRF_30 0xE46E38 + +#define mmTPC1_CFG_QM_SRF_31 0xE46E3C + +#endif /* ASIC_REG_TPC1_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h new file mode 100644 index 000000000..af10ef7a8 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC1_QM_REGS_H_ +#define ASIC_REG_TPC1_QM_REGS_H_ + +/* + ***************************************** + * TPC1_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC1_QM_GLBL_CFG0 0xE48000 + +#define mmTPC1_QM_GLBL_CFG1 0xE48004 + +#define mmTPC1_QM_GLBL_PROT 0xE48008 + +#define mmTPC1_QM_GLBL_ERR_CFG 0xE4800C + +#define mmTPC1_QM_GLBL_SECURE_PROPS_0 0xE48010 + +#define mmTPC1_QM_GLBL_SECURE_PROPS_1 0xE48014 + +#define mmTPC1_QM_GLBL_SECURE_PROPS_2 0xE48018 + +#define mmTPC1_QM_GLBL_SECURE_PROPS_3 0xE4801C + +#define mmTPC1_QM_GLBL_SECURE_PROPS_4 0xE48020 + +#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_0 0xE48024 + +#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_1 0xE48028 + +#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_2 0xE4802C + +#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_3 0xE48030 + +#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_4 0xE48034 + +#define mmTPC1_QM_GLBL_STS0 0xE48038 + +#define mmTPC1_QM_GLBL_STS1_0 0xE48040 + +#define mmTPC1_QM_GLBL_STS1_1 0xE48044 + +#define mmTPC1_QM_GLBL_STS1_2 0xE48048 + +#define mmTPC1_QM_GLBL_STS1_3 0xE4804C + +#define mmTPC1_QM_GLBL_STS1_4 0xE48050 + +#define mmTPC1_QM_GLBL_MSG_EN_0 0xE48054 + +#define mmTPC1_QM_GLBL_MSG_EN_1 0xE48058 + +#define mmTPC1_QM_GLBL_MSG_EN_2 0xE4805C + +#define mmTPC1_QM_GLBL_MSG_EN_3 0xE48060 + +#define mmTPC1_QM_GLBL_MSG_EN_4 0xE48068 + +#define mmTPC1_QM_PQ_BASE_LO_0 0xE48070 + +#define mmTPC1_QM_PQ_BASE_LO_1 0xE48074 + +#define mmTPC1_QM_PQ_BASE_LO_2 0xE48078 + +#define mmTPC1_QM_PQ_BASE_LO_3 0xE4807C + +#define mmTPC1_QM_PQ_BASE_HI_0 0xE48080 + +#define mmTPC1_QM_PQ_BASE_HI_1 0xE48084 + +#define mmTPC1_QM_PQ_BASE_HI_2 0xE48088 + +#define mmTPC1_QM_PQ_BASE_HI_3 0xE4808C + +#define mmTPC1_QM_PQ_SIZE_0 0xE48090 + +#define mmTPC1_QM_PQ_SIZE_1 0xE48094 + +#define mmTPC1_QM_PQ_SIZE_2 0xE48098 + +#define mmTPC1_QM_PQ_SIZE_3 0xE4809C + +#define mmTPC1_QM_PQ_PI_0 0xE480A0 + +#define mmTPC1_QM_PQ_PI_1 0xE480A4 + +#define mmTPC1_QM_PQ_PI_2 0xE480A8 + +#define mmTPC1_QM_PQ_PI_3 0xE480AC + +#define mmTPC1_QM_PQ_CI_0 0xE480B0 + +#define mmTPC1_QM_PQ_CI_1 0xE480B4 + +#define mmTPC1_QM_PQ_CI_2 0xE480B8 + +#define mmTPC1_QM_PQ_CI_3 0xE480BC + +#define mmTPC1_QM_PQ_CFG0_0 0xE480C0 + +#define mmTPC1_QM_PQ_CFG0_1 0xE480C4 + +#define mmTPC1_QM_PQ_CFG0_2 0xE480C8 + +#define mmTPC1_QM_PQ_CFG0_3 0xE480CC + +#define mmTPC1_QM_PQ_CFG1_0 0xE480D0 + +#define mmTPC1_QM_PQ_CFG1_1 0xE480D4 + +#define mmTPC1_QM_PQ_CFG1_2 0xE480D8 + +#define mmTPC1_QM_PQ_CFG1_3 0xE480DC + +#define mmTPC1_QM_PQ_ARUSER_31_11_0 0xE480E0 + +#define mmTPC1_QM_PQ_ARUSER_31_11_1 0xE480E4 + +#define mmTPC1_QM_PQ_ARUSER_31_11_2 0xE480E8 + +#define mmTPC1_QM_PQ_ARUSER_31_11_3 0xE480EC + +#define mmTPC1_QM_PQ_STS0_0 0xE480F0 + +#define mmTPC1_QM_PQ_STS0_1 0xE480F4 + +#define mmTPC1_QM_PQ_STS0_2 0xE480F8 + +#define mmTPC1_QM_PQ_STS0_3 0xE480FC + +#define mmTPC1_QM_PQ_STS1_0 0xE48100 + +#define mmTPC1_QM_PQ_STS1_1 0xE48104 + +#define mmTPC1_QM_PQ_STS1_2 0xE48108 + +#define mmTPC1_QM_PQ_STS1_3 0xE4810C + +#define mmTPC1_QM_CQ_CFG0_0 0xE48110 + +#define mmTPC1_QM_CQ_CFG0_1 0xE48114 + +#define mmTPC1_QM_CQ_CFG0_2 0xE48118 + +#define mmTPC1_QM_CQ_CFG0_3 0xE4811C + +#define mmTPC1_QM_CQ_CFG0_4 0xE48120 + +#define mmTPC1_QM_CQ_CFG1_0 0xE48124 + +#define mmTPC1_QM_CQ_CFG1_1 0xE48128 + +#define mmTPC1_QM_CQ_CFG1_2 0xE4812C + +#define mmTPC1_QM_CQ_CFG1_3 0xE48130 + +#define mmTPC1_QM_CQ_CFG1_4 0xE48134 + 
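The QMAN registers above are laid out as small arrays: each per-queue register (PQ_BASE_LO/HI, PQ_SIZE, PQ_PI, PQ_CI, PQ_CFG0/1, ...) repeats once per hardware queue at a 4-byte stride, as the consecutive addresses show (e.g. mmTPC1_QM_PQ_PI_0..3 at 0xE480A0, 0xE480A4, 0xE480A8, 0xE480AC). A minimal sketch of indexing such an array generically, assuming a hypothetical mmio_read32() accessor that is not part of these headers (a real driver would use its own MMIO helpers):

	#include <stdint.h>

	#define mmTPC1_QM_PQ_PI_0	0xE480A0
	#define mmTPC1_QM_PQ_CI_0	0xE480B0
	#define QM_PQ_REG_STRIDE	4	/* one 32-bit register per queue */

	/* Hypothetical MMIO accessor, for illustration only. */
	extern uint32_t mmio_read32(uint32_t offset);

	/* Producer index of physical queue q (0..3). */
	static inline uint32_t tpc1_qm_pq_pi(unsigned int q)
	{
		return mmio_read32(mmTPC1_QM_PQ_PI_0 + q * QM_PQ_REG_STRIDE);
	}

	/* Consumer index of physical queue q (0..3). */
	static inline uint32_t tpc1_qm_pq_ci(unsigned int q)
	{
		return mmio_read32(mmTPC1_QM_PQ_CI_0 + q * QM_PQ_REG_STRIDE);
	}

For example, tpc1_qm_pq_pi(2) reads offset 0xE480A8, matching mmTPC1_QM_PQ_PI_2 in the listing above.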
+#define mmTPC1_QM_CQ_ARUSER_31_11_0 0xE48138 + +#define mmTPC1_QM_CQ_ARUSER_31_11_1 0xE4813C + +#define mmTPC1_QM_CQ_ARUSER_31_11_2 0xE48140 + +#define mmTPC1_QM_CQ_ARUSER_31_11_3 0xE48144 + +#define mmTPC1_QM_CQ_ARUSER_31_11_4 0xE48148 + +#define mmTPC1_QM_CQ_STS0_0 0xE4814C + +#define mmTPC1_QM_CQ_STS0_1 0xE48150 + +#define mmTPC1_QM_CQ_STS0_2 0xE48154 + +#define mmTPC1_QM_CQ_STS0_3 0xE48158 + +#define mmTPC1_QM_CQ_STS0_4 0xE4815C + +#define mmTPC1_QM_CQ_STS1_0 0xE48160 + +#define mmTPC1_QM_CQ_STS1_1 0xE48164 + +#define mmTPC1_QM_CQ_STS1_2 0xE48168 + +#define mmTPC1_QM_CQ_STS1_3 0xE4816C + +#define mmTPC1_QM_CQ_STS1_4 0xE48170 + +#define mmTPC1_QM_CQ_PTR_LO_0 0xE48174 + +#define mmTPC1_QM_CQ_PTR_HI_0 0xE48178 + +#define mmTPC1_QM_CQ_TSIZE_0 0xE4817C + +#define mmTPC1_QM_CQ_CTL_0 0xE48180 + +#define mmTPC1_QM_CQ_PTR_LO_1 0xE48184 + +#define mmTPC1_QM_CQ_PTR_HI_1 0xE48188 + +#define mmTPC1_QM_CQ_TSIZE_1 0xE4818C + +#define mmTPC1_QM_CQ_CTL_1 0xE48190 + +#define mmTPC1_QM_CQ_PTR_LO_2 0xE48194 + +#define mmTPC1_QM_CQ_PTR_HI_2 0xE48198 + +#define mmTPC1_QM_CQ_TSIZE_2 0xE4819C + +#define mmTPC1_QM_CQ_CTL_2 0xE481A0 + +#define mmTPC1_QM_CQ_PTR_LO_3 0xE481A4 + +#define mmTPC1_QM_CQ_PTR_HI_3 0xE481A8 + +#define mmTPC1_QM_CQ_TSIZE_3 0xE481AC + +#define mmTPC1_QM_CQ_CTL_3 0xE481B0 + +#define mmTPC1_QM_CQ_PTR_LO_4 0xE481B4 + +#define mmTPC1_QM_CQ_PTR_HI_4 0xE481B8 + +#define mmTPC1_QM_CQ_TSIZE_4 0xE481BC + +#define mmTPC1_QM_CQ_CTL_4 0xE481C0 + +#define mmTPC1_QM_CQ_PTR_LO_STS_0 0xE481C4 + +#define mmTPC1_QM_CQ_PTR_LO_STS_1 0xE481C8 + +#define mmTPC1_QM_CQ_PTR_LO_STS_2 0xE481CC + +#define mmTPC1_QM_CQ_PTR_LO_STS_3 0xE481D0 + +#define mmTPC1_QM_CQ_PTR_LO_STS_4 0xE481D4 + +#define mmTPC1_QM_CQ_PTR_HI_STS_0 0xE481D8 + +#define mmTPC1_QM_CQ_PTR_HI_STS_1 0xE481DC + +#define mmTPC1_QM_CQ_PTR_HI_STS_2 0xE481E0 + +#define mmTPC1_QM_CQ_PTR_HI_STS_3 0xE481E4 + +#define mmTPC1_QM_CQ_PTR_HI_STS_4 0xE481E8 + +#define mmTPC1_QM_CQ_TSIZE_STS_0 0xE481EC + +#define mmTPC1_QM_CQ_TSIZE_STS_1 0xE481F0 + +#define mmTPC1_QM_CQ_TSIZE_STS_2 0xE481F4 + +#define mmTPC1_QM_CQ_TSIZE_STS_3 0xE481F8 + +#define mmTPC1_QM_CQ_TSIZE_STS_4 0xE481FC + +#define mmTPC1_QM_CQ_CTL_STS_0 0xE48200 + +#define mmTPC1_QM_CQ_CTL_STS_1 0xE48204 + +#define mmTPC1_QM_CQ_CTL_STS_2 0xE48208 + +#define mmTPC1_QM_CQ_CTL_STS_3 0xE4820C + +#define mmTPC1_QM_CQ_CTL_STS_4 0xE48210 + +#define mmTPC1_QM_CQ_IFIFO_CNT_0 0xE48214 + +#define mmTPC1_QM_CQ_IFIFO_CNT_1 0xE48218 + +#define mmTPC1_QM_CQ_IFIFO_CNT_2 0xE4821C + +#define mmTPC1_QM_CQ_IFIFO_CNT_3 0xE48220 + +#define mmTPC1_QM_CQ_IFIFO_CNT_4 0xE48224 + +#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_0 0xE48228 + +#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_1 0xE4822C + +#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_2 0xE48230 + +#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_3 0xE48234 + +#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_4 0xE48238 + +#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_0 0xE4823C + +#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_1 0xE48240 + +#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_2 0xE48244 + +#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_3 0xE48248 + +#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_4 0xE4824C + +#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_0 0xE48250 + +#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_1 0xE48254 + +#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_2 0xE48258 + +#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_3 0xE4825C + +#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_4 0xE48260 + +#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_0 0xE48264 + +#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_1 0xE48268 + +#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_2 0xE4826C + +#define 
mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_3 0xE48270 + +#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_4 0xE48274 + +#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_0 0xE48278 + +#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_1 0xE4827C + +#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 0xE48280 + +#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_3 0xE48284 + +#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_4 0xE48288 + +#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_0 0xE4828C + +#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_1 0xE48290 + +#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_2 0xE48294 + +#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_3 0xE48298 + +#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_4 0xE4829C + +#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_0 0xE482A0 + +#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_1 0xE482A4 + +#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_2 0xE482A8 + +#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_3 0xE482AC + +#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_4 0xE482B0 + +#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_0 0xE482B4 + +#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_1 0xE482B8 + +#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_2 0xE482BC + +#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_3 0xE482C0 + +#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_4 0xE482C4 + +#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_0 0xE482C8 + +#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_1 0xE482CC + +#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_2 0xE482D0 + +#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_3 0xE482D4 + +#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_4 0xE482D8 + +#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xE482E0 + +#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xE482E4 + +#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xE482E8 + +#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xE482EC + +#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xE482F0 + +#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xE482F4 + +#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xE482F8 + +#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xE482FC + +#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xE48300 + +#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xE48304 + +#define mmTPC1_QM_CP_FENCE0_RDATA_0 0xE48308 + +#define mmTPC1_QM_CP_FENCE0_RDATA_1 0xE4830C + +#define mmTPC1_QM_CP_FENCE0_RDATA_2 0xE48310 + +#define mmTPC1_QM_CP_FENCE0_RDATA_3 0xE48314 + +#define mmTPC1_QM_CP_FENCE0_RDATA_4 0xE48318 + +#define mmTPC1_QM_CP_FENCE1_RDATA_0 0xE4831C + +#define mmTPC1_QM_CP_FENCE1_RDATA_1 0xE48320 + +#define mmTPC1_QM_CP_FENCE1_RDATA_2 0xE48324 + +#define mmTPC1_QM_CP_FENCE1_RDATA_3 0xE48328 + +#define mmTPC1_QM_CP_FENCE1_RDATA_4 0xE4832C + +#define mmTPC1_QM_CP_FENCE2_RDATA_0 0xE48330 + +#define mmTPC1_QM_CP_FENCE2_RDATA_1 0xE48334 + +#define mmTPC1_QM_CP_FENCE2_RDATA_2 0xE48338 + +#define mmTPC1_QM_CP_FENCE2_RDATA_3 0xE4833C + +#define mmTPC1_QM_CP_FENCE2_RDATA_4 0xE48340 + +#define mmTPC1_QM_CP_FENCE3_RDATA_0 0xE48344 + +#define mmTPC1_QM_CP_FENCE3_RDATA_1 0xE48348 + +#define mmTPC1_QM_CP_FENCE3_RDATA_2 0xE4834C + +#define mmTPC1_QM_CP_FENCE3_RDATA_3 0xE48350 + +#define mmTPC1_QM_CP_FENCE3_RDATA_4 0xE48354 + +#define mmTPC1_QM_CP_FENCE0_CNT_0 0xE48358 + +#define mmTPC1_QM_CP_FENCE0_CNT_1 0xE4835C + +#define mmTPC1_QM_CP_FENCE0_CNT_2 0xE48360 + +#define mmTPC1_QM_CP_FENCE0_CNT_3 0xE48364 + +#define mmTPC1_QM_CP_FENCE0_CNT_4 0xE48368 + +#define mmTPC1_QM_CP_FENCE1_CNT_0 0xE4836C + +#define mmTPC1_QM_CP_FENCE1_CNT_1 0xE48370 + +#define mmTPC1_QM_CP_FENCE1_CNT_2 0xE48374 + +#define mmTPC1_QM_CP_FENCE1_CNT_3 0xE48378 + +#define mmTPC1_QM_CP_FENCE1_CNT_4 0xE4837C + +#define mmTPC1_QM_CP_FENCE2_CNT_0 0xE48380 + +#define mmTPC1_QM_CP_FENCE2_CNT_1 0xE48384 + +#define 
mmTPC1_QM_CP_FENCE2_CNT_2 0xE48388 + +#define mmTPC1_QM_CP_FENCE2_CNT_3 0xE4838C + +#define mmTPC1_QM_CP_FENCE2_CNT_4 0xE48390 + +#define mmTPC1_QM_CP_FENCE3_CNT_0 0xE48394 + +#define mmTPC1_QM_CP_FENCE3_CNT_1 0xE48398 + +#define mmTPC1_QM_CP_FENCE3_CNT_2 0xE4839C + +#define mmTPC1_QM_CP_FENCE3_CNT_3 0xE483A0 + +#define mmTPC1_QM_CP_FENCE3_CNT_4 0xE483A4 + +#define mmTPC1_QM_CP_STS_0 0xE483A8 + +#define mmTPC1_QM_CP_STS_1 0xE483AC + +#define mmTPC1_QM_CP_STS_2 0xE483B0 + +#define mmTPC1_QM_CP_STS_3 0xE483B4 + +#define mmTPC1_QM_CP_STS_4 0xE483B8 + +#define mmTPC1_QM_CP_CURRENT_INST_LO_0 0xE483BC + +#define mmTPC1_QM_CP_CURRENT_INST_LO_1 0xE483C0 + +#define mmTPC1_QM_CP_CURRENT_INST_LO_2 0xE483C4 + +#define mmTPC1_QM_CP_CURRENT_INST_LO_3 0xE483C8 + +#define mmTPC1_QM_CP_CURRENT_INST_LO_4 0xE483CC + +#define mmTPC1_QM_CP_CURRENT_INST_HI_0 0xE483D0 + +#define mmTPC1_QM_CP_CURRENT_INST_HI_1 0xE483D4 + +#define mmTPC1_QM_CP_CURRENT_INST_HI_2 0xE483D8 + +#define mmTPC1_QM_CP_CURRENT_INST_HI_3 0xE483DC + +#define mmTPC1_QM_CP_CURRENT_INST_HI_4 0xE483E0 + +#define mmTPC1_QM_CP_BARRIER_CFG_0 0xE483F4 + +#define mmTPC1_QM_CP_BARRIER_CFG_1 0xE483F8 + +#define mmTPC1_QM_CP_BARRIER_CFG_2 0xE483FC + +#define mmTPC1_QM_CP_BARRIER_CFG_3 0xE48400 + +#define mmTPC1_QM_CP_BARRIER_CFG_4 0xE48404 + +#define mmTPC1_QM_CP_DBG_0_0 0xE48408 + +#define mmTPC1_QM_CP_DBG_0_1 0xE4840C + +#define mmTPC1_QM_CP_DBG_0_2 0xE48410 + +#define mmTPC1_QM_CP_DBG_0_3 0xE48414 + +#define mmTPC1_QM_CP_DBG_0_4 0xE48418 + +#define mmTPC1_QM_CP_ARUSER_31_11_0 0xE4841C + +#define mmTPC1_QM_CP_ARUSER_31_11_1 0xE48420 + +#define mmTPC1_QM_CP_ARUSER_31_11_2 0xE48424 + +#define mmTPC1_QM_CP_ARUSER_31_11_3 0xE48428 + +#define mmTPC1_QM_CP_ARUSER_31_11_4 0xE4842C + +#define mmTPC1_QM_CP_AWUSER_31_11_0 0xE48430 + +#define mmTPC1_QM_CP_AWUSER_31_11_1 0xE48434 + +#define mmTPC1_QM_CP_AWUSER_31_11_2 0xE48438 + +#define mmTPC1_QM_CP_AWUSER_31_11_3 0xE4843C + +#define mmTPC1_QM_CP_AWUSER_31_11_4 0xE48440 + +#define mmTPC1_QM_ARB_CFG_0 0xE48A00 + +#define mmTPC1_QM_ARB_CHOISE_Q_PUSH 0xE48A04 + +#define mmTPC1_QM_ARB_WRR_WEIGHT_0 0xE48A08 + +#define mmTPC1_QM_ARB_WRR_WEIGHT_1 0xE48A0C + +#define mmTPC1_QM_ARB_WRR_WEIGHT_2 0xE48A10 + +#define mmTPC1_QM_ARB_WRR_WEIGHT_3 0xE48A14 + +#define mmTPC1_QM_ARB_CFG_1 0xE48A18 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_0 0xE48A20 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_1 0xE48A24 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_2 0xE48A28 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_3 0xE48A2C + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_4 0xE48A30 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_5 0xE48A34 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_6 0xE48A38 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_7 0xE48A3C + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_8 0xE48A40 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_9 0xE48A44 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_10 0xE48A48 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_11 0xE48A4C + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_12 0xE48A50 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_13 0xE48A54 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_14 0xE48A58 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_15 0xE48A5C + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_16 0xE48A60 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_17 0xE48A64 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_18 0xE48A68 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_19 0xE48A6C + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_20 0xE48A70 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_21 0xE48A74 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_22 0xE48A78 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_23 0xE48A7C + +#define 
mmTPC1_QM_ARB_MST_AVAIL_CRED_24 0xE48A80 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_25 0xE48A84 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_26 0xE48A88 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_27 0xE48A8C + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_28 0xE48A90 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_29 0xE48A94 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_30 0xE48A98 + +#define mmTPC1_QM_ARB_MST_AVAIL_CRED_31 0xE48A9C + +#define mmTPC1_QM_ARB_MST_CRED_INC 0xE48AA0 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xE48AA4 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xE48AA8 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xE48AAC + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xE48AB0 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xE48AB4 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xE48AB8 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xE48ABC + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xE48AC0 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xE48AC4 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xE48AC8 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xE48ACC + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xE48AD0 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xE48AD4 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xE48AD8 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xE48ADC + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xE48AE0 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xE48AE4 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xE48AE8 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xE48AEC + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xE48AF0 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xE48AF4 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xE48AF8 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xE48AFC + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xE48B00 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xE48B04 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xE48B08 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xE48B0C + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xE48B10 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xE48B14 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xE48B18 + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xE48B1C + +#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xE48B20 + +#define mmTPC1_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xE48B28 + +#define mmTPC1_QM_ARB_MST_SLAVE_EN 0xE48B2C + +#define mmTPC1_QM_ARB_MST_QUIET_PER 0xE48B34 + +#define mmTPC1_QM_ARB_SLV_CHOISE_WDT 0xE48B38 + +#define mmTPC1_QM_ARB_SLV_ID 0xE48B3C + +#define mmTPC1_QM_ARB_MSG_MAX_INFLIGHT 0xE48B44 + +#define mmTPC1_QM_ARB_MSG_AWUSER_31_11 0xE48B48 + +#define mmTPC1_QM_ARB_MSG_AWUSER_SEC_PROP 0xE48B4C + +#define mmTPC1_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xE48B50 + +#define mmTPC1_QM_ARB_BASE_LO 0xE48B54 + +#define mmTPC1_QM_ARB_BASE_HI 0xE48B58 + +#define mmTPC1_QM_ARB_STATE_STS 0xE48B80 + +#define mmTPC1_QM_ARB_CHOISE_FULLNESS_STS 0xE48B84 + +#define mmTPC1_QM_ARB_MSG_STS 0xE48B88 + +#define mmTPC1_QM_ARB_SLV_CHOISE_Q_HEAD 0xE48B8C + +#define mmTPC1_QM_ARB_ERR_CAUSE 0xE48B9C + +#define mmTPC1_QM_ARB_ERR_MSG_EN 0xE48BA0 + +#define mmTPC1_QM_ARB_ERR_STS_DRP 0xE48BA8 + +#define mmTPC1_QM_ARB_MST_CRED_STS_0 0xE48BB0 + +#define mmTPC1_QM_ARB_MST_CRED_STS_1 0xE48BB4 + +#define mmTPC1_QM_ARB_MST_CRED_STS_2 0xE48BB8 + +#define mmTPC1_QM_ARB_MST_CRED_STS_3 0xE48BBC + +#define mmTPC1_QM_ARB_MST_CRED_STS_4 0xE48BC0 + +#define mmTPC1_QM_ARB_MST_CRED_STS_5 0xE48BC4 + +#define mmTPC1_QM_ARB_MST_CRED_STS_6 0xE48BC8 + +#define mmTPC1_QM_ARB_MST_CRED_STS_7 
0xE48BCC + +#define mmTPC1_QM_ARB_MST_CRED_STS_8 0xE48BD0 + +#define mmTPC1_QM_ARB_MST_CRED_STS_9 0xE48BD4 + +#define mmTPC1_QM_ARB_MST_CRED_STS_10 0xE48BD8 + +#define mmTPC1_QM_ARB_MST_CRED_STS_11 0xE48BDC + +#define mmTPC1_QM_ARB_MST_CRED_STS_12 0xE48BE0 + +#define mmTPC1_QM_ARB_MST_CRED_STS_13 0xE48BE4 + +#define mmTPC1_QM_ARB_MST_CRED_STS_14 0xE48BE8 + +#define mmTPC1_QM_ARB_MST_CRED_STS_15 0xE48BEC + +#define mmTPC1_QM_ARB_MST_CRED_STS_16 0xE48BF0 + +#define mmTPC1_QM_ARB_MST_CRED_STS_17 0xE48BF4 + +#define mmTPC1_QM_ARB_MST_CRED_STS_18 0xE48BF8 + +#define mmTPC1_QM_ARB_MST_CRED_STS_19 0xE48BFC + +#define mmTPC1_QM_ARB_MST_CRED_STS_20 0xE48C00 + +#define mmTPC1_QM_ARB_MST_CRED_STS_21 0xE48C04 + +#define mmTPC1_QM_ARB_MST_CRED_STS_22 0xE48C08 + +#define mmTPC1_QM_ARB_MST_CRED_STS_23 0xE48C0C + +#define mmTPC1_QM_ARB_MST_CRED_STS_24 0xE48C10 + +#define mmTPC1_QM_ARB_MST_CRED_STS_25 0xE48C14 + +#define mmTPC1_QM_ARB_MST_CRED_STS_26 0xE48C18 + +#define mmTPC1_QM_ARB_MST_CRED_STS_27 0xE48C1C + +#define mmTPC1_QM_ARB_MST_CRED_STS_28 0xE48C20 + +#define mmTPC1_QM_ARB_MST_CRED_STS_29 0xE48C24 + +#define mmTPC1_QM_ARB_MST_CRED_STS_30 0xE48C28 + +#define mmTPC1_QM_ARB_MST_CRED_STS_31 0xE48C2C + +#define mmTPC1_QM_CGM_CFG 0xE48C70 + +#define mmTPC1_QM_CGM_STS 0xE48C74 + +#define mmTPC1_QM_CGM_CFG1 0xE48C78 + +#define mmTPC1_QM_LOCAL_RANGE_BASE 0xE48C80 + +#define mmTPC1_QM_LOCAL_RANGE_SIZE 0xE48C84 + +#define mmTPC1_QM_CSMR_STRICT_PRIO_CFG 0xE48C90 + +#define mmTPC1_QM_HBW_RD_RATE_LIM_CFG_1 0xE48C94 + +#define mmTPC1_QM_LBW_WR_RATE_LIM_CFG_0 0xE48C98 + +#define mmTPC1_QM_LBW_WR_RATE_LIM_CFG_1 0xE48C9C + +#define mmTPC1_QM_HBW_RD_RATE_LIM_CFG_0 0xE48CA0 + +#define mmTPC1_QM_GLBL_AXCACHE 0xE48CA4 + +#define mmTPC1_QM_IND_GW_APB_CFG 0xE48CB0 + +#define mmTPC1_QM_IND_GW_APB_WDATA 0xE48CB4 + +#define mmTPC1_QM_IND_GW_APB_RDATA 0xE48CB8 + +#define mmTPC1_QM_IND_GW_APB_STATUS 0xE48CBC + +#define mmTPC1_QM_GLBL_ERR_ADDR_LO 0xE48CD0 + +#define mmTPC1_QM_GLBL_ERR_ADDR_HI 0xE48CD4 + +#define mmTPC1_QM_GLBL_ERR_WDATA 0xE48CD8 + +#define mmTPC1_QM_GLBL_MEM_INIT_BUSY 0xE48D00 + +#endif /* ASIC_REG_TPC1_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h new file mode 100644 index 000000000..3e77c3795 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h @@ -0,0 +1,1226 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC2_CFG_REGS_H_ +#define ASIC_REG_TPC2_CFG_REGS_H_ + +/* + ***************************************** + * TPC2_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE86400 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE86404 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE86408 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE8640C + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE86410 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE86414 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE86418 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE8641C + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE86420 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE86424 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE86428 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE8642C + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE86430 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE86434 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE86438 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE8643C + +#define mmTPC2_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE86440 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE86444 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE86448 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE8644C + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE86450 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE86454 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE86458 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE8645C + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE86460 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE86464 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE86468 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE8646C + +#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE86470 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE86474 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE86478 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE8647C + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE86480 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE86484 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE86488 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE8648C + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE86490 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE86494 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE86498 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE8649C + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE864A0 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE864A4 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE864A8 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE864AC + +#define mmTPC2_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE864B0 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE864B4 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE864B8 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE864BC + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE864C0 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE864C4 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE864C8 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE864CC + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE864D0 + +#define 
mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE864D4 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE864D8 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE864DC + +#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE864E0 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE864E4 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE864E8 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE864EC + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE864F0 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE864F4 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE864F8 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE864FC + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE86500 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE86504 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE86508 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE8650C + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE86510 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE86514 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE86518 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE8651C + +#define mmTPC2_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE86520 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE86524 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE86528 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE8652C + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE86530 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE86534 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE86538 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE8653C + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE86540 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE86544 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE86548 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE8654C + +#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE86550 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE86554 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE86558 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE8655C + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE86560 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE86564 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE86568 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE8656C + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE86570 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE86574 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE86578 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE8657C + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE86580 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE86584 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE86588 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE8658C + +#define mmTPC2_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE86590 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE86594 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE86598 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE8659C + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE865A0 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE865A4 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE865A8 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE865AC + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE865B0 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE865B4 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE865B8 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE865BC + +#define 
mmTPC2_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xE865C0 + +#define mmTPC2_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xE865C4 + +#define mmTPC2_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xE865C8 + +#define mmTPC2_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xE865CC + +#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xE865D0 + +#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xE865D4 + +#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xE865D8 + +#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xE865DC + +#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xE865E0 + +#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xE865E4 + +#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xE865E8 + +#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xE865EC + +#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xE865F0 + +#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xE865F4 + +#define mmTPC2_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xE865F8 + +#define mmTPC2_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xE865FC + +#define mmTPC2_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xE86600 + +#define mmTPC2_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xE86604 + +#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xE86608 + +#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xE8660C + +#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xE86610 + +#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xE86614 + +#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xE86618 + +#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xE8661C + +#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xE86620 + +#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xE86624 + +#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xE86628 + +#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xE8662C + +#define mmTPC2_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xE86630 + +#define mmTPC2_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xE86634 + +#define mmTPC2_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xE86638 + +#define mmTPC2_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xE8663C + +#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xE86640 + +#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xE86644 + +#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xE86648 + +#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xE8664C + +#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xE86650 + +#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xE86654 + +#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xE86658 + +#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xE8665C + +#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xE86660 + +#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xE86664 + +#define mmTPC2_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xE86668 + +#define mmTPC2_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xE8666C + +#define mmTPC2_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xE86670 + +#define mmTPC2_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xE86674 + +#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xE86678 + +#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xE8667C + +#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xE86680 + +#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xE86684 + +#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xE86688 + +#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xE8668C + +#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xE86690 + +#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xE86694 + +#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xE86698 + +#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xE8669C + +#define mmTPC2_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xE866A0 + +#define mmTPC2_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xE866A4 + +#define mmTPC2_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xE866A8 + +#define 
mmTPC2_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xE866AC + +#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xE866B0 + +#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xE866B4 + +#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xE866B8 + +#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xE866BC + +#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xE866C0 + +#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xE866C4 + +#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xE866C8 + +#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xE866CC + +#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xE866D0 + +#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xE866D4 + +#define mmTPC2_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xE866D8 + +#define mmTPC2_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xE866DC + +#define mmTPC2_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xE866E0 + +#define mmTPC2_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xE866E4 + +#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xE866E8 + +#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xE866EC + +#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xE866F0 + +#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xE866F4 + +#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xE866F8 + +#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xE866FC + +#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xE86700 + +#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xE86704 + +#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xE86708 + +#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xE8670C + +#define mmTPC2_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xE86710 + +#define mmTPC2_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xE86714 + +#define mmTPC2_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xE86718 + +#define mmTPC2_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xE8671C + +#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xE86720 + +#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xE86724 + +#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xE86728 + +#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xE8672C + +#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xE86730 + +#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xE86734 + +#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xE86738 + +#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xE8673C + +#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xE86740 + +#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xE86744 + +#define mmTPC2_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xE86748 + +#define mmTPC2_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xE8674C + +#define mmTPC2_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xE86750 + +#define mmTPC2_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xE86754 + +#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xE86758 + +#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xE8675C + +#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xE86760 + +#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xE86764 + +#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xE86768 + +#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xE8676C + +#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xE86770 + +#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xE86774 + +#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xE86778 + +#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xE8677C + +#define mmTPC2_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE86780 + +#define mmTPC2_CFG_KERNEL_SYNC_OBJECT_ADDR 0xE86784 + +#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE86788 + +#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE8678C + +#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_0 0xE86790 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_0 0xE86794 + +#define 
mmTPC2_CFG_KERNEL_TID_BASE_DIM_1 0xE86798 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_1 0xE8679C + +#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_2 0xE867A0 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_2 0xE867A4 + +#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_3 0xE867A8 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_3 0xE867AC + +#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_4 0xE867B0 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_4 0xE867B4 + +#define mmTPC2_CFG_KERNEL_KERNEL_CONFIG 0xE867B8 + +#define mmTPC2_CFG_KERNEL_KERNEL_ID 0xE867BC + +#define mmTPC2_CFG_KERNEL_SRF_0 0xE867C0 + +#define mmTPC2_CFG_KERNEL_SRF_1 0xE867C4 + +#define mmTPC2_CFG_KERNEL_SRF_2 0xE867C8 + +#define mmTPC2_CFG_KERNEL_SRF_3 0xE867CC + +#define mmTPC2_CFG_KERNEL_SRF_4 0xE867D0 + +#define mmTPC2_CFG_KERNEL_SRF_5 0xE867D4 + +#define mmTPC2_CFG_KERNEL_SRF_6 0xE867D8 + +#define mmTPC2_CFG_KERNEL_SRF_7 0xE867DC + +#define mmTPC2_CFG_KERNEL_SRF_8 0xE867E0 + +#define mmTPC2_CFG_KERNEL_SRF_9 0xE867E4 + +#define mmTPC2_CFG_KERNEL_SRF_10 0xE867E8 + +#define mmTPC2_CFG_KERNEL_SRF_11 0xE867EC + +#define mmTPC2_CFG_KERNEL_SRF_12 0xE867F0 + +#define mmTPC2_CFG_KERNEL_SRF_13 0xE867F4 + +#define mmTPC2_CFG_KERNEL_SRF_14 0xE867F8 + +#define mmTPC2_CFG_KERNEL_SRF_15 0xE867FC + +#define mmTPC2_CFG_KERNEL_SRF_16 0xE86800 + +#define mmTPC2_CFG_KERNEL_SRF_17 0xE86804 + +#define mmTPC2_CFG_KERNEL_SRF_18 0xE86808 + +#define mmTPC2_CFG_KERNEL_SRF_19 0xE8680C + +#define mmTPC2_CFG_KERNEL_SRF_20 0xE86810 + +#define mmTPC2_CFG_KERNEL_SRF_21 0xE86814 + +#define mmTPC2_CFG_KERNEL_SRF_22 0xE86818 + +#define mmTPC2_CFG_KERNEL_SRF_23 0xE8681C + +#define mmTPC2_CFG_KERNEL_SRF_24 0xE86820 + +#define mmTPC2_CFG_KERNEL_SRF_25 0xE86824 + +#define mmTPC2_CFG_KERNEL_SRF_26 0xE86828 + +#define mmTPC2_CFG_KERNEL_SRF_27 0xE8682C + +#define mmTPC2_CFG_KERNEL_SRF_28 0xE86830 + +#define mmTPC2_CFG_KERNEL_SRF_29 0xE86834 + +#define mmTPC2_CFG_KERNEL_SRF_30 0xE86838 + +#define mmTPC2_CFG_KERNEL_SRF_31 0xE8683C + +#define mmTPC2_CFG_ROUND_CSR 0xE868FC + +#define mmTPC2_CFG_PROT 0xE86900 + +#define mmTPC2_CFG_SEMAPHORE 0xE86908 + +#define mmTPC2_CFG_VFLAGS 0xE8690C + +#define mmTPC2_CFG_SFLAGS 0xE86910 + +#define mmTPC2_CFG_LFSR_POLYNOM 0xE86918 + +#define mmTPC2_CFG_STATUS 0xE8691C + +#define mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH 0xE86920 + +#define mmTPC2_CFG_CFG_SUBTRACT_VALUE 0xE86924 + +#define mmTPC2_CFG_SM_BASE_ADDRESS_HIGH 0xE8692C + +#define mmTPC2_CFG_TPC_CMD 0xE86930 + +#define mmTPC2_CFG_TPC_EXECUTE 0xE86938 + +#define mmTPC2_CFG_TPC_STALL 0xE8693C + +#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_LOW 0xE86940 + +#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE86944 + +#define mmTPC2_CFG_RD_RATE_LIMIT 0xE86948 + +#define mmTPC2_CFG_WR_RATE_LIMIT 0xE86950 + +#define mmTPC2_CFG_MSS_CONFIG 0xE86954 + +#define mmTPC2_CFG_TPC_INTR_CAUSE 0xE86958 + +#define mmTPC2_CFG_TPC_INTR_MASK 0xE8695C + +#define mmTPC2_CFG_WQ_CREDITS 0xE86960 + +#define mmTPC2_CFG_ARUSER_LO 0xE86964 + +#define mmTPC2_CFG_ARUSER_HI 0xE86968 + +#define mmTPC2_CFG_AWUSER_LO 0xE8696C + +#define mmTPC2_CFG_AWUSER_HI 0xE86970 + +#define mmTPC2_CFG_OPCODE_EXEC 0xE86974 + +#define mmTPC2_CFG_LUT_FUNC32_BASE_ADDR_LO 0xE86978 + +#define mmTPC2_CFG_LUT_FUNC32_BASE_ADDR_HI 0xE8697C + +#define mmTPC2_CFG_LUT_FUNC64_BASE_ADDR_LO 0xE86980 + +#define mmTPC2_CFG_LUT_FUNC64_BASE_ADDR_HI 0xE86984 + +#define mmTPC2_CFG_LUT_FUNC128_BASE_ADDR_LO 0xE86988 + +#define mmTPC2_CFG_LUT_FUNC128_BASE_ADDR_HI 0xE8698C + +#define mmTPC2_CFG_LUT_FUNC256_BASE_ADDR_LO 0xE86990 + +#define mmTPC2_CFG_LUT_FUNC256_BASE_ADDR_HI 0xE86994 + +#define 
mmTPC2_CFG_TSB_CFG_MAX_SIZE 0xE86998 + +#define mmTPC2_CFG_TSB_CFG 0xE8699C + +#define mmTPC2_CFG_DBGMEM_ADD 0xE869A0 + +#define mmTPC2_CFG_DBGMEM_DATA_WR 0xE869A4 + +#define mmTPC2_CFG_DBGMEM_DATA_RD 0xE869A8 + +#define mmTPC2_CFG_DBGMEM_CTRL 0xE869AC + +#define mmTPC2_CFG_DBGMEM_RC 0xE869B0 + +#define mmTPC2_CFG_TSB_INFLIGHT_CNTR 0xE869B4 + +#define mmTPC2_CFG_WQ_INFLIGHT_CNTR 0xE869B8 + +#define mmTPC2_CFG_WQ_LBW_TOTAL_CNTR 0xE869BC + +#define mmTPC2_CFG_WQ_HBW_TOTAL_CNTR 0xE869C0 + +#define mmTPC2_CFG_IRQ_OCCOUPY_CNTR 0xE869C4 + +#define mmTPC2_CFG_FUNC_MBIST_CNTRL 0xE869D0 + +#define mmTPC2_CFG_FUNC_MBIST_PAT 0xE869D4 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_0 0xE869D8 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_1 0xE869DC + +#define mmTPC2_CFG_FUNC_MBIST_MEM_2 0xE869E0 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_3 0xE869E4 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_4 0xE869E8 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_5 0xE869EC + +#define mmTPC2_CFG_FUNC_MBIST_MEM_6 0xE869F0 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_7 0xE869F4 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_8 0xE869F8 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_9 0xE869FC + +#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE86A00 + +#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE86A04 + +#define mmTPC2_CFG_QM_TENSOR_0_PADDING_VALUE 0xE86A08 + +#define mmTPC2_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE86A0C + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE86A10 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE86A14 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE86A18 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE86A1C + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE86A20 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE86A24 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE86A28 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE86A2C + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE86A30 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE86A34 + +#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE86A38 + +#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE86A3C + +#define mmTPC2_CFG_QM_TENSOR_1_PADDING_VALUE 0xE86A40 + +#define mmTPC2_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE86A44 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE86A48 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE86A4C + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE86A50 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE86A54 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE86A58 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE86A5C + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE86A60 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE86A64 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE86A68 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE86A6C + +#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE86A70 + +#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE86A74 + +#define mmTPC2_CFG_QM_TENSOR_2_PADDING_VALUE 0xE86A78 + +#define mmTPC2_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE86A7C + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE86A80 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE86A84 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE86A88 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE86A8C + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE86A90 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE86A94 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE86A98 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE86A9C + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE86AA0 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE86AA4 + +#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE86AA8 + +#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 
0xE86AAC + +#define mmTPC2_CFG_QM_TENSOR_3_PADDING_VALUE 0xE86AB0 + +#define mmTPC2_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE86AB4 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE86AB8 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE86ABC + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE86AC0 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE86AC4 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE86AC8 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE86ACC + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE86AD0 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE86AD4 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE86AD8 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE86ADC + +#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE86AE0 + +#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE86AE4 + +#define mmTPC2_CFG_QM_TENSOR_4_PADDING_VALUE 0xE86AE8 + +#define mmTPC2_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE86AEC + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE86AF0 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE86AF4 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE86AF8 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE86AFC + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE86B00 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE86B04 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE86B08 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE86B0C + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE86B10 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE86B14 + +#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE86B18 + +#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE86B1C + +#define mmTPC2_CFG_QM_TENSOR_5_PADDING_VALUE 0xE86B20 + +#define mmTPC2_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE86B24 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE86B28 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE86B2C + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE86B30 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE86B34 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE86B38 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE86B3C + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE86B40 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE86B44 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE86B48 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE86B4C + +#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE86B50 + +#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE86B54 + +#define mmTPC2_CFG_QM_TENSOR_6_PADDING_VALUE 0xE86B58 + +#define mmTPC2_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE86B5C + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE86B60 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE86B64 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE86B68 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE86B6C + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE86B70 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE86B74 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE86B78 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE86B7C + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE86B80 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE86B84 + +#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE86B88 + +#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE86B8C + +#define mmTPC2_CFG_QM_TENSOR_7_PADDING_VALUE 0xE86B90 + +#define mmTPC2_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE86B94 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE86B98 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE86B9C + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE86BA0 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE86BA4 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE86BA8 + +#define 
mmTPC2_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE86BAC + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE86BB0 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE86BB4 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE86BB8 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE86BBC + +#define mmTPC2_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xE86BC0 + +#define mmTPC2_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xE86BC4 + +#define mmTPC2_CFG_QM_TENSOR_8_PADDING_VALUE 0xE86BC8 + +#define mmTPC2_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xE86BCC + +#define mmTPC2_CFG_QM_TENSOR_8_DIM_0_SIZE 0xE86BD0 + +#define mmTPC2_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xE86BD4 + +#define mmTPC2_CFG_QM_TENSOR_8_DIM_1_SIZE 0xE86BD8 + +#define mmTPC2_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xE86BDC + +#define mmTPC2_CFG_QM_TENSOR_8_DIM_2_SIZE 0xE86BE0 + +#define mmTPC2_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xE86BE4 + +#define mmTPC2_CFG_QM_TENSOR_8_DIM_3_SIZE 0xE86BE8 + +#define mmTPC2_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xE86BEC + +#define mmTPC2_CFG_QM_TENSOR_8_DIM_4_SIZE 0xE86BF0 + +#define mmTPC2_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xE86BF4 + +#define mmTPC2_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xE86BF8 + +#define mmTPC2_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xE86BFC + +#define mmTPC2_CFG_QM_TENSOR_9_PADDING_VALUE 0xE86C00 + +#define mmTPC2_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xE86C04 + +#define mmTPC2_CFG_QM_TENSOR_9_DIM_0_SIZE 0xE86C08 + +#define mmTPC2_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xE86C0C + +#define mmTPC2_CFG_QM_TENSOR_9_DIM_1_SIZE 0xE86C10 + +#define mmTPC2_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xE86C14 + +#define mmTPC2_CFG_QM_TENSOR_9_DIM_2_SIZE 0xE86C18 + +#define mmTPC2_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xE86C1C + +#define mmTPC2_CFG_QM_TENSOR_9_DIM_3_SIZE 0xE86C20 + +#define mmTPC2_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xE86C24 + +#define mmTPC2_CFG_QM_TENSOR_9_DIM_4_SIZE 0xE86C28 + +#define mmTPC2_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xE86C2C + +#define mmTPC2_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xE86C30 + +#define mmTPC2_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xE86C34 + +#define mmTPC2_CFG_QM_TENSOR_10_PADDING_VALUE 0xE86C38 + +#define mmTPC2_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xE86C3C + +#define mmTPC2_CFG_QM_TENSOR_10_DIM_0_SIZE 0xE86C40 + +#define mmTPC2_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xE86C44 + +#define mmTPC2_CFG_QM_TENSOR_10_DIM_1_SIZE 0xE86C48 + +#define mmTPC2_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xE86C4C + +#define mmTPC2_CFG_QM_TENSOR_10_DIM_2_SIZE 0xE86C50 + +#define mmTPC2_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xE86C54 + +#define mmTPC2_CFG_QM_TENSOR_10_DIM_3_SIZE 0xE86C58 + +#define mmTPC2_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xE86C5C + +#define mmTPC2_CFG_QM_TENSOR_10_DIM_4_SIZE 0xE86C60 + +#define mmTPC2_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xE86C64 + +#define mmTPC2_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xE86C68 + +#define mmTPC2_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xE86C6C + +#define mmTPC2_CFG_QM_TENSOR_11_PADDING_VALUE 0xE86C70 + +#define mmTPC2_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xE86C74 + +#define mmTPC2_CFG_QM_TENSOR_11_DIM_0_SIZE 0xE86C78 + +#define mmTPC2_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xE86C7C + +#define mmTPC2_CFG_QM_TENSOR_11_DIM_1_SIZE 0xE86C80 + +#define mmTPC2_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xE86C84 + +#define mmTPC2_CFG_QM_TENSOR_11_DIM_2_SIZE 0xE86C88 + +#define mmTPC2_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xE86C8C + +#define mmTPC2_CFG_QM_TENSOR_11_DIM_3_SIZE 0xE86C90 + +#define mmTPC2_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xE86C94 + +#define mmTPC2_CFG_QM_TENSOR_11_DIM_4_SIZE 0xE86C98 + +#define mmTPC2_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xE86C9C + +#define mmTPC2_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xE86CA0 + +#define mmTPC2_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xE86CA4 + +#define 
mmTPC2_CFG_QM_TENSOR_12_PADDING_VALUE 0xE86CA8 + +#define mmTPC2_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xE86CAC + +#define mmTPC2_CFG_QM_TENSOR_12_DIM_0_SIZE 0xE86CB0 + +#define mmTPC2_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xE86CB4 + +#define mmTPC2_CFG_QM_TENSOR_12_DIM_1_SIZE 0xE86CB8 + +#define mmTPC2_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xE86CBC + +#define mmTPC2_CFG_QM_TENSOR_12_DIM_2_SIZE 0xE86CC0 + +#define mmTPC2_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xE86CC4 + +#define mmTPC2_CFG_QM_TENSOR_12_DIM_3_SIZE 0xE86CC8 + +#define mmTPC2_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xE86CCC + +#define mmTPC2_CFG_QM_TENSOR_12_DIM_4_SIZE 0xE86CD0 + +#define mmTPC2_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xE86CD4 + +#define mmTPC2_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xE86CD8 + +#define mmTPC2_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xE86CDC + +#define mmTPC2_CFG_QM_TENSOR_13_PADDING_VALUE 0xE86CE0 + +#define mmTPC2_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xE86CE4 + +#define mmTPC2_CFG_QM_TENSOR_13_DIM_0_SIZE 0xE86CE8 + +#define mmTPC2_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xE86CEC + +#define mmTPC2_CFG_QM_TENSOR_13_DIM_1_SIZE 0xE86CF0 + +#define mmTPC2_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xE86CF4 + +#define mmTPC2_CFG_QM_TENSOR_13_DIM_2_SIZE 0xE86CF8 + +#define mmTPC2_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xE86CFC + +#define mmTPC2_CFG_QM_TENSOR_13_DIM_3_SIZE 0xE86D00 + +#define mmTPC2_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xE86D04 + +#define mmTPC2_CFG_QM_TENSOR_13_DIM_4_SIZE 0xE86D08 + +#define mmTPC2_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xE86D0C + +#define mmTPC2_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xE86D10 + +#define mmTPC2_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xE86D14 + +#define mmTPC2_CFG_QM_TENSOR_14_PADDING_VALUE 0xE86D18 + +#define mmTPC2_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xE86D1C + +#define mmTPC2_CFG_QM_TENSOR_14_DIM_0_SIZE 0xE86D20 + +#define mmTPC2_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xE86D24 + +#define mmTPC2_CFG_QM_TENSOR_14_DIM_1_SIZE 0xE86D28 + +#define mmTPC2_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xE86D2C + +#define mmTPC2_CFG_QM_TENSOR_14_DIM_2_SIZE 0xE86D30 + +#define mmTPC2_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xE86D34 + +#define mmTPC2_CFG_QM_TENSOR_14_DIM_3_SIZE 0xE86D38 + +#define mmTPC2_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xE86D3C + +#define mmTPC2_CFG_QM_TENSOR_14_DIM_4_SIZE 0xE86D40 + +#define mmTPC2_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xE86D44 + +#define mmTPC2_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xE86D48 + +#define mmTPC2_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xE86D4C + +#define mmTPC2_CFG_QM_TENSOR_15_PADDING_VALUE 0xE86D50 + +#define mmTPC2_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xE86D54 + +#define mmTPC2_CFG_QM_TENSOR_15_DIM_0_SIZE 0xE86D58 + +#define mmTPC2_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xE86D5C + +#define mmTPC2_CFG_QM_TENSOR_15_DIM_1_SIZE 0xE86D60 + +#define mmTPC2_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xE86D64 + +#define mmTPC2_CFG_QM_TENSOR_15_DIM_2_SIZE 0xE86D68 + +#define mmTPC2_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xE86D6C + +#define mmTPC2_CFG_QM_TENSOR_15_DIM_3_SIZE 0xE86D70 + +#define mmTPC2_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xE86D74 + +#define mmTPC2_CFG_QM_TENSOR_15_DIM_4_SIZE 0xE86D78 + +#define mmTPC2_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xE86D7C + +#define mmTPC2_CFG_QM_SYNC_OBJECT_MESSAGE 0xE86D80 + +#define mmTPC2_CFG_QM_SYNC_OBJECT_ADDR 0xE86D84 + +#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE86D88 + +#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE86D8C + +#define mmTPC2_CFG_QM_TID_BASE_DIM_0 0xE86D90 + +#define mmTPC2_CFG_QM_TID_SIZE_DIM_0 0xE86D94 + +#define mmTPC2_CFG_QM_TID_BASE_DIM_1 0xE86D98 + +#define mmTPC2_CFG_QM_TID_SIZE_DIM_1 0xE86D9C + +#define mmTPC2_CFG_QM_TID_BASE_DIM_2 0xE86DA0 + +#define mmTPC2_CFG_QM_TID_SIZE_DIM_2 
0xE86DA4
+#define mmTPC2_CFG_QM_TID_BASE_DIM_3 0xE86DA8
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_3 0xE86DAC
+#define mmTPC2_CFG_QM_TID_BASE_DIM_4 0xE86DB0
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_4 0xE86DB4
+#define mmTPC2_CFG_QM_KERNEL_CONFIG 0xE86DB8
+#define mmTPC2_CFG_QM_KERNEL_ID 0xE86DBC
+#define mmTPC2_CFG_QM_SRF_0 0xE86DC0
+#define mmTPC2_CFG_QM_SRF_1 0xE86DC4
+#define mmTPC2_CFG_QM_SRF_2 0xE86DC8
+#define mmTPC2_CFG_QM_SRF_3 0xE86DCC
+#define mmTPC2_CFG_QM_SRF_4 0xE86DD0
+#define mmTPC2_CFG_QM_SRF_5 0xE86DD4
+#define mmTPC2_CFG_QM_SRF_6 0xE86DD8
+#define mmTPC2_CFG_QM_SRF_7 0xE86DDC
+#define mmTPC2_CFG_QM_SRF_8 0xE86DE0
+#define mmTPC2_CFG_QM_SRF_9 0xE86DE4
+#define mmTPC2_CFG_QM_SRF_10 0xE86DE8
+#define mmTPC2_CFG_QM_SRF_11 0xE86DEC
+#define mmTPC2_CFG_QM_SRF_12 0xE86DF0
+#define mmTPC2_CFG_QM_SRF_13 0xE86DF4
+#define mmTPC2_CFG_QM_SRF_14 0xE86DF8
+#define mmTPC2_CFG_QM_SRF_15 0xE86DFC
+#define mmTPC2_CFG_QM_SRF_16 0xE86E00
+#define mmTPC2_CFG_QM_SRF_17 0xE86E04
+#define mmTPC2_CFG_QM_SRF_18 0xE86E08
+#define mmTPC2_CFG_QM_SRF_19 0xE86E0C
+#define mmTPC2_CFG_QM_SRF_20 0xE86E10
+#define mmTPC2_CFG_QM_SRF_21 0xE86E14
+#define mmTPC2_CFG_QM_SRF_22 0xE86E18
+#define mmTPC2_CFG_QM_SRF_23 0xE86E1C
+#define mmTPC2_CFG_QM_SRF_24 0xE86E20
+#define mmTPC2_CFG_QM_SRF_25 0xE86E24
+#define mmTPC2_CFG_QM_SRF_26 0xE86E28
+#define mmTPC2_CFG_QM_SRF_27 0xE86E2C
+#define mmTPC2_CFG_QM_SRF_28 0xE86E30
+#define mmTPC2_CFG_QM_SRF_29 0xE86E34
+#define mmTPC2_CFG_QM_SRF_30 0xE86E38
+#define mmTPC2_CFG_QM_SRF_31 0xE86E3C
+
+#endif /* ASIC_REG_TPC2_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h
new file mode 100644
index 000000000..2919e2fa5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC2_QM_REGS_H_
+#define ASIC_REG_TPC2_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC2_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC2_QM_GLBL_CFG0 0xE88000
+#define mmTPC2_QM_GLBL_CFG1 0xE88004
+#define mmTPC2_QM_GLBL_PROT 0xE88008
+#define mmTPC2_QM_GLBL_ERR_CFG 0xE8800C
+#define mmTPC2_QM_GLBL_SECURE_PROPS_0 0xE88010
+#define mmTPC2_QM_GLBL_SECURE_PROPS_1 0xE88014
+#define mmTPC2_QM_GLBL_SECURE_PROPS_2 0xE88018
+#define mmTPC2_QM_GLBL_SECURE_PROPS_3 0xE8801C
+#define mmTPC2_QM_GLBL_SECURE_PROPS_4 0xE88020
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_0 0xE88024
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_1 0xE88028
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_2 0xE8802C
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_3 0xE88030
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_4 0xE88034
+#define mmTPC2_QM_GLBL_STS0 0xE88038
+#define mmTPC2_QM_GLBL_STS1_0 0xE88040
+#define mmTPC2_QM_GLBL_STS1_1 0xE88044
+#define mmTPC2_QM_GLBL_STS1_2 0xE88048
+#define mmTPC2_QM_GLBL_STS1_3 0xE8804C
+#define mmTPC2_QM_GLBL_STS1_4 0xE88050
+#define mmTPC2_QM_GLBL_MSG_EN_0 0xE88054
+#define mmTPC2_QM_GLBL_MSG_EN_1 0xE88058
+#define mmTPC2_QM_GLBL_MSG_EN_2 0xE8805C
+#define mmTPC2_QM_GLBL_MSG_EN_3 0xE88060
+#define mmTPC2_QM_GLBL_MSG_EN_4 0xE88068
+#define mmTPC2_QM_PQ_BASE_LO_0 0xE88070
+#define mmTPC2_QM_PQ_BASE_LO_1 0xE88074
+#define mmTPC2_QM_PQ_BASE_LO_2 0xE88078
+#define mmTPC2_QM_PQ_BASE_LO_3 0xE8807C
+#define mmTPC2_QM_PQ_BASE_HI_0 0xE88080
+#define mmTPC2_QM_PQ_BASE_HI_1 0xE88084
+#define mmTPC2_QM_PQ_BASE_HI_2 0xE88088
+#define mmTPC2_QM_PQ_BASE_HI_3 0xE8808C
+#define mmTPC2_QM_PQ_SIZE_0 0xE88090
+#define mmTPC2_QM_PQ_SIZE_1 0xE88094
+#define mmTPC2_QM_PQ_SIZE_2 0xE88098
+#define mmTPC2_QM_PQ_SIZE_3 0xE8809C
+#define mmTPC2_QM_PQ_PI_0 0xE880A0
+#define mmTPC2_QM_PQ_PI_1 0xE880A4
+#define mmTPC2_QM_PQ_PI_2 0xE880A8
+#define mmTPC2_QM_PQ_PI_3 0xE880AC
+#define mmTPC2_QM_PQ_CI_0 0xE880B0
+#define mmTPC2_QM_PQ_CI_1 0xE880B4
+#define mmTPC2_QM_PQ_CI_2 0xE880B8
+#define mmTPC2_QM_PQ_CI_3 0xE880BC
+#define mmTPC2_QM_PQ_CFG0_0 0xE880C0
+#define mmTPC2_QM_PQ_CFG0_1 0xE880C4
+#define mmTPC2_QM_PQ_CFG0_2 0xE880C8
+#define mmTPC2_QM_PQ_CFG0_3 0xE880CC
+#define mmTPC2_QM_PQ_CFG1_0 0xE880D0
+#define mmTPC2_QM_PQ_CFG1_1 0xE880D4
+#define mmTPC2_QM_PQ_CFG1_2 0xE880D8
+#define mmTPC2_QM_PQ_CFG1_3 0xE880DC
+#define mmTPC2_QM_PQ_ARUSER_31_11_0 0xE880E0
+#define mmTPC2_QM_PQ_ARUSER_31_11_1 0xE880E4
+#define mmTPC2_QM_PQ_ARUSER_31_11_2 0xE880E8
+#define mmTPC2_QM_PQ_ARUSER_31_11_3 0xE880EC
+#define mmTPC2_QM_PQ_STS0_0 0xE880F0
+#define mmTPC2_QM_PQ_STS0_1 0xE880F4
+#define mmTPC2_QM_PQ_STS0_2 0xE880F8
+#define mmTPC2_QM_PQ_STS0_3 0xE880FC
+#define mmTPC2_QM_PQ_STS1_0 0xE88100
+#define mmTPC2_QM_PQ_STS1_1 0xE88104
+#define mmTPC2_QM_PQ_STS1_2 0xE88108
+#define mmTPC2_QM_PQ_STS1_3 0xE8810C
+#define mmTPC2_QM_CQ_CFG0_0 0xE88110
+#define mmTPC2_QM_CQ_CFG0_1 0xE88114
+#define mmTPC2_QM_CQ_CFG0_2 0xE88118
+#define mmTPC2_QM_CQ_CFG0_3 0xE8811C
+#define mmTPC2_QM_CQ_CFG0_4 0xE88120
+#define mmTPC2_QM_CQ_CFG1_0 0xE88124
+#define mmTPC2_QM_CQ_CFG1_1 0xE88128
+#define mmTPC2_QM_CQ_CFG1_2 0xE8812C
+#define mmTPC2_QM_CQ_CFG1_3 0xE88130
+#define mmTPC2_QM_CQ_CFG1_4 0xE88134
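[Editor's note] These auto-generated blocks are highly regular: within one QMAN block, indexed registers such as PQ_PI_0..PQ_PI_3 sit at consecutive 4-byte offsets (0xE880A0, 0xE880A4, ...), and the whole per-TPC register file repeats at a fixed stride; compare mmTPC2_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW (0xE865C0) above with its TPC3 counterpart further below (0xEC65C0), 0x40000 apart. A minimal C sketch of how that regularity could be exploited follows; the helper names and the stride constant are illustrative assumptions derived from the addresses in these headers, not the habanalabs driver's actual API:

/* Illustrative sketch only -- not part of the commit above. Assumes the
 * 0x40000 inter-TPC stride and 4-byte index spacing observed in these
 * headers; all identifiers below are hypothetical.
 */
#include <stdint.h>

#define TPC_BLOCK_STRIDE 0x40000 /* observed: TPC3 regs = TPC2 regs + 0x40000 */

/* Address of a TPC2-relative register in an arbitrary TPC instance. */
static inline uint32_t tpc_reg_addr(uint32_t tpc2_reg, unsigned int tpc_id)
{
	return tpc2_reg + (tpc_id - 2) * TPC_BLOCK_STRIDE;
}

/* Address of an indexed register, e.g. queue producer index PQ_PI_<q>. */
static inline uint32_t tpc2_qm_pq_pi_addr(unsigned int q)
{
	return 0xE880A0 /* mmTPC2_QM_PQ_PI_0 */ + 4 * q;
}

For example, tpc_reg_addr(0xE865C0, 3) yields 0xEC65C0, which matches mmTPC3_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW in the tpc3_cfg_regs.h listing below.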
+#define mmTPC2_QM_CQ_ARUSER_31_11_0 0xE88138 + +#define mmTPC2_QM_CQ_ARUSER_31_11_1 0xE8813C + +#define mmTPC2_QM_CQ_ARUSER_31_11_2 0xE88140 + +#define mmTPC2_QM_CQ_ARUSER_31_11_3 0xE88144 + +#define mmTPC2_QM_CQ_ARUSER_31_11_4 0xE88148 + +#define mmTPC2_QM_CQ_STS0_0 0xE8814C + +#define mmTPC2_QM_CQ_STS0_1 0xE88150 + +#define mmTPC2_QM_CQ_STS0_2 0xE88154 + +#define mmTPC2_QM_CQ_STS0_3 0xE88158 + +#define mmTPC2_QM_CQ_STS0_4 0xE8815C + +#define mmTPC2_QM_CQ_STS1_0 0xE88160 + +#define mmTPC2_QM_CQ_STS1_1 0xE88164 + +#define mmTPC2_QM_CQ_STS1_2 0xE88168 + +#define mmTPC2_QM_CQ_STS1_3 0xE8816C + +#define mmTPC2_QM_CQ_STS1_4 0xE88170 + +#define mmTPC2_QM_CQ_PTR_LO_0 0xE88174 + +#define mmTPC2_QM_CQ_PTR_HI_0 0xE88178 + +#define mmTPC2_QM_CQ_TSIZE_0 0xE8817C + +#define mmTPC2_QM_CQ_CTL_0 0xE88180 + +#define mmTPC2_QM_CQ_PTR_LO_1 0xE88184 + +#define mmTPC2_QM_CQ_PTR_HI_1 0xE88188 + +#define mmTPC2_QM_CQ_TSIZE_1 0xE8818C + +#define mmTPC2_QM_CQ_CTL_1 0xE88190 + +#define mmTPC2_QM_CQ_PTR_LO_2 0xE88194 + +#define mmTPC2_QM_CQ_PTR_HI_2 0xE88198 + +#define mmTPC2_QM_CQ_TSIZE_2 0xE8819C + +#define mmTPC2_QM_CQ_CTL_2 0xE881A0 + +#define mmTPC2_QM_CQ_PTR_LO_3 0xE881A4 + +#define mmTPC2_QM_CQ_PTR_HI_3 0xE881A8 + +#define mmTPC2_QM_CQ_TSIZE_3 0xE881AC + +#define mmTPC2_QM_CQ_CTL_3 0xE881B0 + +#define mmTPC2_QM_CQ_PTR_LO_4 0xE881B4 + +#define mmTPC2_QM_CQ_PTR_HI_4 0xE881B8 + +#define mmTPC2_QM_CQ_TSIZE_4 0xE881BC + +#define mmTPC2_QM_CQ_CTL_4 0xE881C0 + +#define mmTPC2_QM_CQ_PTR_LO_STS_0 0xE881C4 + +#define mmTPC2_QM_CQ_PTR_LO_STS_1 0xE881C8 + +#define mmTPC2_QM_CQ_PTR_LO_STS_2 0xE881CC + +#define mmTPC2_QM_CQ_PTR_LO_STS_3 0xE881D0 + +#define mmTPC2_QM_CQ_PTR_LO_STS_4 0xE881D4 + +#define mmTPC2_QM_CQ_PTR_HI_STS_0 0xE881D8 + +#define mmTPC2_QM_CQ_PTR_HI_STS_1 0xE881DC + +#define mmTPC2_QM_CQ_PTR_HI_STS_2 0xE881E0 + +#define mmTPC2_QM_CQ_PTR_HI_STS_3 0xE881E4 + +#define mmTPC2_QM_CQ_PTR_HI_STS_4 0xE881E8 + +#define mmTPC2_QM_CQ_TSIZE_STS_0 0xE881EC + +#define mmTPC2_QM_CQ_TSIZE_STS_1 0xE881F0 + +#define mmTPC2_QM_CQ_TSIZE_STS_2 0xE881F4 + +#define mmTPC2_QM_CQ_TSIZE_STS_3 0xE881F8 + +#define mmTPC2_QM_CQ_TSIZE_STS_4 0xE881FC + +#define mmTPC2_QM_CQ_CTL_STS_0 0xE88200 + +#define mmTPC2_QM_CQ_CTL_STS_1 0xE88204 + +#define mmTPC2_QM_CQ_CTL_STS_2 0xE88208 + +#define mmTPC2_QM_CQ_CTL_STS_3 0xE8820C + +#define mmTPC2_QM_CQ_CTL_STS_4 0xE88210 + +#define mmTPC2_QM_CQ_IFIFO_CNT_0 0xE88214 + +#define mmTPC2_QM_CQ_IFIFO_CNT_1 0xE88218 + +#define mmTPC2_QM_CQ_IFIFO_CNT_2 0xE8821C + +#define mmTPC2_QM_CQ_IFIFO_CNT_3 0xE88220 + +#define mmTPC2_QM_CQ_IFIFO_CNT_4 0xE88224 + +#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_0 0xE88228 + +#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_1 0xE8822C + +#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_2 0xE88230 + +#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_3 0xE88234 + +#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_4 0xE88238 + +#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_0 0xE8823C + +#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_1 0xE88240 + +#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_2 0xE88244 + +#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_3 0xE88248 + +#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_4 0xE8824C + +#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_0 0xE88250 + +#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_1 0xE88254 + +#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_2 0xE88258 + +#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_3 0xE8825C + +#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_4 0xE88260 + +#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_0 0xE88264 + +#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_1 0xE88268 + +#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_2 0xE8826C + +#define 
mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_3 0xE88270 + +#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_4 0xE88274 + +#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_0 0xE88278 + +#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_1 0xE8827C + +#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 0xE88280 + +#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_3 0xE88284 + +#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_4 0xE88288 + +#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_0 0xE8828C + +#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_1 0xE88290 + +#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_2 0xE88294 + +#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_3 0xE88298 + +#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_4 0xE8829C + +#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_0 0xE882A0 + +#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_1 0xE882A4 + +#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_2 0xE882A8 + +#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_3 0xE882AC + +#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_4 0xE882B0 + +#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_0 0xE882B4 + +#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_1 0xE882B8 + +#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_2 0xE882BC + +#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_3 0xE882C0 + +#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_4 0xE882C4 + +#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_0 0xE882C8 + +#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_1 0xE882CC + +#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_2 0xE882D0 + +#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_3 0xE882D4 + +#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_4 0xE882D8 + +#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xE882E0 + +#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xE882E4 + +#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xE882E8 + +#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xE882EC + +#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xE882F0 + +#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xE882F4 + +#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xE882F8 + +#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xE882FC + +#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xE88300 + +#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xE88304 + +#define mmTPC2_QM_CP_FENCE0_RDATA_0 0xE88308 + +#define mmTPC2_QM_CP_FENCE0_RDATA_1 0xE8830C + +#define mmTPC2_QM_CP_FENCE0_RDATA_2 0xE88310 + +#define mmTPC2_QM_CP_FENCE0_RDATA_3 0xE88314 + +#define mmTPC2_QM_CP_FENCE0_RDATA_4 0xE88318 + +#define mmTPC2_QM_CP_FENCE1_RDATA_0 0xE8831C + +#define mmTPC2_QM_CP_FENCE1_RDATA_1 0xE88320 + +#define mmTPC2_QM_CP_FENCE1_RDATA_2 0xE88324 + +#define mmTPC2_QM_CP_FENCE1_RDATA_3 0xE88328 + +#define mmTPC2_QM_CP_FENCE1_RDATA_4 0xE8832C + +#define mmTPC2_QM_CP_FENCE2_RDATA_0 0xE88330 + +#define mmTPC2_QM_CP_FENCE2_RDATA_1 0xE88334 + +#define mmTPC2_QM_CP_FENCE2_RDATA_2 0xE88338 + +#define mmTPC2_QM_CP_FENCE2_RDATA_3 0xE8833C + +#define mmTPC2_QM_CP_FENCE2_RDATA_4 0xE88340 + +#define mmTPC2_QM_CP_FENCE3_RDATA_0 0xE88344 + +#define mmTPC2_QM_CP_FENCE3_RDATA_1 0xE88348 + +#define mmTPC2_QM_CP_FENCE3_RDATA_2 0xE8834C + +#define mmTPC2_QM_CP_FENCE3_RDATA_3 0xE88350 + +#define mmTPC2_QM_CP_FENCE3_RDATA_4 0xE88354 + +#define mmTPC2_QM_CP_FENCE0_CNT_0 0xE88358 + +#define mmTPC2_QM_CP_FENCE0_CNT_1 0xE8835C + +#define mmTPC2_QM_CP_FENCE0_CNT_2 0xE88360 + +#define mmTPC2_QM_CP_FENCE0_CNT_3 0xE88364 + +#define mmTPC2_QM_CP_FENCE0_CNT_4 0xE88368 + +#define mmTPC2_QM_CP_FENCE1_CNT_0 0xE8836C + +#define mmTPC2_QM_CP_FENCE1_CNT_1 0xE88370 + +#define mmTPC2_QM_CP_FENCE1_CNT_2 0xE88374 + +#define mmTPC2_QM_CP_FENCE1_CNT_3 0xE88378 + +#define mmTPC2_QM_CP_FENCE1_CNT_4 0xE8837C + +#define mmTPC2_QM_CP_FENCE2_CNT_0 0xE88380 + +#define mmTPC2_QM_CP_FENCE2_CNT_1 0xE88384 + +#define 
mmTPC2_QM_CP_FENCE2_CNT_2 0xE88388 + +#define mmTPC2_QM_CP_FENCE2_CNT_3 0xE8838C + +#define mmTPC2_QM_CP_FENCE2_CNT_4 0xE88390 + +#define mmTPC2_QM_CP_FENCE3_CNT_0 0xE88394 + +#define mmTPC2_QM_CP_FENCE3_CNT_1 0xE88398 + +#define mmTPC2_QM_CP_FENCE3_CNT_2 0xE8839C + +#define mmTPC2_QM_CP_FENCE3_CNT_3 0xE883A0 + +#define mmTPC2_QM_CP_FENCE3_CNT_4 0xE883A4 + +#define mmTPC2_QM_CP_STS_0 0xE883A8 + +#define mmTPC2_QM_CP_STS_1 0xE883AC + +#define mmTPC2_QM_CP_STS_2 0xE883B0 + +#define mmTPC2_QM_CP_STS_3 0xE883B4 + +#define mmTPC2_QM_CP_STS_4 0xE883B8 + +#define mmTPC2_QM_CP_CURRENT_INST_LO_0 0xE883BC + +#define mmTPC2_QM_CP_CURRENT_INST_LO_1 0xE883C0 + +#define mmTPC2_QM_CP_CURRENT_INST_LO_2 0xE883C4 + +#define mmTPC2_QM_CP_CURRENT_INST_LO_3 0xE883C8 + +#define mmTPC2_QM_CP_CURRENT_INST_LO_4 0xE883CC + +#define mmTPC2_QM_CP_CURRENT_INST_HI_0 0xE883D0 + +#define mmTPC2_QM_CP_CURRENT_INST_HI_1 0xE883D4 + +#define mmTPC2_QM_CP_CURRENT_INST_HI_2 0xE883D8 + +#define mmTPC2_QM_CP_CURRENT_INST_HI_3 0xE883DC + +#define mmTPC2_QM_CP_CURRENT_INST_HI_4 0xE883E0 + +#define mmTPC2_QM_CP_BARRIER_CFG_0 0xE883F4 + +#define mmTPC2_QM_CP_BARRIER_CFG_1 0xE883F8 + +#define mmTPC2_QM_CP_BARRIER_CFG_2 0xE883FC + +#define mmTPC2_QM_CP_BARRIER_CFG_3 0xE88400 + +#define mmTPC2_QM_CP_BARRIER_CFG_4 0xE88404 + +#define mmTPC2_QM_CP_DBG_0_0 0xE88408 + +#define mmTPC2_QM_CP_DBG_0_1 0xE8840C + +#define mmTPC2_QM_CP_DBG_0_2 0xE88410 + +#define mmTPC2_QM_CP_DBG_0_3 0xE88414 + +#define mmTPC2_QM_CP_DBG_0_4 0xE88418 + +#define mmTPC2_QM_CP_ARUSER_31_11_0 0xE8841C + +#define mmTPC2_QM_CP_ARUSER_31_11_1 0xE88420 + +#define mmTPC2_QM_CP_ARUSER_31_11_2 0xE88424 + +#define mmTPC2_QM_CP_ARUSER_31_11_3 0xE88428 + +#define mmTPC2_QM_CP_ARUSER_31_11_4 0xE8842C + +#define mmTPC2_QM_CP_AWUSER_31_11_0 0xE88430 + +#define mmTPC2_QM_CP_AWUSER_31_11_1 0xE88434 + +#define mmTPC2_QM_CP_AWUSER_31_11_2 0xE88438 + +#define mmTPC2_QM_CP_AWUSER_31_11_3 0xE8843C + +#define mmTPC2_QM_CP_AWUSER_31_11_4 0xE88440 + +#define mmTPC2_QM_ARB_CFG_0 0xE88A00 + +#define mmTPC2_QM_ARB_CHOISE_Q_PUSH 0xE88A04 + +#define mmTPC2_QM_ARB_WRR_WEIGHT_0 0xE88A08 + +#define mmTPC2_QM_ARB_WRR_WEIGHT_1 0xE88A0C + +#define mmTPC2_QM_ARB_WRR_WEIGHT_2 0xE88A10 + +#define mmTPC2_QM_ARB_WRR_WEIGHT_3 0xE88A14 + +#define mmTPC2_QM_ARB_CFG_1 0xE88A18 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_0 0xE88A20 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_1 0xE88A24 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_2 0xE88A28 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_3 0xE88A2C + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_4 0xE88A30 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_5 0xE88A34 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_6 0xE88A38 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_7 0xE88A3C + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_8 0xE88A40 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_9 0xE88A44 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_10 0xE88A48 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_11 0xE88A4C + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_12 0xE88A50 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_13 0xE88A54 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_14 0xE88A58 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_15 0xE88A5C + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_16 0xE88A60 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_17 0xE88A64 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_18 0xE88A68 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_19 0xE88A6C + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_20 0xE88A70 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_21 0xE88A74 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_22 0xE88A78 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_23 0xE88A7C + +#define 
mmTPC2_QM_ARB_MST_AVAIL_CRED_24 0xE88A80 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_25 0xE88A84 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_26 0xE88A88 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_27 0xE88A8C + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_28 0xE88A90 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_29 0xE88A94 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_30 0xE88A98 + +#define mmTPC2_QM_ARB_MST_AVAIL_CRED_31 0xE88A9C + +#define mmTPC2_QM_ARB_MST_CRED_INC 0xE88AA0 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xE88AA4 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xE88AA8 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xE88AAC + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xE88AB0 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xE88AB4 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xE88AB8 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xE88ABC + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xE88AC0 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xE88AC4 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xE88AC8 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xE88ACC + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xE88AD0 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xE88AD4 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xE88AD8 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xE88ADC + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xE88AE0 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xE88AE4 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xE88AE8 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xE88AEC + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xE88AF0 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xE88AF4 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xE88AF8 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xE88AFC + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xE88B00 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xE88B04 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xE88B08 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xE88B0C + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xE88B10 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xE88B14 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xE88B18 + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xE88B1C + +#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xE88B20 + +#define mmTPC2_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xE88B28 + +#define mmTPC2_QM_ARB_MST_SLAVE_EN 0xE88B2C + +#define mmTPC2_QM_ARB_MST_QUIET_PER 0xE88B34 + +#define mmTPC2_QM_ARB_SLV_CHOISE_WDT 0xE88B38 + +#define mmTPC2_QM_ARB_SLV_ID 0xE88B3C + +#define mmTPC2_QM_ARB_MSG_MAX_INFLIGHT 0xE88B44 + +#define mmTPC2_QM_ARB_MSG_AWUSER_31_11 0xE88B48 + +#define mmTPC2_QM_ARB_MSG_AWUSER_SEC_PROP 0xE88B4C + +#define mmTPC2_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xE88B50 + +#define mmTPC2_QM_ARB_BASE_LO 0xE88B54 + +#define mmTPC2_QM_ARB_BASE_HI 0xE88B58 + +#define mmTPC2_QM_ARB_STATE_STS 0xE88B80 + +#define mmTPC2_QM_ARB_CHOISE_FULLNESS_STS 0xE88B84 + +#define mmTPC2_QM_ARB_MSG_STS 0xE88B88 + +#define mmTPC2_QM_ARB_SLV_CHOISE_Q_HEAD 0xE88B8C + +#define mmTPC2_QM_ARB_ERR_CAUSE 0xE88B9C + +#define mmTPC2_QM_ARB_ERR_MSG_EN 0xE88BA0 + +#define mmTPC2_QM_ARB_ERR_STS_DRP 0xE88BA8 + +#define mmTPC2_QM_ARB_MST_CRED_STS_0 0xE88BB0 + +#define mmTPC2_QM_ARB_MST_CRED_STS_1 0xE88BB4 + +#define mmTPC2_QM_ARB_MST_CRED_STS_2 0xE88BB8 + +#define mmTPC2_QM_ARB_MST_CRED_STS_3 0xE88BBC + +#define mmTPC2_QM_ARB_MST_CRED_STS_4 0xE88BC0 + +#define mmTPC2_QM_ARB_MST_CRED_STS_5 0xE88BC4 + +#define mmTPC2_QM_ARB_MST_CRED_STS_6 0xE88BC8 + +#define mmTPC2_QM_ARB_MST_CRED_STS_7 
0xE88BCC
+#define mmTPC2_QM_ARB_MST_CRED_STS_8 0xE88BD0
+#define mmTPC2_QM_ARB_MST_CRED_STS_9 0xE88BD4
+#define mmTPC2_QM_ARB_MST_CRED_STS_10 0xE88BD8
+#define mmTPC2_QM_ARB_MST_CRED_STS_11 0xE88BDC
+#define mmTPC2_QM_ARB_MST_CRED_STS_12 0xE88BE0
+#define mmTPC2_QM_ARB_MST_CRED_STS_13 0xE88BE4
+#define mmTPC2_QM_ARB_MST_CRED_STS_14 0xE88BE8
+#define mmTPC2_QM_ARB_MST_CRED_STS_15 0xE88BEC
+#define mmTPC2_QM_ARB_MST_CRED_STS_16 0xE88BF0
+#define mmTPC2_QM_ARB_MST_CRED_STS_17 0xE88BF4
+#define mmTPC2_QM_ARB_MST_CRED_STS_18 0xE88BF8
+#define mmTPC2_QM_ARB_MST_CRED_STS_19 0xE88BFC
+#define mmTPC2_QM_ARB_MST_CRED_STS_20 0xE88C00
+#define mmTPC2_QM_ARB_MST_CRED_STS_21 0xE88C04
+#define mmTPC2_QM_ARB_MST_CRED_STS_22 0xE88C08
+#define mmTPC2_QM_ARB_MST_CRED_STS_23 0xE88C0C
+#define mmTPC2_QM_ARB_MST_CRED_STS_24 0xE88C10
+#define mmTPC2_QM_ARB_MST_CRED_STS_25 0xE88C14
+#define mmTPC2_QM_ARB_MST_CRED_STS_26 0xE88C18
+#define mmTPC2_QM_ARB_MST_CRED_STS_27 0xE88C1C
+#define mmTPC2_QM_ARB_MST_CRED_STS_28 0xE88C20
+#define mmTPC2_QM_ARB_MST_CRED_STS_29 0xE88C24
+#define mmTPC2_QM_ARB_MST_CRED_STS_30 0xE88C28
+#define mmTPC2_QM_ARB_MST_CRED_STS_31 0xE88C2C
+#define mmTPC2_QM_CGM_CFG 0xE88C70
+#define mmTPC2_QM_CGM_STS 0xE88C74
+#define mmTPC2_QM_CGM_CFG1 0xE88C78
+#define mmTPC2_QM_LOCAL_RANGE_BASE 0xE88C80
+#define mmTPC2_QM_LOCAL_RANGE_SIZE 0xE88C84
+#define mmTPC2_QM_CSMR_STRICT_PRIO_CFG 0xE88C90
+#define mmTPC2_QM_HBW_RD_RATE_LIM_CFG_1 0xE88C94
+#define mmTPC2_QM_LBW_WR_RATE_LIM_CFG_0 0xE88C98
+#define mmTPC2_QM_LBW_WR_RATE_LIM_CFG_1 0xE88C9C
+#define mmTPC2_QM_HBW_RD_RATE_LIM_CFG_0 0xE88CA0
+#define mmTPC2_QM_GLBL_AXCACHE 0xE88CA4
+#define mmTPC2_QM_IND_GW_APB_CFG 0xE88CB0
+#define mmTPC2_QM_IND_GW_APB_WDATA 0xE88CB4
+#define mmTPC2_QM_IND_GW_APB_RDATA 0xE88CB8
+#define mmTPC2_QM_IND_GW_APB_STATUS 0xE88CBC
+#define mmTPC2_QM_GLBL_ERR_ADDR_LO 0xE88CD0
+#define mmTPC2_QM_GLBL_ERR_ADDR_HI 0xE88CD4
+#define mmTPC2_QM_GLBL_ERR_WDATA 0xE88CD8
+#define mmTPC2_QM_GLBL_MEM_INIT_BUSY 0xE88D00
+
+#endif /* ASIC_REG_TPC2_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h
new file mode 100644
index 000000000..6d4246965
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC3_CFG_REGS_H_
+#define ASIC_REG_TPC3_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC3_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xEC6400
+#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xEC6404
+#define mmTPC3_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xEC6408
+#define mmTPC3_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xEC640C
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xEC6410
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xEC6414
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xEC6418
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xEC641C
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xEC6420
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xEC6424
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xEC6428
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xEC642C
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xEC6430
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xEC6434
+#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xEC6438
+#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xEC643C
+#define mmTPC3_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xEC6440
+#define mmTPC3_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xEC6444
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xEC6448
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xEC644C
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xEC6450
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xEC6454
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xEC6458
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xEC645C
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xEC6460
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xEC6464
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xEC6468
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xEC646C
+#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xEC6470
+#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xEC6474
+#define mmTPC3_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xEC6478
+#define mmTPC3_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xEC647C
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xEC6480
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xEC6484
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xEC6488
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xEC648C
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xEC6490
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xEC6494
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xEC6498
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xEC649C
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xEC64A0
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xEC64A4
+#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xEC64A8
+#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xEC64AC
+#define mmTPC3_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xEC64B0
+#define mmTPC3_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xEC64B4
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xEC64B8
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xEC64BC
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xEC64C0
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xEC64C4
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xEC64C8
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xEC64CC
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xEC64D0
+#define
mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xEC64D4 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xEC64D8 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xEC64DC + +#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xEC64E0 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xEC64E4 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xEC64E8 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xEC64EC + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xEC64F0 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xEC64F4 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xEC64F8 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xEC64FC + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xEC6500 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xEC6504 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xEC6508 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xEC650C + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xEC6510 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xEC6514 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xEC6518 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xEC651C + +#define mmTPC3_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xEC6520 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xEC6524 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xEC6528 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xEC652C + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xEC6530 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xEC6534 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xEC6538 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xEC653C + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xEC6540 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xEC6544 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xEC6548 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xEC654C + +#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xEC6550 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xEC6554 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xEC6558 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xEC655C + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xEC6560 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xEC6564 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xEC6568 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xEC656C + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xEC6570 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xEC6574 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xEC6578 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xEC657C + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xEC6580 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xEC6584 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xEC6588 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xEC658C + +#define mmTPC3_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xEC6590 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xEC6594 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xEC6598 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xEC659C + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xEC65A0 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xEC65A4 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xEC65A8 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xEC65AC + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xEC65B0 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xEC65B4 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xEC65B8 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xEC65BC + +#define 
mmTPC3_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xEC65C0 + +#define mmTPC3_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xEC65C4 + +#define mmTPC3_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xEC65C8 + +#define mmTPC3_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xEC65CC + +#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xEC65D0 + +#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xEC65D4 + +#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xEC65D8 + +#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xEC65DC + +#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xEC65E0 + +#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xEC65E4 + +#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xEC65E8 + +#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xEC65EC + +#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xEC65F0 + +#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xEC65F4 + +#define mmTPC3_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xEC65F8 + +#define mmTPC3_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xEC65FC + +#define mmTPC3_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xEC6600 + +#define mmTPC3_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xEC6604 + +#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xEC6608 + +#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xEC660C + +#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xEC6610 + +#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xEC6614 + +#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xEC6618 + +#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xEC661C + +#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xEC6620 + +#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xEC6624 + +#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xEC6628 + +#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xEC662C + +#define mmTPC3_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xEC6630 + +#define mmTPC3_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xEC6634 + +#define mmTPC3_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xEC6638 + +#define mmTPC3_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xEC663C + +#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xEC6640 + +#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xEC6644 + +#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xEC6648 + +#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xEC664C + +#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xEC6650 + +#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xEC6654 + +#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xEC6658 + +#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xEC665C + +#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xEC6660 + +#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xEC6664 + +#define mmTPC3_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xEC6668 + +#define mmTPC3_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xEC666C + +#define mmTPC3_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xEC6670 + +#define mmTPC3_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xEC6674 + +#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xEC6678 + +#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xEC667C + +#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xEC6680 + +#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xEC6684 + +#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xEC6688 + +#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xEC668C + +#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xEC6690 + +#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xEC6694 + +#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xEC6698 + +#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xEC669C + +#define mmTPC3_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xEC66A0 + +#define mmTPC3_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xEC66A4 + +#define mmTPC3_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xEC66A8 + +#define 
mmTPC3_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xEC66AC + +#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xEC66B0 + +#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xEC66B4 + +#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xEC66B8 + +#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xEC66BC + +#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xEC66C0 + +#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xEC66C4 + +#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xEC66C8 + +#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xEC66CC + +#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xEC66D0 + +#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xEC66D4 + +#define mmTPC3_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xEC66D8 + +#define mmTPC3_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xEC66DC + +#define mmTPC3_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xEC66E0 + +#define mmTPC3_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xEC66E4 + +#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xEC66E8 + +#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xEC66EC + +#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xEC66F0 + +#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xEC66F4 + +#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xEC66F8 + +#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xEC66FC + +#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xEC6700 + +#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xEC6704 + +#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xEC6708 + +#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xEC670C + +#define mmTPC3_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xEC6710 + +#define mmTPC3_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xEC6714 + +#define mmTPC3_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xEC6718 + +#define mmTPC3_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xEC671C + +#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xEC6720 + +#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xEC6724 + +#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xEC6728 + +#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xEC672C + +#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xEC6730 + +#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xEC6734 + +#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xEC6738 + +#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xEC673C + +#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xEC6740 + +#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xEC6744 + +#define mmTPC3_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xEC6748 + +#define mmTPC3_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xEC674C + +#define mmTPC3_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xEC6750 + +#define mmTPC3_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xEC6754 + +#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xEC6758 + +#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xEC675C + +#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xEC6760 + +#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xEC6764 + +#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xEC6768 + +#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xEC676C + +#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xEC6770 + +#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xEC6774 + +#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xEC6778 + +#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xEC677C + +#define mmTPC3_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xEC6780 + +#define mmTPC3_CFG_KERNEL_SYNC_OBJECT_ADDR 0xEC6784 + +#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xEC6788 + +#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xEC678C + +#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_0 0xEC6790 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_0 0xEC6794 + +#define 
mmTPC3_CFG_KERNEL_TID_BASE_DIM_1 0xEC6798 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_1 0xEC679C + +#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_2 0xEC67A0 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_2 0xEC67A4 + +#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_3 0xEC67A8 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_3 0xEC67AC + +#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_4 0xEC67B0 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_4 0xEC67B4 + +#define mmTPC3_CFG_KERNEL_KERNEL_CONFIG 0xEC67B8 + +#define mmTPC3_CFG_KERNEL_KERNEL_ID 0xEC67BC + +#define mmTPC3_CFG_KERNEL_SRF_0 0xEC67C0 + +#define mmTPC3_CFG_KERNEL_SRF_1 0xEC67C4 + +#define mmTPC3_CFG_KERNEL_SRF_2 0xEC67C8 + +#define mmTPC3_CFG_KERNEL_SRF_3 0xEC67CC + +#define mmTPC3_CFG_KERNEL_SRF_4 0xEC67D0 + +#define mmTPC3_CFG_KERNEL_SRF_5 0xEC67D4 + +#define mmTPC3_CFG_KERNEL_SRF_6 0xEC67D8 + +#define mmTPC3_CFG_KERNEL_SRF_7 0xEC67DC + +#define mmTPC3_CFG_KERNEL_SRF_8 0xEC67E0 + +#define mmTPC3_CFG_KERNEL_SRF_9 0xEC67E4 + +#define mmTPC3_CFG_KERNEL_SRF_10 0xEC67E8 + +#define mmTPC3_CFG_KERNEL_SRF_11 0xEC67EC + +#define mmTPC3_CFG_KERNEL_SRF_12 0xEC67F0 + +#define mmTPC3_CFG_KERNEL_SRF_13 0xEC67F4 + +#define mmTPC3_CFG_KERNEL_SRF_14 0xEC67F8 + +#define mmTPC3_CFG_KERNEL_SRF_15 0xEC67FC + +#define mmTPC3_CFG_KERNEL_SRF_16 0xEC6800 + +#define mmTPC3_CFG_KERNEL_SRF_17 0xEC6804 + +#define mmTPC3_CFG_KERNEL_SRF_18 0xEC6808 + +#define mmTPC3_CFG_KERNEL_SRF_19 0xEC680C + +#define mmTPC3_CFG_KERNEL_SRF_20 0xEC6810 + +#define mmTPC3_CFG_KERNEL_SRF_21 0xEC6814 + +#define mmTPC3_CFG_KERNEL_SRF_22 0xEC6818 + +#define mmTPC3_CFG_KERNEL_SRF_23 0xEC681C + +#define mmTPC3_CFG_KERNEL_SRF_24 0xEC6820 + +#define mmTPC3_CFG_KERNEL_SRF_25 0xEC6824 + +#define mmTPC3_CFG_KERNEL_SRF_26 0xEC6828 + +#define mmTPC3_CFG_KERNEL_SRF_27 0xEC682C + +#define mmTPC3_CFG_KERNEL_SRF_28 0xEC6830 + +#define mmTPC3_CFG_KERNEL_SRF_29 0xEC6834 + +#define mmTPC3_CFG_KERNEL_SRF_30 0xEC6838 + +#define mmTPC3_CFG_KERNEL_SRF_31 0xEC683C + +#define mmTPC3_CFG_ROUND_CSR 0xEC68FC + +#define mmTPC3_CFG_PROT 0xEC6900 + +#define mmTPC3_CFG_SEMAPHORE 0xEC6908 + +#define mmTPC3_CFG_VFLAGS 0xEC690C + +#define mmTPC3_CFG_SFLAGS 0xEC6910 + +#define mmTPC3_CFG_LFSR_POLYNOM 0xEC6918 + +#define mmTPC3_CFG_STATUS 0xEC691C + +#define mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH 0xEC6920 + +#define mmTPC3_CFG_CFG_SUBTRACT_VALUE 0xEC6924 + +#define mmTPC3_CFG_SM_BASE_ADDRESS_HIGH 0xEC692C + +#define mmTPC3_CFG_TPC_CMD 0xEC6930 + +#define mmTPC3_CFG_TPC_EXECUTE 0xEC6938 + +#define mmTPC3_CFG_TPC_STALL 0xEC693C + +#define mmTPC3_CFG_ICACHE_BASE_ADDERESS_LOW 0xEC6940 + +#define mmTPC3_CFG_ICACHE_BASE_ADDERESS_HIGH 0xEC6944 + +#define mmTPC3_CFG_RD_RATE_LIMIT 0xEC6948 + +#define mmTPC3_CFG_WR_RATE_LIMIT 0xEC6950 + +#define mmTPC3_CFG_MSS_CONFIG 0xEC6954 + +#define mmTPC3_CFG_TPC_INTR_CAUSE 0xEC6958 + +#define mmTPC3_CFG_TPC_INTR_MASK 0xEC695C + +#define mmTPC3_CFG_WQ_CREDITS 0xEC6960 + +#define mmTPC3_CFG_ARUSER_LO 0xEC6964 + +#define mmTPC3_CFG_ARUSER_HI 0xEC6968 + +#define mmTPC3_CFG_AWUSER_LO 0xEC696C + +#define mmTPC3_CFG_AWUSER_HI 0xEC6970 + +#define mmTPC3_CFG_OPCODE_EXEC 0xEC6974 + +#define mmTPC3_CFG_LUT_FUNC32_BASE_ADDR_LO 0xEC6978 + +#define mmTPC3_CFG_LUT_FUNC32_BASE_ADDR_HI 0xEC697C + +#define mmTPC3_CFG_LUT_FUNC64_BASE_ADDR_LO 0xEC6980 + +#define mmTPC3_CFG_LUT_FUNC64_BASE_ADDR_HI 0xEC6984 + +#define mmTPC3_CFG_LUT_FUNC128_BASE_ADDR_LO 0xEC6988 + +#define mmTPC3_CFG_LUT_FUNC128_BASE_ADDR_HI 0xEC698C + +#define mmTPC3_CFG_LUT_FUNC256_BASE_ADDR_LO 0xEC6990 + +#define mmTPC3_CFG_LUT_FUNC256_BASE_ADDR_HI 0xEC6994 + +#define 
mmTPC3_CFG_TSB_CFG_MAX_SIZE 0xEC6998 + +#define mmTPC3_CFG_TSB_CFG 0xEC699C + +#define mmTPC3_CFG_DBGMEM_ADD 0xEC69A0 + +#define mmTPC3_CFG_DBGMEM_DATA_WR 0xEC69A4 + +#define mmTPC3_CFG_DBGMEM_DATA_RD 0xEC69A8 + +#define mmTPC3_CFG_DBGMEM_CTRL 0xEC69AC + +#define mmTPC3_CFG_DBGMEM_RC 0xEC69B0 + +#define mmTPC3_CFG_TSB_INFLIGHT_CNTR 0xEC69B4 + +#define mmTPC3_CFG_WQ_INFLIGHT_CNTR 0xEC69B8 + +#define mmTPC3_CFG_WQ_LBW_TOTAL_CNTR 0xEC69BC + +#define mmTPC3_CFG_WQ_HBW_TOTAL_CNTR 0xEC69C0 + +#define mmTPC3_CFG_IRQ_OCCOUPY_CNTR 0xEC69C4 + +#define mmTPC3_CFG_FUNC_MBIST_CNTRL 0xEC69D0 + +#define mmTPC3_CFG_FUNC_MBIST_PAT 0xEC69D4 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_0 0xEC69D8 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_1 0xEC69DC + +#define mmTPC3_CFG_FUNC_MBIST_MEM_2 0xEC69E0 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_3 0xEC69E4 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_4 0xEC69E8 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_5 0xEC69EC + +#define mmTPC3_CFG_FUNC_MBIST_MEM_6 0xEC69F0 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_7 0xEC69F4 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_8 0xEC69F8 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_9 0xEC69FC + +#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xEC6A00 + +#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xEC6A04 + +#define mmTPC3_CFG_QM_TENSOR_0_PADDING_VALUE 0xEC6A08 + +#define mmTPC3_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xEC6A0C + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_SIZE 0xEC6A10 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xEC6A14 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_SIZE 0xEC6A18 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xEC6A1C + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_SIZE 0xEC6A20 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xEC6A24 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_SIZE 0xEC6A28 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xEC6A2C + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_SIZE 0xEC6A30 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xEC6A34 + +#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xEC6A38 + +#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xEC6A3C + +#define mmTPC3_CFG_QM_TENSOR_1_PADDING_VALUE 0xEC6A40 + +#define mmTPC3_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xEC6A44 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_SIZE 0xEC6A48 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xEC6A4C + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_SIZE 0xEC6A50 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xEC6A54 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_SIZE 0xEC6A58 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xEC6A5C + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_SIZE 0xEC6A60 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xEC6A64 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_SIZE 0xEC6A68 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xEC6A6C + +#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xEC6A70 + +#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xEC6A74 + +#define mmTPC3_CFG_QM_TENSOR_2_PADDING_VALUE 0xEC6A78 + +#define mmTPC3_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xEC6A7C + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_SIZE 0xEC6A80 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xEC6A84 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_SIZE 0xEC6A88 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xEC6A8C + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_SIZE 0xEC6A90 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xEC6A94 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_SIZE 0xEC6A98 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xEC6A9C + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_SIZE 0xEC6AA0 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xEC6AA4 + +#define mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xEC6AA8 + +#define mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 
0xEC6AAC + +#define mmTPC3_CFG_QM_TENSOR_3_PADDING_VALUE 0xEC6AB0 + +#define mmTPC3_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xEC6AB4 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_SIZE 0xEC6AB8 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xEC6ABC + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_SIZE 0xEC6AC0 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xEC6AC4 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_SIZE 0xEC6AC8 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xEC6ACC + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_SIZE 0xEC6AD0 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xEC6AD4 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_SIZE 0xEC6AD8 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xEC6ADC + +#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xEC6AE0 + +#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xEC6AE4 + +#define mmTPC3_CFG_QM_TENSOR_4_PADDING_VALUE 0xEC6AE8 + +#define mmTPC3_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xEC6AEC + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_SIZE 0xEC6AF0 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xEC6AF4 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_SIZE 0xEC6AF8 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xEC6AFC + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_SIZE 0xEC6B00 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xEC6B04 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_SIZE 0xEC6B08 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xEC6B0C + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_SIZE 0xEC6B10 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xEC6B14 + +#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xEC6B18 + +#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xEC6B1C + +#define mmTPC3_CFG_QM_TENSOR_5_PADDING_VALUE 0xEC6B20 + +#define mmTPC3_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xEC6B24 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_SIZE 0xEC6B28 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xEC6B2C + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_SIZE 0xEC6B30 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xEC6B34 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_SIZE 0xEC6B38 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xEC6B3C + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_SIZE 0xEC6B40 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xEC6B44 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_SIZE 0xEC6B48 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xEC6B4C + +#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xEC6B50 + +#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xEC6B54 + +#define mmTPC3_CFG_QM_TENSOR_6_PADDING_VALUE 0xEC6B58 + +#define mmTPC3_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xEC6B5C + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_SIZE 0xEC6B60 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xEC6B64 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_SIZE 0xEC6B68 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xEC6B6C + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_SIZE 0xEC6B70 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xEC6B74 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_SIZE 0xEC6B78 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xEC6B7C + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_SIZE 0xEC6B80 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xEC6B84 + +#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xEC6B88 + +#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xEC6B8C + +#define mmTPC3_CFG_QM_TENSOR_7_PADDING_VALUE 0xEC6B90 + +#define mmTPC3_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xEC6B94 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_SIZE 0xEC6B98 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xEC6B9C + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_SIZE 0xEC6BA0 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xEC6BA4 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_SIZE 0xEC6BA8 + +#define 
mmTPC3_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xEC6BAC + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_SIZE 0xEC6BB0 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xEC6BB4 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_SIZE 0xEC6BB8 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xEC6BBC + +#define mmTPC3_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xEC6BC0 + +#define mmTPC3_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xEC6BC4 + +#define mmTPC3_CFG_QM_TENSOR_8_PADDING_VALUE 0xEC6BC8 + +#define mmTPC3_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xEC6BCC + +#define mmTPC3_CFG_QM_TENSOR_8_DIM_0_SIZE 0xEC6BD0 + +#define mmTPC3_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xEC6BD4 + +#define mmTPC3_CFG_QM_TENSOR_8_DIM_1_SIZE 0xEC6BD8 + +#define mmTPC3_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xEC6BDC + +#define mmTPC3_CFG_QM_TENSOR_8_DIM_2_SIZE 0xEC6BE0 + +#define mmTPC3_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xEC6BE4 + +#define mmTPC3_CFG_QM_TENSOR_8_DIM_3_SIZE 0xEC6BE8 + +#define mmTPC3_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xEC6BEC + +#define mmTPC3_CFG_QM_TENSOR_8_DIM_4_SIZE 0xEC6BF0 + +#define mmTPC3_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xEC6BF4 + +#define mmTPC3_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xEC6BF8 + +#define mmTPC3_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xEC6BFC + +#define mmTPC3_CFG_QM_TENSOR_9_PADDING_VALUE 0xEC6C00 + +#define mmTPC3_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xEC6C04 + +#define mmTPC3_CFG_QM_TENSOR_9_DIM_0_SIZE 0xEC6C08 + +#define mmTPC3_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xEC6C0C + +#define mmTPC3_CFG_QM_TENSOR_9_DIM_1_SIZE 0xEC6C10 + +#define mmTPC3_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xEC6C14 + +#define mmTPC3_CFG_QM_TENSOR_9_DIM_2_SIZE 0xEC6C18 + +#define mmTPC3_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xEC6C1C + +#define mmTPC3_CFG_QM_TENSOR_9_DIM_3_SIZE 0xEC6C20 + +#define mmTPC3_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xEC6C24 + +#define mmTPC3_CFG_QM_TENSOR_9_DIM_4_SIZE 0xEC6C28 + +#define mmTPC3_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xEC6C2C + +#define mmTPC3_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xEC6C30 + +#define mmTPC3_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xEC6C34 + +#define mmTPC3_CFG_QM_TENSOR_10_PADDING_VALUE 0xEC6C38 + +#define mmTPC3_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xEC6C3C + +#define mmTPC3_CFG_QM_TENSOR_10_DIM_0_SIZE 0xEC6C40 + +#define mmTPC3_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xEC6C44 + +#define mmTPC3_CFG_QM_TENSOR_10_DIM_1_SIZE 0xEC6C48 + +#define mmTPC3_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xEC6C4C + +#define mmTPC3_CFG_QM_TENSOR_10_DIM_2_SIZE 0xEC6C50 + +#define mmTPC3_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xEC6C54 + +#define mmTPC3_CFG_QM_TENSOR_10_DIM_3_SIZE 0xEC6C58 + +#define mmTPC3_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xEC6C5C + +#define mmTPC3_CFG_QM_TENSOR_10_DIM_4_SIZE 0xEC6C60 + +#define mmTPC3_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xEC6C64 + +#define mmTPC3_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xEC6C68 + +#define mmTPC3_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xEC6C6C + +#define mmTPC3_CFG_QM_TENSOR_11_PADDING_VALUE 0xEC6C70 + +#define mmTPC3_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xEC6C74 + +#define mmTPC3_CFG_QM_TENSOR_11_DIM_0_SIZE 0xEC6C78 + +#define mmTPC3_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xEC6C7C + +#define mmTPC3_CFG_QM_TENSOR_11_DIM_1_SIZE 0xEC6C80 + +#define mmTPC3_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xEC6C84 + +#define mmTPC3_CFG_QM_TENSOR_11_DIM_2_SIZE 0xEC6C88 + +#define mmTPC3_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xEC6C8C + +#define mmTPC3_CFG_QM_TENSOR_11_DIM_3_SIZE 0xEC6C90 + +#define mmTPC3_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xEC6C94 + +#define mmTPC3_CFG_QM_TENSOR_11_DIM_4_SIZE 0xEC6C98 + +#define mmTPC3_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xEC6C9C + +#define mmTPC3_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xEC6CA0 + +#define mmTPC3_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xEC6CA4 + +#define 
mmTPC3_CFG_QM_TENSOR_12_PADDING_VALUE 0xEC6CA8 + +#define mmTPC3_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xEC6CAC + +#define mmTPC3_CFG_QM_TENSOR_12_DIM_0_SIZE 0xEC6CB0 + +#define mmTPC3_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xEC6CB4 + +#define mmTPC3_CFG_QM_TENSOR_12_DIM_1_SIZE 0xEC6CB8 + +#define mmTPC3_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xEC6CBC + +#define mmTPC3_CFG_QM_TENSOR_12_DIM_2_SIZE 0xEC6CC0 + +#define mmTPC3_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xEC6CC4 + +#define mmTPC3_CFG_QM_TENSOR_12_DIM_3_SIZE 0xEC6CC8 + +#define mmTPC3_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xEC6CCC + +#define mmTPC3_CFG_QM_TENSOR_12_DIM_4_SIZE 0xEC6CD0 + +#define mmTPC3_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xEC6CD4 + +#define mmTPC3_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xEC6CD8 + +#define mmTPC3_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xEC6CDC + +#define mmTPC3_CFG_QM_TENSOR_13_PADDING_VALUE 0xEC6CE0 + +#define mmTPC3_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xEC6CE4 + +#define mmTPC3_CFG_QM_TENSOR_13_DIM_0_SIZE 0xEC6CE8 + +#define mmTPC3_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xEC6CEC + +#define mmTPC3_CFG_QM_TENSOR_13_DIM_1_SIZE 0xEC6CF0 + +#define mmTPC3_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xEC6CF4 + +#define mmTPC3_CFG_QM_TENSOR_13_DIM_2_SIZE 0xEC6CF8 + +#define mmTPC3_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xEC6CFC + +#define mmTPC3_CFG_QM_TENSOR_13_DIM_3_SIZE 0xEC6D00 + +#define mmTPC3_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xEC6D04 + +#define mmTPC3_CFG_QM_TENSOR_13_DIM_4_SIZE 0xEC6D08 + +#define mmTPC3_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xEC6D0C + +#define mmTPC3_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xEC6D10 + +#define mmTPC3_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xEC6D14 + +#define mmTPC3_CFG_QM_TENSOR_14_PADDING_VALUE 0xEC6D18 + +#define mmTPC3_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xEC6D1C + +#define mmTPC3_CFG_QM_TENSOR_14_DIM_0_SIZE 0xEC6D20 + +#define mmTPC3_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xEC6D24 + +#define mmTPC3_CFG_QM_TENSOR_14_DIM_1_SIZE 0xEC6D28 + +#define mmTPC3_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xEC6D2C + +#define mmTPC3_CFG_QM_TENSOR_14_DIM_2_SIZE 0xEC6D30 + +#define mmTPC3_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xEC6D34 + +#define mmTPC3_CFG_QM_TENSOR_14_DIM_3_SIZE 0xEC6D38 + +#define mmTPC3_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xEC6D3C + +#define mmTPC3_CFG_QM_TENSOR_14_DIM_4_SIZE 0xEC6D40 + +#define mmTPC3_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xEC6D44 + +#define mmTPC3_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xEC6D48 + +#define mmTPC3_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xEC6D4C + +#define mmTPC3_CFG_QM_TENSOR_15_PADDING_VALUE 0xEC6D50 + +#define mmTPC3_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xEC6D54 + +#define mmTPC3_CFG_QM_TENSOR_15_DIM_0_SIZE 0xEC6D58 + +#define mmTPC3_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xEC6D5C + +#define mmTPC3_CFG_QM_TENSOR_15_DIM_1_SIZE 0xEC6D60 + +#define mmTPC3_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xEC6D64 + +#define mmTPC3_CFG_QM_TENSOR_15_DIM_2_SIZE 0xEC6D68 + +#define mmTPC3_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xEC6D6C + +#define mmTPC3_CFG_QM_TENSOR_15_DIM_3_SIZE 0xEC6D70 + +#define mmTPC3_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xEC6D74 + +#define mmTPC3_CFG_QM_TENSOR_15_DIM_4_SIZE 0xEC6D78 + +#define mmTPC3_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xEC6D7C + +#define mmTPC3_CFG_QM_SYNC_OBJECT_MESSAGE 0xEC6D80 + +#define mmTPC3_CFG_QM_SYNC_OBJECT_ADDR 0xEC6D84 + +#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xEC6D88 + +#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xEC6D8C + +#define mmTPC3_CFG_QM_TID_BASE_DIM_0 0xEC6D90 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_0 0xEC6D94 + +#define mmTPC3_CFG_QM_TID_BASE_DIM_1 0xEC6D98 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_1 0xEC6D9C + +#define mmTPC3_CFG_QM_TID_BASE_DIM_2 0xEC6DA0 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_2 
0xEC6DA4 + +#define mmTPC3_CFG_QM_TID_BASE_DIM_3 0xEC6DA8 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_3 0xEC6DAC + +#define mmTPC3_CFG_QM_TID_BASE_DIM_4 0xEC6DB0 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_4 0xEC6DB4 + +#define mmTPC3_CFG_QM_KERNEL_CONFIG 0xEC6DB8 + +#define mmTPC3_CFG_QM_KERNEL_ID 0xEC6DBC + +#define mmTPC3_CFG_QM_SRF_0 0xEC6DC0 + +#define mmTPC3_CFG_QM_SRF_1 0xEC6DC4 + +#define mmTPC3_CFG_QM_SRF_2 0xEC6DC8 + +#define mmTPC3_CFG_QM_SRF_3 0xEC6DCC + +#define mmTPC3_CFG_QM_SRF_4 0xEC6DD0 + +#define mmTPC3_CFG_QM_SRF_5 0xEC6DD4 + +#define mmTPC3_CFG_QM_SRF_6 0xEC6DD8 + +#define mmTPC3_CFG_QM_SRF_7 0xEC6DDC + +#define mmTPC3_CFG_QM_SRF_8 0xEC6DE0 + +#define mmTPC3_CFG_QM_SRF_9 0xEC6DE4 + +#define mmTPC3_CFG_QM_SRF_10 0xEC6DE8 + +#define mmTPC3_CFG_QM_SRF_11 0xEC6DEC + +#define mmTPC3_CFG_QM_SRF_12 0xEC6DF0 + +#define mmTPC3_CFG_QM_SRF_13 0xEC6DF4 + +#define mmTPC3_CFG_QM_SRF_14 0xEC6DF8 + +#define mmTPC3_CFG_QM_SRF_15 0xEC6DFC + +#define mmTPC3_CFG_QM_SRF_16 0xEC6E00 + +#define mmTPC3_CFG_QM_SRF_17 0xEC6E04 + +#define mmTPC3_CFG_QM_SRF_18 0xEC6E08 + +#define mmTPC3_CFG_QM_SRF_19 0xEC6E0C + +#define mmTPC3_CFG_QM_SRF_20 0xEC6E10 + +#define mmTPC3_CFG_QM_SRF_21 0xEC6E14 + +#define mmTPC3_CFG_QM_SRF_22 0xEC6E18 + +#define mmTPC3_CFG_QM_SRF_23 0xEC6E1C + +#define mmTPC3_CFG_QM_SRF_24 0xEC6E20 + +#define mmTPC3_CFG_QM_SRF_25 0xEC6E24 + +#define mmTPC3_CFG_QM_SRF_26 0xEC6E28 + +#define mmTPC3_CFG_QM_SRF_27 0xEC6E2C + +#define mmTPC3_CFG_QM_SRF_28 0xEC6E30 + +#define mmTPC3_CFG_QM_SRF_29 0xEC6E34 + +#define mmTPC3_CFG_QM_SRF_30 0xEC6E38 + +#define mmTPC3_CFG_QM_SRF_31 0xEC6E3C + +#endif /* ASIC_REG_TPC3_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h new file mode 100644 index 000000000..5f2a0fd86 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC3_QM_REGS_H_ +#define ASIC_REG_TPC3_QM_REGS_H_ + +/* + ***************************************** + * TPC3_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC3_QM_GLBL_CFG0 0xEC8000 + +#define mmTPC3_QM_GLBL_CFG1 0xEC8004 + +#define mmTPC3_QM_GLBL_PROT 0xEC8008 + +#define mmTPC3_QM_GLBL_ERR_CFG 0xEC800C + +#define mmTPC3_QM_GLBL_SECURE_PROPS_0 0xEC8010 + +#define mmTPC3_QM_GLBL_SECURE_PROPS_1 0xEC8014 + +#define mmTPC3_QM_GLBL_SECURE_PROPS_2 0xEC8018 + +#define mmTPC3_QM_GLBL_SECURE_PROPS_3 0xEC801C + +#define mmTPC3_QM_GLBL_SECURE_PROPS_4 0xEC8020 + +#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_0 0xEC8024 + +#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_1 0xEC8028 + +#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_2 0xEC802C + +#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_3 0xEC8030 + +#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_4 0xEC8034 + +#define mmTPC3_QM_GLBL_STS0 0xEC8038 + +#define mmTPC3_QM_GLBL_STS1_0 0xEC8040 + +#define mmTPC3_QM_GLBL_STS1_1 0xEC8044 + +#define mmTPC3_QM_GLBL_STS1_2 0xEC8048 + +#define mmTPC3_QM_GLBL_STS1_3 0xEC804C + +#define mmTPC3_QM_GLBL_STS1_4 0xEC8050 + +#define mmTPC3_QM_GLBL_MSG_EN_0 0xEC8054 + +#define mmTPC3_QM_GLBL_MSG_EN_1 0xEC8058 + +#define mmTPC3_QM_GLBL_MSG_EN_2 0xEC805C + +#define mmTPC3_QM_GLBL_MSG_EN_3 0xEC8060 + +#define mmTPC3_QM_GLBL_MSG_EN_4 0xEC8068 + +#define mmTPC3_QM_PQ_BASE_LO_0 0xEC8070 + +#define mmTPC3_QM_PQ_BASE_LO_1 0xEC8074 + +#define mmTPC3_QM_PQ_BASE_LO_2 0xEC8078 + +#define mmTPC3_QM_PQ_BASE_LO_3 0xEC807C + +#define mmTPC3_QM_PQ_BASE_HI_0 0xEC8080 + +#define mmTPC3_QM_PQ_BASE_HI_1 0xEC8084 + +#define mmTPC3_QM_PQ_BASE_HI_2 0xEC8088 + +#define mmTPC3_QM_PQ_BASE_HI_3 0xEC808C + +#define mmTPC3_QM_PQ_SIZE_0 0xEC8090 + +#define mmTPC3_QM_PQ_SIZE_1 0xEC8094 + +#define mmTPC3_QM_PQ_SIZE_2 0xEC8098 + +#define mmTPC3_QM_PQ_SIZE_3 0xEC809C + +#define mmTPC3_QM_PQ_PI_0 0xEC80A0 + +#define mmTPC3_QM_PQ_PI_1 0xEC80A4 + +#define mmTPC3_QM_PQ_PI_2 0xEC80A8 + +#define mmTPC3_QM_PQ_PI_3 0xEC80AC + +#define mmTPC3_QM_PQ_CI_0 0xEC80B0 + +#define mmTPC3_QM_PQ_CI_1 0xEC80B4 + +#define mmTPC3_QM_PQ_CI_2 0xEC80B8 + +#define mmTPC3_QM_PQ_CI_3 0xEC80BC + +#define mmTPC3_QM_PQ_CFG0_0 0xEC80C0 + +#define mmTPC3_QM_PQ_CFG0_1 0xEC80C4 + +#define mmTPC3_QM_PQ_CFG0_2 0xEC80C8 + +#define mmTPC3_QM_PQ_CFG0_3 0xEC80CC + +#define mmTPC3_QM_PQ_CFG1_0 0xEC80D0 + +#define mmTPC3_QM_PQ_CFG1_1 0xEC80D4 + +#define mmTPC3_QM_PQ_CFG1_2 0xEC80D8 + +#define mmTPC3_QM_PQ_CFG1_3 0xEC80DC + +#define mmTPC3_QM_PQ_ARUSER_31_11_0 0xEC80E0 + +#define mmTPC3_QM_PQ_ARUSER_31_11_1 0xEC80E4 + +#define mmTPC3_QM_PQ_ARUSER_31_11_2 0xEC80E8 + +#define mmTPC3_QM_PQ_ARUSER_31_11_3 0xEC80EC + +#define mmTPC3_QM_PQ_STS0_0 0xEC80F0 + +#define mmTPC3_QM_PQ_STS0_1 0xEC80F4 + +#define mmTPC3_QM_PQ_STS0_2 0xEC80F8 + +#define mmTPC3_QM_PQ_STS0_3 0xEC80FC + +#define mmTPC3_QM_PQ_STS1_0 0xEC8100 + +#define mmTPC3_QM_PQ_STS1_1 0xEC8104 + +#define mmTPC3_QM_PQ_STS1_2 0xEC8108 + +#define mmTPC3_QM_PQ_STS1_3 0xEC810C + +#define mmTPC3_QM_CQ_CFG0_0 0xEC8110 + +#define mmTPC3_QM_CQ_CFG0_1 0xEC8114 + +#define mmTPC3_QM_CQ_CFG0_2 0xEC8118 + +#define mmTPC3_QM_CQ_CFG0_3 0xEC811C + +#define mmTPC3_QM_CQ_CFG0_4 0xEC8120 + +#define mmTPC3_QM_CQ_CFG1_0 0xEC8124 + +#define mmTPC3_QM_CQ_CFG1_1 0xEC8128 + +#define mmTPC3_QM_CQ_CFG1_2 0xEC812C + +#define mmTPC3_QM_CQ_CFG1_3 0xEC8130 + +#define mmTPC3_QM_CQ_CFG1_4 0xEC8134 + 
+#define mmTPC3_QM_CQ_ARUSER_31_11_0 0xEC8138 + +#define mmTPC3_QM_CQ_ARUSER_31_11_1 0xEC813C + +#define mmTPC3_QM_CQ_ARUSER_31_11_2 0xEC8140 + +#define mmTPC3_QM_CQ_ARUSER_31_11_3 0xEC8144 + +#define mmTPC3_QM_CQ_ARUSER_31_11_4 0xEC8148 + +#define mmTPC3_QM_CQ_STS0_0 0xEC814C + +#define mmTPC3_QM_CQ_STS0_1 0xEC8150 + +#define mmTPC3_QM_CQ_STS0_2 0xEC8154 + +#define mmTPC3_QM_CQ_STS0_3 0xEC8158 + +#define mmTPC3_QM_CQ_STS0_4 0xEC815C + +#define mmTPC3_QM_CQ_STS1_0 0xEC8160 + +#define mmTPC3_QM_CQ_STS1_1 0xEC8164 + +#define mmTPC3_QM_CQ_STS1_2 0xEC8168 + +#define mmTPC3_QM_CQ_STS1_3 0xEC816C + +#define mmTPC3_QM_CQ_STS1_4 0xEC8170 + +#define mmTPC3_QM_CQ_PTR_LO_0 0xEC8174 + +#define mmTPC3_QM_CQ_PTR_HI_0 0xEC8178 + +#define mmTPC3_QM_CQ_TSIZE_0 0xEC817C + +#define mmTPC3_QM_CQ_CTL_0 0xEC8180 + +#define mmTPC3_QM_CQ_PTR_LO_1 0xEC8184 + +#define mmTPC3_QM_CQ_PTR_HI_1 0xEC8188 + +#define mmTPC3_QM_CQ_TSIZE_1 0xEC818C + +#define mmTPC3_QM_CQ_CTL_1 0xEC8190 + +#define mmTPC3_QM_CQ_PTR_LO_2 0xEC8194 + +#define mmTPC3_QM_CQ_PTR_HI_2 0xEC8198 + +#define mmTPC3_QM_CQ_TSIZE_2 0xEC819C + +#define mmTPC3_QM_CQ_CTL_2 0xEC81A0 + +#define mmTPC3_QM_CQ_PTR_LO_3 0xEC81A4 + +#define mmTPC3_QM_CQ_PTR_HI_3 0xEC81A8 + +#define mmTPC3_QM_CQ_TSIZE_3 0xEC81AC + +#define mmTPC3_QM_CQ_CTL_3 0xEC81B0 + +#define mmTPC3_QM_CQ_PTR_LO_4 0xEC81B4 + +#define mmTPC3_QM_CQ_PTR_HI_4 0xEC81B8 + +#define mmTPC3_QM_CQ_TSIZE_4 0xEC81BC + +#define mmTPC3_QM_CQ_CTL_4 0xEC81C0 + +#define mmTPC3_QM_CQ_PTR_LO_STS_0 0xEC81C4 + +#define mmTPC3_QM_CQ_PTR_LO_STS_1 0xEC81C8 + +#define mmTPC3_QM_CQ_PTR_LO_STS_2 0xEC81CC + +#define mmTPC3_QM_CQ_PTR_LO_STS_3 0xEC81D0 + +#define mmTPC3_QM_CQ_PTR_LO_STS_4 0xEC81D4 + +#define mmTPC3_QM_CQ_PTR_HI_STS_0 0xEC81D8 + +#define mmTPC3_QM_CQ_PTR_HI_STS_1 0xEC81DC + +#define mmTPC3_QM_CQ_PTR_HI_STS_2 0xEC81E0 + +#define mmTPC3_QM_CQ_PTR_HI_STS_3 0xEC81E4 + +#define mmTPC3_QM_CQ_PTR_HI_STS_4 0xEC81E8 + +#define mmTPC3_QM_CQ_TSIZE_STS_0 0xEC81EC + +#define mmTPC3_QM_CQ_TSIZE_STS_1 0xEC81F0 + +#define mmTPC3_QM_CQ_TSIZE_STS_2 0xEC81F4 + +#define mmTPC3_QM_CQ_TSIZE_STS_3 0xEC81F8 + +#define mmTPC3_QM_CQ_TSIZE_STS_4 0xEC81FC + +#define mmTPC3_QM_CQ_CTL_STS_0 0xEC8200 + +#define mmTPC3_QM_CQ_CTL_STS_1 0xEC8204 + +#define mmTPC3_QM_CQ_CTL_STS_2 0xEC8208 + +#define mmTPC3_QM_CQ_CTL_STS_3 0xEC820C + +#define mmTPC3_QM_CQ_CTL_STS_4 0xEC8210 + +#define mmTPC3_QM_CQ_IFIFO_CNT_0 0xEC8214 + +#define mmTPC3_QM_CQ_IFIFO_CNT_1 0xEC8218 + +#define mmTPC3_QM_CQ_IFIFO_CNT_2 0xEC821C + +#define mmTPC3_QM_CQ_IFIFO_CNT_3 0xEC8220 + +#define mmTPC3_QM_CQ_IFIFO_CNT_4 0xEC8224 + +#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_0 0xEC8228 + +#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_1 0xEC822C + +#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_2 0xEC8230 + +#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_3 0xEC8234 + +#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_4 0xEC8238 + +#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_0 0xEC823C + +#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_1 0xEC8240 + +#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_2 0xEC8244 + +#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_3 0xEC8248 + +#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_4 0xEC824C + +#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_0 0xEC8250 + +#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_1 0xEC8254 + +#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_2 0xEC8258 + +#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_3 0xEC825C + +#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_4 0xEC8260 + +#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_0 0xEC8264 + +#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_1 0xEC8268 + +#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_2 0xEC826C + +#define 
mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_3 0xEC8270 + +#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_4 0xEC8274 + +#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_0 0xEC8278 + +#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_1 0xEC827C + +#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 0xEC8280 + +#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_3 0xEC8284 + +#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_4 0xEC8288 + +#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_0 0xEC828C + +#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_1 0xEC8290 + +#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_2 0xEC8294 + +#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_3 0xEC8298 + +#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_4 0xEC829C + +#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_0 0xEC82A0 + +#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_1 0xEC82A4 + +#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_2 0xEC82A8 + +#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_3 0xEC82AC + +#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_4 0xEC82B0 + +#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_0 0xEC82B4 + +#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_1 0xEC82B8 + +#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_2 0xEC82BC + +#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_3 0xEC82C0 + +#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_4 0xEC82C4 + +#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_0 0xEC82C8 + +#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_1 0xEC82CC + +#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_2 0xEC82D0 + +#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_3 0xEC82D4 + +#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_4 0xEC82D8 + +#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xEC82E0 + +#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xEC82E4 + +#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xEC82E8 + +#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xEC82EC + +#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xEC82F0 + +#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xEC82F4 + +#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xEC82F8 + +#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xEC82FC + +#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xEC8300 + +#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xEC8304 + +#define mmTPC3_QM_CP_FENCE0_RDATA_0 0xEC8308 + +#define mmTPC3_QM_CP_FENCE0_RDATA_1 0xEC830C + +#define mmTPC3_QM_CP_FENCE0_RDATA_2 0xEC8310 + +#define mmTPC3_QM_CP_FENCE0_RDATA_3 0xEC8314 + +#define mmTPC3_QM_CP_FENCE0_RDATA_4 0xEC8318 + +#define mmTPC3_QM_CP_FENCE1_RDATA_0 0xEC831C + +#define mmTPC3_QM_CP_FENCE1_RDATA_1 0xEC8320 + +#define mmTPC3_QM_CP_FENCE1_RDATA_2 0xEC8324 + +#define mmTPC3_QM_CP_FENCE1_RDATA_3 0xEC8328 + +#define mmTPC3_QM_CP_FENCE1_RDATA_4 0xEC832C + +#define mmTPC3_QM_CP_FENCE2_RDATA_0 0xEC8330 + +#define mmTPC3_QM_CP_FENCE2_RDATA_1 0xEC8334 + +#define mmTPC3_QM_CP_FENCE2_RDATA_2 0xEC8338 + +#define mmTPC3_QM_CP_FENCE2_RDATA_3 0xEC833C + +#define mmTPC3_QM_CP_FENCE2_RDATA_4 0xEC8340 + +#define mmTPC3_QM_CP_FENCE3_RDATA_0 0xEC8344 + +#define mmTPC3_QM_CP_FENCE3_RDATA_1 0xEC8348 + +#define mmTPC3_QM_CP_FENCE3_RDATA_2 0xEC834C + +#define mmTPC3_QM_CP_FENCE3_RDATA_3 0xEC8350 + +#define mmTPC3_QM_CP_FENCE3_RDATA_4 0xEC8354 + +#define mmTPC3_QM_CP_FENCE0_CNT_0 0xEC8358 + +#define mmTPC3_QM_CP_FENCE0_CNT_1 0xEC835C + +#define mmTPC3_QM_CP_FENCE0_CNT_2 0xEC8360 + +#define mmTPC3_QM_CP_FENCE0_CNT_3 0xEC8364 + +#define mmTPC3_QM_CP_FENCE0_CNT_4 0xEC8368 + +#define mmTPC3_QM_CP_FENCE1_CNT_0 0xEC836C + +#define mmTPC3_QM_CP_FENCE1_CNT_1 0xEC8370 + +#define mmTPC3_QM_CP_FENCE1_CNT_2 0xEC8374 + +#define mmTPC3_QM_CP_FENCE1_CNT_3 0xEC8378 + +#define mmTPC3_QM_CP_FENCE1_CNT_4 0xEC837C + +#define mmTPC3_QM_CP_FENCE2_CNT_0 0xEC8380 + +#define mmTPC3_QM_CP_FENCE2_CNT_1 0xEC8384 + +#define 
mmTPC3_QM_CP_FENCE2_CNT_2 0xEC8388 + +#define mmTPC3_QM_CP_FENCE2_CNT_3 0xEC838C + +#define mmTPC3_QM_CP_FENCE2_CNT_4 0xEC8390 + +#define mmTPC3_QM_CP_FENCE3_CNT_0 0xEC8394 + +#define mmTPC3_QM_CP_FENCE3_CNT_1 0xEC8398 + +#define mmTPC3_QM_CP_FENCE3_CNT_2 0xEC839C + +#define mmTPC3_QM_CP_FENCE3_CNT_3 0xEC83A0 + +#define mmTPC3_QM_CP_FENCE3_CNT_4 0xEC83A4 + +#define mmTPC3_QM_CP_STS_0 0xEC83A8 + +#define mmTPC3_QM_CP_STS_1 0xEC83AC + +#define mmTPC3_QM_CP_STS_2 0xEC83B0 + +#define mmTPC3_QM_CP_STS_3 0xEC83B4 + +#define mmTPC3_QM_CP_STS_4 0xEC83B8 + +#define mmTPC3_QM_CP_CURRENT_INST_LO_0 0xEC83BC + +#define mmTPC3_QM_CP_CURRENT_INST_LO_1 0xEC83C0 + +#define mmTPC3_QM_CP_CURRENT_INST_LO_2 0xEC83C4 + +#define mmTPC3_QM_CP_CURRENT_INST_LO_3 0xEC83C8 + +#define mmTPC3_QM_CP_CURRENT_INST_LO_4 0xEC83CC + +#define mmTPC3_QM_CP_CURRENT_INST_HI_0 0xEC83D0 + +#define mmTPC3_QM_CP_CURRENT_INST_HI_1 0xEC83D4 + +#define mmTPC3_QM_CP_CURRENT_INST_HI_2 0xEC83D8 + +#define mmTPC3_QM_CP_CURRENT_INST_HI_3 0xEC83DC + +#define mmTPC3_QM_CP_CURRENT_INST_HI_4 0xEC83E0 + +#define mmTPC3_QM_CP_BARRIER_CFG_0 0xEC83F4 + +#define mmTPC3_QM_CP_BARRIER_CFG_1 0xEC83F8 + +#define mmTPC3_QM_CP_BARRIER_CFG_2 0xEC83FC + +#define mmTPC3_QM_CP_BARRIER_CFG_3 0xEC8400 + +#define mmTPC3_QM_CP_BARRIER_CFG_4 0xEC8404 + +#define mmTPC3_QM_CP_DBG_0_0 0xEC8408 + +#define mmTPC3_QM_CP_DBG_0_1 0xEC840C + +#define mmTPC3_QM_CP_DBG_0_2 0xEC8410 + +#define mmTPC3_QM_CP_DBG_0_3 0xEC8414 + +#define mmTPC3_QM_CP_DBG_0_4 0xEC8418 + +#define mmTPC3_QM_CP_ARUSER_31_11_0 0xEC841C + +#define mmTPC3_QM_CP_ARUSER_31_11_1 0xEC8420 + +#define mmTPC3_QM_CP_ARUSER_31_11_2 0xEC8424 + +#define mmTPC3_QM_CP_ARUSER_31_11_3 0xEC8428 + +#define mmTPC3_QM_CP_ARUSER_31_11_4 0xEC842C + +#define mmTPC3_QM_CP_AWUSER_31_11_0 0xEC8430 + +#define mmTPC3_QM_CP_AWUSER_31_11_1 0xEC8434 + +#define mmTPC3_QM_CP_AWUSER_31_11_2 0xEC8438 + +#define mmTPC3_QM_CP_AWUSER_31_11_3 0xEC843C + +#define mmTPC3_QM_CP_AWUSER_31_11_4 0xEC8440 + +#define mmTPC3_QM_ARB_CFG_0 0xEC8A00 + +#define mmTPC3_QM_ARB_CHOISE_Q_PUSH 0xEC8A04 + +#define mmTPC3_QM_ARB_WRR_WEIGHT_0 0xEC8A08 + +#define mmTPC3_QM_ARB_WRR_WEIGHT_1 0xEC8A0C + +#define mmTPC3_QM_ARB_WRR_WEIGHT_2 0xEC8A10 + +#define mmTPC3_QM_ARB_WRR_WEIGHT_3 0xEC8A14 + +#define mmTPC3_QM_ARB_CFG_1 0xEC8A18 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_0 0xEC8A20 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_1 0xEC8A24 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_2 0xEC8A28 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_3 0xEC8A2C + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_4 0xEC8A30 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_5 0xEC8A34 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_6 0xEC8A38 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_7 0xEC8A3C + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_8 0xEC8A40 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_9 0xEC8A44 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_10 0xEC8A48 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_11 0xEC8A4C + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_12 0xEC8A50 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_13 0xEC8A54 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_14 0xEC8A58 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_15 0xEC8A5C + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_16 0xEC8A60 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_17 0xEC8A64 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_18 0xEC8A68 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_19 0xEC8A6C + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_20 0xEC8A70 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_21 0xEC8A74 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_22 0xEC8A78 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_23 0xEC8A7C + +#define 
mmTPC3_QM_ARB_MST_AVAIL_CRED_24 0xEC8A80 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_25 0xEC8A84 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_26 0xEC8A88 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_27 0xEC8A8C + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_28 0xEC8A90 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_29 0xEC8A94 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_30 0xEC8A98 + +#define mmTPC3_QM_ARB_MST_AVAIL_CRED_31 0xEC8A9C + +#define mmTPC3_QM_ARB_MST_CRED_INC 0xEC8AA0 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xEC8AA4 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xEC8AA8 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xEC8AAC + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xEC8AB0 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xEC8AB4 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xEC8AB8 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xEC8ABC + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xEC8AC0 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xEC8AC4 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xEC8AC8 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xEC8ACC + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xEC8AD0 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xEC8AD4 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xEC8AD8 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xEC8ADC + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xEC8AE0 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xEC8AE4 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xEC8AE8 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xEC8AEC + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xEC8AF0 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xEC8AF4 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xEC8AF8 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xEC8AFC + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xEC8B00 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xEC8B04 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xEC8B08 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xEC8B0C + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xEC8B10 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xEC8B14 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xEC8B18 + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xEC8B1C + +#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xEC8B20 + +#define mmTPC3_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xEC8B28 + +#define mmTPC3_QM_ARB_MST_SLAVE_EN 0xEC8B2C + +#define mmTPC3_QM_ARB_MST_QUIET_PER 0xEC8B34 + +#define mmTPC3_QM_ARB_SLV_CHOISE_WDT 0xEC8B38 + +#define mmTPC3_QM_ARB_SLV_ID 0xEC8B3C + +#define mmTPC3_QM_ARB_MSG_MAX_INFLIGHT 0xEC8B44 + +#define mmTPC3_QM_ARB_MSG_AWUSER_31_11 0xEC8B48 + +#define mmTPC3_QM_ARB_MSG_AWUSER_SEC_PROP 0xEC8B4C + +#define mmTPC3_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xEC8B50 + +#define mmTPC3_QM_ARB_BASE_LO 0xEC8B54 + +#define mmTPC3_QM_ARB_BASE_HI 0xEC8B58 + +#define mmTPC3_QM_ARB_STATE_STS 0xEC8B80 + +#define mmTPC3_QM_ARB_CHOISE_FULLNESS_STS 0xEC8B84 + +#define mmTPC3_QM_ARB_MSG_STS 0xEC8B88 + +#define mmTPC3_QM_ARB_SLV_CHOISE_Q_HEAD 0xEC8B8C + +#define mmTPC3_QM_ARB_ERR_CAUSE 0xEC8B9C + +#define mmTPC3_QM_ARB_ERR_MSG_EN 0xEC8BA0 + +#define mmTPC3_QM_ARB_ERR_STS_DRP 0xEC8BA8 + +#define mmTPC3_QM_ARB_MST_CRED_STS_0 0xEC8BB0 + +#define mmTPC3_QM_ARB_MST_CRED_STS_1 0xEC8BB4 + +#define mmTPC3_QM_ARB_MST_CRED_STS_2 0xEC8BB8 + +#define mmTPC3_QM_ARB_MST_CRED_STS_3 0xEC8BBC + +#define mmTPC3_QM_ARB_MST_CRED_STS_4 0xEC8BC0 + +#define mmTPC3_QM_ARB_MST_CRED_STS_5 0xEC8BC4 + +#define mmTPC3_QM_ARB_MST_CRED_STS_6 0xEC8BC8 + +#define mmTPC3_QM_ARB_MST_CRED_STS_7 
0xEC8BCC + +#define mmTPC3_QM_ARB_MST_CRED_STS_8 0xEC8BD0 + +#define mmTPC3_QM_ARB_MST_CRED_STS_9 0xEC8BD4 + +#define mmTPC3_QM_ARB_MST_CRED_STS_10 0xEC8BD8 + +#define mmTPC3_QM_ARB_MST_CRED_STS_11 0xEC8BDC + +#define mmTPC3_QM_ARB_MST_CRED_STS_12 0xEC8BE0 + +#define mmTPC3_QM_ARB_MST_CRED_STS_13 0xEC8BE4 + +#define mmTPC3_QM_ARB_MST_CRED_STS_14 0xEC8BE8 + +#define mmTPC3_QM_ARB_MST_CRED_STS_15 0xEC8BEC + +#define mmTPC3_QM_ARB_MST_CRED_STS_16 0xEC8BF0 + +#define mmTPC3_QM_ARB_MST_CRED_STS_17 0xEC8BF4 + +#define mmTPC3_QM_ARB_MST_CRED_STS_18 0xEC8BF8 + +#define mmTPC3_QM_ARB_MST_CRED_STS_19 0xEC8BFC + +#define mmTPC3_QM_ARB_MST_CRED_STS_20 0xEC8C00 + +#define mmTPC3_QM_ARB_MST_CRED_STS_21 0xEC8C04 + +#define mmTPC3_QM_ARB_MST_CRED_STS_22 0xEC8C08 + +#define mmTPC3_QM_ARB_MST_CRED_STS_23 0xEC8C0C + +#define mmTPC3_QM_ARB_MST_CRED_STS_24 0xEC8C10 + +#define mmTPC3_QM_ARB_MST_CRED_STS_25 0xEC8C14 + +#define mmTPC3_QM_ARB_MST_CRED_STS_26 0xEC8C18 + +#define mmTPC3_QM_ARB_MST_CRED_STS_27 0xEC8C1C + +#define mmTPC3_QM_ARB_MST_CRED_STS_28 0xEC8C20 + +#define mmTPC3_QM_ARB_MST_CRED_STS_29 0xEC8C24 + +#define mmTPC3_QM_ARB_MST_CRED_STS_30 0xEC8C28 + +#define mmTPC3_QM_ARB_MST_CRED_STS_31 0xEC8C2C + +#define mmTPC3_QM_CGM_CFG 0xEC8C70 + +#define mmTPC3_QM_CGM_STS 0xEC8C74 + +#define mmTPC3_QM_CGM_CFG1 0xEC8C78 + +#define mmTPC3_QM_LOCAL_RANGE_BASE 0xEC8C80 + +#define mmTPC3_QM_LOCAL_RANGE_SIZE 0xEC8C84 + +#define mmTPC3_QM_CSMR_STRICT_PRIO_CFG 0xEC8C90 + +#define mmTPC3_QM_HBW_RD_RATE_LIM_CFG_1 0xEC8C94 + +#define mmTPC3_QM_LBW_WR_RATE_LIM_CFG_0 0xEC8C98 + +#define mmTPC3_QM_LBW_WR_RATE_LIM_CFG_1 0xEC8C9C + +#define mmTPC3_QM_HBW_RD_RATE_LIM_CFG_0 0xEC8CA0 + +#define mmTPC3_QM_GLBL_AXCACHE 0xEC8CA4 + +#define mmTPC3_QM_IND_GW_APB_CFG 0xEC8CB0 + +#define mmTPC3_QM_IND_GW_APB_WDATA 0xEC8CB4 + +#define mmTPC3_QM_IND_GW_APB_RDATA 0xEC8CB8 + +#define mmTPC3_QM_IND_GW_APB_STATUS 0xEC8CBC + +#define mmTPC3_QM_GLBL_ERR_ADDR_LO 0xEC8CD0 + +#define mmTPC3_QM_GLBL_ERR_ADDR_HI 0xEC8CD4 + +#define mmTPC3_QM_GLBL_ERR_WDATA 0xEC8CD8 + +#define mmTPC3_QM_GLBL_MEM_INIT_BUSY 0xEC8D00 + +#endif /* ASIC_REG_TPC3_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h new file mode 100644 index 000000000..7a9447f39 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h @@ -0,0 +1,1226 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC4_CFG_REGS_H_ +#define ASIC_REG_TPC4_CFG_REGS_H_ + +/* + ***************************************** + * TPC4_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF06400 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF06404 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF06408 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF0640C + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF06410 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF06414 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF06418 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF0641C + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF06420 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF06424 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF06428 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF0642C + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF06430 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF06434 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF06438 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF0643C + +#define mmTPC4_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF06440 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF06444 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF06448 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF0644C + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF06450 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF06454 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF06458 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF0645C + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF06460 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF06464 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF06468 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF0646C + +#define mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF06470 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF06474 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF06478 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF0647C + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF06480 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF06484 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF06488 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF0648C + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF06490 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF06494 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF06498 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF0649C + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF064A0 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF064A4 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF064A8 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF064AC + +#define mmTPC4_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF064B0 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF064B4 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF064B8 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF064BC + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF064C0 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF064C4 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF064C8 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF064CC + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF064D0 + +#define 
mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF064D4 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF064D8 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF064DC + +#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF064E0 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF064E4 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF064E8 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF064EC + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF064F0 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF064F4 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF064F8 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF064FC + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF06500 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF06504 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF06508 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF0650C + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF06510 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF06514 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF06518 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF0651C + +#define mmTPC4_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF06520 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF06524 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF06528 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF0652C + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF06530 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF06534 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF06538 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF0653C + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF06540 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF06544 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF06548 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF0654C + +#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF06550 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF06554 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF06558 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF0655C + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF06560 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF06564 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF06568 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF0656C + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF06570 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF06574 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF06578 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF0657C + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF06580 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF06584 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF06588 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF0658C + +#define mmTPC4_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF06590 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF06594 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF06598 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF0659C + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF065A0 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF065A4 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF065A8 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF065AC + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF065B0 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF065B4 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF065B8 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF065BC + +#define 
mmTPC4_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xF065C0 + +#define mmTPC4_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xF065C4 + +#define mmTPC4_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xF065C8 + +#define mmTPC4_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xF065CC + +#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xF065D0 + +#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xF065D4 + +#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xF065D8 + +#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xF065DC + +#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xF065E0 + +#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xF065E4 + +#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xF065E8 + +#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xF065EC + +#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xF065F0 + +#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xF065F4 + +#define mmTPC4_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xF065F8 + +#define mmTPC4_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xF065FC + +#define mmTPC4_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xF06600 + +#define mmTPC4_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xF06604 + +#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xF06608 + +#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xF0660C + +#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xF06610 + +#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xF06614 + +#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xF06618 + +#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xF0661C + +#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xF06620 + +#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xF06624 + +#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xF06628 + +#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xF0662C + +#define mmTPC4_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xF06630 + +#define mmTPC4_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xF06634 + +#define mmTPC4_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xF06638 + +#define mmTPC4_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xF0663C + +#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xF06640 + +#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xF06644 + +#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xF06648 + +#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xF0664C + +#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xF06650 + +#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xF06654 + +#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xF06658 + +#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xF0665C + +#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xF06660 + +#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xF06664 + +#define mmTPC4_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xF06668 + +#define mmTPC4_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xF0666C + +#define mmTPC4_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xF06670 + +#define mmTPC4_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xF06674 + +#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xF06678 + +#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xF0667C + +#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xF06680 + +#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xF06684 + +#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xF06688 + +#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xF0668C + +#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xF06690 + +#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xF06694 + +#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xF06698 + +#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xF0669C + +#define mmTPC4_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xF066A0 + +#define mmTPC4_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xF066A4 + +#define mmTPC4_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xF066A8 + +#define 
mmTPC4_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xF066AC + +#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xF066B0 + +#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xF066B4 + +#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xF066B8 + +#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xF066BC + +#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xF066C0 + +#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xF066C4 + +#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xF066C8 + +#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xF066CC + +#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xF066D0 + +#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xF066D4 + +#define mmTPC4_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xF066D8 + +#define mmTPC4_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xF066DC + +#define mmTPC4_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xF066E0 + +#define mmTPC4_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xF066E4 + +#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xF066E8 + +#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xF066EC + +#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xF066F0 + +#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xF066F4 + +#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xF066F8 + +#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xF066FC + +#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xF06700 + +#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xF06704 + +#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xF06708 + +#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xF0670C + +#define mmTPC4_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xF06710 + +#define mmTPC4_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xF06714 + +#define mmTPC4_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xF06718 + +#define mmTPC4_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xF0671C + +#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xF06720 + +#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xF06724 + +#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xF06728 + +#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xF0672C + +#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xF06730 + +#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xF06734 + +#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xF06738 + +#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xF0673C + +#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xF06740 + +#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xF06744 + +#define mmTPC4_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xF06748 + +#define mmTPC4_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xF0674C + +#define mmTPC4_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xF06750 + +#define mmTPC4_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xF06754 + +#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xF06758 + +#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xF0675C + +#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xF06760 + +#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xF06764 + +#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xF06768 + +#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xF0676C + +#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xF06770 + +#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xF06774 + +#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xF06778 + +#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xF0677C + +#define mmTPC4_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF06780 + +#define mmTPC4_CFG_KERNEL_SYNC_OBJECT_ADDR 0xF06784 + +#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF06788 + +#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF0678C + +#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_0 0xF06790 + +#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_0 0xF06794 + +#define 
mmTPC4_CFG_KERNEL_TID_BASE_DIM_1 0xF06798 + +#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_1 0xF0679C + +#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_2 0xF067A0 + +#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_2 0xF067A4 + +#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_3 0xF067A8 + +#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_3 0xF067AC + +#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_4 0xF067B0 + +#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_4 0xF067B4 + +#define mmTPC4_CFG_KERNEL_KERNEL_CONFIG 0xF067B8 + +#define mmTPC4_CFG_KERNEL_KERNEL_ID 0xF067BC + +#define mmTPC4_CFG_KERNEL_SRF_0 0xF067C0 + +#define mmTPC4_CFG_KERNEL_SRF_1 0xF067C4 + +#define mmTPC4_CFG_KERNEL_SRF_2 0xF067C8 + +#define mmTPC4_CFG_KERNEL_SRF_3 0xF067CC + +#define mmTPC4_CFG_KERNEL_SRF_4 0xF067D0 + +#define mmTPC4_CFG_KERNEL_SRF_5 0xF067D4 + +#define mmTPC4_CFG_KERNEL_SRF_6 0xF067D8 + +#define mmTPC4_CFG_KERNEL_SRF_7 0xF067DC + +#define mmTPC4_CFG_KERNEL_SRF_8 0xF067E0 + +#define mmTPC4_CFG_KERNEL_SRF_9 0xF067E4 + +#define mmTPC4_CFG_KERNEL_SRF_10 0xF067E8 + +#define mmTPC4_CFG_KERNEL_SRF_11 0xF067EC + +#define mmTPC4_CFG_KERNEL_SRF_12 0xF067F0 + +#define mmTPC4_CFG_KERNEL_SRF_13 0xF067F4 + +#define mmTPC4_CFG_KERNEL_SRF_14 0xF067F8 + +#define mmTPC4_CFG_KERNEL_SRF_15 0xF067FC + +#define mmTPC4_CFG_KERNEL_SRF_16 0xF06800 + +#define mmTPC4_CFG_KERNEL_SRF_17 0xF06804 + +#define mmTPC4_CFG_KERNEL_SRF_18 0xF06808 + +#define mmTPC4_CFG_KERNEL_SRF_19 0xF0680C + +#define mmTPC4_CFG_KERNEL_SRF_20 0xF06810 + +#define mmTPC4_CFG_KERNEL_SRF_21 0xF06814 + +#define mmTPC4_CFG_KERNEL_SRF_22 0xF06818 + +#define mmTPC4_CFG_KERNEL_SRF_23 0xF0681C + +#define mmTPC4_CFG_KERNEL_SRF_24 0xF06820 + +#define mmTPC4_CFG_KERNEL_SRF_25 0xF06824 + +#define mmTPC4_CFG_KERNEL_SRF_26 0xF06828 + +#define mmTPC4_CFG_KERNEL_SRF_27 0xF0682C + +#define mmTPC4_CFG_KERNEL_SRF_28 0xF06830 + +#define mmTPC4_CFG_KERNEL_SRF_29 0xF06834 + +#define mmTPC4_CFG_KERNEL_SRF_30 0xF06838 + +#define mmTPC4_CFG_KERNEL_SRF_31 0xF0683C + +#define mmTPC4_CFG_ROUND_CSR 0xF068FC + +#define mmTPC4_CFG_PROT 0xF06900 + +#define mmTPC4_CFG_SEMAPHORE 0xF06908 + +#define mmTPC4_CFG_VFLAGS 0xF0690C + +#define mmTPC4_CFG_SFLAGS 0xF06910 + +#define mmTPC4_CFG_LFSR_POLYNOM 0xF06918 + +#define mmTPC4_CFG_STATUS 0xF0691C + +#define mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH 0xF06920 + +#define mmTPC4_CFG_CFG_SUBTRACT_VALUE 0xF06924 + +#define mmTPC4_CFG_SM_BASE_ADDRESS_HIGH 0xF0692C + +#define mmTPC4_CFG_TPC_CMD 0xF06930 + +#define mmTPC4_CFG_TPC_EXECUTE 0xF06938 + +#define mmTPC4_CFG_TPC_STALL 0xF0693C + +#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_LOW 0xF06940 + +#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF06944 + +#define mmTPC4_CFG_RD_RATE_LIMIT 0xF06948 + +#define mmTPC4_CFG_WR_RATE_LIMIT 0xF06950 + +#define mmTPC4_CFG_MSS_CONFIG 0xF06954 + +#define mmTPC4_CFG_TPC_INTR_CAUSE 0xF06958 + +#define mmTPC4_CFG_TPC_INTR_MASK 0xF0695C + +#define mmTPC4_CFG_WQ_CREDITS 0xF06960 + +#define mmTPC4_CFG_ARUSER_LO 0xF06964 + +#define mmTPC4_CFG_ARUSER_HI 0xF06968 + +#define mmTPC4_CFG_AWUSER_LO 0xF0696C + +#define mmTPC4_CFG_AWUSER_HI 0xF06970 + +#define mmTPC4_CFG_OPCODE_EXEC 0xF06974 + +#define mmTPC4_CFG_LUT_FUNC32_BASE_ADDR_LO 0xF06978 + +#define mmTPC4_CFG_LUT_FUNC32_BASE_ADDR_HI 0xF0697C + +#define mmTPC4_CFG_LUT_FUNC64_BASE_ADDR_LO 0xF06980 + +#define mmTPC4_CFG_LUT_FUNC64_BASE_ADDR_HI 0xF06984 + +#define mmTPC4_CFG_LUT_FUNC128_BASE_ADDR_LO 0xF06988 + +#define mmTPC4_CFG_LUT_FUNC128_BASE_ADDR_HI 0xF0698C + +#define mmTPC4_CFG_LUT_FUNC256_BASE_ADDR_LO 0xF06990 + +#define mmTPC4_CFG_LUT_FUNC256_BASE_ADDR_HI 0xF06994 + +#define 
mmTPC4_CFG_TSB_CFG_MAX_SIZE 0xF06998 + +#define mmTPC4_CFG_TSB_CFG 0xF0699C + +#define mmTPC4_CFG_DBGMEM_ADD 0xF069A0 + +#define mmTPC4_CFG_DBGMEM_DATA_WR 0xF069A4 + +#define mmTPC4_CFG_DBGMEM_DATA_RD 0xF069A8 + +#define mmTPC4_CFG_DBGMEM_CTRL 0xF069AC + +#define mmTPC4_CFG_DBGMEM_RC 0xF069B0 + +#define mmTPC4_CFG_TSB_INFLIGHT_CNTR 0xF069B4 + +#define mmTPC4_CFG_WQ_INFLIGHT_CNTR 0xF069B8 + +#define mmTPC4_CFG_WQ_LBW_TOTAL_CNTR 0xF069BC + +#define mmTPC4_CFG_WQ_HBW_TOTAL_CNTR 0xF069C0 + +#define mmTPC4_CFG_IRQ_OCCOUPY_CNTR 0xF069C4 + +#define mmTPC4_CFG_FUNC_MBIST_CNTRL 0xF069D0 + +#define mmTPC4_CFG_FUNC_MBIST_PAT 0xF069D4 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_0 0xF069D8 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_1 0xF069DC + +#define mmTPC4_CFG_FUNC_MBIST_MEM_2 0xF069E0 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_3 0xF069E4 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_4 0xF069E8 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_5 0xF069EC + +#define mmTPC4_CFG_FUNC_MBIST_MEM_6 0xF069F0 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_7 0xF069F4 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_8 0xF069F8 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_9 0xF069FC + +#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF06A00 + +#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF06A04 + +#define mmTPC4_CFG_QM_TENSOR_0_PADDING_VALUE 0xF06A08 + +#define mmTPC4_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF06A0C + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF06A10 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF06A14 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF06A18 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF06A1C + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF06A20 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF06A24 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF06A28 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF06A2C + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF06A30 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF06A34 + +#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF06A38 + +#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF06A3C + +#define mmTPC4_CFG_QM_TENSOR_1_PADDING_VALUE 0xF06A40 + +#define mmTPC4_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF06A44 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF06A48 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF06A4C + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF06A50 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF06A54 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF06A58 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF06A5C + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF06A60 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF06A64 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF06A68 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF06A6C + +#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF06A70 + +#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF06A74 + +#define mmTPC4_CFG_QM_TENSOR_2_PADDING_VALUE 0xF06A78 + +#define mmTPC4_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF06A7C + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF06A80 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF06A84 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF06A88 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF06A8C + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF06A90 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF06A94 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF06A98 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF06A9C + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF06AA0 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF06AA4 + +#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF06AA8 + +#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 
0xF06AAC + +#define mmTPC4_CFG_QM_TENSOR_3_PADDING_VALUE 0xF06AB0 + +#define mmTPC4_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF06AB4 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF06AB8 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF06ABC + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF06AC0 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF06AC4 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF06AC8 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF06ACC + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF06AD0 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF06AD4 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF06AD8 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF06ADC + +#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF06AE0 + +#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF06AE4 + +#define mmTPC4_CFG_QM_TENSOR_4_PADDING_VALUE 0xF06AE8 + +#define mmTPC4_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF06AEC + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF06AF0 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF06AF4 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF06AF8 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF06AFC + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF06B00 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF06B04 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF06B08 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF06B0C + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF06B10 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF06B14 + +#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF06B18 + +#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF06B1C + +#define mmTPC4_CFG_QM_TENSOR_5_PADDING_VALUE 0xF06B20 + +#define mmTPC4_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF06B24 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF06B28 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF06B2C + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF06B30 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF06B34 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF06B38 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF06B3C + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF06B40 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF06B44 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF06B48 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF06B4C + +#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF06B50 + +#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF06B54 + +#define mmTPC4_CFG_QM_TENSOR_6_PADDING_VALUE 0xF06B58 + +#define mmTPC4_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF06B5C + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF06B60 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF06B64 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF06B68 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF06B6C + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF06B70 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF06B74 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF06B78 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF06B7C + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF06B80 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF06B84 + +#define mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF06B88 + +#define mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF06B8C + +#define mmTPC4_CFG_QM_TENSOR_7_PADDING_VALUE 0xF06B90 + +#define mmTPC4_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF06B94 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF06B98 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF06B9C + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF06BA0 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF06BA4 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF06BA8 + +#define 
mmTPC4_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF06BAC + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF06BB0 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF06BB4 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF06BB8 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF06BBC + +#define mmTPC4_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xF06BC0 + +#define mmTPC4_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xF06BC4 + +#define mmTPC4_CFG_QM_TENSOR_8_PADDING_VALUE 0xF06BC8 + +#define mmTPC4_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xF06BCC + +#define mmTPC4_CFG_QM_TENSOR_8_DIM_0_SIZE 0xF06BD0 + +#define mmTPC4_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xF06BD4 + +#define mmTPC4_CFG_QM_TENSOR_8_DIM_1_SIZE 0xF06BD8 + +#define mmTPC4_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xF06BDC + +#define mmTPC4_CFG_QM_TENSOR_8_DIM_2_SIZE 0xF06BE0 + +#define mmTPC4_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xF06BE4 + +#define mmTPC4_CFG_QM_TENSOR_8_DIM_3_SIZE 0xF06BE8 + +#define mmTPC4_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xF06BEC + +#define mmTPC4_CFG_QM_TENSOR_8_DIM_4_SIZE 0xF06BF0 + +#define mmTPC4_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xF06BF4 + +#define mmTPC4_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xF06BF8 + +#define mmTPC4_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xF06BFC + +#define mmTPC4_CFG_QM_TENSOR_9_PADDING_VALUE 0xF06C00 + +#define mmTPC4_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xF06C04 + +#define mmTPC4_CFG_QM_TENSOR_9_DIM_0_SIZE 0xF06C08 + +#define mmTPC4_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xF06C0C + +#define mmTPC4_CFG_QM_TENSOR_9_DIM_1_SIZE 0xF06C10 + +#define mmTPC4_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xF06C14 + +#define mmTPC4_CFG_QM_TENSOR_9_DIM_2_SIZE 0xF06C18 + +#define mmTPC4_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xF06C1C + +#define mmTPC4_CFG_QM_TENSOR_9_DIM_3_SIZE 0xF06C20 + +#define mmTPC4_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xF06C24 + +#define mmTPC4_CFG_QM_TENSOR_9_DIM_4_SIZE 0xF06C28 + +#define mmTPC4_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xF06C2C + +#define mmTPC4_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xF06C30 + +#define mmTPC4_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xF06C34 + +#define mmTPC4_CFG_QM_TENSOR_10_PADDING_VALUE 0xF06C38 + +#define mmTPC4_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xF06C3C + +#define mmTPC4_CFG_QM_TENSOR_10_DIM_0_SIZE 0xF06C40 + +#define mmTPC4_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xF06C44 + +#define mmTPC4_CFG_QM_TENSOR_10_DIM_1_SIZE 0xF06C48 + +#define mmTPC4_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xF06C4C + +#define mmTPC4_CFG_QM_TENSOR_10_DIM_2_SIZE 0xF06C50 + +#define mmTPC4_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xF06C54 + +#define mmTPC4_CFG_QM_TENSOR_10_DIM_3_SIZE 0xF06C58 + +#define mmTPC4_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xF06C5C + +#define mmTPC4_CFG_QM_TENSOR_10_DIM_4_SIZE 0xF06C60 + +#define mmTPC4_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xF06C64 + +#define mmTPC4_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xF06C68 + +#define mmTPC4_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xF06C6C + +#define mmTPC4_CFG_QM_TENSOR_11_PADDING_VALUE 0xF06C70 + +#define mmTPC4_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xF06C74 + +#define mmTPC4_CFG_QM_TENSOR_11_DIM_0_SIZE 0xF06C78 + +#define mmTPC4_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xF06C7C + +#define mmTPC4_CFG_QM_TENSOR_11_DIM_1_SIZE 0xF06C80 + +#define mmTPC4_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xF06C84 + +#define mmTPC4_CFG_QM_TENSOR_11_DIM_2_SIZE 0xF06C88 + +#define mmTPC4_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xF06C8C + +#define mmTPC4_CFG_QM_TENSOR_11_DIM_3_SIZE 0xF06C90 + +#define mmTPC4_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xF06C94 + +#define mmTPC4_CFG_QM_TENSOR_11_DIM_4_SIZE 0xF06C98 + +#define mmTPC4_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xF06C9C + +#define mmTPC4_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xF06CA0 + +#define mmTPC4_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xF06CA4 + +#define 
mmTPC4_CFG_QM_TENSOR_12_PADDING_VALUE 0xF06CA8 + +#define mmTPC4_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xF06CAC + +#define mmTPC4_CFG_QM_TENSOR_12_DIM_0_SIZE 0xF06CB0 + +#define mmTPC4_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xF06CB4 + +#define mmTPC4_CFG_QM_TENSOR_12_DIM_1_SIZE 0xF06CB8 + +#define mmTPC4_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xF06CBC + +#define mmTPC4_CFG_QM_TENSOR_12_DIM_2_SIZE 0xF06CC0 + +#define mmTPC4_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xF06CC4 + +#define mmTPC4_CFG_QM_TENSOR_12_DIM_3_SIZE 0xF06CC8 + +#define mmTPC4_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xF06CCC + +#define mmTPC4_CFG_QM_TENSOR_12_DIM_4_SIZE 0xF06CD0 + +#define mmTPC4_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xF06CD4 + +#define mmTPC4_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xF06CD8 + +#define mmTPC4_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xF06CDC + +#define mmTPC4_CFG_QM_TENSOR_13_PADDING_VALUE 0xF06CE0 + +#define mmTPC4_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xF06CE4 + +#define mmTPC4_CFG_QM_TENSOR_13_DIM_0_SIZE 0xF06CE8 + +#define mmTPC4_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xF06CEC + +#define mmTPC4_CFG_QM_TENSOR_13_DIM_1_SIZE 0xF06CF0 + +#define mmTPC4_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xF06CF4 + +#define mmTPC4_CFG_QM_TENSOR_13_DIM_2_SIZE 0xF06CF8 + +#define mmTPC4_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xF06CFC + +#define mmTPC4_CFG_QM_TENSOR_13_DIM_3_SIZE 0xF06D00 + +#define mmTPC4_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xF06D04 + +#define mmTPC4_CFG_QM_TENSOR_13_DIM_4_SIZE 0xF06D08 + +#define mmTPC4_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xF06D0C + +#define mmTPC4_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xF06D10 + +#define mmTPC4_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xF06D14 + +#define mmTPC4_CFG_QM_TENSOR_14_PADDING_VALUE 0xF06D18 + +#define mmTPC4_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xF06D1C + +#define mmTPC4_CFG_QM_TENSOR_14_DIM_0_SIZE 0xF06D20 + +#define mmTPC4_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xF06D24 + +#define mmTPC4_CFG_QM_TENSOR_14_DIM_1_SIZE 0xF06D28 + +#define mmTPC4_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xF06D2C + +#define mmTPC4_CFG_QM_TENSOR_14_DIM_2_SIZE 0xF06D30 + +#define mmTPC4_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xF06D34 + +#define mmTPC4_CFG_QM_TENSOR_14_DIM_3_SIZE 0xF06D38 + +#define mmTPC4_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xF06D3C + +#define mmTPC4_CFG_QM_TENSOR_14_DIM_4_SIZE 0xF06D40 + +#define mmTPC4_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xF06D44 + +#define mmTPC4_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xF06D48 + +#define mmTPC4_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xF06D4C + +#define mmTPC4_CFG_QM_TENSOR_15_PADDING_VALUE 0xF06D50 + +#define mmTPC4_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xF06D54 + +#define mmTPC4_CFG_QM_TENSOR_15_DIM_0_SIZE 0xF06D58 + +#define mmTPC4_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xF06D5C + +#define mmTPC4_CFG_QM_TENSOR_15_DIM_1_SIZE 0xF06D60 + +#define mmTPC4_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xF06D64 + +#define mmTPC4_CFG_QM_TENSOR_15_DIM_2_SIZE 0xF06D68 + +#define mmTPC4_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xF06D6C + +#define mmTPC4_CFG_QM_TENSOR_15_DIM_3_SIZE 0xF06D70 + +#define mmTPC4_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xF06D74 + +#define mmTPC4_CFG_QM_TENSOR_15_DIM_4_SIZE 0xF06D78 + +#define mmTPC4_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xF06D7C + +#define mmTPC4_CFG_QM_SYNC_OBJECT_MESSAGE 0xF06D80 + +#define mmTPC4_CFG_QM_SYNC_OBJECT_ADDR 0xF06D84 + +#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF06D88 + +#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF06D8C + +#define mmTPC4_CFG_QM_TID_BASE_DIM_0 0xF06D90 + +#define mmTPC4_CFG_QM_TID_SIZE_DIM_0 0xF06D94 + +#define mmTPC4_CFG_QM_TID_BASE_DIM_1 0xF06D98 + +#define mmTPC4_CFG_QM_TID_SIZE_DIM_1 0xF06D9C + +#define mmTPC4_CFG_QM_TID_BASE_DIM_2 0xF06DA0 + +#define mmTPC4_CFG_QM_TID_SIZE_DIM_2 
0xF06DA4
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_3 0xF06DA8
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_3 0xF06DAC
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_4 0xF06DB0
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_4 0xF06DB4
+
+#define mmTPC4_CFG_QM_KERNEL_CONFIG 0xF06DB8
+
+#define mmTPC4_CFG_QM_KERNEL_ID 0xF06DBC
+
+#define mmTPC4_CFG_QM_SRF_0 0xF06DC0
+
+#define mmTPC4_CFG_QM_SRF_1 0xF06DC4
+
+#define mmTPC4_CFG_QM_SRF_2 0xF06DC8
+
+#define mmTPC4_CFG_QM_SRF_3 0xF06DCC
+
+#define mmTPC4_CFG_QM_SRF_4 0xF06DD0
+
+#define mmTPC4_CFG_QM_SRF_5 0xF06DD4
+
+#define mmTPC4_CFG_QM_SRF_6 0xF06DD8
+
+#define mmTPC4_CFG_QM_SRF_7 0xF06DDC
+
+#define mmTPC4_CFG_QM_SRF_8 0xF06DE0
+
+#define mmTPC4_CFG_QM_SRF_9 0xF06DE4
+
+#define mmTPC4_CFG_QM_SRF_10 0xF06DE8
+
+#define mmTPC4_CFG_QM_SRF_11 0xF06DEC
+
+#define mmTPC4_CFG_QM_SRF_12 0xF06DF0
+
+#define mmTPC4_CFG_QM_SRF_13 0xF06DF4
+
+#define mmTPC4_CFG_QM_SRF_14 0xF06DF8
+
+#define mmTPC4_CFG_QM_SRF_15 0xF06DFC
+
+#define mmTPC4_CFG_QM_SRF_16 0xF06E00
+
+#define mmTPC4_CFG_QM_SRF_17 0xF06E04
+
+#define mmTPC4_CFG_QM_SRF_18 0xF06E08
+
+#define mmTPC4_CFG_QM_SRF_19 0xF06E0C
+
+#define mmTPC4_CFG_QM_SRF_20 0xF06E10
+
+#define mmTPC4_CFG_QM_SRF_21 0xF06E14
+
+#define mmTPC4_CFG_QM_SRF_22 0xF06E18
+
+#define mmTPC4_CFG_QM_SRF_23 0xF06E1C
+
+#define mmTPC4_CFG_QM_SRF_24 0xF06E20
+
+#define mmTPC4_CFG_QM_SRF_25 0xF06E24
+
+#define mmTPC4_CFG_QM_SRF_26 0xF06E28
+
+#define mmTPC4_CFG_QM_SRF_27 0xF06E2C
+
+#define mmTPC4_CFG_QM_SRF_28 0xF06E30
+
+#define mmTPC4_CFG_QM_SRF_29 0xF06E34
+
+#define mmTPC4_CFG_QM_SRF_30 0xF06E38
+
+#define mmTPC4_CFG_QM_SRF_31 0xF06E3C
+
+#endif /* ASIC_REG_TPC4_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h
new file mode 100644
index 000000000..80e63402f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC4_QM_REGS_H_
+#define ASIC_REG_TPC4_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC4_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC4_QM_GLBL_CFG0 0xF08000
+
+#define mmTPC4_QM_GLBL_CFG1 0xF08004
+
+#define mmTPC4_QM_GLBL_PROT 0xF08008
+
+#define mmTPC4_QM_GLBL_ERR_CFG 0xF0800C
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_0 0xF08010
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_1 0xF08014
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_2 0xF08018
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_3 0xF0801C
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_4 0xF08020
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_0 0xF08024
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_1 0xF08028
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_2 0xF0802C
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_3 0xF08030
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_4 0xF08034
+
+#define mmTPC4_QM_GLBL_STS0 0xF08038
+
+#define mmTPC4_QM_GLBL_STS1_0 0xF08040
+
+#define mmTPC4_QM_GLBL_STS1_1 0xF08044
+
+#define mmTPC4_QM_GLBL_STS1_2 0xF08048
+
+#define mmTPC4_QM_GLBL_STS1_3 0xF0804C
+
+#define mmTPC4_QM_GLBL_STS1_4 0xF08050
+
+#define mmTPC4_QM_GLBL_MSG_EN_0 0xF08054
+
+#define mmTPC4_QM_GLBL_MSG_EN_1 0xF08058
+
+#define mmTPC4_QM_GLBL_MSG_EN_2 0xF0805C
+
+#define mmTPC4_QM_GLBL_MSG_EN_3 0xF08060
+
+#define mmTPC4_QM_GLBL_MSG_EN_4 0xF08068
+
+#define mmTPC4_QM_PQ_BASE_LO_0 0xF08070
+
+#define mmTPC4_QM_PQ_BASE_LO_1 0xF08074
+
+#define mmTPC4_QM_PQ_BASE_LO_2 0xF08078
+
+#define mmTPC4_QM_PQ_BASE_LO_3 0xF0807C
+
+#define mmTPC4_QM_PQ_BASE_HI_0 0xF08080
+
+#define mmTPC4_QM_PQ_BASE_HI_1 0xF08084
+
+#define mmTPC4_QM_PQ_BASE_HI_2 0xF08088
+
+#define mmTPC4_QM_PQ_BASE_HI_3 0xF0808C
+
+#define mmTPC4_QM_PQ_SIZE_0 0xF08090
+
+#define mmTPC4_QM_PQ_SIZE_1 0xF08094
+
+#define mmTPC4_QM_PQ_SIZE_2 0xF08098
+
+#define mmTPC4_QM_PQ_SIZE_3 0xF0809C
+
+#define mmTPC4_QM_PQ_PI_0 0xF080A0
+
+#define mmTPC4_QM_PQ_PI_1 0xF080A4
+
+#define mmTPC4_QM_PQ_PI_2 0xF080A8
+
+#define mmTPC4_QM_PQ_PI_3 0xF080AC
+
+#define mmTPC4_QM_PQ_CI_0 0xF080B0
+
+#define mmTPC4_QM_PQ_CI_1 0xF080B4
+
+#define mmTPC4_QM_PQ_CI_2 0xF080B8
+
+#define mmTPC4_QM_PQ_CI_3 0xF080BC
+
+#define mmTPC4_QM_PQ_CFG0_0 0xF080C0
+
+#define mmTPC4_QM_PQ_CFG0_1 0xF080C4
+
+#define mmTPC4_QM_PQ_CFG0_2 0xF080C8
+
+#define mmTPC4_QM_PQ_CFG0_3 0xF080CC
+
+#define mmTPC4_QM_PQ_CFG1_0 0xF080D0
+
+#define mmTPC4_QM_PQ_CFG1_1 0xF080D4
+
+#define mmTPC4_QM_PQ_CFG1_2 0xF080D8
+
+#define mmTPC4_QM_PQ_CFG1_3 0xF080DC
+
+#define mmTPC4_QM_PQ_ARUSER_31_11_0 0xF080E0
+
+#define mmTPC4_QM_PQ_ARUSER_31_11_1 0xF080E4
+
+#define mmTPC4_QM_PQ_ARUSER_31_11_2 0xF080E8
+
+#define mmTPC4_QM_PQ_ARUSER_31_11_3 0xF080EC
+
+#define mmTPC4_QM_PQ_STS0_0 0xF080F0
+
+#define mmTPC4_QM_PQ_STS0_1 0xF080F4
+
+#define mmTPC4_QM_PQ_STS0_2 0xF080F8
+
+#define mmTPC4_QM_PQ_STS0_3 0xF080FC
+
+#define mmTPC4_QM_PQ_STS1_0 0xF08100
+
+#define mmTPC4_QM_PQ_STS1_1 0xF08104
+
+#define mmTPC4_QM_PQ_STS1_2 0xF08108
+
+#define mmTPC4_QM_PQ_STS1_3 0xF0810C
+
+#define mmTPC4_QM_CQ_CFG0_0 0xF08110
+
+#define mmTPC4_QM_CQ_CFG0_1 0xF08114
+
+#define mmTPC4_QM_CQ_CFG0_2 0xF08118
+
+#define mmTPC4_QM_CQ_CFG0_3 0xF0811C
+
+#define mmTPC4_QM_CQ_CFG0_4 0xF08120
+
+#define mmTPC4_QM_CQ_CFG1_0 0xF08124
+
+#define mmTPC4_QM_CQ_CFG1_1 0xF08128
+
+#define mmTPC4_QM_CQ_CFG1_2 0xF0812C
+
+#define mmTPC4_QM_CQ_CFG1_3 0xF08130
+
+#define mmTPC4_QM_CQ_CFG1_4 0xF08134
+
+#define mmTPC4_QM_CQ_ARUSER_31_11_0 0xF08138 + +#define mmTPC4_QM_CQ_ARUSER_31_11_1 0xF0813C + +#define mmTPC4_QM_CQ_ARUSER_31_11_2 0xF08140 + +#define mmTPC4_QM_CQ_ARUSER_31_11_3 0xF08144 + +#define mmTPC4_QM_CQ_ARUSER_31_11_4 0xF08148 + +#define mmTPC4_QM_CQ_STS0_0 0xF0814C + +#define mmTPC4_QM_CQ_STS0_1 0xF08150 + +#define mmTPC4_QM_CQ_STS0_2 0xF08154 + +#define mmTPC4_QM_CQ_STS0_3 0xF08158 + +#define mmTPC4_QM_CQ_STS0_4 0xF0815C + +#define mmTPC4_QM_CQ_STS1_0 0xF08160 + +#define mmTPC4_QM_CQ_STS1_1 0xF08164 + +#define mmTPC4_QM_CQ_STS1_2 0xF08168 + +#define mmTPC4_QM_CQ_STS1_3 0xF0816C + +#define mmTPC4_QM_CQ_STS1_4 0xF08170 + +#define mmTPC4_QM_CQ_PTR_LO_0 0xF08174 + +#define mmTPC4_QM_CQ_PTR_HI_0 0xF08178 + +#define mmTPC4_QM_CQ_TSIZE_0 0xF0817C + +#define mmTPC4_QM_CQ_CTL_0 0xF08180 + +#define mmTPC4_QM_CQ_PTR_LO_1 0xF08184 + +#define mmTPC4_QM_CQ_PTR_HI_1 0xF08188 + +#define mmTPC4_QM_CQ_TSIZE_1 0xF0818C + +#define mmTPC4_QM_CQ_CTL_1 0xF08190 + +#define mmTPC4_QM_CQ_PTR_LO_2 0xF08194 + +#define mmTPC4_QM_CQ_PTR_HI_2 0xF08198 + +#define mmTPC4_QM_CQ_TSIZE_2 0xF0819C + +#define mmTPC4_QM_CQ_CTL_2 0xF081A0 + +#define mmTPC4_QM_CQ_PTR_LO_3 0xF081A4 + +#define mmTPC4_QM_CQ_PTR_HI_3 0xF081A8 + +#define mmTPC4_QM_CQ_TSIZE_3 0xF081AC + +#define mmTPC4_QM_CQ_CTL_3 0xF081B0 + +#define mmTPC4_QM_CQ_PTR_LO_4 0xF081B4 + +#define mmTPC4_QM_CQ_PTR_HI_4 0xF081B8 + +#define mmTPC4_QM_CQ_TSIZE_4 0xF081BC + +#define mmTPC4_QM_CQ_CTL_4 0xF081C0 + +#define mmTPC4_QM_CQ_PTR_LO_STS_0 0xF081C4 + +#define mmTPC4_QM_CQ_PTR_LO_STS_1 0xF081C8 + +#define mmTPC4_QM_CQ_PTR_LO_STS_2 0xF081CC + +#define mmTPC4_QM_CQ_PTR_LO_STS_3 0xF081D0 + +#define mmTPC4_QM_CQ_PTR_LO_STS_4 0xF081D4 + +#define mmTPC4_QM_CQ_PTR_HI_STS_0 0xF081D8 + +#define mmTPC4_QM_CQ_PTR_HI_STS_1 0xF081DC + +#define mmTPC4_QM_CQ_PTR_HI_STS_2 0xF081E0 + +#define mmTPC4_QM_CQ_PTR_HI_STS_3 0xF081E4 + +#define mmTPC4_QM_CQ_PTR_HI_STS_4 0xF081E8 + +#define mmTPC4_QM_CQ_TSIZE_STS_0 0xF081EC + +#define mmTPC4_QM_CQ_TSIZE_STS_1 0xF081F0 + +#define mmTPC4_QM_CQ_TSIZE_STS_2 0xF081F4 + +#define mmTPC4_QM_CQ_TSIZE_STS_3 0xF081F8 + +#define mmTPC4_QM_CQ_TSIZE_STS_4 0xF081FC + +#define mmTPC4_QM_CQ_CTL_STS_0 0xF08200 + +#define mmTPC4_QM_CQ_CTL_STS_1 0xF08204 + +#define mmTPC4_QM_CQ_CTL_STS_2 0xF08208 + +#define mmTPC4_QM_CQ_CTL_STS_3 0xF0820C + +#define mmTPC4_QM_CQ_CTL_STS_4 0xF08210 + +#define mmTPC4_QM_CQ_IFIFO_CNT_0 0xF08214 + +#define mmTPC4_QM_CQ_IFIFO_CNT_1 0xF08218 + +#define mmTPC4_QM_CQ_IFIFO_CNT_2 0xF0821C + +#define mmTPC4_QM_CQ_IFIFO_CNT_3 0xF08220 + +#define mmTPC4_QM_CQ_IFIFO_CNT_4 0xF08224 + +#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_0 0xF08228 + +#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_1 0xF0822C + +#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_2 0xF08230 + +#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_3 0xF08234 + +#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_4 0xF08238 + +#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_0 0xF0823C + +#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_1 0xF08240 + +#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_2 0xF08244 + +#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_3 0xF08248 + +#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_4 0xF0824C + +#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_0 0xF08250 + +#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_1 0xF08254 + +#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_2 0xF08258 + +#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_3 0xF0825C + +#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_4 0xF08260 + +#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_0 0xF08264 + +#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_1 0xF08268 + +#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_2 0xF0826C + +#define 
mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_3 0xF08270 + +#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_4 0xF08274 + +#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_0 0xF08278 + +#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_1 0xF0827C + +#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 0xF08280 + +#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_3 0xF08284 + +#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_4 0xF08288 + +#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_0 0xF0828C + +#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_1 0xF08290 + +#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_2 0xF08294 + +#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_3 0xF08298 + +#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_4 0xF0829C + +#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_0 0xF082A0 + +#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_1 0xF082A4 + +#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_2 0xF082A8 + +#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_3 0xF082AC + +#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_4 0xF082B0 + +#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_0 0xF082B4 + +#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_1 0xF082B8 + +#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_2 0xF082BC + +#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_3 0xF082C0 + +#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_4 0xF082C4 + +#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_0 0xF082C8 + +#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_1 0xF082CC + +#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_2 0xF082D0 + +#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_3 0xF082D4 + +#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_4 0xF082D8 + +#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xF082E0 + +#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xF082E4 + +#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xF082E8 + +#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xF082EC + +#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xF082F0 + +#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xF082F4 + +#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xF082F8 + +#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xF082FC + +#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xF08300 + +#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xF08304 + +#define mmTPC4_QM_CP_FENCE0_RDATA_0 0xF08308 + +#define mmTPC4_QM_CP_FENCE0_RDATA_1 0xF0830C + +#define mmTPC4_QM_CP_FENCE0_RDATA_2 0xF08310 + +#define mmTPC4_QM_CP_FENCE0_RDATA_3 0xF08314 + +#define mmTPC4_QM_CP_FENCE0_RDATA_4 0xF08318 + +#define mmTPC4_QM_CP_FENCE1_RDATA_0 0xF0831C + +#define mmTPC4_QM_CP_FENCE1_RDATA_1 0xF08320 + +#define mmTPC4_QM_CP_FENCE1_RDATA_2 0xF08324 + +#define mmTPC4_QM_CP_FENCE1_RDATA_3 0xF08328 + +#define mmTPC4_QM_CP_FENCE1_RDATA_4 0xF0832C + +#define mmTPC4_QM_CP_FENCE2_RDATA_0 0xF08330 + +#define mmTPC4_QM_CP_FENCE2_RDATA_1 0xF08334 + +#define mmTPC4_QM_CP_FENCE2_RDATA_2 0xF08338 + +#define mmTPC4_QM_CP_FENCE2_RDATA_3 0xF0833C + +#define mmTPC4_QM_CP_FENCE2_RDATA_4 0xF08340 + +#define mmTPC4_QM_CP_FENCE3_RDATA_0 0xF08344 + +#define mmTPC4_QM_CP_FENCE3_RDATA_1 0xF08348 + +#define mmTPC4_QM_CP_FENCE3_RDATA_2 0xF0834C + +#define mmTPC4_QM_CP_FENCE3_RDATA_3 0xF08350 + +#define mmTPC4_QM_CP_FENCE3_RDATA_4 0xF08354 + +#define mmTPC4_QM_CP_FENCE0_CNT_0 0xF08358 + +#define mmTPC4_QM_CP_FENCE0_CNT_1 0xF0835C + +#define mmTPC4_QM_CP_FENCE0_CNT_2 0xF08360 + +#define mmTPC4_QM_CP_FENCE0_CNT_3 0xF08364 + +#define mmTPC4_QM_CP_FENCE0_CNT_4 0xF08368 + +#define mmTPC4_QM_CP_FENCE1_CNT_0 0xF0836C + +#define mmTPC4_QM_CP_FENCE1_CNT_1 0xF08370 + +#define mmTPC4_QM_CP_FENCE1_CNT_2 0xF08374 + +#define mmTPC4_QM_CP_FENCE1_CNT_3 0xF08378 + +#define mmTPC4_QM_CP_FENCE1_CNT_4 0xF0837C + +#define mmTPC4_QM_CP_FENCE2_CNT_0 0xF08380 + +#define mmTPC4_QM_CP_FENCE2_CNT_1 0xF08384 + +#define 
mmTPC4_QM_CP_FENCE2_CNT_2 0xF08388 + +#define mmTPC4_QM_CP_FENCE2_CNT_3 0xF0838C + +#define mmTPC4_QM_CP_FENCE2_CNT_4 0xF08390 + +#define mmTPC4_QM_CP_FENCE3_CNT_0 0xF08394 + +#define mmTPC4_QM_CP_FENCE3_CNT_1 0xF08398 + +#define mmTPC4_QM_CP_FENCE3_CNT_2 0xF0839C + +#define mmTPC4_QM_CP_FENCE3_CNT_3 0xF083A0 + +#define mmTPC4_QM_CP_FENCE3_CNT_4 0xF083A4 + +#define mmTPC4_QM_CP_STS_0 0xF083A8 + +#define mmTPC4_QM_CP_STS_1 0xF083AC + +#define mmTPC4_QM_CP_STS_2 0xF083B0 + +#define mmTPC4_QM_CP_STS_3 0xF083B4 + +#define mmTPC4_QM_CP_STS_4 0xF083B8 + +#define mmTPC4_QM_CP_CURRENT_INST_LO_0 0xF083BC + +#define mmTPC4_QM_CP_CURRENT_INST_LO_1 0xF083C0 + +#define mmTPC4_QM_CP_CURRENT_INST_LO_2 0xF083C4 + +#define mmTPC4_QM_CP_CURRENT_INST_LO_3 0xF083C8 + +#define mmTPC4_QM_CP_CURRENT_INST_LO_4 0xF083CC + +#define mmTPC4_QM_CP_CURRENT_INST_HI_0 0xF083D0 + +#define mmTPC4_QM_CP_CURRENT_INST_HI_1 0xF083D4 + +#define mmTPC4_QM_CP_CURRENT_INST_HI_2 0xF083D8 + +#define mmTPC4_QM_CP_CURRENT_INST_HI_3 0xF083DC + +#define mmTPC4_QM_CP_CURRENT_INST_HI_4 0xF083E0 + +#define mmTPC4_QM_CP_BARRIER_CFG_0 0xF083F4 + +#define mmTPC4_QM_CP_BARRIER_CFG_1 0xF083F8 + +#define mmTPC4_QM_CP_BARRIER_CFG_2 0xF083FC + +#define mmTPC4_QM_CP_BARRIER_CFG_3 0xF08400 + +#define mmTPC4_QM_CP_BARRIER_CFG_4 0xF08404 + +#define mmTPC4_QM_CP_DBG_0_0 0xF08408 + +#define mmTPC4_QM_CP_DBG_0_1 0xF0840C + +#define mmTPC4_QM_CP_DBG_0_2 0xF08410 + +#define mmTPC4_QM_CP_DBG_0_3 0xF08414 + +#define mmTPC4_QM_CP_DBG_0_4 0xF08418 + +#define mmTPC4_QM_CP_ARUSER_31_11_0 0xF0841C + +#define mmTPC4_QM_CP_ARUSER_31_11_1 0xF08420 + +#define mmTPC4_QM_CP_ARUSER_31_11_2 0xF08424 + +#define mmTPC4_QM_CP_ARUSER_31_11_3 0xF08428 + +#define mmTPC4_QM_CP_ARUSER_31_11_4 0xF0842C + +#define mmTPC4_QM_CP_AWUSER_31_11_0 0xF08430 + +#define mmTPC4_QM_CP_AWUSER_31_11_1 0xF08434 + +#define mmTPC4_QM_CP_AWUSER_31_11_2 0xF08438 + +#define mmTPC4_QM_CP_AWUSER_31_11_3 0xF0843C + +#define mmTPC4_QM_CP_AWUSER_31_11_4 0xF08440 + +#define mmTPC4_QM_ARB_CFG_0 0xF08A00 + +#define mmTPC4_QM_ARB_CHOISE_Q_PUSH 0xF08A04 + +#define mmTPC4_QM_ARB_WRR_WEIGHT_0 0xF08A08 + +#define mmTPC4_QM_ARB_WRR_WEIGHT_1 0xF08A0C + +#define mmTPC4_QM_ARB_WRR_WEIGHT_2 0xF08A10 + +#define mmTPC4_QM_ARB_WRR_WEIGHT_3 0xF08A14 + +#define mmTPC4_QM_ARB_CFG_1 0xF08A18 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_0 0xF08A20 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_1 0xF08A24 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_2 0xF08A28 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_3 0xF08A2C + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_4 0xF08A30 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_5 0xF08A34 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_6 0xF08A38 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_7 0xF08A3C + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_8 0xF08A40 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_9 0xF08A44 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_10 0xF08A48 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_11 0xF08A4C + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_12 0xF08A50 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_13 0xF08A54 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_14 0xF08A58 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_15 0xF08A5C + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_16 0xF08A60 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_17 0xF08A64 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_18 0xF08A68 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_19 0xF08A6C + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_20 0xF08A70 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_21 0xF08A74 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_22 0xF08A78 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_23 0xF08A7C + +#define 
mmTPC4_QM_ARB_MST_AVAIL_CRED_24 0xF08A80 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_25 0xF08A84 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_26 0xF08A88 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_27 0xF08A8C + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_28 0xF08A90 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_29 0xF08A94 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_30 0xF08A98 + +#define mmTPC4_QM_ARB_MST_AVAIL_CRED_31 0xF08A9C + +#define mmTPC4_QM_ARB_MST_CRED_INC 0xF08AA0 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xF08AA4 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xF08AA8 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xF08AAC + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xF08AB0 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xF08AB4 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xF08AB8 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xF08ABC + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xF08AC0 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xF08AC4 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xF08AC8 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xF08ACC + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xF08AD0 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xF08AD4 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xF08AD8 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xF08ADC + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xF08AE0 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xF08AE4 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xF08AE8 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xF08AEC + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xF08AF0 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xF08AF4 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xF08AF8 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xF08AFC + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xF08B00 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xF08B04 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xF08B08 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xF08B0C + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xF08B10 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xF08B14 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xF08B18 + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xF08B1C + +#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xF08B20 + +#define mmTPC4_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xF08B28 + +#define mmTPC4_QM_ARB_MST_SLAVE_EN 0xF08B2C + +#define mmTPC4_QM_ARB_MST_QUIET_PER 0xF08B34 + +#define mmTPC4_QM_ARB_SLV_CHOISE_WDT 0xF08B38 + +#define mmTPC4_QM_ARB_SLV_ID 0xF08B3C + +#define mmTPC4_QM_ARB_MSG_MAX_INFLIGHT 0xF08B44 + +#define mmTPC4_QM_ARB_MSG_AWUSER_31_11 0xF08B48 + +#define mmTPC4_QM_ARB_MSG_AWUSER_SEC_PROP 0xF08B4C + +#define mmTPC4_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xF08B50 + +#define mmTPC4_QM_ARB_BASE_LO 0xF08B54 + +#define mmTPC4_QM_ARB_BASE_HI 0xF08B58 + +#define mmTPC4_QM_ARB_STATE_STS 0xF08B80 + +#define mmTPC4_QM_ARB_CHOISE_FULLNESS_STS 0xF08B84 + +#define mmTPC4_QM_ARB_MSG_STS 0xF08B88 + +#define mmTPC4_QM_ARB_SLV_CHOISE_Q_HEAD 0xF08B8C + +#define mmTPC4_QM_ARB_ERR_CAUSE 0xF08B9C + +#define mmTPC4_QM_ARB_ERR_MSG_EN 0xF08BA0 + +#define mmTPC4_QM_ARB_ERR_STS_DRP 0xF08BA8 + +#define mmTPC4_QM_ARB_MST_CRED_STS_0 0xF08BB0 + +#define mmTPC4_QM_ARB_MST_CRED_STS_1 0xF08BB4 + +#define mmTPC4_QM_ARB_MST_CRED_STS_2 0xF08BB8 + +#define mmTPC4_QM_ARB_MST_CRED_STS_3 0xF08BBC + +#define mmTPC4_QM_ARB_MST_CRED_STS_4 0xF08BC0 + +#define mmTPC4_QM_ARB_MST_CRED_STS_5 0xF08BC4 + +#define mmTPC4_QM_ARB_MST_CRED_STS_6 0xF08BC8 + +#define mmTPC4_QM_ARB_MST_CRED_STS_7 
0xF08BCC
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_8 0xF08BD0
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_9 0xF08BD4
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_10 0xF08BD8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_11 0xF08BDC
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_12 0xF08BE0
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_13 0xF08BE4
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_14 0xF08BE8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_15 0xF08BEC
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_16 0xF08BF0
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_17 0xF08BF4
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_18 0xF08BF8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_19 0xF08BFC
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_20 0xF08C00
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_21 0xF08C04
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_22 0xF08C08
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_23 0xF08C0C
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_24 0xF08C10
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_25 0xF08C14
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_26 0xF08C18
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_27 0xF08C1C
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_28 0xF08C20
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_29 0xF08C24
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_30 0xF08C28
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_31 0xF08C2C
+
+#define mmTPC4_QM_CGM_CFG 0xF08C70
+
+#define mmTPC4_QM_CGM_STS 0xF08C74
+
+#define mmTPC4_QM_CGM_CFG1 0xF08C78
+
+#define mmTPC4_QM_LOCAL_RANGE_BASE 0xF08C80
+
+#define mmTPC4_QM_LOCAL_RANGE_SIZE 0xF08C84
+
+#define mmTPC4_QM_CSMR_STRICT_PRIO_CFG 0xF08C90
+
+#define mmTPC4_QM_HBW_RD_RATE_LIM_CFG_1 0xF08C94
+
+#define mmTPC4_QM_LBW_WR_RATE_LIM_CFG_0 0xF08C98
+
+#define mmTPC4_QM_LBW_WR_RATE_LIM_CFG_1 0xF08C9C
+
+#define mmTPC4_QM_HBW_RD_RATE_LIM_CFG_0 0xF08CA0
+
+#define mmTPC4_QM_GLBL_AXCACHE 0xF08CA4
+
+#define mmTPC4_QM_IND_GW_APB_CFG 0xF08CB0
+
+#define mmTPC4_QM_IND_GW_APB_WDATA 0xF08CB4
+
+#define mmTPC4_QM_IND_GW_APB_RDATA 0xF08CB8
+
+#define mmTPC4_QM_IND_GW_APB_STATUS 0xF08CBC
+
+#define mmTPC4_QM_GLBL_ERR_ADDR_LO 0xF08CD0
+
+#define mmTPC4_QM_GLBL_ERR_ADDR_HI 0xF08CD4
+
+#define mmTPC4_QM_GLBL_ERR_WDATA 0xF08CD8
+
+#define mmTPC4_QM_GLBL_MEM_INIT_BUSY 0xF08D00
+
+#endif /* ASIC_REG_TPC4_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h
new file mode 100644
index 000000000..f428f8919
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC5_CFG_REGS_H_
+#define ASIC_REG_TPC5_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC5_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF46400
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF46404
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF46408
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF4640C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF46410
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF46414
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF46418
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF4641C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF46420
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF46424
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF46428
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF4642C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF46430
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF46434
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF46438
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF4643C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF46440
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF46444
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF46448
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF4644C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF46450
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF46454
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF46458
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF4645C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF46460
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF46464
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF46468
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF4646C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF46470
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF46474
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF46478
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF4647C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF46480
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF46484
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF46488
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF4648C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF46490
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF46494
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF46498
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF4649C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF464A0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF464A4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF464A8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF464AC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF464B0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF464B4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF464B8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF464BC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF464C0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF464C4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF464C8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF464CC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF464D0
+
+#define
mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF464D4 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF464D8 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF464DC + +#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF464E0 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF464E4 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF464E8 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF464EC + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF464F0 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF464F4 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF464F8 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF464FC + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF46500 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF46504 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF46508 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF4650C + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF46510 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF46514 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF46518 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF4651C + +#define mmTPC5_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF46520 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF46524 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF46528 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF4652C + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF46530 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF46534 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF46538 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF4653C + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF46540 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF46544 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF46548 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF4654C + +#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF46550 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF46554 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF46558 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF4655C + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF46560 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF46564 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF46568 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF4656C + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF46570 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF46574 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF46578 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF4657C + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF46580 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF46584 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF46588 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF4658C + +#define mmTPC5_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF46590 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF46594 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF46598 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF4659C + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF465A0 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF465A4 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF465A8 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF465AC + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF465B0 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF465B4 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF465B8 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF465BC + +#define 
mmTPC5_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xF465C0 + +#define mmTPC5_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xF465C4 + +#define mmTPC5_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xF465C8 + +#define mmTPC5_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xF465CC + +#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xF465D0 + +#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xF465D4 + +#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xF465D8 + +#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xF465DC + +#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xF465E0 + +#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xF465E4 + +#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xF465E8 + +#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xF465EC + +#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xF465F0 + +#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xF465F4 + +#define mmTPC5_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xF465F8 + +#define mmTPC5_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xF465FC + +#define mmTPC5_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xF46600 + +#define mmTPC5_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xF46604 + +#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xF46608 + +#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xF4660C + +#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xF46610 + +#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xF46614 + +#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xF46618 + +#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xF4661C + +#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xF46620 + +#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xF46624 + +#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xF46628 + +#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xF4662C + +#define mmTPC5_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xF46630 + +#define mmTPC5_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xF46634 + +#define mmTPC5_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xF46638 + +#define mmTPC5_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xF4663C + +#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xF46640 + +#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xF46644 + +#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xF46648 + +#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xF4664C + +#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xF46650 + +#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xF46654 + +#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xF46658 + +#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xF4665C + +#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xF46660 + +#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xF46664 + +#define mmTPC5_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xF46668 + +#define mmTPC5_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xF4666C + +#define mmTPC5_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xF46670 + +#define mmTPC5_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xF46674 + +#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xF46678 + +#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xF4667C + +#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xF46680 + +#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xF46684 + +#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xF46688 + +#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xF4668C + +#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xF46690 + +#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xF46694 + +#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xF46698 + +#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xF4669C + +#define mmTPC5_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xF466A0 + +#define mmTPC5_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xF466A4 + +#define mmTPC5_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xF466A8 + +#define 
mmTPC5_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xF466AC + +#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xF466B0 + +#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xF466B4 + +#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xF466B8 + +#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xF466BC + +#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xF466C0 + +#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xF466C4 + +#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xF466C8 + +#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xF466CC + +#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xF466D0 + +#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xF466D4 + +#define mmTPC5_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xF466D8 + +#define mmTPC5_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xF466DC + +#define mmTPC5_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xF466E0 + +#define mmTPC5_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xF466E4 + +#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xF466E8 + +#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xF466EC + +#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xF466F0 + +#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xF466F4 + +#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xF466F8 + +#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xF466FC + +#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xF46700 + +#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xF46704 + +#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xF46708 + +#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xF4670C + +#define mmTPC5_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xF46710 + +#define mmTPC5_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xF46714 + +#define mmTPC5_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xF46718 + +#define mmTPC5_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xF4671C + +#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xF46720 + +#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xF46724 + +#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xF46728 + +#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xF4672C + +#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xF46730 + +#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xF46734 + +#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xF46738 + +#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xF4673C + +#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xF46740 + +#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xF46744 + +#define mmTPC5_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xF46748 + +#define mmTPC5_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xF4674C + +#define mmTPC5_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xF46750 + +#define mmTPC5_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xF46754 + +#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xF46758 + +#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xF4675C + +#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xF46760 + +#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xF46764 + +#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xF46768 + +#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xF4676C + +#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xF46770 + +#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xF46774 + +#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xF46778 + +#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xF4677C + +#define mmTPC5_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF46780 + +#define mmTPC5_CFG_KERNEL_SYNC_OBJECT_ADDR 0xF46784 + +#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF46788 + +#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF4678C + +#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_0 0xF46790 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_0 0xF46794 + +#define 
mmTPC5_CFG_KERNEL_TID_BASE_DIM_1 0xF46798 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_1 0xF4679C + +#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_2 0xF467A0 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_2 0xF467A4 + +#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_3 0xF467A8 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_3 0xF467AC + +#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_4 0xF467B0 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_4 0xF467B4 + +#define mmTPC5_CFG_KERNEL_KERNEL_CONFIG 0xF467B8 + +#define mmTPC5_CFG_KERNEL_KERNEL_ID 0xF467BC + +#define mmTPC5_CFG_KERNEL_SRF_0 0xF467C0 + +#define mmTPC5_CFG_KERNEL_SRF_1 0xF467C4 + +#define mmTPC5_CFG_KERNEL_SRF_2 0xF467C8 + +#define mmTPC5_CFG_KERNEL_SRF_3 0xF467CC + +#define mmTPC5_CFG_KERNEL_SRF_4 0xF467D0 + +#define mmTPC5_CFG_KERNEL_SRF_5 0xF467D4 + +#define mmTPC5_CFG_KERNEL_SRF_6 0xF467D8 + +#define mmTPC5_CFG_KERNEL_SRF_7 0xF467DC + +#define mmTPC5_CFG_KERNEL_SRF_8 0xF467E0 + +#define mmTPC5_CFG_KERNEL_SRF_9 0xF467E4 + +#define mmTPC5_CFG_KERNEL_SRF_10 0xF467E8 + +#define mmTPC5_CFG_KERNEL_SRF_11 0xF467EC + +#define mmTPC5_CFG_KERNEL_SRF_12 0xF467F0 + +#define mmTPC5_CFG_KERNEL_SRF_13 0xF467F4 + +#define mmTPC5_CFG_KERNEL_SRF_14 0xF467F8 + +#define mmTPC5_CFG_KERNEL_SRF_15 0xF467FC + +#define mmTPC5_CFG_KERNEL_SRF_16 0xF46800 + +#define mmTPC5_CFG_KERNEL_SRF_17 0xF46804 + +#define mmTPC5_CFG_KERNEL_SRF_18 0xF46808 + +#define mmTPC5_CFG_KERNEL_SRF_19 0xF4680C + +#define mmTPC5_CFG_KERNEL_SRF_20 0xF46810 + +#define mmTPC5_CFG_KERNEL_SRF_21 0xF46814 + +#define mmTPC5_CFG_KERNEL_SRF_22 0xF46818 + +#define mmTPC5_CFG_KERNEL_SRF_23 0xF4681C + +#define mmTPC5_CFG_KERNEL_SRF_24 0xF46820 + +#define mmTPC5_CFG_KERNEL_SRF_25 0xF46824 + +#define mmTPC5_CFG_KERNEL_SRF_26 0xF46828 + +#define mmTPC5_CFG_KERNEL_SRF_27 0xF4682C + +#define mmTPC5_CFG_KERNEL_SRF_28 0xF46830 + +#define mmTPC5_CFG_KERNEL_SRF_29 0xF46834 + +#define mmTPC5_CFG_KERNEL_SRF_30 0xF46838 + +#define mmTPC5_CFG_KERNEL_SRF_31 0xF4683C + +#define mmTPC5_CFG_ROUND_CSR 0xF468FC + +#define mmTPC5_CFG_PROT 0xF46900 + +#define mmTPC5_CFG_SEMAPHORE 0xF46908 + +#define mmTPC5_CFG_VFLAGS 0xF4690C + +#define mmTPC5_CFG_SFLAGS 0xF46910 + +#define mmTPC5_CFG_LFSR_POLYNOM 0xF46918 + +#define mmTPC5_CFG_STATUS 0xF4691C + +#define mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH 0xF46920 + +#define mmTPC5_CFG_CFG_SUBTRACT_VALUE 0xF46924 + +#define mmTPC5_CFG_SM_BASE_ADDRESS_HIGH 0xF4692C + +#define mmTPC5_CFG_TPC_CMD 0xF46930 + +#define mmTPC5_CFG_TPC_EXECUTE 0xF46938 + +#define mmTPC5_CFG_TPC_STALL 0xF4693C + +#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_LOW 0xF46940 + +#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF46944 + +#define mmTPC5_CFG_RD_RATE_LIMIT 0xF46948 + +#define mmTPC5_CFG_WR_RATE_LIMIT 0xF46950 + +#define mmTPC5_CFG_MSS_CONFIG 0xF46954 + +#define mmTPC5_CFG_TPC_INTR_CAUSE 0xF46958 + +#define mmTPC5_CFG_TPC_INTR_MASK 0xF4695C + +#define mmTPC5_CFG_WQ_CREDITS 0xF46960 + +#define mmTPC5_CFG_ARUSER_LO 0xF46964 + +#define mmTPC5_CFG_ARUSER_HI 0xF46968 + +#define mmTPC5_CFG_AWUSER_LO 0xF4696C + +#define mmTPC5_CFG_AWUSER_HI 0xF46970 + +#define mmTPC5_CFG_OPCODE_EXEC 0xF46974 + +#define mmTPC5_CFG_LUT_FUNC32_BASE_ADDR_LO 0xF46978 + +#define mmTPC5_CFG_LUT_FUNC32_BASE_ADDR_HI 0xF4697C + +#define mmTPC5_CFG_LUT_FUNC64_BASE_ADDR_LO 0xF46980 + +#define mmTPC5_CFG_LUT_FUNC64_BASE_ADDR_HI 0xF46984 + +#define mmTPC5_CFG_LUT_FUNC128_BASE_ADDR_LO 0xF46988 + +#define mmTPC5_CFG_LUT_FUNC128_BASE_ADDR_HI 0xF4698C + +#define mmTPC5_CFG_LUT_FUNC256_BASE_ADDR_LO 0xF46990 + +#define mmTPC5_CFG_LUT_FUNC256_BASE_ADDR_HI 0xF46994 + +#define 
mmTPC5_CFG_TSB_CFG_MAX_SIZE 0xF46998 + +#define mmTPC5_CFG_TSB_CFG 0xF4699C + +#define mmTPC5_CFG_DBGMEM_ADD 0xF469A0 + +#define mmTPC5_CFG_DBGMEM_DATA_WR 0xF469A4 + +#define mmTPC5_CFG_DBGMEM_DATA_RD 0xF469A8 + +#define mmTPC5_CFG_DBGMEM_CTRL 0xF469AC + +#define mmTPC5_CFG_DBGMEM_RC 0xF469B0 + +#define mmTPC5_CFG_TSB_INFLIGHT_CNTR 0xF469B4 + +#define mmTPC5_CFG_WQ_INFLIGHT_CNTR 0xF469B8 + +#define mmTPC5_CFG_WQ_LBW_TOTAL_CNTR 0xF469BC + +#define mmTPC5_CFG_WQ_HBW_TOTAL_CNTR 0xF469C0 + +#define mmTPC5_CFG_IRQ_OCCOUPY_CNTR 0xF469C4 + +#define mmTPC5_CFG_FUNC_MBIST_CNTRL 0xF469D0 + +#define mmTPC5_CFG_FUNC_MBIST_PAT 0xF469D4 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_0 0xF469D8 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_1 0xF469DC + +#define mmTPC5_CFG_FUNC_MBIST_MEM_2 0xF469E0 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_3 0xF469E4 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_4 0xF469E8 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_5 0xF469EC + +#define mmTPC5_CFG_FUNC_MBIST_MEM_6 0xF469F0 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_7 0xF469F4 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_8 0xF469F8 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_9 0xF469FC + +#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF46A00 + +#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF46A04 + +#define mmTPC5_CFG_QM_TENSOR_0_PADDING_VALUE 0xF46A08 + +#define mmTPC5_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF46A0C + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF46A10 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF46A14 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF46A18 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF46A1C + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF46A20 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF46A24 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF46A28 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF46A2C + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF46A30 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF46A34 + +#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF46A38 + +#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF46A3C + +#define mmTPC5_CFG_QM_TENSOR_1_PADDING_VALUE 0xF46A40 + +#define mmTPC5_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF46A44 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF46A48 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF46A4C + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF46A50 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF46A54 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF46A58 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF46A5C + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF46A60 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF46A64 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF46A68 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF46A6C + +#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF46A70 + +#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF46A74 + +#define mmTPC5_CFG_QM_TENSOR_2_PADDING_VALUE 0xF46A78 + +#define mmTPC5_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF46A7C + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF46A80 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF46A84 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF46A88 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF46A8C + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF46A90 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF46A94 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF46A98 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF46A9C + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF46AA0 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF46AA4 + +#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF46AA8 + +#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 
0xF46AAC + +#define mmTPC5_CFG_QM_TENSOR_3_PADDING_VALUE 0xF46AB0 + +#define mmTPC5_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF46AB4 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF46AB8 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF46ABC + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF46AC0 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF46AC4 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF46AC8 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF46ACC + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF46AD0 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF46AD4 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF46AD8 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF46ADC + +#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF46AE0 + +#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF46AE4 + +#define mmTPC5_CFG_QM_TENSOR_4_PADDING_VALUE 0xF46AE8 + +#define mmTPC5_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF46AEC + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF46AF0 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF46AF4 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF46AF8 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF46AFC + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF46B00 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF46B04 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF46B08 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF46B0C + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF46B10 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF46B14 + +#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF46B18 + +#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF46B1C + +#define mmTPC5_CFG_QM_TENSOR_5_PADDING_VALUE 0xF46B20 + +#define mmTPC5_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF46B24 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF46B28 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF46B2C + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF46B30 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF46B34 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF46B38 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF46B3C + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF46B40 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF46B44 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF46B48 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF46B4C + +#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF46B50 + +#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF46B54 + +#define mmTPC5_CFG_QM_TENSOR_6_PADDING_VALUE 0xF46B58 + +#define mmTPC5_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF46B5C + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF46B60 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF46B64 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF46B68 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF46B6C + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF46B70 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF46B74 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF46B78 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF46B7C + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF46B80 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF46B84 + +#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF46B88 + +#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF46B8C + +#define mmTPC5_CFG_QM_TENSOR_7_PADDING_VALUE 0xF46B90 + +#define mmTPC5_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF46B94 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF46B98 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF46B9C + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF46BA0 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF46BA4 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF46BA8 + +#define 
mmTPC5_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF46BAC + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF46BB0 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF46BB4 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF46BB8 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF46BBC + +#define mmTPC5_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xF46BC0 + +#define mmTPC5_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xF46BC4 + +#define mmTPC5_CFG_QM_TENSOR_8_PADDING_VALUE 0xF46BC8 + +#define mmTPC5_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xF46BCC + +#define mmTPC5_CFG_QM_TENSOR_8_DIM_0_SIZE 0xF46BD0 + +#define mmTPC5_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xF46BD4 + +#define mmTPC5_CFG_QM_TENSOR_8_DIM_1_SIZE 0xF46BD8 + +#define mmTPC5_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xF46BDC + +#define mmTPC5_CFG_QM_TENSOR_8_DIM_2_SIZE 0xF46BE0 + +#define mmTPC5_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xF46BE4 + +#define mmTPC5_CFG_QM_TENSOR_8_DIM_3_SIZE 0xF46BE8 + +#define mmTPC5_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xF46BEC + +#define mmTPC5_CFG_QM_TENSOR_8_DIM_4_SIZE 0xF46BF0 + +#define mmTPC5_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xF46BF4 + +#define mmTPC5_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xF46BF8 + +#define mmTPC5_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xF46BFC + +#define mmTPC5_CFG_QM_TENSOR_9_PADDING_VALUE 0xF46C00 + +#define mmTPC5_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xF46C04 + +#define mmTPC5_CFG_QM_TENSOR_9_DIM_0_SIZE 0xF46C08 + +#define mmTPC5_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xF46C0C + +#define mmTPC5_CFG_QM_TENSOR_9_DIM_1_SIZE 0xF46C10 + +#define mmTPC5_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xF46C14 + +#define mmTPC5_CFG_QM_TENSOR_9_DIM_2_SIZE 0xF46C18 + +#define mmTPC5_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xF46C1C + +#define mmTPC5_CFG_QM_TENSOR_9_DIM_3_SIZE 0xF46C20 + +#define mmTPC5_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xF46C24 + +#define mmTPC5_CFG_QM_TENSOR_9_DIM_4_SIZE 0xF46C28 + +#define mmTPC5_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xF46C2C + +#define mmTPC5_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xF46C30 + +#define mmTPC5_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xF46C34 + +#define mmTPC5_CFG_QM_TENSOR_10_PADDING_VALUE 0xF46C38 + +#define mmTPC5_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xF46C3C + +#define mmTPC5_CFG_QM_TENSOR_10_DIM_0_SIZE 0xF46C40 + +#define mmTPC5_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xF46C44 + +#define mmTPC5_CFG_QM_TENSOR_10_DIM_1_SIZE 0xF46C48 + +#define mmTPC5_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xF46C4C + +#define mmTPC5_CFG_QM_TENSOR_10_DIM_2_SIZE 0xF46C50 + +#define mmTPC5_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xF46C54 + +#define mmTPC5_CFG_QM_TENSOR_10_DIM_3_SIZE 0xF46C58 + +#define mmTPC5_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xF46C5C + +#define mmTPC5_CFG_QM_TENSOR_10_DIM_4_SIZE 0xF46C60 + +#define mmTPC5_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xF46C64 + +#define mmTPC5_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xF46C68 + +#define mmTPC5_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xF46C6C + +#define mmTPC5_CFG_QM_TENSOR_11_PADDING_VALUE 0xF46C70 + +#define mmTPC5_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xF46C74 + +#define mmTPC5_CFG_QM_TENSOR_11_DIM_0_SIZE 0xF46C78 + +#define mmTPC5_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xF46C7C + +#define mmTPC5_CFG_QM_TENSOR_11_DIM_1_SIZE 0xF46C80 + +#define mmTPC5_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xF46C84 + +#define mmTPC5_CFG_QM_TENSOR_11_DIM_2_SIZE 0xF46C88 + +#define mmTPC5_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xF46C8C + +#define mmTPC5_CFG_QM_TENSOR_11_DIM_3_SIZE 0xF46C90 + +#define mmTPC5_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xF46C94 + +#define mmTPC5_CFG_QM_TENSOR_11_DIM_4_SIZE 0xF46C98 + +#define mmTPC5_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xF46C9C + +#define mmTPC5_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xF46CA0 + +#define mmTPC5_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xF46CA4 + +#define 
mmTPC5_CFG_QM_TENSOR_12_PADDING_VALUE 0xF46CA8 + +#define mmTPC5_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xF46CAC + +#define mmTPC5_CFG_QM_TENSOR_12_DIM_0_SIZE 0xF46CB0 + +#define mmTPC5_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xF46CB4 + +#define mmTPC5_CFG_QM_TENSOR_12_DIM_1_SIZE 0xF46CB8 + +#define mmTPC5_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xF46CBC + +#define mmTPC5_CFG_QM_TENSOR_12_DIM_2_SIZE 0xF46CC0 + +#define mmTPC5_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xF46CC4 + +#define mmTPC5_CFG_QM_TENSOR_12_DIM_3_SIZE 0xF46CC8 + +#define mmTPC5_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xF46CCC + +#define mmTPC5_CFG_QM_TENSOR_12_DIM_4_SIZE 0xF46CD0 + +#define mmTPC5_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xF46CD4 + +#define mmTPC5_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xF46CD8 + +#define mmTPC5_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xF46CDC + +#define mmTPC5_CFG_QM_TENSOR_13_PADDING_VALUE 0xF46CE0 + +#define mmTPC5_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xF46CE4 + +#define mmTPC5_CFG_QM_TENSOR_13_DIM_0_SIZE 0xF46CE8 + +#define mmTPC5_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xF46CEC + +#define mmTPC5_CFG_QM_TENSOR_13_DIM_1_SIZE 0xF46CF0 + +#define mmTPC5_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xF46CF4 + +#define mmTPC5_CFG_QM_TENSOR_13_DIM_2_SIZE 0xF46CF8 + +#define mmTPC5_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xF46CFC + +#define mmTPC5_CFG_QM_TENSOR_13_DIM_3_SIZE 0xF46D00 + +#define mmTPC5_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xF46D04 + +#define mmTPC5_CFG_QM_TENSOR_13_DIM_4_SIZE 0xF46D08 + +#define mmTPC5_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xF46D0C + +#define mmTPC5_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xF46D10 + +#define mmTPC5_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xF46D14 + +#define mmTPC5_CFG_QM_TENSOR_14_PADDING_VALUE 0xF46D18 + +#define mmTPC5_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xF46D1C + +#define mmTPC5_CFG_QM_TENSOR_14_DIM_0_SIZE 0xF46D20 + +#define mmTPC5_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xF46D24 + +#define mmTPC5_CFG_QM_TENSOR_14_DIM_1_SIZE 0xF46D28 + +#define mmTPC5_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xF46D2C + +#define mmTPC5_CFG_QM_TENSOR_14_DIM_2_SIZE 0xF46D30 + +#define mmTPC5_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xF46D34 + +#define mmTPC5_CFG_QM_TENSOR_14_DIM_3_SIZE 0xF46D38 + +#define mmTPC5_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xF46D3C + +#define mmTPC5_CFG_QM_TENSOR_14_DIM_4_SIZE 0xF46D40 + +#define mmTPC5_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xF46D44 + +#define mmTPC5_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xF46D48 + +#define mmTPC5_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xF46D4C + +#define mmTPC5_CFG_QM_TENSOR_15_PADDING_VALUE 0xF46D50 + +#define mmTPC5_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xF46D54 + +#define mmTPC5_CFG_QM_TENSOR_15_DIM_0_SIZE 0xF46D58 + +#define mmTPC5_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xF46D5C + +#define mmTPC5_CFG_QM_TENSOR_15_DIM_1_SIZE 0xF46D60 + +#define mmTPC5_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xF46D64 + +#define mmTPC5_CFG_QM_TENSOR_15_DIM_2_SIZE 0xF46D68 + +#define mmTPC5_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xF46D6C + +#define mmTPC5_CFG_QM_TENSOR_15_DIM_3_SIZE 0xF46D70 + +#define mmTPC5_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xF46D74 + +#define mmTPC5_CFG_QM_TENSOR_15_DIM_4_SIZE 0xF46D78 + +#define mmTPC5_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xF46D7C + +#define mmTPC5_CFG_QM_SYNC_OBJECT_MESSAGE 0xF46D80 + +#define mmTPC5_CFG_QM_SYNC_OBJECT_ADDR 0xF46D84 + +#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF46D88 + +#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF46D8C + +#define mmTPC5_CFG_QM_TID_BASE_DIM_0 0xF46D90 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_0 0xF46D94 + +#define mmTPC5_CFG_QM_TID_BASE_DIM_1 0xF46D98 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_1 0xF46D9C + +#define mmTPC5_CFG_QM_TID_BASE_DIM_2 0xF46DA0 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_2 
0xF46DA4 + +#define mmTPC5_CFG_QM_TID_BASE_DIM_3 0xF46DA8 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_3 0xF46DAC + +#define mmTPC5_CFG_QM_TID_BASE_DIM_4 0xF46DB0 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_4 0xF46DB4 + +#define mmTPC5_CFG_QM_KERNEL_CONFIG 0xF46DB8 + +#define mmTPC5_CFG_QM_KERNEL_ID 0xF46DBC + +#define mmTPC5_CFG_QM_SRF_0 0xF46DC0 + +#define mmTPC5_CFG_QM_SRF_1 0xF46DC4 + +#define mmTPC5_CFG_QM_SRF_2 0xF46DC8 + +#define mmTPC5_CFG_QM_SRF_3 0xF46DCC + +#define mmTPC5_CFG_QM_SRF_4 0xF46DD0 + +#define mmTPC5_CFG_QM_SRF_5 0xF46DD4 + +#define mmTPC5_CFG_QM_SRF_6 0xF46DD8 + +#define mmTPC5_CFG_QM_SRF_7 0xF46DDC + +#define mmTPC5_CFG_QM_SRF_8 0xF46DE0 + +#define mmTPC5_CFG_QM_SRF_9 0xF46DE4 + +#define mmTPC5_CFG_QM_SRF_10 0xF46DE8 + +#define mmTPC5_CFG_QM_SRF_11 0xF46DEC + +#define mmTPC5_CFG_QM_SRF_12 0xF46DF0 + +#define mmTPC5_CFG_QM_SRF_13 0xF46DF4 + +#define mmTPC5_CFG_QM_SRF_14 0xF46DF8 + +#define mmTPC5_CFG_QM_SRF_15 0xF46DFC + +#define mmTPC5_CFG_QM_SRF_16 0xF46E00 + +#define mmTPC5_CFG_QM_SRF_17 0xF46E04 + +#define mmTPC5_CFG_QM_SRF_18 0xF46E08 + +#define mmTPC5_CFG_QM_SRF_19 0xF46E0C + +#define mmTPC5_CFG_QM_SRF_20 0xF46E10 + +#define mmTPC5_CFG_QM_SRF_21 0xF46E14 + +#define mmTPC5_CFG_QM_SRF_22 0xF46E18 + +#define mmTPC5_CFG_QM_SRF_23 0xF46E1C + +#define mmTPC5_CFG_QM_SRF_24 0xF46E20 + +#define mmTPC5_CFG_QM_SRF_25 0xF46E24 + +#define mmTPC5_CFG_QM_SRF_26 0xF46E28 + +#define mmTPC5_CFG_QM_SRF_27 0xF46E2C + +#define mmTPC5_CFG_QM_SRF_28 0xF46E30 + +#define mmTPC5_CFG_QM_SRF_29 0xF46E34 + +#define mmTPC5_CFG_QM_SRF_30 0xF46E38 + +#define mmTPC5_CFG_QM_SRF_31 0xF46E3C + +#endif /* ASIC_REG_TPC5_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h new file mode 100644 index 000000000..cd3a810ff --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC5_QM_REGS_H_ +#define ASIC_REG_TPC5_QM_REGS_H_ + +/* + ***************************************** + * TPC5_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC5_QM_GLBL_CFG0 0xF48000 + +#define mmTPC5_QM_GLBL_CFG1 0xF48004 + +#define mmTPC5_QM_GLBL_PROT 0xF48008 + +#define mmTPC5_QM_GLBL_ERR_CFG 0xF4800C + +#define mmTPC5_QM_GLBL_SECURE_PROPS_0 0xF48010 + +#define mmTPC5_QM_GLBL_SECURE_PROPS_1 0xF48014 + +#define mmTPC5_QM_GLBL_SECURE_PROPS_2 0xF48018 + +#define mmTPC5_QM_GLBL_SECURE_PROPS_3 0xF4801C + +#define mmTPC5_QM_GLBL_SECURE_PROPS_4 0xF48020 + +#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_0 0xF48024 + +#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_1 0xF48028 + +#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_2 0xF4802C + +#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_3 0xF48030 + +#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_4 0xF48034 + +#define mmTPC5_QM_GLBL_STS0 0xF48038 + +#define mmTPC5_QM_GLBL_STS1_0 0xF48040 + +#define mmTPC5_QM_GLBL_STS1_1 0xF48044 + +#define mmTPC5_QM_GLBL_STS1_2 0xF48048 + +#define mmTPC5_QM_GLBL_STS1_3 0xF4804C + +#define mmTPC5_QM_GLBL_STS1_4 0xF48050 + +#define mmTPC5_QM_GLBL_MSG_EN_0 0xF48054 + +#define mmTPC5_QM_GLBL_MSG_EN_1 0xF48058 + +#define mmTPC5_QM_GLBL_MSG_EN_2 0xF4805C + +#define mmTPC5_QM_GLBL_MSG_EN_3 0xF48060 + +#define mmTPC5_QM_GLBL_MSG_EN_4 0xF48068 + +#define mmTPC5_QM_PQ_BASE_LO_0 0xF48070 + +#define mmTPC5_QM_PQ_BASE_LO_1 0xF48074 + +#define mmTPC5_QM_PQ_BASE_LO_2 0xF48078 + +#define mmTPC5_QM_PQ_BASE_LO_3 0xF4807C + +#define mmTPC5_QM_PQ_BASE_HI_0 0xF48080 + +#define mmTPC5_QM_PQ_BASE_HI_1 0xF48084 + +#define mmTPC5_QM_PQ_BASE_HI_2 0xF48088 + +#define mmTPC5_QM_PQ_BASE_HI_3 0xF4808C + +#define mmTPC5_QM_PQ_SIZE_0 0xF48090 + +#define mmTPC5_QM_PQ_SIZE_1 0xF48094 + +#define mmTPC5_QM_PQ_SIZE_2 0xF48098 + +#define mmTPC5_QM_PQ_SIZE_3 0xF4809C + +#define mmTPC5_QM_PQ_PI_0 0xF480A0 + +#define mmTPC5_QM_PQ_PI_1 0xF480A4 + +#define mmTPC5_QM_PQ_PI_2 0xF480A8 + +#define mmTPC5_QM_PQ_PI_3 0xF480AC + +#define mmTPC5_QM_PQ_CI_0 0xF480B0 + +#define mmTPC5_QM_PQ_CI_1 0xF480B4 + +#define mmTPC5_QM_PQ_CI_2 0xF480B8 + +#define mmTPC5_QM_PQ_CI_3 0xF480BC + +#define mmTPC5_QM_PQ_CFG0_0 0xF480C0 + +#define mmTPC5_QM_PQ_CFG0_1 0xF480C4 + +#define mmTPC5_QM_PQ_CFG0_2 0xF480C8 + +#define mmTPC5_QM_PQ_CFG0_3 0xF480CC + +#define mmTPC5_QM_PQ_CFG1_0 0xF480D0 + +#define mmTPC5_QM_PQ_CFG1_1 0xF480D4 + +#define mmTPC5_QM_PQ_CFG1_2 0xF480D8 + +#define mmTPC5_QM_PQ_CFG1_3 0xF480DC + +#define mmTPC5_QM_PQ_ARUSER_31_11_0 0xF480E0 + +#define mmTPC5_QM_PQ_ARUSER_31_11_1 0xF480E4 + +#define mmTPC5_QM_PQ_ARUSER_31_11_2 0xF480E8 + +#define mmTPC5_QM_PQ_ARUSER_31_11_3 0xF480EC + +#define mmTPC5_QM_PQ_STS0_0 0xF480F0 + +#define mmTPC5_QM_PQ_STS0_1 0xF480F4 + +#define mmTPC5_QM_PQ_STS0_2 0xF480F8 + +#define mmTPC5_QM_PQ_STS0_3 0xF480FC + +#define mmTPC5_QM_PQ_STS1_0 0xF48100 + +#define mmTPC5_QM_PQ_STS1_1 0xF48104 + +#define mmTPC5_QM_PQ_STS1_2 0xF48108 + +#define mmTPC5_QM_PQ_STS1_3 0xF4810C + +#define mmTPC5_QM_CQ_CFG0_0 0xF48110 + +#define mmTPC5_QM_CQ_CFG0_1 0xF48114 + +#define mmTPC5_QM_CQ_CFG0_2 0xF48118 + +#define mmTPC5_QM_CQ_CFG0_3 0xF4811C + +#define mmTPC5_QM_CQ_CFG0_4 0xF48120 + +#define mmTPC5_QM_CQ_CFG1_0 0xF48124 + +#define mmTPC5_QM_CQ_CFG1_1 0xF48128 + +#define mmTPC5_QM_CQ_CFG1_2 0xF4812C + +#define mmTPC5_QM_CQ_CFG1_3 0xF48130 + +#define mmTPC5_QM_CQ_CFG1_4 0xF48134 + 
+#define mmTPC5_QM_CQ_ARUSER_31_11_0 0xF48138 + +#define mmTPC5_QM_CQ_ARUSER_31_11_1 0xF4813C + +#define mmTPC5_QM_CQ_ARUSER_31_11_2 0xF48140 + +#define mmTPC5_QM_CQ_ARUSER_31_11_3 0xF48144 + +#define mmTPC5_QM_CQ_ARUSER_31_11_4 0xF48148 + +#define mmTPC5_QM_CQ_STS0_0 0xF4814C + +#define mmTPC5_QM_CQ_STS0_1 0xF48150 + +#define mmTPC5_QM_CQ_STS0_2 0xF48154 + +#define mmTPC5_QM_CQ_STS0_3 0xF48158 + +#define mmTPC5_QM_CQ_STS0_4 0xF4815C + +#define mmTPC5_QM_CQ_STS1_0 0xF48160 + +#define mmTPC5_QM_CQ_STS1_1 0xF48164 + +#define mmTPC5_QM_CQ_STS1_2 0xF48168 + +#define mmTPC5_QM_CQ_STS1_3 0xF4816C + +#define mmTPC5_QM_CQ_STS1_4 0xF48170 + +#define mmTPC5_QM_CQ_PTR_LO_0 0xF48174 + +#define mmTPC5_QM_CQ_PTR_HI_0 0xF48178 + +#define mmTPC5_QM_CQ_TSIZE_0 0xF4817C + +#define mmTPC5_QM_CQ_CTL_0 0xF48180 + +#define mmTPC5_QM_CQ_PTR_LO_1 0xF48184 + +#define mmTPC5_QM_CQ_PTR_HI_1 0xF48188 + +#define mmTPC5_QM_CQ_TSIZE_1 0xF4818C + +#define mmTPC5_QM_CQ_CTL_1 0xF48190 + +#define mmTPC5_QM_CQ_PTR_LO_2 0xF48194 + +#define mmTPC5_QM_CQ_PTR_HI_2 0xF48198 + +#define mmTPC5_QM_CQ_TSIZE_2 0xF4819C + +#define mmTPC5_QM_CQ_CTL_2 0xF481A0 + +#define mmTPC5_QM_CQ_PTR_LO_3 0xF481A4 + +#define mmTPC5_QM_CQ_PTR_HI_3 0xF481A8 + +#define mmTPC5_QM_CQ_TSIZE_3 0xF481AC + +#define mmTPC5_QM_CQ_CTL_3 0xF481B0 + +#define mmTPC5_QM_CQ_PTR_LO_4 0xF481B4 + +#define mmTPC5_QM_CQ_PTR_HI_4 0xF481B8 + +#define mmTPC5_QM_CQ_TSIZE_4 0xF481BC + +#define mmTPC5_QM_CQ_CTL_4 0xF481C0 + +#define mmTPC5_QM_CQ_PTR_LO_STS_0 0xF481C4 + +#define mmTPC5_QM_CQ_PTR_LO_STS_1 0xF481C8 + +#define mmTPC5_QM_CQ_PTR_LO_STS_2 0xF481CC + +#define mmTPC5_QM_CQ_PTR_LO_STS_3 0xF481D0 + +#define mmTPC5_QM_CQ_PTR_LO_STS_4 0xF481D4 + +#define mmTPC5_QM_CQ_PTR_HI_STS_0 0xF481D8 + +#define mmTPC5_QM_CQ_PTR_HI_STS_1 0xF481DC + +#define mmTPC5_QM_CQ_PTR_HI_STS_2 0xF481E0 + +#define mmTPC5_QM_CQ_PTR_HI_STS_3 0xF481E4 + +#define mmTPC5_QM_CQ_PTR_HI_STS_4 0xF481E8 + +#define mmTPC5_QM_CQ_TSIZE_STS_0 0xF481EC + +#define mmTPC5_QM_CQ_TSIZE_STS_1 0xF481F0 + +#define mmTPC5_QM_CQ_TSIZE_STS_2 0xF481F4 + +#define mmTPC5_QM_CQ_TSIZE_STS_3 0xF481F8 + +#define mmTPC5_QM_CQ_TSIZE_STS_4 0xF481FC + +#define mmTPC5_QM_CQ_CTL_STS_0 0xF48200 + +#define mmTPC5_QM_CQ_CTL_STS_1 0xF48204 + +#define mmTPC5_QM_CQ_CTL_STS_2 0xF48208 + +#define mmTPC5_QM_CQ_CTL_STS_3 0xF4820C + +#define mmTPC5_QM_CQ_CTL_STS_4 0xF48210 + +#define mmTPC5_QM_CQ_IFIFO_CNT_0 0xF48214 + +#define mmTPC5_QM_CQ_IFIFO_CNT_1 0xF48218 + +#define mmTPC5_QM_CQ_IFIFO_CNT_2 0xF4821C + +#define mmTPC5_QM_CQ_IFIFO_CNT_3 0xF48220 + +#define mmTPC5_QM_CQ_IFIFO_CNT_4 0xF48224 + +#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_0 0xF48228 + +#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_1 0xF4822C + +#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_2 0xF48230 + +#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_3 0xF48234 + +#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_4 0xF48238 + +#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_0 0xF4823C + +#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_1 0xF48240 + +#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_2 0xF48244 + +#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_3 0xF48248 + +#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_4 0xF4824C + +#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_0 0xF48250 + +#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_1 0xF48254 + +#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_2 0xF48258 + +#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_3 0xF4825C + +#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_4 0xF48260 + +#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_0 0xF48264 + +#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_1 0xF48268 + +#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_2 0xF4826C + +#define 
mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_3 0xF48270 + +#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_4 0xF48274 + +#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_0 0xF48278 + +#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_1 0xF4827C + +#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 0xF48280 + +#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_3 0xF48284 + +#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_4 0xF48288 + +#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_0 0xF4828C + +#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_1 0xF48290 + +#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_2 0xF48294 + +#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_3 0xF48298 + +#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_4 0xF4829C + +#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_0 0xF482A0 + +#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_1 0xF482A4 + +#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_2 0xF482A8 + +#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_3 0xF482AC + +#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_4 0xF482B0 + +#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_0 0xF482B4 + +#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_1 0xF482B8 + +#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_2 0xF482BC + +#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_3 0xF482C0 + +#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_4 0xF482C4 + +#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_0 0xF482C8 + +#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_1 0xF482CC + +#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_2 0xF482D0 + +#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_3 0xF482D4 + +#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_4 0xF482D8 + +#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xF482E0 + +#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xF482E4 + +#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xF482E8 + +#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xF482EC + +#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xF482F0 + +#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xF482F4 + +#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xF482F8 + +#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xF482FC + +#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xF48300 + +#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xF48304 + +#define mmTPC5_QM_CP_FENCE0_RDATA_0 0xF48308 + +#define mmTPC5_QM_CP_FENCE0_RDATA_1 0xF4830C + +#define mmTPC5_QM_CP_FENCE0_RDATA_2 0xF48310 + +#define mmTPC5_QM_CP_FENCE0_RDATA_3 0xF48314 + +#define mmTPC5_QM_CP_FENCE0_RDATA_4 0xF48318 + +#define mmTPC5_QM_CP_FENCE1_RDATA_0 0xF4831C + +#define mmTPC5_QM_CP_FENCE1_RDATA_1 0xF48320 + +#define mmTPC5_QM_CP_FENCE1_RDATA_2 0xF48324 + +#define mmTPC5_QM_CP_FENCE1_RDATA_3 0xF48328 + +#define mmTPC5_QM_CP_FENCE1_RDATA_4 0xF4832C + +#define mmTPC5_QM_CP_FENCE2_RDATA_0 0xF48330 + +#define mmTPC5_QM_CP_FENCE2_RDATA_1 0xF48334 + +#define mmTPC5_QM_CP_FENCE2_RDATA_2 0xF48338 + +#define mmTPC5_QM_CP_FENCE2_RDATA_3 0xF4833C + +#define mmTPC5_QM_CP_FENCE2_RDATA_4 0xF48340 + +#define mmTPC5_QM_CP_FENCE3_RDATA_0 0xF48344 + +#define mmTPC5_QM_CP_FENCE3_RDATA_1 0xF48348 + +#define mmTPC5_QM_CP_FENCE3_RDATA_2 0xF4834C + +#define mmTPC5_QM_CP_FENCE3_RDATA_3 0xF48350 + +#define mmTPC5_QM_CP_FENCE3_RDATA_4 0xF48354 + +#define mmTPC5_QM_CP_FENCE0_CNT_0 0xF48358 + +#define mmTPC5_QM_CP_FENCE0_CNT_1 0xF4835C + +#define mmTPC5_QM_CP_FENCE0_CNT_2 0xF48360 + +#define mmTPC5_QM_CP_FENCE0_CNT_3 0xF48364 + +#define mmTPC5_QM_CP_FENCE0_CNT_4 0xF48368 + +#define mmTPC5_QM_CP_FENCE1_CNT_0 0xF4836C + +#define mmTPC5_QM_CP_FENCE1_CNT_1 0xF48370 + +#define mmTPC5_QM_CP_FENCE1_CNT_2 0xF48374 + +#define mmTPC5_QM_CP_FENCE1_CNT_3 0xF48378 + +#define mmTPC5_QM_CP_FENCE1_CNT_4 0xF4837C + +#define mmTPC5_QM_CP_FENCE2_CNT_0 0xF48380 + +#define mmTPC5_QM_CP_FENCE2_CNT_1 0xF48384 + +#define 
mmTPC5_QM_CP_FENCE2_CNT_2 0xF48388 + +#define mmTPC5_QM_CP_FENCE2_CNT_3 0xF4838C + +#define mmTPC5_QM_CP_FENCE2_CNT_4 0xF48390 + +#define mmTPC5_QM_CP_FENCE3_CNT_0 0xF48394 + +#define mmTPC5_QM_CP_FENCE3_CNT_1 0xF48398 + +#define mmTPC5_QM_CP_FENCE3_CNT_2 0xF4839C + +#define mmTPC5_QM_CP_FENCE3_CNT_3 0xF483A0 + +#define mmTPC5_QM_CP_FENCE3_CNT_4 0xF483A4 + +#define mmTPC5_QM_CP_STS_0 0xF483A8 + +#define mmTPC5_QM_CP_STS_1 0xF483AC + +#define mmTPC5_QM_CP_STS_2 0xF483B0 + +#define mmTPC5_QM_CP_STS_3 0xF483B4 + +#define mmTPC5_QM_CP_STS_4 0xF483B8 + +#define mmTPC5_QM_CP_CURRENT_INST_LO_0 0xF483BC + +#define mmTPC5_QM_CP_CURRENT_INST_LO_1 0xF483C0 + +#define mmTPC5_QM_CP_CURRENT_INST_LO_2 0xF483C4 + +#define mmTPC5_QM_CP_CURRENT_INST_LO_3 0xF483C8 + +#define mmTPC5_QM_CP_CURRENT_INST_LO_4 0xF483CC + +#define mmTPC5_QM_CP_CURRENT_INST_HI_0 0xF483D0 + +#define mmTPC5_QM_CP_CURRENT_INST_HI_1 0xF483D4 + +#define mmTPC5_QM_CP_CURRENT_INST_HI_2 0xF483D8 + +#define mmTPC5_QM_CP_CURRENT_INST_HI_3 0xF483DC + +#define mmTPC5_QM_CP_CURRENT_INST_HI_4 0xF483E0 + +#define mmTPC5_QM_CP_BARRIER_CFG_0 0xF483F4 + +#define mmTPC5_QM_CP_BARRIER_CFG_1 0xF483F8 + +#define mmTPC5_QM_CP_BARRIER_CFG_2 0xF483FC + +#define mmTPC5_QM_CP_BARRIER_CFG_3 0xF48400 + +#define mmTPC5_QM_CP_BARRIER_CFG_4 0xF48404 + +#define mmTPC5_QM_CP_DBG_0_0 0xF48408 + +#define mmTPC5_QM_CP_DBG_0_1 0xF4840C + +#define mmTPC5_QM_CP_DBG_0_2 0xF48410 + +#define mmTPC5_QM_CP_DBG_0_3 0xF48414 + +#define mmTPC5_QM_CP_DBG_0_4 0xF48418 + +#define mmTPC5_QM_CP_ARUSER_31_11_0 0xF4841C + +#define mmTPC5_QM_CP_ARUSER_31_11_1 0xF48420 + +#define mmTPC5_QM_CP_ARUSER_31_11_2 0xF48424 + +#define mmTPC5_QM_CP_ARUSER_31_11_3 0xF48428 + +#define mmTPC5_QM_CP_ARUSER_31_11_4 0xF4842C + +#define mmTPC5_QM_CP_AWUSER_31_11_0 0xF48430 + +#define mmTPC5_QM_CP_AWUSER_31_11_1 0xF48434 + +#define mmTPC5_QM_CP_AWUSER_31_11_2 0xF48438 + +#define mmTPC5_QM_CP_AWUSER_31_11_3 0xF4843C + +#define mmTPC5_QM_CP_AWUSER_31_11_4 0xF48440 + +#define mmTPC5_QM_ARB_CFG_0 0xF48A00 + +#define mmTPC5_QM_ARB_CHOISE_Q_PUSH 0xF48A04 + +#define mmTPC5_QM_ARB_WRR_WEIGHT_0 0xF48A08 + +#define mmTPC5_QM_ARB_WRR_WEIGHT_1 0xF48A0C + +#define mmTPC5_QM_ARB_WRR_WEIGHT_2 0xF48A10 + +#define mmTPC5_QM_ARB_WRR_WEIGHT_3 0xF48A14 + +#define mmTPC5_QM_ARB_CFG_1 0xF48A18 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_0 0xF48A20 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_1 0xF48A24 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_2 0xF48A28 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_3 0xF48A2C + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_4 0xF48A30 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_5 0xF48A34 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_6 0xF48A38 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_7 0xF48A3C + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_8 0xF48A40 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_9 0xF48A44 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_10 0xF48A48 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_11 0xF48A4C + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_12 0xF48A50 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_13 0xF48A54 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_14 0xF48A58 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_15 0xF48A5C + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_16 0xF48A60 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_17 0xF48A64 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_18 0xF48A68 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_19 0xF48A6C + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_20 0xF48A70 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_21 0xF48A74 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_22 0xF48A78 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_23 0xF48A7C + +#define 
mmTPC5_QM_ARB_MST_AVAIL_CRED_24 0xF48A80 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_25 0xF48A84 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_26 0xF48A88 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_27 0xF48A8C + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_28 0xF48A90 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_29 0xF48A94 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_30 0xF48A98 + +#define mmTPC5_QM_ARB_MST_AVAIL_CRED_31 0xF48A9C + +#define mmTPC5_QM_ARB_MST_CRED_INC 0xF48AA0 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xF48AA4 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xF48AA8 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xF48AAC + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xF48AB0 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xF48AB4 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xF48AB8 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xF48ABC + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xF48AC0 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xF48AC4 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xF48AC8 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xF48ACC + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xF48AD0 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xF48AD4 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xF48AD8 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xF48ADC + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xF48AE0 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xF48AE4 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xF48AE8 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xF48AEC + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xF48AF0 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xF48AF4 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xF48AF8 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xF48AFC + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xF48B00 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xF48B04 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xF48B08 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xF48B0C + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xF48B10 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xF48B14 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xF48B18 + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xF48B1C + +#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xF48B20 + +#define mmTPC5_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xF48B28 + +#define mmTPC5_QM_ARB_MST_SLAVE_EN 0xF48B2C + +#define mmTPC5_QM_ARB_MST_QUIET_PER 0xF48B34 + +#define mmTPC5_QM_ARB_SLV_CHOISE_WDT 0xF48B38 + +#define mmTPC5_QM_ARB_SLV_ID 0xF48B3C + +#define mmTPC5_QM_ARB_MSG_MAX_INFLIGHT 0xF48B44 + +#define mmTPC5_QM_ARB_MSG_AWUSER_31_11 0xF48B48 + +#define mmTPC5_QM_ARB_MSG_AWUSER_SEC_PROP 0xF48B4C + +#define mmTPC5_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xF48B50 + +#define mmTPC5_QM_ARB_BASE_LO 0xF48B54 + +#define mmTPC5_QM_ARB_BASE_HI 0xF48B58 + +#define mmTPC5_QM_ARB_STATE_STS 0xF48B80 + +#define mmTPC5_QM_ARB_CHOISE_FULLNESS_STS 0xF48B84 + +#define mmTPC5_QM_ARB_MSG_STS 0xF48B88 + +#define mmTPC5_QM_ARB_SLV_CHOISE_Q_HEAD 0xF48B8C + +#define mmTPC5_QM_ARB_ERR_CAUSE 0xF48B9C + +#define mmTPC5_QM_ARB_ERR_MSG_EN 0xF48BA0 + +#define mmTPC5_QM_ARB_ERR_STS_DRP 0xF48BA8 + +#define mmTPC5_QM_ARB_MST_CRED_STS_0 0xF48BB0 + +#define mmTPC5_QM_ARB_MST_CRED_STS_1 0xF48BB4 + +#define mmTPC5_QM_ARB_MST_CRED_STS_2 0xF48BB8 + +#define mmTPC5_QM_ARB_MST_CRED_STS_3 0xF48BBC + +#define mmTPC5_QM_ARB_MST_CRED_STS_4 0xF48BC0 + +#define mmTPC5_QM_ARB_MST_CRED_STS_5 0xF48BC4 + +#define mmTPC5_QM_ARB_MST_CRED_STS_6 0xF48BC8 + +#define mmTPC5_QM_ARB_MST_CRED_STS_7 
0xF48BCC + +#define mmTPC5_QM_ARB_MST_CRED_STS_8 0xF48BD0 + +#define mmTPC5_QM_ARB_MST_CRED_STS_9 0xF48BD4 + +#define mmTPC5_QM_ARB_MST_CRED_STS_10 0xF48BD8 + +#define mmTPC5_QM_ARB_MST_CRED_STS_11 0xF48BDC + +#define mmTPC5_QM_ARB_MST_CRED_STS_12 0xF48BE0 + +#define mmTPC5_QM_ARB_MST_CRED_STS_13 0xF48BE4 + +#define mmTPC5_QM_ARB_MST_CRED_STS_14 0xF48BE8 + +#define mmTPC5_QM_ARB_MST_CRED_STS_15 0xF48BEC + +#define mmTPC5_QM_ARB_MST_CRED_STS_16 0xF48BF0 + +#define mmTPC5_QM_ARB_MST_CRED_STS_17 0xF48BF4 + +#define mmTPC5_QM_ARB_MST_CRED_STS_18 0xF48BF8 + +#define mmTPC5_QM_ARB_MST_CRED_STS_19 0xF48BFC + +#define mmTPC5_QM_ARB_MST_CRED_STS_20 0xF48C00 + +#define mmTPC5_QM_ARB_MST_CRED_STS_21 0xF48C04 + +#define mmTPC5_QM_ARB_MST_CRED_STS_22 0xF48C08 + +#define mmTPC5_QM_ARB_MST_CRED_STS_23 0xF48C0C + +#define mmTPC5_QM_ARB_MST_CRED_STS_24 0xF48C10 + +#define mmTPC5_QM_ARB_MST_CRED_STS_25 0xF48C14 + +#define mmTPC5_QM_ARB_MST_CRED_STS_26 0xF48C18 + +#define mmTPC5_QM_ARB_MST_CRED_STS_27 0xF48C1C + +#define mmTPC5_QM_ARB_MST_CRED_STS_28 0xF48C20 + +#define mmTPC5_QM_ARB_MST_CRED_STS_29 0xF48C24 + +#define mmTPC5_QM_ARB_MST_CRED_STS_30 0xF48C28 + +#define mmTPC5_QM_ARB_MST_CRED_STS_31 0xF48C2C + +#define mmTPC5_QM_CGM_CFG 0xF48C70 + +#define mmTPC5_QM_CGM_STS 0xF48C74 + +#define mmTPC5_QM_CGM_CFG1 0xF48C78 + +#define mmTPC5_QM_LOCAL_RANGE_BASE 0xF48C80 + +#define mmTPC5_QM_LOCAL_RANGE_SIZE 0xF48C84 + +#define mmTPC5_QM_CSMR_STRICT_PRIO_CFG 0xF48C90 + +#define mmTPC5_QM_HBW_RD_RATE_LIM_CFG_1 0xF48C94 + +#define mmTPC5_QM_LBW_WR_RATE_LIM_CFG_0 0xF48C98 + +#define mmTPC5_QM_LBW_WR_RATE_LIM_CFG_1 0xF48C9C + +#define mmTPC5_QM_HBW_RD_RATE_LIM_CFG_0 0xF48CA0 + +#define mmTPC5_QM_GLBL_AXCACHE 0xF48CA4 + +#define mmTPC5_QM_IND_GW_APB_CFG 0xF48CB0 + +#define mmTPC5_QM_IND_GW_APB_WDATA 0xF48CB4 + +#define mmTPC5_QM_IND_GW_APB_RDATA 0xF48CB8 + +#define mmTPC5_QM_IND_GW_APB_STATUS 0xF48CBC + +#define mmTPC5_QM_GLBL_ERR_ADDR_LO 0xF48CD0 + +#define mmTPC5_QM_GLBL_ERR_ADDR_HI 0xF48CD4 + +#define mmTPC5_QM_GLBL_ERR_WDATA 0xF48CD8 + +#define mmTPC5_QM_GLBL_MEM_INIT_BUSY 0xF48D00 + +#endif /* ASIC_REG_TPC5_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h new file mode 100644 index 000000000..eb251e728 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h @@ -0,0 +1,1226 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC6_CFG_REGS_H_ +#define ASIC_REG_TPC6_CFG_REGS_H_ + +/* + ***************************************** + * TPC6_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF86400 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF86404 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF86408 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF8640C + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF86410 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF86414 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF86418 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF8641C + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF86420 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF86424 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF86428 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF8642C + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF86430 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF86434 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF86438 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF8643C + +#define mmTPC6_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF86440 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF86444 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF86448 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF8644C + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF86450 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF86454 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF86458 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF8645C + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF86460 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF86464 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF86468 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF8646C + +#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF86470 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF86474 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF86478 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF8647C + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF86480 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF86484 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF86488 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF8648C + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF86490 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF86494 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF86498 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF8649C + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF864A0 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF864A4 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF864A8 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF864AC + +#define mmTPC6_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF864B0 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF864B4 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF864B8 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF864BC + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF864C0 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF864C4 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF864C8 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF864CC + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF864D0 + +#define 
mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF864D4 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF864D8 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF864DC + +#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF864E0 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF864E4 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF864E8 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF864EC + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF864F0 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF864F4 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF864F8 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF864FC + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF86500 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF86504 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF86508 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF8650C + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF86510 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF86514 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF86518 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF8651C + +#define mmTPC6_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF86520 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF86524 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF86528 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF8652C + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF86530 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF86534 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF86538 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF8653C + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF86540 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF86544 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF86548 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF8654C + +#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF86550 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF86554 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF86558 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF8655C + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF86560 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF86564 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF86568 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF8656C + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF86570 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF86574 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF86578 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF8657C + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF86580 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF86584 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF86588 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF8658C + +#define mmTPC6_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF86590 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF86594 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF86598 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF8659C + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF865A0 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF865A4 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF865A8 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF865AC + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF865B0 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF865B4 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF865B8 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF865BC + +#define 
mmTPC6_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xF865C0 + +#define mmTPC6_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xF865C4 + +#define mmTPC6_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xF865C8 + +#define mmTPC6_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xF865CC + +#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xF865D0 + +#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xF865D4 + +#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xF865D8 + +#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xF865DC + +#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xF865E0 + +#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xF865E4 + +#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xF865E8 + +#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xF865EC + +#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xF865F0 + +#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xF865F4 + +#define mmTPC6_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xF865F8 + +#define mmTPC6_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xF865FC + +#define mmTPC6_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xF86600 + +#define mmTPC6_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xF86604 + +#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xF86608 + +#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xF8660C + +#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xF86610 + +#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xF86614 + +#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xF86618 + +#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xF8661C + +#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xF86620 + +#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xF86624 + +#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xF86628 + +#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xF8662C + +#define mmTPC6_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xF86630 + +#define mmTPC6_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xF86634 + +#define mmTPC6_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xF86638 + +#define mmTPC6_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xF8663C + +#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xF86640 + +#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xF86644 + +#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xF86648 + +#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xF8664C + +#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xF86650 + +#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xF86654 + +#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xF86658 + +#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xF8665C + +#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xF86660 + +#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xF86664 + +#define mmTPC6_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xF86668 + +#define mmTPC6_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xF8666C + +#define mmTPC6_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xF86670 + +#define mmTPC6_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xF86674 + +#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xF86678 + +#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xF8667C + +#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xF86680 + +#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xF86684 + +#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xF86688 + +#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xF8668C + +#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xF86690 + +#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xF86694 + +#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xF86698 + +#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xF8669C + +#define mmTPC6_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xF866A0 + +#define mmTPC6_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xF866A4 + +#define mmTPC6_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xF866A8 + +#define 
mmTPC6_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xF866AC + +#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xF866B0 + +#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xF866B4 + +#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xF866B8 + +#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xF866BC + +#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xF866C0 + +#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xF866C4 + +#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xF866C8 + +#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xF866CC + +#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xF866D0 + +#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xF866D4 + +#define mmTPC6_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xF866D8 + +#define mmTPC6_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xF866DC + +#define mmTPC6_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xF866E0 + +#define mmTPC6_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xF866E4 + +#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xF866E8 + +#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xF866EC + +#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xF866F0 + +#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xF866F4 + +#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xF866F8 + +#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xF866FC + +#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xF86700 + +#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xF86704 + +#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xF86708 + +#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xF8670C + +#define mmTPC6_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xF86710 + +#define mmTPC6_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xF86714 + +#define mmTPC6_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xF86718 + +#define mmTPC6_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xF8671C + +#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xF86720 + +#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xF86724 + +#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xF86728 + +#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xF8672C + +#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xF86730 + +#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xF86734 + +#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xF86738 + +#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xF8673C + +#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xF86740 + +#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xF86744 + +#define mmTPC6_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xF86748 + +#define mmTPC6_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xF8674C + +#define mmTPC6_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xF86750 + +#define mmTPC6_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xF86754 + +#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xF86758 + +#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xF8675C + +#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xF86760 + +#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xF86764 + +#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xF86768 + +#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xF8676C + +#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xF86770 + +#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xF86774 + +#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xF86778 + +#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xF8677C + +#define mmTPC6_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF86780 + +#define mmTPC6_CFG_KERNEL_SYNC_OBJECT_ADDR 0xF86784 + +#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF86788 + +#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF8678C + +#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_0 0xF86790 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_0 0xF86794 + +#define 
mmTPC6_CFG_KERNEL_TID_BASE_DIM_1 0xF86798 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_1 0xF8679C + +#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_2 0xF867A0 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_2 0xF867A4 + +#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_3 0xF867A8 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_3 0xF867AC + +#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_4 0xF867B0 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_4 0xF867B4 + +#define mmTPC6_CFG_KERNEL_KERNEL_CONFIG 0xF867B8 + +#define mmTPC6_CFG_KERNEL_KERNEL_ID 0xF867BC + +#define mmTPC6_CFG_KERNEL_SRF_0 0xF867C0 + +#define mmTPC6_CFG_KERNEL_SRF_1 0xF867C4 + +#define mmTPC6_CFG_KERNEL_SRF_2 0xF867C8 + +#define mmTPC6_CFG_KERNEL_SRF_3 0xF867CC + +#define mmTPC6_CFG_KERNEL_SRF_4 0xF867D0 + +#define mmTPC6_CFG_KERNEL_SRF_5 0xF867D4 + +#define mmTPC6_CFG_KERNEL_SRF_6 0xF867D8 + +#define mmTPC6_CFG_KERNEL_SRF_7 0xF867DC + +#define mmTPC6_CFG_KERNEL_SRF_8 0xF867E0 + +#define mmTPC6_CFG_KERNEL_SRF_9 0xF867E4 + +#define mmTPC6_CFG_KERNEL_SRF_10 0xF867E8 + +#define mmTPC6_CFG_KERNEL_SRF_11 0xF867EC + +#define mmTPC6_CFG_KERNEL_SRF_12 0xF867F0 + +#define mmTPC6_CFG_KERNEL_SRF_13 0xF867F4 + +#define mmTPC6_CFG_KERNEL_SRF_14 0xF867F8 + +#define mmTPC6_CFG_KERNEL_SRF_15 0xF867FC + +#define mmTPC6_CFG_KERNEL_SRF_16 0xF86800 + +#define mmTPC6_CFG_KERNEL_SRF_17 0xF86804 + +#define mmTPC6_CFG_KERNEL_SRF_18 0xF86808 + +#define mmTPC6_CFG_KERNEL_SRF_19 0xF8680C + +#define mmTPC6_CFG_KERNEL_SRF_20 0xF86810 + +#define mmTPC6_CFG_KERNEL_SRF_21 0xF86814 + +#define mmTPC6_CFG_KERNEL_SRF_22 0xF86818 + +#define mmTPC6_CFG_KERNEL_SRF_23 0xF8681C + +#define mmTPC6_CFG_KERNEL_SRF_24 0xF86820 + +#define mmTPC6_CFG_KERNEL_SRF_25 0xF86824 + +#define mmTPC6_CFG_KERNEL_SRF_26 0xF86828 + +#define mmTPC6_CFG_KERNEL_SRF_27 0xF8682C + +#define mmTPC6_CFG_KERNEL_SRF_28 0xF86830 + +#define mmTPC6_CFG_KERNEL_SRF_29 0xF86834 + +#define mmTPC6_CFG_KERNEL_SRF_30 0xF86838 + +#define mmTPC6_CFG_KERNEL_SRF_31 0xF8683C + +#define mmTPC6_CFG_ROUND_CSR 0xF868FC + +#define mmTPC6_CFG_PROT 0xF86900 + +#define mmTPC6_CFG_SEMAPHORE 0xF86908 + +#define mmTPC6_CFG_VFLAGS 0xF8690C + +#define mmTPC6_CFG_SFLAGS 0xF86910 + +#define mmTPC6_CFG_LFSR_POLYNOM 0xF86918 + +#define mmTPC6_CFG_STATUS 0xF8691C + +#define mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH 0xF86920 + +#define mmTPC6_CFG_CFG_SUBTRACT_VALUE 0xF86924 + +#define mmTPC6_CFG_SM_BASE_ADDRESS_HIGH 0xF8692C + +#define mmTPC6_CFG_TPC_CMD 0xF86930 + +#define mmTPC6_CFG_TPC_EXECUTE 0xF86938 + +#define mmTPC6_CFG_TPC_STALL 0xF8693C + +#define mmTPC6_CFG_ICACHE_BASE_ADDERESS_LOW 0xF86940 + +#define mmTPC6_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF86944 + +#define mmTPC6_CFG_RD_RATE_LIMIT 0xF86948 + +#define mmTPC6_CFG_WR_RATE_LIMIT 0xF86950 + +#define mmTPC6_CFG_MSS_CONFIG 0xF86954 + +#define mmTPC6_CFG_TPC_INTR_CAUSE 0xF86958 + +#define mmTPC6_CFG_TPC_INTR_MASK 0xF8695C + +#define mmTPC6_CFG_WQ_CREDITS 0xF86960 + +#define mmTPC6_CFG_ARUSER_LO 0xF86964 + +#define mmTPC6_CFG_ARUSER_HI 0xF86968 + +#define mmTPC6_CFG_AWUSER_LO 0xF8696C + +#define mmTPC6_CFG_AWUSER_HI 0xF86970 + +#define mmTPC6_CFG_OPCODE_EXEC 0xF86974 + +#define mmTPC6_CFG_LUT_FUNC32_BASE_ADDR_LO 0xF86978 + +#define mmTPC6_CFG_LUT_FUNC32_BASE_ADDR_HI 0xF8697C + +#define mmTPC6_CFG_LUT_FUNC64_BASE_ADDR_LO 0xF86980 + +#define mmTPC6_CFG_LUT_FUNC64_BASE_ADDR_HI 0xF86984 + +#define mmTPC6_CFG_LUT_FUNC128_BASE_ADDR_LO 0xF86988 + +#define mmTPC6_CFG_LUT_FUNC128_BASE_ADDR_HI 0xF8698C + +#define mmTPC6_CFG_LUT_FUNC256_BASE_ADDR_LO 0xF86990 + +#define mmTPC6_CFG_LUT_FUNC256_BASE_ADDR_HI 0xF86994 + +#define 
mmTPC6_CFG_TSB_CFG_MAX_SIZE 0xF86998 + +#define mmTPC6_CFG_TSB_CFG 0xF8699C + +#define mmTPC6_CFG_DBGMEM_ADD 0xF869A0 + +#define mmTPC6_CFG_DBGMEM_DATA_WR 0xF869A4 + +#define mmTPC6_CFG_DBGMEM_DATA_RD 0xF869A8 + +#define mmTPC6_CFG_DBGMEM_CTRL 0xF869AC + +#define mmTPC6_CFG_DBGMEM_RC 0xF869B0 + +#define mmTPC6_CFG_TSB_INFLIGHT_CNTR 0xF869B4 + +#define mmTPC6_CFG_WQ_INFLIGHT_CNTR 0xF869B8 + +#define mmTPC6_CFG_WQ_LBW_TOTAL_CNTR 0xF869BC + +#define mmTPC6_CFG_WQ_HBW_TOTAL_CNTR 0xF869C0 + +#define mmTPC6_CFG_IRQ_OCCOUPY_CNTR 0xF869C4 + +#define mmTPC6_CFG_FUNC_MBIST_CNTRL 0xF869D0 + +#define mmTPC6_CFG_FUNC_MBIST_PAT 0xF869D4 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_0 0xF869D8 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_1 0xF869DC + +#define mmTPC6_CFG_FUNC_MBIST_MEM_2 0xF869E0 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_3 0xF869E4 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_4 0xF869E8 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_5 0xF869EC + +#define mmTPC6_CFG_FUNC_MBIST_MEM_6 0xF869F0 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_7 0xF869F4 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_8 0xF869F8 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_9 0xF869FC + +#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF86A00 + +#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF86A04 + +#define mmTPC6_CFG_QM_TENSOR_0_PADDING_VALUE 0xF86A08 + +#define mmTPC6_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF86A0C + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF86A10 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF86A14 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF86A18 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF86A1C + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF86A20 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF86A24 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF86A28 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF86A2C + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF86A30 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF86A34 + +#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF86A38 + +#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF86A3C + +#define mmTPC6_CFG_QM_TENSOR_1_PADDING_VALUE 0xF86A40 + +#define mmTPC6_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF86A44 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF86A48 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF86A4C + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF86A50 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF86A54 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF86A58 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF86A5C + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF86A60 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF86A64 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF86A68 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF86A6C + +#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF86A70 + +#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF86A74 + +#define mmTPC6_CFG_QM_TENSOR_2_PADDING_VALUE 0xF86A78 + +#define mmTPC6_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF86A7C + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF86A80 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF86A84 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF86A88 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF86A8C + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF86A90 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF86A94 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF86A98 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF86A9C + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF86AA0 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF86AA4 + +#define mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF86AA8 + +#define mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 
0xF86AAC + +#define mmTPC6_CFG_QM_TENSOR_3_PADDING_VALUE 0xF86AB0 + +#define mmTPC6_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF86AB4 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF86AB8 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF86ABC + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF86AC0 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF86AC4 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF86AC8 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF86ACC + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF86AD0 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF86AD4 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF86AD8 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF86ADC + +#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF86AE0 + +#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF86AE4 + +#define mmTPC6_CFG_QM_TENSOR_4_PADDING_VALUE 0xF86AE8 + +#define mmTPC6_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF86AEC + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF86AF0 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF86AF4 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF86AF8 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF86AFC + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF86B00 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF86B04 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF86B08 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF86B0C + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF86B10 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF86B14 + +#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF86B18 + +#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF86B1C + +#define mmTPC6_CFG_QM_TENSOR_5_PADDING_VALUE 0xF86B20 + +#define mmTPC6_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF86B24 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF86B28 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF86B2C + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF86B30 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF86B34 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF86B38 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF86B3C + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF86B40 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF86B44 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF86B48 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF86B4C + +#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF86B50 + +#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF86B54 + +#define mmTPC6_CFG_QM_TENSOR_6_PADDING_VALUE 0xF86B58 + +#define mmTPC6_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF86B5C + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF86B60 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF86B64 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF86B68 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF86B6C + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF86B70 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF86B74 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF86B78 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF86B7C + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF86B80 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF86B84 + +#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF86B88 + +#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF86B8C + +#define mmTPC6_CFG_QM_TENSOR_7_PADDING_VALUE 0xF86B90 + +#define mmTPC6_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF86B94 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF86B98 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF86B9C + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF86BA0 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF86BA4 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF86BA8 + +#define 
mmTPC6_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF86BAC + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF86BB0 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF86BB4 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF86BB8 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF86BBC + +#define mmTPC6_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xF86BC0 + +#define mmTPC6_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xF86BC4 + +#define mmTPC6_CFG_QM_TENSOR_8_PADDING_VALUE 0xF86BC8 + +#define mmTPC6_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xF86BCC + +#define mmTPC6_CFG_QM_TENSOR_8_DIM_0_SIZE 0xF86BD0 + +#define mmTPC6_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xF86BD4 + +#define mmTPC6_CFG_QM_TENSOR_8_DIM_1_SIZE 0xF86BD8 + +#define mmTPC6_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xF86BDC + +#define mmTPC6_CFG_QM_TENSOR_8_DIM_2_SIZE 0xF86BE0 + +#define mmTPC6_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xF86BE4 + +#define mmTPC6_CFG_QM_TENSOR_8_DIM_3_SIZE 0xF86BE8 + +#define mmTPC6_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xF86BEC + +#define mmTPC6_CFG_QM_TENSOR_8_DIM_4_SIZE 0xF86BF0 + +#define mmTPC6_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xF86BF4 + +#define mmTPC6_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xF86BF8 + +#define mmTPC6_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xF86BFC + +#define mmTPC6_CFG_QM_TENSOR_9_PADDING_VALUE 0xF86C00 + +#define mmTPC6_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xF86C04 + +#define mmTPC6_CFG_QM_TENSOR_9_DIM_0_SIZE 0xF86C08 + +#define mmTPC6_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xF86C0C + +#define mmTPC6_CFG_QM_TENSOR_9_DIM_1_SIZE 0xF86C10 + +#define mmTPC6_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xF86C14 + +#define mmTPC6_CFG_QM_TENSOR_9_DIM_2_SIZE 0xF86C18 + +#define mmTPC6_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xF86C1C + +#define mmTPC6_CFG_QM_TENSOR_9_DIM_3_SIZE 0xF86C20 + +#define mmTPC6_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xF86C24 + +#define mmTPC6_CFG_QM_TENSOR_9_DIM_4_SIZE 0xF86C28 + +#define mmTPC6_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xF86C2C + +#define mmTPC6_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xF86C30 + +#define mmTPC6_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xF86C34 + +#define mmTPC6_CFG_QM_TENSOR_10_PADDING_VALUE 0xF86C38 + +#define mmTPC6_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xF86C3C + +#define mmTPC6_CFG_QM_TENSOR_10_DIM_0_SIZE 0xF86C40 + +#define mmTPC6_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xF86C44 + +#define mmTPC6_CFG_QM_TENSOR_10_DIM_1_SIZE 0xF86C48 + +#define mmTPC6_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xF86C4C + +#define mmTPC6_CFG_QM_TENSOR_10_DIM_2_SIZE 0xF86C50 + +#define mmTPC6_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xF86C54 + +#define mmTPC6_CFG_QM_TENSOR_10_DIM_3_SIZE 0xF86C58 + +#define mmTPC6_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xF86C5C + +#define mmTPC6_CFG_QM_TENSOR_10_DIM_4_SIZE 0xF86C60 + +#define mmTPC6_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xF86C64 + +#define mmTPC6_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xF86C68 + +#define mmTPC6_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xF86C6C + +#define mmTPC6_CFG_QM_TENSOR_11_PADDING_VALUE 0xF86C70 + +#define mmTPC6_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xF86C74 + +#define mmTPC6_CFG_QM_TENSOR_11_DIM_0_SIZE 0xF86C78 + +#define mmTPC6_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xF86C7C + +#define mmTPC6_CFG_QM_TENSOR_11_DIM_1_SIZE 0xF86C80 + +#define mmTPC6_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xF86C84 + +#define mmTPC6_CFG_QM_TENSOR_11_DIM_2_SIZE 0xF86C88 + +#define mmTPC6_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xF86C8C + +#define mmTPC6_CFG_QM_TENSOR_11_DIM_3_SIZE 0xF86C90 + +#define mmTPC6_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xF86C94 + +#define mmTPC6_CFG_QM_TENSOR_11_DIM_4_SIZE 0xF86C98 + +#define mmTPC6_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xF86C9C + +#define mmTPC6_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xF86CA0 + +#define mmTPC6_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xF86CA4 + +#define 
mmTPC6_CFG_QM_TENSOR_12_PADDING_VALUE 0xF86CA8 + +#define mmTPC6_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xF86CAC + +#define mmTPC6_CFG_QM_TENSOR_12_DIM_0_SIZE 0xF86CB0 + +#define mmTPC6_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xF86CB4 + +#define mmTPC6_CFG_QM_TENSOR_12_DIM_1_SIZE 0xF86CB8 + +#define mmTPC6_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xF86CBC + +#define mmTPC6_CFG_QM_TENSOR_12_DIM_2_SIZE 0xF86CC0 + +#define mmTPC6_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xF86CC4 + +#define mmTPC6_CFG_QM_TENSOR_12_DIM_3_SIZE 0xF86CC8 + +#define mmTPC6_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xF86CCC + +#define mmTPC6_CFG_QM_TENSOR_12_DIM_4_SIZE 0xF86CD0 + +#define mmTPC6_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xF86CD4 + +#define mmTPC6_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xF86CD8 + +#define mmTPC6_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xF86CDC + +#define mmTPC6_CFG_QM_TENSOR_13_PADDING_VALUE 0xF86CE0 + +#define mmTPC6_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xF86CE4 + +#define mmTPC6_CFG_QM_TENSOR_13_DIM_0_SIZE 0xF86CE8 + +#define mmTPC6_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xF86CEC + +#define mmTPC6_CFG_QM_TENSOR_13_DIM_1_SIZE 0xF86CF0 + +#define mmTPC6_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xF86CF4 + +#define mmTPC6_CFG_QM_TENSOR_13_DIM_2_SIZE 0xF86CF8 + +#define mmTPC6_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xF86CFC + +#define mmTPC6_CFG_QM_TENSOR_13_DIM_3_SIZE 0xF86D00 + +#define mmTPC6_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xF86D04 + +#define mmTPC6_CFG_QM_TENSOR_13_DIM_4_SIZE 0xF86D08 + +#define mmTPC6_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xF86D0C + +#define mmTPC6_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xF86D10 + +#define mmTPC6_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xF86D14 + +#define mmTPC6_CFG_QM_TENSOR_14_PADDING_VALUE 0xF86D18 + +#define mmTPC6_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xF86D1C + +#define mmTPC6_CFG_QM_TENSOR_14_DIM_0_SIZE 0xF86D20 + +#define mmTPC6_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xF86D24 + +#define mmTPC6_CFG_QM_TENSOR_14_DIM_1_SIZE 0xF86D28 + +#define mmTPC6_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xF86D2C + +#define mmTPC6_CFG_QM_TENSOR_14_DIM_2_SIZE 0xF86D30 + +#define mmTPC6_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xF86D34 + +#define mmTPC6_CFG_QM_TENSOR_14_DIM_3_SIZE 0xF86D38 + +#define mmTPC6_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xF86D3C + +#define mmTPC6_CFG_QM_TENSOR_14_DIM_4_SIZE 0xF86D40 + +#define mmTPC6_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xF86D44 + +#define mmTPC6_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xF86D48 + +#define mmTPC6_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xF86D4C + +#define mmTPC6_CFG_QM_TENSOR_15_PADDING_VALUE 0xF86D50 + +#define mmTPC6_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xF86D54 + +#define mmTPC6_CFG_QM_TENSOR_15_DIM_0_SIZE 0xF86D58 + +#define mmTPC6_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xF86D5C + +#define mmTPC6_CFG_QM_TENSOR_15_DIM_1_SIZE 0xF86D60 + +#define mmTPC6_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xF86D64 + +#define mmTPC6_CFG_QM_TENSOR_15_DIM_2_SIZE 0xF86D68 + +#define mmTPC6_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xF86D6C + +#define mmTPC6_CFG_QM_TENSOR_15_DIM_3_SIZE 0xF86D70 + +#define mmTPC6_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xF86D74 + +#define mmTPC6_CFG_QM_TENSOR_15_DIM_4_SIZE 0xF86D78 + +#define mmTPC6_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xF86D7C + +#define mmTPC6_CFG_QM_SYNC_OBJECT_MESSAGE 0xF86D80 + +#define mmTPC6_CFG_QM_SYNC_OBJECT_ADDR 0xF86D84 + +#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF86D88 + +#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF86D8C + +#define mmTPC6_CFG_QM_TID_BASE_DIM_0 0xF86D90 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_0 0xF86D94 + +#define mmTPC6_CFG_QM_TID_BASE_DIM_1 0xF86D98 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_1 0xF86D9C + +#define mmTPC6_CFG_QM_TID_BASE_DIM_2 0xF86DA0 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_2 
0xF86DA4 + +#define mmTPC6_CFG_QM_TID_BASE_DIM_3 0xF86DA8 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_3 0xF86DAC + +#define mmTPC6_CFG_QM_TID_BASE_DIM_4 0xF86DB0 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_4 0xF86DB4 + +#define mmTPC6_CFG_QM_KERNEL_CONFIG 0xF86DB8 + +#define mmTPC6_CFG_QM_KERNEL_ID 0xF86DBC + +#define mmTPC6_CFG_QM_SRF_0 0xF86DC0 + +#define mmTPC6_CFG_QM_SRF_1 0xF86DC4 + +#define mmTPC6_CFG_QM_SRF_2 0xF86DC8 + +#define mmTPC6_CFG_QM_SRF_3 0xF86DCC + +#define mmTPC6_CFG_QM_SRF_4 0xF86DD0 + +#define mmTPC6_CFG_QM_SRF_5 0xF86DD4 + +#define mmTPC6_CFG_QM_SRF_6 0xF86DD8 + +#define mmTPC6_CFG_QM_SRF_7 0xF86DDC + +#define mmTPC6_CFG_QM_SRF_8 0xF86DE0 + +#define mmTPC6_CFG_QM_SRF_9 0xF86DE4 + +#define mmTPC6_CFG_QM_SRF_10 0xF86DE8 + +#define mmTPC6_CFG_QM_SRF_11 0xF86DEC + +#define mmTPC6_CFG_QM_SRF_12 0xF86DF0 + +#define mmTPC6_CFG_QM_SRF_13 0xF86DF4 + +#define mmTPC6_CFG_QM_SRF_14 0xF86DF8 + +#define mmTPC6_CFG_QM_SRF_15 0xF86DFC + +#define mmTPC6_CFG_QM_SRF_16 0xF86E00 + +#define mmTPC6_CFG_QM_SRF_17 0xF86E04 + +#define mmTPC6_CFG_QM_SRF_18 0xF86E08 + +#define mmTPC6_CFG_QM_SRF_19 0xF86E0C + +#define mmTPC6_CFG_QM_SRF_20 0xF86E10 + +#define mmTPC6_CFG_QM_SRF_21 0xF86E14 + +#define mmTPC6_CFG_QM_SRF_22 0xF86E18 + +#define mmTPC6_CFG_QM_SRF_23 0xF86E1C + +#define mmTPC6_CFG_QM_SRF_24 0xF86E20 + +#define mmTPC6_CFG_QM_SRF_25 0xF86E24 + +#define mmTPC6_CFG_QM_SRF_26 0xF86E28 + +#define mmTPC6_CFG_QM_SRF_27 0xF86E2C + +#define mmTPC6_CFG_QM_SRF_28 0xF86E30 + +#define mmTPC6_CFG_QM_SRF_29 0xF86E34 + +#define mmTPC6_CFG_QM_SRF_30 0xF86E38 + +#define mmTPC6_CFG_QM_SRF_31 0xF86E3C + +#endif /* ASIC_REG_TPC6_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h new file mode 100644 index 000000000..e35ef7fd8 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC6_QM_REGS_H_ +#define ASIC_REG_TPC6_QM_REGS_H_ + +/* + ***************************************** + * TPC6_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC6_QM_GLBL_CFG0 0xF88000 + +#define mmTPC6_QM_GLBL_CFG1 0xF88004 + +#define mmTPC6_QM_GLBL_PROT 0xF88008 + +#define mmTPC6_QM_GLBL_ERR_CFG 0xF8800C + +#define mmTPC6_QM_GLBL_SECURE_PROPS_0 0xF88010 + +#define mmTPC6_QM_GLBL_SECURE_PROPS_1 0xF88014 + +#define mmTPC6_QM_GLBL_SECURE_PROPS_2 0xF88018 + +#define mmTPC6_QM_GLBL_SECURE_PROPS_3 0xF8801C + +#define mmTPC6_QM_GLBL_SECURE_PROPS_4 0xF88020 + +#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_0 0xF88024 + +#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_1 0xF88028 + +#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_2 0xF8802C + +#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_3 0xF88030 + +#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_4 0xF88034 + +#define mmTPC6_QM_GLBL_STS0 0xF88038 + +#define mmTPC6_QM_GLBL_STS1_0 0xF88040 + +#define mmTPC6_QM_GLBL_STS1_1 0xF88044 + +#define mmTPC6_QM_GLBL_STS1_2 0xF88048 + +#define mmTPC6_QM_GLBL_STS1_3 0xF8804C + +#define mmTPC6_QM_GLBL_STS1_4 0xF88050 + +#define mmTPC6_QM_GLBL_MSG_EN_0 0xF88054 + +#define mmTPC6_QM_GLBL_MSG_EN_1 0xF88058 + +#define mmTPC6_QM_GLBL_MSG_EN_2 0xF8805C + +#define mmTPC6_QM_GLBL_MSG_EN_3 0xF88060 + +#define mmTPC6_QM_GLBL_MSG_EN_4 0xF88068 + +#define mmTPC6_QM_PQ_BASE_LO_0 0xF88070 + +#define mmTPC6_QM_PQ_BASE_LO_1 0xF88074 + +#define mmTPC6_QM_PQ_BASE_LO_2 0xF88078 + +#define mmTPC6_QM_PQ_BASE_LO_3 0xF8807C + +#define mmTPC6_QM_PQ_BASE_HI_0 0xF88080 + +#define mmTPC6_QM_PQ_BASE_HI_1 0xF88084 + +#define mmTPC6_QM_PQ_BASE_HI_2 0xF88088 + +#define mmTPC6_QM_PQ_BASE_HI_3 0xF8808C + +#define mmTPC6_QM_PQ_SIZE_0 0xF88090 + +#define mmTPC6_QM_PQ_SIZE_1 0xF88094 + +#define mmTPC6_QM_PQ_SIZE_2 0xF88098 + +#define mmTPC6_QM_PQ_SIZE_3 0xF8809C + +#define mmTPC6_QM_PQ_PI_0 0xF880A0 + +#define mmTPC6_QM_PQ_PI_1 0xF880A4 + +#define mmTPC6_QM_PQ_PI_2 0xF880A8 + +#define mmTPC6_QM_PQ_PI_3 0xF880AC + +#define mmTPC6_QM_PQ_CI_0 0xF880B0 + +#define mmTPC6_QM_PQ_CI_1 0xF880B4 + +#define mmTPC6_QM_PQ_CI_2 0xF880B8 + +#define mmTPC6_QM_PQ_CI_3 0xF880BC + +#define mmTPC6_QM_PQ_CFG0_0 0xF880C0 + +#define mmTPC6_QM_PQ_CFG0_1 0xF880C4 + +#define mmTPC6_QM_PQ_CFG0_2 0xF880C8 + +#define mmTPC6_QM_PQ_CFG0_3 0xF880CC + +#define mmTPC6_QM_PQ_CFG1_0 0xF880D0 + +#define mmTPC6_QM_PQ_CFG1_1 0xF880D4 + +#define mmTPC6_QM_PQ_CFG1_2 0xF880D8 + +#define mmTPC6_QM_PQ_CFG1_3 0xF880DC + +#define mmTPC6_QM_PQ_ARUSER_31_11_0 0xF880E0 + +#define mmTPC6_QM_PQ_ARUSER_31_11_1 0xF880E4 + +#define mmTPC6_QM_PQ_ARUSER_31_11_2 0xF880E8 + +#define mmTPC6_QM_PQ_ARUSER_31_11_3 0xF880EC + +#define mmTPC6_QM_PQ_STS0_0 0xF880F0 + +#define mmTPC6_QM_PQ_STS0_1 0xF880F4 + +#define mmTPC6_QM_PQ_STS0_2 0xF880F8 + +#define mmTPC6_QM_PQ_STS0_3 0xF880FC + +#define mmTPC6_QM_PQ_STS1_0 0xF88100 + +#define mmTPC6_QM_PQ_STS1_1 0xF88104 + +#define mmTPC6_QM_PQ_STS1_2 0xF88108 + +#define mmTPC6_QM_PQ_STS1_3 0xF8810C + +#define mmTPC6_QM_CQ_CFG0_0 0xF88110 + +#define mmTPC6_QM_CQ_CFG0_1 0xF88114 + +#define mmTPC6_QM_CQ_CFG0_2 0xF88118 + +#define mmTPC6_QM_CQ_CFG0_3 0xF8811C + +#define mmTPC6_QM_CQ_CFG0_4 0xF88120 + +#define mmTPC6_QM_CQ_CFG1_0 0xF88124 + +#define mmTPC6_QM_CQ_CFG1_1 0xF88128 + +#define mmTPC6_QM_CQ_CFG1_2 0xF8812C + +#define mmTPC6_QM_CQ_CFG1_3 0xF88130 + +#define mmTPC6_QM_CQ_CFG1_4 0xF88134 + 
+#define mmTPC6_QM_CQ_ARUSER_31_11_0 0xF88138 + +#define mmTPC6_QM_CQ_ARUSER_31_11_1 0xF8813C + +#define mmTPC6_QM_CQ_ARUSER_31_11_2 0xF88140 + +#define mmTPC6_QM_CQ_ARUSER_31_11_3 0xF88144 + +#define mmTPC6_QM_CQ_ARUSER_31_11_4 0xF88148 + +#define mmTPC6_QM_CQ_STS0_0 0xF8814C + +#define mmTPC6_QM_CQ_STS0_1 0xF88150 + +#define mmTPC6_QM_CQ_STS0_2 0xF88154 + +#define mmTPC6_QM_CQ_STS0_3 0xF88158 + +#define mmTPC6_QM_CQ_STS0_4 0xF8815C + +#define mmTPC6_QM_CQ_STS1_0 0xF88160 + +#define mmTPC6_QM_CQ_STS1_1 0xF88164 + +#define mmTPC6_QM_CQ_STS1_2 0xF88168 + +#define mmTPC6_QM_CQ_STS1_3 0xF8816C + +#define mmTPC6_QM_CQ_STS1_4 0xF88170 + +#define mmTPC6_QM_CQ_PTR_LO_0 0xF88174 + +#define mmTPC6_QM_CQ_PTR_HI_0 0xF88178 + +#define mmTPC6_QM_CQ_TSIZE_0 0xF8817C + +#define mmTPC6_QM_CQ_CTL_0 0xF88180 + +#define mmTPC6_QM_CQ_PTR_LO_1 0xF88184 + +#define mmTPC6_QM_CQ_PTR_HI_1 0xF88188 + +#define mmTPC6_QM_CQ_TSIZE_1 0xF8818C + +#define mmTPC6_QM_CQ_CTL_1 0xF88190 + +#define mmTPC6_QM_CQ_PTR_LO_2 0xF88194 + +#define mmTPC6_QM_CQ_PTR_HI_2 0xF88198 + +#define mmTPC6_QM_CQ_TSIZE_2 0xF8819C + +#define mmTPC6_QM_CQ_CTL_2 0xF881A0 + +#define mmTPC6_QM_CQ_PTR_LO_3 0xF881A4 + +#define mmTPC6_QM_CQ_PTR_HI_3 0xF881A8 + +#define mmTPC6_QM_CQ_TSIZE_3 0xF881AC + +#define mmTPC6_QM_CQ_CTL_3 0xF881B0 + +#define mmTPC6_QM_CQ_PTR_LO_4 0xF881B4 + +#define mmTPC6_QM_CQ_PTR_HI_4 0xF881B8 + +#define mmTPC6_QM_CQ_TSIZE_4 0xF881BC + +#define mmTPC6_QM_CQ_CTL_4 0xF881C0 + +#define mmTPC6_QM_CQ_PTR_LO_STS_0 0xF881C4 + +#define mmTPC6_QM_CQ_PTR_LO_STS_1 0xF881C8 + +#define mmTPC6_QM_CQ_PTR_LO_STS_2 0xF881CC + +#define mmTPC6_QM_CQ_PTR_LO_STS_3 0xF881D0 + +#define mmTPC6_QM_CQ_PTR_LO_STS_4 0xF881D4 + +#define mmTPC6_QM_CQ_PTR_HI_STS_0 0xF881D8 + +#define mmTPC6_QM_CQ_PTR_HI_STS_1 0xF881DC + +#define mmTPC6_QM_CQ_PTR_HI_STS_2 0xF881E0 + +#define mmTPC6_QM_CQ_PTR_HI_STS_3 0xF881E4 + +#define mmTPC6_QM_CQ_PTR_HI_STS_4 0xF881E8 + +#define mmTPC6_QM_CQ_TSIZE_STS_0 0xF881EC + +#define mmTPC6_QM_CQ_TSIZE_STS_1 0xF881F0 + +#define mmTPC6_QM_CQ_TSIZE_STS_2 0xF881F4 + +#define mmTPC6_QM_CQ_TSIZE_STS_3 0xF881F8 + +#define mmTPC6_QM_CQ_TSIZE_STS_4 0xF881FC + +#define mmTPC6_QM_CQ_CTL_STS_0 0xF88200 + +#define mmTPC6_QM_CQ_CTL_STS_1 0xF88204 + +#define mmTPC6_QM_CQ_CTL_STS_2 0xF88208 + +#define mmTPC6_QM_CQ_CTL_STS_3 0xF8820C + +#define mmTPC6_QM_CQ_CTL_STS_4 0xF88210 + +#define mmTPC6_QM_CQ_IFIFO_CNT_0 0xF88214 + +#define mmTPC6_QM_CQ_IFIFO_CNT_1 0xF88218 + +#define mmTPC6_QM_CQ_IFIFO_CNT_2 0xF8821C + +#define mmTPC6_QM_CQ_IFIFO_CNT_3 0xF88220 + +#define mmTPC6_QM_CQ_IFIFO_CNT_4 0xF88224 + +#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_0 0xF88228 + +#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_1 0xF8822C + +#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_2 0xF88230 + +#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_3 0xF88234 + +#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_4 0xF88238 + +#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_0 0xF8823C + +#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_1 0xF88240 + +#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_2 0xF88244 + +#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_3 0xF88248 + +#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_4 0xF8824C + +#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_0 0xF88250 + +#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_1 0xF88254 + +#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_2 0xF88258 + +#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_3 0xF8825C + +#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_4 0xF88260 + +#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_0 0xF88264 + +#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_1 0xF88268 + +#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_2 0xF8826C + +#define 
mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_3 0xF88270 + +#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_4 0xF88274 + +#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_0 0xF88278 + +#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_1 0xF8827C + +#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 0xF88280 + +#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_3 0xF88284 + +#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_4 0xF88288 + +#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_0 0xF8828C + +#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_1 0xF88290 + +#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_2 0xF88294 + +#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_3 0xF88298 + +#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_4 0xF8829C + +#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_0 0xF882A0 + +#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_1 0xF882A4 + +#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_2 0xF882A8 + +#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_3 0xF882AC + +#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_4 0xF882B0 + +#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_0 0xF882B4 + +#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_1 0xF882B8 + +#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_2 0xF882BC + +#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_3 0xF882C0 + +#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_4 0xF882C4 + +#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_0 0xF882C8 + +#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_1 0xF882CC + +#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_2 0xF882D0 + +#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_3 0xF882D4 + +#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_4 0xF882D8 + +#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xF882E0 + +#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xF882E4 + +#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xF882E8 + +#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xF882EC + +#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xF882F0 + +#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xF882F4 + +#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xF882F8 + +#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xF882FC + +#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xF88300 + +#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xF88304 + +#define mmTPC6_QM_CP_FENCE0_RDATA_0 0xF88308 + +#define mmTPC6_QM_CP_FENCE0_RDATA_1 0xF8830C + +#define mmTPC6_QM_CP_FENCE0_RDATA_2 0xF88310 + +#define mmTPC6_QM_CP_FENCE0_RDATA_3 0xF88314 + +#define mmTPC6_QM_CP_FENCE0_RDATA_4 0xF88318 + +#define mmTPC6_QM_CP_FENCE1_RDATA_0 0xF8831C + +#define mmTPC6_QM_CP_FENCE1_RDATA_1 0xF88320 + +#define mmTPC6_QM_CP_FENCE1_RDATA_2 0xF88324 + +#define mmTPC6_QM_CP_FENCE1_RDATA_3 0xF88328 + +#define mmTPC6_QM_CP_FENCE1_RDATA_4 0xF8832C + +#define mmTPC6_QM_CP_FENCE2_RDATA_0 0xF88330 + +#define mmTPC6_QM_CP_FENCE2_RDATA_1 0xF88334 + +#define mmTPC6_QM_CP_FENCE2_RDATA_2 0xF88338 + +#define mmTPC6_QM_CP_FENCE2_RDATA_3 0xF8833C + +#define mmTPC6_QM_CP_FENCE2_RDATA_4 0xF88340 + +#define mmTPC6_QM_CP_FENCE3_RDATA_0 0xF88344 + +#define mmTPC6_QM_CP_FENCE3_RDATA_1 0xF88348 + +#define mmTPC6_QM_CP_FENCE3_RDATA_2 0xF8834C + +#define mmTPC6_QM_CP_FENCE3_RDATA_3 0xF88350 + +#define mmTPC6_QM_CP_FENCE3_RDATA_4 0xF88354 + +#define mmTPC6_QM_CP_FENCE0_CNT_0 0xF88358 + +#define mmTPC6_QM_CP_FENCE0_CNT_1 0xF8835C + +#define mmTPC6_QM_CP_FENCE0_CNT_2 0xF88360 + +#define mmTPC6_QM_CP_FENCE0_CNT_3 0xF88364 + +#define mmTPC6_QM_CP_FENCE0_CNT_4 0xF88368 + +#define mmTPC6_QM_CP_FENCE1_CNT_0 0xF8836C + +#define mmTPC6_QM_CP_FENCE1_CNT_1 0xF88370 + +#define mmTPC6_QM_CP_FENCE1_CNT_2 0xF88374 + +#define mmTPC6_QM_CP_FENCE1_CNT_3 0xF88378 + +#define mmTPC6_QM_CP_FENCE1_CNT_4 0xF8837C + +#define mmTPC6_QM_CP_FENCE2_CNT_0 0xF88380 + +#define mmTPC6_QM_CP_FENCE2_CNT_1 0xF88384 + +#define 
mmTPC6_QM_CP_FENCE2_CNT_2 0xF88388 + +#define mmTPC6_QM_CP_FENCE2_CNT_3 0xF8838C + +#define mmTPC6_QM_CP_FENCE2_CNT_4 0xF88390 + +#define mmTPC6_QM_CP_FENCE3_CNT_0 0xF88394 + +#define mmTPC6_QM_CP_FENCE3_CNT_1 0xF88398 + +#define mmTPC6_QM_CP_FENCE3_CNT_2 0xF8839C + +#define mmTPC6_QM_CP_FENCE3_CNT_3 0xF883A0 + +#define mmTPC6_QM_CP_FENCE3_CNT_4 0xF883A4 + +#define mmTPC6_QM_CP_STS_0 0xF883A8 + +#define mmTPC6_QM_CP_STS_1 0xF883AC + +#define mmTPC6_QM_CP_STS_2 0xF883B0 + +#define mmTPC6_QM_CP_STS_3 0xF883B4 + +#define mmTPC6_QM_CP_STS_4 0xF883B8 + +#define mmTPC6_QM_CP_CURRENT_INST_LO_0 0xF883BC + +#define mmTPC6_QM_CP_CURRENT_INST_LO_1 0xF883C0 + +#define mmTPC6_QM_CP_CURRENT_INST_LO_2 0xF883C4 + +#define mmTPC6_QM_CP_CURRENT_INST_LO_3 0xF883C8 + +#define mmTPC6_QM_CP_CURRENT_INST_LO_4 0xF883CC + +#define mmTPC6_QM_CP_CURRENT_INST_HI_0 0xF883D0 + +#define mmTPC6_QM_CP_CURRENT_INST_HI_1 0xF883D4 + +#define mmTPC6_QM_CP_CURRENT_INST_HI_2 0xF883D8 + +#define mmTPC6_QM_CP_CURRENT_INST_HI_3 0xF883DC + +#define mmTPC6_QM_CP_CURRENT_INST_HI_4 0xF883E0 + +#define mmTPC6_QM_CP_BARRIER_CFG_0 0xF883F4 + +#define mmTPC6_QM_CP_BARRIER_CFG_1 0xF883F8 + +#define mmTPC6_QM_CP_BARRIER_CFG_2 0xF883FC + +#define mmTPC6_QM_CP_BARRIER_CFG_3 0xF88400 + +#define mmTPC6_QM_CP_BARRIER_CFG_4 0xF88404 + +#define mmTPC6_QM_CP_DBG_0_0 0xF88408 + +#define mmTPC6_QM_CP_DBG_0_1 0xF8840C + +#define mmTPC6_QM_CP_DBG_0_2 0xF88410 + +#define mmTPC6_QM_CP_DBG_0_3 0xF88414 + +#define mmTPC6_QM_CP_DBG_0_4 0xF88418 + +#define mmTPC6_QM_CP_ARUSER_31_11_0 0xF8841C + +#define mmTPC6_QM_CP_ARUSER_31_11_1 0xF88420 + +#define mmTPC6_QM_CP_ARUSER_31_11_2 0xF88424 + +#define mmTPC6_QM_CP_ARUSER_31_11_3 0xF88428 + +#define mmTPC6_QM_CP_ARUSER_31_11_4 0xF8842C + +#define mmTPC6_QM_CP_AWUSER_31_11_0 0xF88430 + +#define mmTPC6_QM_CP_AWUSER_31_11_1 0xF88434 + +#define mmTPC6_QM_CP_AWUSER_31_11_2 0xF88438 + +#define mmTPC6_QM_CP_AWUSER_31_11_3 0xF8843C + +#define mmTPC6_QM_CP_AWUSER_31_11_4 0xF88440 + +#define mmTPC6_QM_ARB_CFG_0 0xF88A00 + +#define mmTPC6_QM_ARB_CHOISE_Q_PUSH 0xF88A04 + +#define mmTPC6_QM_ARB_WRR_WEIGHT_0 0xF88A08 + +#define mmTPC6_QM_ARB_WRR_WEIGHT_1 0xF88A0C + +#define mmTPC6_QM_ARB_WRR_WEIGHT_2 0xF88A10 + +#define mmTPC6_QM_ARB_WRR_WEIGHT_3 0xF88A14 + +#define mmTPC6_QM_ARB_CFG_1 0xF88A18 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_0 0xF88A20 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_1 0xF88A24 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_2 0xF88A28 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_3 0xF88A2C + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_4 0xF88A30 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_5 0xF88A34 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_6 0xF88A38 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_7 0xF88A3C + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_8 0xF88A40 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_9 0xF88A44 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_10 0xF88A48 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_11 0xF88A4C + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_12 0xF88A50 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_13 0xF88A54 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_14 0xF88A58 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_15 0xF88A5C + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_16 0xF88A60 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_17 0xF88A64 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_18 0xF88A68 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_19 0xF88A6C + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_20 0xF88A70 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_21 0xF88A74 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_22 0xF88A78 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_23 0xF88A7C + +#define 
mmTPC6_QM_ARB_MST_AVAIL_CRED_24 0xF88A80 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_25 0xF88A84 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_26 0xF88A88 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_27 0xF88A8C + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_28 0xF88A90 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_29 0xF88A94 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_30 0xF88A98 + +#define mmTPC6_QM_ARB_MST_AVAIL_CRED_31 0xF88A9C + +#define mmTPC6_QM_ARB_MST_CRED_INC 0xF88AA0 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xF88AA4 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xF88AA8 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xF88AAC + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xF88AB0 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xF88AB4 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xF88AB8 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xF88ABC + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xF88AC0 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xF88AC4 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xF88AC8 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xF88ACC + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xF88AD0 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xF88AD4 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xF88AD8 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xF88ADC + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xF88AE0 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xF88AE4 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xF88AE8 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xF88AEC + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xF88AF0 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xF88AF4 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xF88AF8 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xF88AFC + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xF88B00 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xF88B04 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xF88B08 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xF88B0C + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xF88B10 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xF88B14 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xF88B18 + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xF88B1C + +#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xF88B20 + +#define mmTPC6_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xF88B28 + +#define mmTPC6_QM_ARB_MST_SLAVE_EN 0xF88B2C + +#define mmTPC6_QM_ARB_MST_QUIET_PER 0xF88B34 + +#define mmTPC6_QM_ARB_SLV_CHOISE_WDT 0xF88B38 + +#define mmTPC6_QM_ARB_SLV_ID 0xF88B3C + +#define mmTPC6_QM_ARB_MSG_MAX_INFLIGHT 0xF88B44 + +#define mmTPC6_QM_ARB_MSG_AWUSER_31_11 0xF88B48 + +#define mmTPC6_QM_ARB_MSG_AWUSER_SEC_PROP 0xF88B4C + +#define mmTPC6_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xF88B50 + +#define mmTPC6_QM_ARB_BASE_LO 0xF88B54 + +#define mmTPC6_QM_ARB_BASE_HI 0xF88B58 + +#define mmTPC6_QM_ARB_STATE_STS 0xF88B80 + +#define mmTPC6_QM_ARB_CHOISE_FULLNESS_STS 0xF88B84 + +#define mmTPC6_QM_ARB_MSG_STS 0xF88B88 + +#define mmTPC6_QM_ARB_SLV_CHOISE_Q_HEAD 0xF88B8C + +#define mmTPC6_QM_ARB_ERR_CAUSE 0xF88B9C + +#define mmTPC6_QM_ARB_ERR_MSG_EN 0xF88BA0 + +#define mmTPC6_QM_ARB_ERR_STS_DRP 0xF88BA8 + +#define mmTPC6_QM_ARB_MST_CRED_STS_0 0xF88BB0 + +#define mmTPC6_QM_ARB_MST_CRED_STS_1 0xF88BB4 + +#define mmTPC6_QM_ARB_MST_CRED_STS_2 0xF88BB8 + +#define mmTPC6_QM_ARB_MST_CRED_STS_3 0xF88BBC + +#define mmTPC6_QM_ARB_MST_CRED_STS_4 0xF88BC0 + +#define mmTPC6_QM_ARB_MST_CRED_STS_5 0xF88BC4 + +#define mmTPC6_QM_ARB_MST_CRED_STS_6 0xF88BC8 + +#define mmTPC6_QM_ARB_MST_CRED_STS_7 
0xF88BCC + +#define mmTPC6_QM_ARB_MST_CRED_STS_8 0xF88BD0 + +#define mmTPC6_QM_ARB_MST_CRED_STS_9 0xF88BD4 + +#define mmTPC6_QM_ARB_MST_CRED_STS_10 0xF88BD8 + +#define mmTPC6_QM_ARB_MST_CRED_STS_11 0xF88BDC + +#define mmTPC6_QM_ARB_MST_CRED_STS_12 0xF88BE0 + +#define mmTPC6_QM_ARB_MST_CRED_STS_13 0xF88BE4 + +#define mmTPC6_QM_ARB_MST_CRED_STS_14 0xF88BE8 + +#define mmTPC6_QM_ARB_MST_CRED_STS_15 0xF88BEC + +#define mmTPC6_QM_ARB_MST_CRED_STS_16 0xF88BF0 + +#define mmTPC6_QM_ARB_MST_CRED_STS_17 0xF88BF4 + +#define mmTPC6_QM_ARB_MST_CRED_STS_18 0xF88BF8 + +#define mmTPC6_QM_ARB_MST_CRED_STS_19 0xF88BFC + +#define mmTPC6_QM_ARB_MST_CRED_STS_20 0xF88C00 + +#define mmTPC6_QM_ARB_MST_CRED_STS_21 0xF88C04 + +#define mmTPC6_QM_ARB_MST_CRED_STS_22 0xF88C08 + +#define mmTPC6_QM_ARB_MST_CRED_STS_23 0xF88C0C + +#define mmTPC6_QM_ARB_MST_CRED_STS_24 0xF88C10 + +#define mmTPC6_QM_ARB_MST_CRED_STS_25 0xF88C14 + +#define mmTPC6_QM_ARB_MST_CRED_STS_26 0xF88C18 + +#define mmTPC6_QM_ARB_MST_CRED_STS_27 0xF88C1C + +#define mmTPC6_QM_ARB_MST_CRED_STS_28 0xF88C20 + +#define mmTPC6_QM_ARB_MST_CRED_STS_29 0xF88C24 + +#define mmTPC6_QM_ARB_MST_CRED_STS_30 0xF88C28 + +#define mmTPC6_QM_ARB_MST_CRED_STS_31 0xF88C2C + +#define mmTPC6_QM_CGM_CFG 0xF88C70 + +#define mmTPC6_QM_CGM_STS 0xF88C74 + +#define mmTPC6_QM_CGM_CFG1 0xF88C78 + +#define mmTPC6_QM_LOCAL_RANGE_BASE 0xF88C80 + +#define mmTPC6_QM_LOCAL_RANGE_SIZE 0xF88C84 + +#define mmTPC6_QM_CSMR_STRICT_PRIO_CFG 0xF88C90 + +#define mmTPC6_QM_HBW_RD_RATE_LIM_CFG_1 0xF88C94 + +#define mmTPC6_QM_LBW_WR_RATE_LIM_CFG_0 0xF88C98 + +#define mmTPC6_QM_LBW_WR_RATE_LIM_CFG_1 0xF88C9C + +#define mmTPC6_QM_HBW_RD_RATE_LIM_CFG_0 0xF88CA0 + +#define mmTPC6_QM_GLBL_AXCACHE 0xF88CA4 + +#define mmTPC6_QM_IND_GW_APB_CFG 0xF88CB0 + +#define mmTPC6_QM_IND_GW_APB_WDATA 0xF88CB4 + +#define mmTPC6_QM_IND_GW_APB_RDATA 0xF88CB8 + +#define mmTPC6_QM_IND_GW_APB_STATUS 0xF88CBC + +#define mmTPC6_QM_GLBL_ERR_ADDR_LO 0xF88CD0 + +#define mmTPC6_QM_GLBL_ERR_ADDR_HI 0xF88CD4 + +#define mmTPC6_QM_GLBL_ERR_WDATA 0xF88CD8 + +#define mmTPC6_QM_GLBL_MEM_INIT_BUSY 0xF88D00 + +#endif /* ASIC_REG_TPC6_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h new file mode 100644 index 000000000..1887b10e5 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h @@ -0,0 +1,1226 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC7_CFG_REGS_H_ +#define ASIC_REG_TPC7_CFG_REGS_H_ + +/* + ***************************************** + * TPC7_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xFC6400 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xFC6404 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xFC6408 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xFC640C + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xFC6410 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xFC6414 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xFC6418 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xFC641C + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xFC6420 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xFC6424 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xFC6428 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xFC642C + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xFC6430 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xFC6434 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xFC6438 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xFC643C + +#define mmTPC7_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xFC6440 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xFC6444 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xFC6448 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xFC644C + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xFC6450 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xFC6454 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xFC6458 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xFC645C + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xFC6460 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xFC6464 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xFC6468 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xFC646C + +#define mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xFC6470 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xFC6474 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xFC6478 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xFC647C + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xFC6480 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xFC6484 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xFC6488 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xFC648C + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xFC6490 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xFC6494 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xFC6498 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xFC649C + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xFC64A0 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xFC64A4 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xFC64A8 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xFC64AC + +#define mmTPC7_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xFC64B0 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xFC64B4 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xFC64B8 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xFC64BC + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xFC64C0 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xFC64C4 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xFC64C8 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xFC64CC + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xFC64D0 + +#define 
mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xFC64D4 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xFC64D8 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xFC64DC + +#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xFC64E0 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xFC64E4 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xFC64E8 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xFC64EC + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xFC64F0 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xFC64F4 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xFC64F8 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xFC64FC + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xFC6500 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xFC6504 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xFC6508 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xFC650C + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xFC6510 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xFC6514 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xFC6518 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xFC651C + +#define mmTPC7_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xFC6520 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xFC6524 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xFC6528 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xFC652C + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xFC6530 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xFC6534 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xFC6538 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xFC653C + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xFC6540 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xFC6544 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xFC6548 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xFC654C + +#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xFC6550 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xFC6554 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xFC6558 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xFC655C + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xFC6560 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xFC6564 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xFC6568 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xFC656C + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xFC6570 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xFC6574 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xFC6578 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xFC657C + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xFC6580 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xFC6584 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xFC6588 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xFC658C + +#define mmTPC7_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xFC6590 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xFC6594 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xFC6598 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xFC659C + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xFC65A0 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xFC65A4 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xFC65A8 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xFC65AC + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xFC65B0 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xFC65B4 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xFC65B8 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xFC65BC + +#define 
mmTPC7_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xFC65C0 + +#define mmTPC7_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xFC65C4 + +#define mmTPC7_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xFC65C8 + +#define mmTPC7_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xFC65CC + +#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xFC65D0 + +#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xFC65D4 + +#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xFC65D8 + +#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xFC65DC + +#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xFC65E0 + +#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xFC65E4 + +#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xFC65E8 + +#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xFC65EC + +#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xFC65F0 + +#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xFC65F4 + +#define mmTPC7_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xFC65F8 + +#define mmTPC7_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xFC65FC + +#define mmTPC7_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xFC6600 + +#define mmTPC7_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xFC6604 + +#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xFC6608 + +#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xFC660C + +#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xFC6610 + +#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xFC6614 + +#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xFC6618 + +#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xFC661C + +#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xFC6620 + +#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xFC6624 + +#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xFC6628 + +#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xFC662C + +#define mmTPC7_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xFC6630 + +#define mmTPC7_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xFC6634 + +#define mmTPC7_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xFC6638 + +#define mmTPC7_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xFC663C + +#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xFC6640 + +#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xFC6644 + +#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xFC6648 + +#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xFC664C + +#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xFC6650 + +#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xFC6654 + +#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xFC6658 + +#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xFC665C + +#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xFC6660 + +#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xFC6664 + +#define mmTPC7_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xFC6668 + +#define mmTPC7_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xFC666C + +#define mmTPC7_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xFC6670 + +#define mmTPC7_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xFC6674 + +#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xFC6678 + +#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xFC667C + +#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xFC6680 + +#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xFC6684 + +#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xFC6688 + +#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xFC668C + +#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xFC6690 + +#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xFC6694 + +#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xFC6698 + +#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xFC669C + +#define mmTPC7_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xFC66A0 + +#define mmTPC7_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xFC66A4 + +#define mmTPC7_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xFC66A8 + +#define 
mmTPC7_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xFC66AC + +#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xFC66B0 + +#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xFC66B4 + +#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xFC66B8 + +#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xFC66BC + +#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xFC66C0 + +#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xFC66C4 + +#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xFC66C8 + +#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xFC66CC + +#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xFC66D0 + +#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xFC66D4 + +#define mmTPC7_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xFC66D8 + +#define mmTPC7_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xFC66DC + +#define mmTPC7_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xFC66E0 + +#define mmTPC7_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xFC66E4 + +#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xFC66E8 + +#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xFC66EC + +#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xFC66F0 + +#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xFC66F4 + +#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xFC66F8 + +#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xFC66FC + +#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xFC6700 + +#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xFC6704 + +#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xFC6708 + +#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xFC670C + +#define mmTPC7_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xFC6710 + +#define mmTPC7_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xFC6714 + +#define mmTPC7_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xFC6718 + +#define mmTPC7_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xFC671C + +#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xFC6720 + +#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xFC6724 + +#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xFC6728 + +#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xFC672C + +#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xFC6730 + +#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xFC6734 + +#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xFC6738 + +#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xFC673C + +#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xFC6740 + +#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xFC6744 + +#define mmTPC7_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xFC6748 + +#define mmTPC7_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xFC674C + +#define mmTPC7_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xFC6750 + +#define mmTPC7_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xFC6754 + +#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xFC6758 + +#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xFC675C + +#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xFC6760 + +#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xFC6764 + +#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xFC6768 + +#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xFC676C + +#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xFC6770 + +#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xFC6774 + +#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xFC6778 + +#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xFC677C + +#define mmTPC7_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xFC6780 + +#define mmTPC7_CFG_KERNEL_SYNC_OBJECT_ADDR 0xFC6784 + +#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xFC6788 + +#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xFC678C + +#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_0 0xFC6790 + +#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_0 0xFC6794 + +#define 
mmTPC7_CFG_KERNEL_TID_BASE_DIM_1 0xFC6798 + +#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_1 0xFC679C + +#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_2 0xFC67A0 + +#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_2 0xFC67A4 + +#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_3 0xFC67A8 + +#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_3 0xFC67AC + +#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_4 0xFC67B0 + +#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_4 0xFC67B4 + +#define mmTPC7_CFG_KERNEL_KERNEL_CONFIG 0xFC67B8 + +#define mmTPC7_CFG_KERNEL_KERNEL_ID 0xFC67BC + +#define mmTPC7_CFG_KERNEL_SRF_0 0xFC67C0 + +#define mmTPC7_CFG_KERNEL_SRF_1 0xFC67C4 + +#define mmTPC7_CFG_KERNEL_SRF_2 0xFC67C8 + +#define mmTPC7_CFG_KERNEL_SRF_3 0xFC67CC + +#define mmTPC7_CFG_KERNEL_SRF_4 0xFC67D0 + +#define mmTPC7_CFG_KERNEL_SRF_5 0xFC67D4 + +#define mmTPC7_CFG_KERNEL_SRF_6 0xFC67D8 + +#define mmTPC7_CFG_KERNEL_SRF_7 0xFC67DC + +#define mmTPC7_CFG_KERNEL_SRF_8 0xFC67E0 + +#define mmTPC7_CFG_KERNEL_SRF_9 0xFC67E4 + +#define mmTPC7_CFG_KERNEL_SRF_10 0xFC67E8 + +#define mmTPC7_CFG_KERNEL_SRF_11 0xFC67EC + +#define mmTPC7_CFG_KERNEL_SRF_12 0xFC67F0 + +#define mmTPC7_CFG_KERNEL_SRF_13 0xFC67F4 + +#define mmTPC7_CFG_KERNEL_SRF_14 0xFC67F8 + +#define mmTPC7_CFG_KERNEL_SRF_15 0xFC67FC + +#define mmTPC7_CFG_KERNEL_SRF_16 0xFC6800 + +#define mmTPC7_CFG_KERNEL_SRF_17 0xFC6804 + +#define mmTPC7_CFG_KERNEL_SRF_18 0xFC6808 + +#define mmTPC7_CFG_KERNEL_SRF_19 0xFC680C + +#define mmTPC7_CFG_KERNEL_SRF_20 0xFC6810 + +#define mmTPC7_CFG_KERNEL_SRF_21 0xFC6814 + +#define mmTPC7_CFG_KERNEL_SRF_22 0xFC6818 + +#define mmTPC7_CFG_KERNEL_SRF_23 0xFC681C + +#define mmTPC7_CFG_KERNEL_SRF_24 0xFC6820 + +#define mmTPC7_CFG_KERNEL_SRF_25 0xFC6824 + +#define mmTPC7_CFG_KERNEL_SRF_26 0xFC6828 + +#define mmTPC7_CFG_KERNEL_SRF_27 0xFC682C + +#define mmTPC7_CFG_KERNEL_SRF_28 0xFC6830 + +#define mmTPC7_CFG_KERNEL_SRF_29 0xFC6834 + +#define mmTPC7_CFG_KERNEL_SRF_30 0xFC6838 + +#define mmTPC7_CFG_KERNEL_SRF_31 0xFC683C + +#define mmTPC7_CFG_ROUND_CSR 0xFC68FC + +#define mmTPC7_CFG_PROT 0xFC6900 + +#define mmTPC7_CFG_SEMAPHORE 0xFC6908 + +#define mmTPC7_CFG_VFLAGS 0xFC690C + +#define mmTPC7_CFG_SFLAGS 0xFC6910 + +#define mmTPC7_CFG_LFSR_POLYNOM 0xFC6918 + +#define mmTPC7_CFG_STATUS 0xFC691C + +#define mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH 0xFC6920 + +#define mmTPC7_CFG_CFG_SUBTRACT_VALUE 0xFC6924 + +#define mmTPC7_CFG_SM_BASE_ADDRESS_HIGH 0xFC692C + +#define mmTPC7_CFG_TPC_CMD 0xFC6930 + +#define mmTPC7_CFG_TPC_EXECUTE 0xFC6938 + +#define mmTPC7_CFG_TPC_STALL 0xFC693C + +#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_LOW 0xFC6940 + +#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_HIGH 0xFC6944 + +#define mmTPC7_CFG_RD_RATE_LIMIT 0xFC6948 + +#define mmTPC7_CFG_WR_RATE_LIMIT 0xFC6950 + +#define mmTPC7_CFG_MSS_CONFIG 0xFC6954 + +#define mmTPC7_CFG_TPC_INTR_CAUSE 0xFC6958 + +#define mmTPC7_CFG_TPC_INTR_MASK 0xFC695C + +#define mmTPC7_CFG_WQ_CREDITS 0xFC6960 + +#define mmTPC7_CFG_ARUSER_LO 0xFC6964 + +#define mmTPC7_CFG_ARUSER_HI 0xFC6968 + +#define mmTPC7_CFG_AWUSER_LO 0xFC696C + +#define mmTPC7_CFG_AWUSER_HI 0xFC6970 + +#define mmTPC7_CFG_OPCODE_EXEC 0xFC6974 + +#define mmTPC7_CFG_LUT_FUNC32_BASE_ADDR_LO 0xFC6978 + +#define mmTPC7_CFG_LUT_FUNC32_BASE_ADDR_HI 0xFC697C + +#define mmTPC7_CFG_LUT_FUNC64_BASE_ADDR_LO 0xFC6980 + +#define mmTPC7_CFG_LUT_FUNC64_BASE_ADDR_HI 0xFC6984 + +#define mmTPC7_CFG_LUT_FUNC128_BASE_ADDR_LO 0xFC6988 + +#define mmTPC7_CFG_LUT_FUNC128_BASE_ADDR_HI 0xFC698C + +#define mmTPC7_CFG_LUT_FUNC256_BASE_ADDR_LO 0xFC6990 + +#define mmTPC7_CFG_LUT_FUNC256_BASE_ADDR_HI 0xFC6994 + +#define 
mmTPC7_CFG_TSB_CFG_MAX_SIZE 0xFC6998 + +#define mmTPC7_CFG_TSB_CFG 0xFC699C + +#define mmTPC7_CFG_DBGMEM_ADD 0xFC69A0 + +#define mmTPC7_CFG_DBGMEM_DATA_WR 0xFC69A4 + +#define mmTPC7_CFG_DBGMEM_DATA_RD 0xFC69A8 + +#define mmTPC7_CFG_DBGMEM_CTRL 0xFC69AC + +#define mmTPC7_CFG_DBGMEM_RC 0xFC69B0 + +#define mmTPC7_CFG_TSB_INFLIGHT_CNTR 0xFC69B4 + +#define mmTPC7_CFG_WQ_INFLIGHT_CNTR 0xFC69B8 + +#define mmTPC7_CFG_WQ_LBW_TOTAL_CNTR 0xFC69BC + +#define mmTPC7_CFG_WQ_HBW_TOTAL_CNTR 0xFC69C0 + +#define mmTPC7_CFG_IRQ_OCCOUPY_CNTR 0xFC69C4 + +#define mmTPC7_CFG_FUNC_MBIST_CNTRL 0xFC69D0 + +#define mmTPC7_CFG_FUNC_MBIST_PAT 0xFC69D4 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_0 0xFC69D8 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_1 0xFC69DC + +#define mmTPC7_CFG_FUNC_MBIST_MEM_2 0xFC69E0 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_3 0xFC69E4 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_4 0xFC69E8 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_5 0xFC69EC + +#define mmTPC7_CFG_FUNC_MBIST_MEM_6 0xFC69F0 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_7 0xFC69F4 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_8 0xFC69F8 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_9 0xFC69FC + +#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xFC6A00 + +#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xFC6A04 + +#define mmTPC7_CFG_QM_TENSOR_0_PADDING_VALUE 0xFC6A08 + +#define mmTPC7_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xFC6A0C + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_SIZE 0xFC6A10 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xFC6A14 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_SIZE 0xFC6A18 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xFC6A1C + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_SIZE 0xFC6A20 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xFC6A24 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_SIZE 0xFC6A28 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xFC6A2C + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_SIZE 0xFC6A30 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xFC6A34 + +#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xFC6A38 + +#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xFC6A3C + +#define mmTPC7_CFG_QM_TENSOR_1_PADDING_VALUE 0xFC6A40 + +#define mmTPC7_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xFC6A44 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_SIZE 0xFC6A48 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xFC6A4C + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_SIZE 0xFC6A50 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xFC6A54 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_SIZE 0xFC6A58 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xFC6A5C + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_SIZE 0xFC6A60 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xFC6A64 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_SIZE 0xFC6A68 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xFC6A6C + +#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xFC6A70 + +#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xFC6A74 + +#define mmTPC7_CFG_QM_TENSOR_2_PADDING_VALUE 0xFC6A78 + +#define mmTPC7_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xFC6A7C + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_SIZE 0xFC6A80 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xFC6A84 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_SIZE 0xFC6A88 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xFC6A8C + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_SIZE 0xFC6A90 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xFC6A94 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_SIZE 0xFC6A98 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xFC6A9C + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_SIZE 0xFC6AA0 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xFC6AA4 + +#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xFC6AA8 + +#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 
0xFC6AAC + +#define mmTPC7_CFG_QM_TENSOR_3_PADDING_VALUE 0xFC6AB0 + +#define mmTPC7_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xFC6AB4 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_SIZE 0xFC6AB8 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xFC6ABC + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_SIZE 0xFC6AC0 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xFC6AC4 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_SIZE 0xFC6AC8 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xFC6ACC + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_SIZE 0xFC6AD0 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xFC6AD4 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_SIZE 0xFC6AD8 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xFC6ADC + +#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xFC6AE0 + +#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xFC6AE4 + +#define mmTPC7_CFG_QM_TENSOR_4_PADDING_VALUE 0xFC6AE8 + +#define mmTPC7_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xFC6AEC + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_SIZE 0xFC6AF0 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xFC6AF4 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_SIZE 0xFC6AF8 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xFC6AFC + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_SIZE 0xFC6B00 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xFC6B04 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_SIZE 0xFC6B08 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xFC6B0C + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_SIZE 0xFC6B10 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xFC6B14 + +#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xFC6B18 + +#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xFC6B1C + +#define mmTPC7_CFG_QM_TENSOR_5_PADDING_VALUE 0xFC6B20 + +#define mmTPC7_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xFC6B24 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_SIZE 0xFC6B28 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xFC6B2C + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_SIZE 0xFC6B30 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xFC6B34 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_SIZE 0xFC6B38 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xFC6B3C + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_SIZE 0xFC6B40 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xFC6B44 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_SIZE 0xFC6B48 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xFC6B4C + +#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xFC6B50 + +#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xFC6B54 + +#define mmTPC7_CFG_QM_TENSOR_6_PADDING_VALUE 0xFC6B58 + +#define mmTPC7_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xFC6B5C + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_SIZE 0xFC6B60 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xFC6B64 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_SIZE 0xFC6B68 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xFC6B6C + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_SIZE 0xFC6B70 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xFC6B74 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_SIZE 0xFC6B78 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xFC6B7C + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_SIZE 0xFC6B80 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xFC6B84 + +#define mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xFC6B88 + +#define mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xFC6B8C + +#define mmTPC7_CFG_QM_TENSOR_7_PADDING_VALUE 0xFC6B90 + +#define mmTPC7_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xFC6B94 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_SIZE 0xFC6B98 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xFC6B9C + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_SIZE 0xFC6BA0 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xFC6BA4 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_SIZE 0xFC6BA8 + +#define 
mmTPC7_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xFC6BAC + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_SIZE 0xFC6BB0 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xFC6BB4 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_SIZE 0xFC6BB8 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xFC6BBC + +#define mmTPC7_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xFC6BC0 + +#define mmTPC7_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xFC6BC4 + +#define mmTPC7_CFG_QM_TENSOR_8_PADDING_VALUE 0xFC6BC8 + +#define mmTPC7_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xFC6BCC + +#define mmTPC7_CFG_QM_TENSOR_8_DIM_0_SIZE 0xFC6BD0 + +#define mmTPC7_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xFC6BD4 + +#define mmTPC7_CFG_QM_TENSOR_8_DIM_1_SIZE 0xFC6BD8 + +#define mmTPC7_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xFC6BDC + +#define mmTPC7_CFG_QM_TENSOR_8_DIM_2_SIZE 0xFC6BE0 + +#define mmTPC7_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xFC6BE4 + +#define mmTPC7_CFG_QM_TENSOR_8_DIM_3_SIZE 0xFC6BE8 + +#define mmTPC7_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xFC6BEC + +#define mmTPC7_CFG_QM_TENSOR_8_DIM_4_SIZE 0xFC6BF0 + +#define mmTPC7_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xFC6BF4 + +#define mmTPC7_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xFC6BF8 + +#define mmTPC7_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xFC6BFC + +#define mmTPC7_CFG_QM_TENSOR_9_PADDING_VALUE 0xFC6C00 + +#define mmTPC7_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xFC6C04 + +#define mmTPC7_CFG_QM_TENSOR_9_DIM_0_SIZE 0xFC6C08 + +#define mmTPC7_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xFC6C0C + +#define mmTPC7_CFG_QM_TENSOR_9_DIM_1_SIZE 0xFC6C10 + +#define mmTPC7_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xFC6C14 + +#define mmTPC7_CFG_QM_TENSOR_9_DIM_2_SIZE 0xFC6C18 + +#define mmTPC7_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xFC6C1C + +#define mmTPC7_CFG_QM_TENSOR_9_DIM_3_SIZE 0xFC6C20 + +#define mmTPC7_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xFC6C24 + +#define mmTPC7_CFG_QM_TENSOR_9_DIM_4_SIZE 0xFC6C28 + +#define mmTPC7_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xFC6C2C + +#define mmTPC7_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xFC6C30 + +#define mmTPC7_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xFC6C34 + +#define mmTPC7_CFG_QM_TENSOR_10_PADDING_VALUE 0xFC6C38 + +#define mmTPC7_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xFC6C3C + +#define mmTPC7_CFG_QM_TENSOR_10_DIM_0_SIZE 0xFC6C40 + +#define mmTPC7_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xFC6C44 + +#define mmTPC7_CFG_QM_TENSOR_10_DIM_1_SIZE 0xFC6C48 + +#define mmTPC7_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xFC6C4C + +#define mmTPC7_CFG_QM_TENSOR_10_DIM_2_SIZE 0xFC6C50 + +#define mmTPC7_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xFC6C54 + +#define mmTPC7_CFG_QM_TENSOR_10_DIM_3_SIZE 0xFC6C58 + +#define mmTPC7_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xFC6C5C + +#define mmTPC7_CFG_QM_TENSOR_10_DIM_4_SIZE 0xFC6C60 + +#define mmTPC7_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xFC6C64 + +#define mmTPC7_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xFC6C68 + +#define mmTPC7_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xFC6C6C + +#define mmTPC7_CFG_QM_TENSOR_11_PADDING_VALUE 0xFC6C70 + +#define mmTPC7_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xFC6C74 + +#define mmTPC7_CFG_QM_TENSOR_11_DIM_0_SIZE 0xFC6C78 + +#define mmTPC7_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xFC6C7C + +#define mmTPC7_CFG_QM_TENSOR_11_DIM_1_SIZE 0xFC6C80 + +#define mmTPC7_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xFC6C84 + +#define mmTPC7_CFG_QM_TENSOR_11_DIM_2_SIZE 0xFC6C88 + +#define mmTPC7_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xFC6C8C + +#define mmTPC7_CFG_QM_TENSOR_11_DIM_3_SIZE 0xFC6C90 + +#define mmTPC7_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xFC6C94 + +#define mmTPC7_CFG_QM_TENSOR_11_DIM_4_SIZE 0xFC6C98 + +#define mmTPC7_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xFC6C9C + +#define mmTPC7_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xFC6CA0 + +#define mmTPC7_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xFC6CA4 + +#define 
mmTPC7_CFG_QM_TENSOR_12_PADDING_VALUE 0xFC6CA8 + +#define mmTPC7_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xFC6CAC + +#define mmTPC7_CFG_QM_TENSOR_12_DIM_0_SIZE 0xFC6CB0 + +#define mmTPC7_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xFC6CB4 + +#define mmTPC7_CFG_QM_TENSOR_12_DIM_1_SIZE 0xFC6CB8 + +#define mmTPC7_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xFC6CBC + +#define mmTPC7_CFG_QM_TENSOR_12_DIM_2_SIZE 0xFC6CC0 + +#define mmTPC7_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xFC6CC4 + +#define mmTPC7_CFG_QM_TENSOR_12_DIM_3_SIZE 0xFC6CC8 + +#define mmTPC7_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xFC6CCC + +#define mmTPC7_CFG_QM_TENSOR_12_DIM_4_SIZE 0xFC6CD0 + +#define mmTPC7_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xFC6CD4 + +#define mmTPC7_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xFC6CD8 + +#define mmTPC7_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xFC6CDC + +#define mmTPC7_CFG_QM_TENSOR_13_PADDING_VALUE 0xFC6CE0 + +#define mmTPC7_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xFC6CE4 + +#define mmTPC7_CFG_QM_TENSOR_13_DIM_0_SIZE 0xFC6CE8 + +#define mmTPC7_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xFC6CEC + +#define mmTPC7_CFG_QM_TENSOR_13_DIM_1_SIZE 0xFC6CF0 + +#define mmTPC7_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xFC6CF4 + +#define mmTPC7_CFG_QM_TENSOR_13_DIM_2_SIZE 0xFC6CF8 + +#define mmTPC7_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xFC6CFC + +#define mmTPC7_CFG_QM_TENSOR_13_DIM_3_SIZE 0xFC6D00 + +#define mmTPC7_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xFC6D04 + +#define mmTPC7_CFG_QM_TENSOR_13_DIM_4_SIZE 0xFC6D08 + +#define mmTPC7_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xFC6D0C + +#define mmTPC7_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xFC6D10 + +#define mmTPC7_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xFC6D14 + +#define mmTPC7_CFG_QM_TENSOR_14_PADDING_VALUE 0xFC6D18 + +#define mmTPC7_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xFC6D1C + +#define mmTPC7_CFG_QM_TENSOR_14_DIM_0_SIZE 0xFC6D20 + +#define mmTPC7_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xFC6D24 + +#define mmTPC7_CFG_QM_TENSOR_14_DIM_1_SIZE 0xFC6D28 + +#define mmTPC7_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xFC6D2C + +#define mmTPC7_CFG_QM_TENSOR_14_DIM_2_SIZE 0xFC6D30 + +#define mmTPC7_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xFC6D34 + +#define mmTPC7_CFG_QM_TENSOR_14_DIM_3_SIZE 0xFC6D38 + +#define mmTPC7_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xFC6D3C + +#define mmTPC7_CFG_QM_TENSOR_14_DIM_4_SIZE 0xFC6D40 + +#define mmTPC7_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xFC6D44 + +#define mmTPC7_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xFC6D48 + +#define mmTPC7_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xFC6D4C + +#define mmTPC7_CFG_QM_TENSOR_15_PADDING_VALUE 0xFC6D50 + +#define mmTPC7_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xFC6D54 + +#define mmTPC7_CFG_QM_TENSOR_15_DIM_0_SIZE 0xFC6D58 + +#define mmTPC7_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xFC6D5C + +#define mmTPC7_CFG_QM_TENSOR_15_DIM_1_SIZE 0xFC6D60 + +#define mmTPC7_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xFC6D64 + +#define mmTPC7_CFG_QM_TENSOR_15_DIM_2_SIZE 0xFC6D68 + +#define mmTPC7_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xFC6D6C + +#define mmTPC7_CFG_QM_TENSOR_15_DIM_3_SIZE 0xFC6D70 + +#define mmTPC7_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xFC6D74 + +#define mmTPC7_CFG_QM_TENSOR_15_DIM_4_SIZE 0xFC6D78 + +#define mmTPC7_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xFC6D7C + +#define mmTPC7_CFG_QM_SYNC_OBJECT_MESSAGE 0xFC6D80 + +#define mmTPC7_CFG_QM_SYNC_OBJECT_ADDR 0xFC6D84 + +#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xFC6D88 + +#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xFC6D8C + +#define mmTPC7_CFG_QM_TID_BASE_DIM_0 0xFC6D90 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_0 0xFC6D94 + +#define mmTPC7_CFG_QM_TID_BASE_DIM_1 0xFC6D98 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_1 0xFC6D9C + +#define mmTPC7_CFG_QM_TID_BASE_DIM_2 0xFC6DA0 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_2 
0xFC6DA4 + +#define mmTPC7_CFG_QM_TID_BASE_DIM_3 0xFC6DA8 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_3 0xFC6DAC + +#define mmTPC7_CFG_QM_TID_BASE_DIM_4 0xFC6DB0 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_4 0xFC6DB4 + +#define mmTPC7_CFG_QM_KERNEL_CONFIG 0xFC6DB8 + +#define mmTPC7_CFG_QM_KERNEL_ID 0xFC6DBC + +#define mmTPC7_CFG_QM_SRF_0 0xFC6DC0 + +#define mmTPC7_CFG_QM_SRF_1 0xFC6DC4 + +#define mmTPC7_CFG_QM_SRF_2 0xFC6DC8 + +#define mmTPC7_CFG_QM_SRF_3 0xFC6DCC + +#define mmTPC7_CFG_QM_SRF_4 0xFC6DD0 + +#define mmTPC7_CFG_QM_SRF_5 0xFC6DD4 + +#define mmTPC7_CFG_QM_SRF_6 0xFC6DD8 + +#define mmTPC7_CFG_QM_SRF_7 0xFC6DDC + +#define mmTPC7_CFG_QM_SRF_8 0xFC6DE0 + +#define mmTPC7_CFG_QM_SRF_9 0xFC6DE4 + +#define mmTPC7_CFG_QM_SRF_10 0xFC6DE8 + +#define mmTPC7_CFG_QM_SRF_11 0xFC6DEC + +#define mmTPC7_CFG_QM_SRF_12 0xFC6DF0 + +#define mmTPC7_CFG_QM_SRF_13 0xFC6DF4 + +#define mmTPC7_CFG_QM_SRF_14 0xFC6DF8 + +#define mmTPC7_CFG_QM_SRF_15 0xFC6DFC + +#define mmTPC7_CFG_QM_SRF_16 0xFC6E00 + +#define mmTPC7_CFG_QM_SRF_17 0xFC6E04 + +#define mmTPC7_CFG_QM_SRF_18 0xFC6E08 + +#define mmTPC7_CFG_QM_SRF_19 0xFC6E0C + +#define mmTPC7_CFG_QM_SRF_20 0xFC6E10 + +#define mmTPC7_CFG_QM_SRF_21 0xFC6E14 + +#define mmTPC7_CFG_QM_SRF_22 0xFC6E18 + +#define mmTPC7_CFG_QM_SRF_23 0xFC6E1C + +#define mmTPC7_CFG_QM_SRF_24 0xFC6E20 + +#define mmTPC7_CFG_QM_SRF_25 0xFC6E24 + +#define mmTPC7_CFG_QM_SRF_26 0xFC6E28 + +#define mmTPC7_CFG_QM_SRF_27 0xFC6E2C + +#define mmTPC7_CFG_QM_SRF_28 0xFC6E30 + +#define mmTPC7_CFG_QM_SRF_29 0xFC6E34 + +#define mmTPC7_CFG_QM_SRF_30 0xFC6E38 + +#define mmTPC7_CFG_QM_SRF_31 0xFC6E3C + +#endif /* ASIC_REG_TPC7_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h new file mode 100644 index 000000000..5c36c972c --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC7_QM_REGS_H_ +#define ASIC_REG_TPC7_QM_REGS_H_ + +/* + ***************************************** + * TPC7_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC7_QM_GLBL_CFG0 0xFC8000 + +#define mmTPC7_QM_GLBL_CFG1 0xFC8004 + +#define mmTPC7_QM_GLBL_PROT 0xFC8008 + +#define mmTPC7_QM_GLBL_ERR_CFG 0xFC800C + +#define mmTPC7_QM_GLBL_SECURE_PROPS_0 0xFC8010 + +#define mmTPC7_QM_GLBL_SECURE_PROPS_1 0xFC8014 + +#define mmTPC7_QM_GLBL_SECURE_PROPS_2 0xFC8018 + +#define mmTPC7_QM_GLBL_SECURE_PROPS_3 0xFC801C + +#define mmTPC7_QM_GLBL_SECURE_PROPS_4 0xFC8020 + +#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_0 0xFC8024 + +#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_1 0xFC8028 + +#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_2 0xFC802C + +#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_3 0xFC8030 + +#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_4 0xFC8034 + +#define mmTPC7_QM_GLBL_STS0 0xFC8038 + +#define mmTPC7_QM_GLBL_STS1_0 0xFC8040 + +#define mmTPC7_QM_GLBL_STS1_1 0xFC8044 + +#define mmTPC7_QM_GLBL_STS1_2 0xFC8048 + +#define mmTPC7_QM_GLBL_STS1_3 0xFC804C + +#define mmTPC7_QM_GLBL_STS1_4 0xFC8050 + +#define mmTPC7_QM_GLBL_MSG_EN_0 0xFC8054 + +#define mmTPC7_QM_GLBL_MSG_EN_1 0xFC8058 + +#define mmTPC7_QM_GLBL_MSG_EN_2 0xFC805C + +#define mmTPC7_QM_GLBL_MSG_EN_3 0xFC8060 + +#define mmTPC7_QM_GLBL_MSG_EN_4 0xFC8068 + +#define mmTPC7_QM_PQ_BASE_LO_0 0xFC8070 + +#define mmTPC7_QM_PQ_BASE_LO_1 0xFC8074 + +#define mmTPC7_QM_PQ_BASE_LO_2 0xFC8078 + +#define mmTPC7_QM_PQ_BASE_LO_3 0xFC807C + +#define mmTPC7_QM_PQ_BASE_HI_0 0xFC8080 + +#define mmTPC7_QM_PQ_BASE_HI_1 0xFC8084 + +#define mmTPC7_QM_PQ_BASE_HI_2 0xFC8088 + +#define mmTPC7_QM_PQ_BASE_HI_3 0xFC808C + +#define mmTPC7_QM_PQ_SIZE_0 0xFC8090 + +#define mmTPC7_QM_PQ_SIZE_1 0xFC8094 + +#define mmTPC7_QM_PQ_SIZE_2 0xFC8098 + +#define mmTPC7_QM_PQ_SIZE_3 0xFC809C + +#define mmTPC7_QM_PQ_PI_0 0xFC80A0 + +#define mmTPC7_QM_PQ_PI_1 0xFC80A4 + +#define mmTPC7_QM_PQ_PI_2 0xFC80A8 + +#define mmTPC7_QM_PQ_PI_3 0xFC80AC + +#define mmTPC7_QM_PQ_CI_0 0xFC80B0 + +#define mmTPC7_QM_PQ_CI_1 0xFC80B4 + +#define mmTPC7_QM_PQ_CI_2 0xFC80B8 + +#define mmTPC7_QM_PQ_CI_3 0xFC80BC + +#define mmTPC7_QM_PQ_CFG0_0 0xFC80C0 + +#define mmTPC7_QM_PQ_CFG0_1 0xFC80C4 + +#define mmTPC7_QM_PQ_CFG0_2 0xFC80C8 + +#define mmTPC7_QM_PQ_CFG0_3 0xFC80CC + +#define mmTPC7_QM_PQ_CFG1_0 0xFC80D0 + +#define mmTPC7_QM_PQ_CFG1_1 0xFC80D4 + +#define mmTPC7_QM_PQ_CFG1_2 0xFC80D8 + +#define mmTPC7_QM_PQ_CFG1_3 0xFC80DC + +#define mmTPC7_QM_PQ_ARUSER_31_11_0 0xFC80E0 + +#define mmTPC7_QM_PQ_ARUSER_31_11_1 0xFC80E4 + +#define mmTPC7_QM_PQ_ARUSER_31_11_2 0xFC80E8 + +#define mmTPC7_QM_PQ_ARUSER_31_11_3 0xFC80EC + +#define mmTPC7_QM_PQ_STS0_0 0xFC80F0 + +#define mmTPC7_QM_PQ_STS0_1 0xFC80F4 + +#define mmTPC7_QM_PQ_STS0_2 0xFC80F8 + +#define mmTPC7_QM_PQ_STS0_3 0xFC80FC + +#define mmTPC7_QM_PQ_STS1_0 0xFC8100 + +#define mmTPC7_QM_PQ_STS1_1 0xFC8104 + +#define mmTPC7_QM_PQ_STS1_2 0xFC8108 + +#define mmTPC7_QM_PQ_STS1_3 0xFC810C + +#define mmTPC7_QM_CQ_CFG0_0 0xFC8110 + +#define mmTPC7_QM_CQ_CFG0_1 0xFC8114 + +#define mmTPC7_QM_CQ_CFG0_2 0xFC8118 + +#define mmTPC7_QM_CQ_CFG0_3 0xFC811C + +#define mmTPC7_QM_CQ_CFG0_4 0xFC8120 + +#define mmTPC7_QM_CQ_CFG1_0 0xFC8124 + +#define mmTPC7_QM_CQ_CFG1_1 0xFC8128 + +#define mmTPC7_QM_CQ_CFG1_2 0xFC812C + +#define mmTPC7_QM_CQ_CFG1_3 0xFC8130 + +#define mmTPC7_QM_CQ_CFG1_4 0xFC8134 + 
+#define mmTPC7_QM_CQ_ARUSER_31_11_0 0xFC8138 + +#define mmTPC7_QM_CQ_ARUSER_31_11_1 0xFC813C + +#define mmTPC7_QM_CQ_ARUSER_31_11_2 0xFC8140 + +#define mmTPC7_QM_CQ_ARUSER_31_11_3 0xFC8144 + +#define mmTPC7_QM_CQ_ARUSER_31_11_4 0xFC8148 + +#define mmTPC7_QM_CQ_STS0_0 0xFC814C + +#define mmTPC7_QM_CQ_STS0_1 0xFC8150 + +#define mmTPC7_QM_CQ_STS0_2 0xFC8154 + +#define mmTPC7_QM_CQ_STS0_3 0xFC8158 + +#define mmTPC7_QM_CQ_STS0_4 0xFC815C + +#define mmTPC7_QM_CQ_STS1_0 0xFC8160 + +#define mmTPC7_QM_CQ_STS1_1 0xFC8164 + +#define mmTPC7_QM_CQ_STS1_2 0xFC8168 + +#define mmTPC7_QM_CQ_STS1_3 0xFC816C + +#define mmTPC7_QM_CQ_STS1_4 0xFC8170 + +#define mmTPC7_QM_CQ_PTR_LO_0 0xFC8174 + +#define mmTPC7_QM_CQ_PTR_HI_0 0xFC8178 + +#define mmTPC7_QM_CQ_TSIZE_0 0xFC817C + +#define mmTPC7_QM_CQ_CTL_0 0xFC8180 + +#define mmTPC7_QM_CQ_PTR_LO_1 0xFC8184 + +#define mmTPC7_QM_CQ_PTR_HI_1 0xFC8188 + +#define mmTPC7_QM_CQ_TSIZE_1 0xFC818C + +#define mmTPC7_QM_CQ_CTL_1 0xFC8190 + +#define mmTPC7_QM_CQ_PTR_LO_2 0xFC8194 + +#define mmTPC7_QM_CQ_PTR_HI_2 0xFC8198 + +#define mmTPC7_QM_CQ_TSIZE_2 0xFC819C + +#define mmTPC7_QM_CQ_CTL_2 0xFC81A0 + +#define mmTPC7_QM_CQ_PTR_LO_3 0xFC81A4 + +#define mmTPC7_QM_CQ_PTR_HI_3 0xFC81A8 + +#define mmTPC7_QM_CQ_TSIZE_3 0xFC81AC + +#define mmTPC7_QM_CQ_CTL_3 0xFC81B0 + +#define mmTPC7_QM_CQ_PTR_LO_4 0xFC81B4 + +#define mmTPC7_QM_CQ_PTR_HI_4 0xFC81B8 + +#define mmTPC7_QM_CQ_TSIZE_4 0xFC81BC + +#define mmTPC7_QM_CQ_CTL_4 0xFC81C0 + +#define mmTPC7_QM_CQ_PTR_LO_STS_0 0xFC81C4 + +#define mmTPC7_QM_CQ_PTR_LO_STS_1 0xFC81C8 + +#define mmTPC7_QM_CQ_PTR_LO_STS_2 0xFC81CC + +#define mmTPC7_QM_CQ_PTR_LO_STS_3 0xFC81D0 + +#define mmTPC7_QM_CQ_PTR_LO_STS_4 0xFC81D4 + +#define mmTPC7_QM_CQ_PTR_HI_STS_0 0xFC81D8 + +#define mmTPC7_QM_CQ_PTR_HI_STS_1 0xFC81DC + +#define mmTPC7_QM_CQ_PTR_HI_STS_2 0xFC81E0 + +#define mmTPC7_QM_CQ_PTR_HI_STS_3 0xFC81E4 + +#define mmTPC7_QM_CQ_PTR_HI_STS_4 0xFC81E8 + +#define mmTPC7_QM_CQ_TSIZE_STS_0 0xFC81EC + +#define mmTPC7_QM_CQ_TSIZE_STS_1 0xFC81F0 + +#define mmTPC7_QM_CQ_TSIZE_STS_2 0xFC81F4 + +#define mmTPC7_QM_CQ_TSIZE_STS_3 0xFC81F8 + +#define mmTPC7_QM_CQ_TSIZE_STS_4 0xFC81FC + +#define mmTPC7_QM_CQ_CTL_STS_0 0xFC8200 + +#define mmTPC7_QM_CQ_CTL_STS_1 0xFC8204 + +#define mmTPC7_QM_CQ_CTL_STS_2 0xFC8208 + +#define mmTPC7_QM_CQ_CTL_STS_3 0xFC820C + +#define mmTPC7_QM_CQ_CTL_STS_4 0xFC8210 + +#define mmTPC7_QM_CQ_IFIFO_CNT_0 0xFC8214 + +#define mmTPC7_QM_CQ_IFIFO_CNT_1 0xFC8218 + +#define mmTPC7_QM_CQ_IFIFO_CNT_2 0xFC821C + +#define mmTPC7_QM_CQ_IFIFO_CNT_3 0xFC8220 + +#define mmTPC7_QM_CQ_IFIFO_CNT_4 0xFC8224 + +#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_0 0xFC8228 + +#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_1 0xFC822C + +#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_2 0xFC8230 + +#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_3 0xFC8234 + +#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_4 0xFC8238 + +#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_0 0xFC823C + +#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_1 0xFC8240 + +#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_2 0xFC8244 + +#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_3 0xFC8248 + +#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_4 0xFC824C + +#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_0 0xFC8250 + +#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_1 0xFC8254 + +#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_2 0xFC8258 + +#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_3 0xFC825C + +#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_4 0xFC8260 + +#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_0 0xFC8264 + +#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_1 0xFC8268 + +#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_2 0xFC826C + +#define 
mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_3 0xFC8270 + +#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_4 0xFC8274 + +#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_0 0xFC8278 + +#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_1 0xFC827C + +#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 0xFC8280 + +#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_3 0xFC8284 + +#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_4 0xFC8288 + +#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_0 0xFC828C + +#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_1 0xFC8290 + +#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_2 0xFC8294 + +#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_3 0xFC8298 + +#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_4 0xFC829C + +#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_0 0xFC82A0 + +#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_1 0xFC82A4 + +#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_2 0xFC82A8 + +#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_3 0xFC82AC + +#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_4 0xFC82B0 + +#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_0 0xFC82B4 + +#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_1 0xFC82B8 + +#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_2 0xFC82BC + +#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_3 0xFC82C0 + +#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_4 0xFC82C4 + +#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_0 0xFC82C8 + +#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_1 0xFC82CC + +#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_2 0xFC82D0 + +#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_3 0xFC82D4 + +#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_4 0xFC82D8 + +#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xFC82E0 + +#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xFC82E4 + +#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xFC82E8 + +#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xFC82EC + +#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xFC82F0 + +#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xFC82F4 + +#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xFC82F8 + +#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xFC82FC + +#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xFC8300 + +#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xFC8304 + +#define mmTPC7_QM_CP_FENCE0_RDATA_0 0xFC8308 + +#define mmTPC7_QM_CP_FENCE0_RDATA_1 0xFC830C + +#define mmTPC7_QM_CP_FENCE0_RDATA_2 0xFC8310 + +#define mmTPC7_QM_CP_FENCE0_RDATA_3 0xFC8314 + +#define mmTPC7_QM_CP_FENCE0_RDATA_4 0xFC8318 + +#define mmTPC7_QM_CP_FENCE1_RDATA_0 0xFC831C + +#define mmTPC7_QM_CP_FENCE1_RDATA_1 0xFC8320 + +#define mmTPC7_QM_CP_FENCE1_RDATA_2 0xFC8324 + +#define mmTPC7_QM_CP_FENCE1_RDATA_3 0xFC8328 + +#define mmTPC7_QM_CP_FENCE1_RDATA_4 0xFC832C + +#define mmTPC7_QM_CP_FENCE2_RDATA_0 0xFC8330 + +#define mmTPC7_QM_CP_FENCE2_RDATA_1 0xFC8334 + +#define mmTPC7_QM_CP_FENCE2_RDATA_2 0xFC8338 + +#define mmTPC7_QM_CP_FENCE2_RDATA_3 0xFC833C + +#define mmTPC7_QM_CP_FENCE2_RDATA_4 0xFC8340 + +#define mmTPC7_QM_CP_FENCE3_RDATA_0 0xFC8344 + +#define mmTPC7_QM_CP_FENCE3_RDATA_1 0xFC8348 + +#define mmTPC7_QM_CP_FENCE3_RDATA_2 0xFC834C + +#define mmTPC7_QM_CP_FENCE3_RDATA_3 0xFC8350 + +#define mmTPC7_QM_CP_FENCE3_RDATA_4 0xFC8354 + +#define mmTPC7_QM_CP_FENCE0_CNT_0 0xFC8358 + +#define mmTPC7_QM_CP_FENCE0_CNT_1 0xFC835C + +#define mmTPC7_QM_CP_FENCE0_CNT_2 0xFC8360 + +#define mmTPC7_QM_CP_FENCE0_CNT_3 0xFC8364 + +#define mmTPC7_QM_CP_FENCE0_CNT_4 0xFC8368 + +#define mmTPC7_QM_CP_FENCE1_CNT_0 0xFC836C + +#define mmTPC7_QM_CP_FENCE1_CNT_1 0xFC8370 + +#define mmTPC7_QM_CP_FENCE1_CNT_2 0xFC8374 + +#define mmTPC7_QM_CP_FENCE1_CNT_3 0xFC8378 + +#define mmTPC7_QM_CP_FENCE1_CNT_4 0xFC837C + +#define mmTPC7_QM_CP_FENCE2_CNT_0 0xFC8380 + +#define mmTPC7_QM_CP_FENCE2_CNT_1 0xFC8384 + +#define 
mmTPC7_QM_CP_FENCE2_CNT_2 0xFC8388 + +#define mmTPC7_QM_CP_FENCE2_CNT_3 0xFC838C + +#define mmTPC7_QM_CP_FENCE2_CNT_4 0xFC8390 + +#define mmTPC7_QM_CP_FENCE3_CNT_0 0xFC8394 + +#define mmTPC7_QM_CP_FENCE3_CNT_1 0xFC8398 + +#define mmTPC7_QM_CP_FENCE3_CNT_2 0xFC839C + +#define mmTPC7_QM_CP_FENCE3_CNT_3 0xFC83A0 + +#define mmTPC7_QM_CP_FENCE3_CNT_4 0xFC83A4 + +#define mmTPC7_QM_CP_STS_0 0xFC83A8 + +#define mmTPC7_QM_CP_STS_1 0xFC83AC + +#define mmTPC7_QM_CP_STS_2 0xFC83B0 + +#define mmTPC7_QM_CP_STS_3 0xFC83B4 + +#define mmTPC7_QM_CP_STS_4 0xFC83B8 + +#define mmTPC7_QM_CP_CURRENT_INST_LO_0 0xFC83BC + +#define mmTPC7_QM_CP_CURRENT_INST_LO_1 0xFC83C0 + +#define mmTPC7_QM_CP_CURRENT_INST_LO_2 0xFC83C4 + +#define mmTPC7_QM_CP_CURRENT_INST_LO_3 0xFC83C8 + +#define mmTPC7_QM_CP_CURRENT_INST_LO_4 0xFC83CC + +#define mmTPC7_QM_CP_CURRENT_INST_HI_0 0xFC83D0 + +#define mmTPC7_QM_CP_CURRENT_INST_HI_1 0xFC83D4 + +#define mmTPC7_QM_CP_CURRENT_INST_HI_2 0xFC83D8 + +#define mmTPC7_QM_CP_CURRENT_INST_HI_3 0xFC83DC + +#define mmTPC7_QM_CP_CURRENT_INST_HI_4 0xFC83E0 + +#define mmTPC7_QM_CP_BARRIER_CFG_0 0xFC83F4 + +#define mmTPC7_QM_CP_BARRIER_CFG_1 0xFC83F8 + +#define mmTPC7_QM_CP_BARRIER_CFG_2 0xFC83FC + +#define mmTPC7_QM_CP_BARRIER_CFG_3 0xFC8400 + +#define mmTPC7_QM_CP_BARRIER_CFG_4 0xFC8404 + +#define mmTPC7_QM_CP_DBG_0_0 0xFC8408 + +#define mmTPC7_QM_CP_DBG_0_1 0xFC840C + +#define mmTPC7_QM_CP_DBG_0_2 0xFC8410 + +#define mmTPC7_QM_CP_DBG_0_3 0xFC8414 + +#define mmTPC7_QM_CP_DBG_0_4 0xFC8418 + +#define mmTPC7_QM_CP_ARUSER_31_11_0 0xFC841C + +#define mmTPC7_QM_CP_ARUSER_31_11_1 0xFC8420 + +#define mmTPC7_QM_CP_ARUSER_31_11_2 0xFC8424 + +#define mmTPC7_QM_CP_ARUSER_31_11_3 0xFC8428 + +#define mmTPC7_QM_CP_ARUSER_31_11_4 0xFC842C + +#define mmTPC7_QM_CP_AWUSER_31_11_0 0xFC8430 + +#define mmTPC7_QM_CP_AWUSER_31_11_1 0xFC8434 + +#define mmTPC7_QM_CP_AWUSER_31_11_2 0xFC8438 + +#define mmTPC7_QM_CP_AWUSER_31_11_3 0xFC843C + +#define mmTPC7_QM_CP_AWUSER_31_11_4 0xFC8440 + +#define mmTPC7_QM_ARB_CFG_0 0xFC8A00 + +#define mmTPC7_QM_ARB_CHOISE_Q_PUSH 0xFC8A04 + +#define mmTPC7_QM_ARB_WRR_WEIGHT_0 0xFC8A08 + +#define mmTPC7_QM_ARB_WRR_WEIGHT_1 0xFC8A0C + +#define mmTPC7_QM_ARB_WRR_WEIGHT_2 0xFC8A10 + +#define mmTPC7_QM_ARB_WRR_WEIGHT_3 0xFC8A14 + +#define mmTPC7_QM_ARB_CFG_1 0xFC8A18 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_0 0xFC8A20 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_1 0xFC8A24 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_2 0xFC8A28 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_3 0xFC8A2C + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_4 0xFC8A30 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_5 0xFC8A34 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_6 0xFC8A38 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_7 0xFC8A3C + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_8 0xFC8A40 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_9 0xFC8A44 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_10 0xFC8A48 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_11 0xFC8A4C + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_12 0xFC8A50 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_13 0xFC8A54 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_14 0xFC8A58 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_15 0xFC8A5C + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_16 0xFC8A60 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_17 0xFC8A64 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_18 0xFC8A68 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_19 0xFC8A6C + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_20 0xFC8A70 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_21 0xFC8A74 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_22 0xFC8A78 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_23 0xFC8A7C + +#define 
mmTPC7_QM_ARB_MST_AVAIL_CRED_24 0xFC8A80 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_25 0xFC8A84 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_26 0xFC8A88 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_27 0xFC8A8C + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_28 0xFC8A90 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_29 0xFC8A94 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_30 0xFC8A98 + +#define mmTPC7_QM_ARB_MST_AVAIL_CRED_31 0xFC8A9C + +#define mmTPC7_QM_ARB_MST_CRED_INC 0xFC8AA0 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xFC8AA4 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xFC8AA8 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xFC8AAC + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xFC8AB0 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xFC8AB4 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xFC8AB8 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xFC8ABC + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xFC8AC0 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xFC8AC4 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xFC8AC8 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xFC8ACC + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xFC8AD0 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xFC8AD4 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xFC8AD8 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xFC8ADC + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xFC8AE0 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xFC8AE4 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xFC8AE8 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xFC8AEC + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xFC8AF0 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xFC8AF4 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xFC8AF8 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xFC8AFC + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xFC8B00 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xFC8B04 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xFC8B08 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xFC8B0C + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xFC8B10 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xFC8B14 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xFC8B18 + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xFC8B1C + +#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xFC8B20 + +#define mmTPC7_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xFC8B28 + +#define mmTPC7_QM_ARB_MST_SLAVE_EN 0xFC8B2C + +#define mmTPC7_QM_ARB_MST_QUIET_PER 0xFC8B34 + +#define mmTPC7_QM_ARB_SLV_CHOISE_WDT 0xFC8B38 + +#define mmTPC7_QM_ARB_SLV_ID 0xFC8B3C + +#define mmTPC7_QM_ARB_MSG_MAX_INFLIGHT 0xFC8B44 + +#define mmTPC7_QM_ARB_MSG_AWUSER_31_11 0xFC8B48 + +#define mmTPC7_QM_ARB_MSG_AWUSER_SEC_PROP 0xFC8B4C + +#define mmTPC7_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xFC8B50 + +#define mmTPC7_QM_ARB_BASE_LO 0xFC8B54 + +#define mmTPC7_QM_ARB_BASE_HI 0xFC8B58 + +#define mmTPC7_QM_ARB_STATE_STS 0xFC8B80 + +#define mmTPC7_QM_ARB_CHOISE_FULLNESS_STS 0xFC8B84 + +#define mmTPC7_QM_ARB_MSG_STS 0xFC8B88 + +#define mmTPC7_QM_ARB_SLV_CHOISE_Q_HEAD 0xFC8B8C + +#define mmTPC7_QM_ARB_ERR_CAUSE 0xFC8B9C + +#define mmTPC7_QM_ARB_ERR_MSG_EN 0xFC8BA0 + +#define mmTPC7_QM_ARB_ERR_STS_DRP 0xFC8BA8 + +#define mmTPC7_QM_ARB_MST_CRED_STS_0 0xFC8BB0 + +#define mmTPC7_QM_ARB_MST_CRED_STS_1 0xFC8BB4 + +#define mmTPC7_QM_ARB_MST_CRED_STS_2 0xFC8BB8 + +#define mmTPC7_QM_ARB_MST_CRED_STS_3 0xFC8BBC + +#define mmTPC7_QM_ARB_MST_CRED_STS_4 0xFC8BC0 + +#define mmTPC7_QM_ARB_MST_CRED_STS_5 0xFC8BC4 + +#define mmTPC7_QM_ARB_MST_CRED_STS_6 0xFC8BC8 + +#define mmTPC7_QM_ARB_MST_CRED_STS_7 
0xFC8BCC + +#define mmTPC7_QM_ARB_MST_CRED_STS_8 0xFC8BD0 + +#define mmTPC7_QM_ARB_MST_CRED_STS_9 0xFC8BD4 + +#define mmTPC7_QM_ARB_MST_CRED_STS_10 0xFC8BD8 + +#define mmTPC7_QM_ARB_MST_CRED_STS_11 0xFC8BDC + +#define mmTPC7_QM_ARB_MST_CRED_STS_12 0xFC8BE0 + +#define mmTPC7_QM_ARB_MST_CRED_STS_13 0xFC8BE4 + +#define mmTPC7_QM_ARB_MST_CRED_STS_14 0xFC8BE8 + +#define mmTPC7_QM_ARB_MST_CRED_STS_15 0xFC8BEC + +#define mmTPC7_QM_ARB_MST_CRED_STS_16 0xFC8BF0 + +#define mmTPC7_QM_ARB_MST_CRED_STS_17 0xFC8BF4 + +#define mmTPC7_QM_ARB_MST_CRED_STS_18 0xFC8BF8 + +#define mmTPC7_QM_ARB_MST_CRED_STS_19 0xFC8BFC + +#define mmTPC7_QM_ARB_MST_CRED_STS_20 0xFC8C00 + +#define mmTPC7_QM_ARB_MST_CRED_STS_21 0xFC8C04 + +#define mmTPC7_QM_ARB_MST_CRED_STS_22 0xFC8C08 + +#define mmTPC7_QM_ARB_MST_CRED_STS_23 0xFC8C0C + +#define mmTPC7_QM_ARB_MST_CRED_STS_24 0xFC8C10 + +#define mmTPC7_QM_ARB_MST_CRED_STS_25 0xFC8C14 + +#define mmTPC7_QM_ARB_MST_CRED_STS_26 0xFC8C18 + +#define mmTPC7_QM_ARB_MST_CRED_STS_27 0xFC8C1C + +#define mmTPC7_QM_ARB_MST_CRED_STS_28 0xFC8C20 + +#define mmTPC7_QM_ARB_MST_CRED_STS_29 0xFC8C24 + +#define mmTPC7_QM_ARB_MST_CRED_STS_30 0xFC8C28 + +#define mmTPC7_QM_ARB_MST_CRED_STS_31 0xFC8C2C + +#define mmTPC7_QM_CGM_CFG 0xFC8C70 + +#define mmTPC7_QM_CGM_STS 0xFC8C74 + +#define mmTPC7_QM_CGM_CFG1 0xFC8C78 + +#define mmTPC7_QM_LOCAL_RANGE_BASE 0xFC8C80 + +#define mmTPC7_QM_LOCAL_RANGE_SIZE 0xFC8C84 + +#define mmTPC7_QM_CSMR_STRICT_PRIO_CFG 0xFC8C90 + +#define mmTPC7_QM_HBW_RD_RATE_LIM_CFG_1 0xFC8C94 + +#define mmTPC7_QM_LBW_WR_RATE_LIM_CFG_0 0xFC8C98 + +#define mmTPC7_QM_LBW_WR_RATE_LIM_CFG_1 0xFC8C9C + +#define mmTPC7_QM_HBW_RD_RATE_LIM_CFG_0 0xFC8CA0 + +#define mmTPC7_QM_GLBL_AXCACHE 0xFC8CA4 + +#define mmTPC7_QM_IND_GW_APB_CFG 0xFC8CB0 + +#define mmTPC7_QM_IND_GW_APB_WDATA 0xFC8CB4 + +#define mmTPC7_QM_IND_GW_APB_RDATA 0xFC8CB8 + +#define mmTPC7_QM_IND_GW_APB_STATUS 0xFC8CBC + +#define mmTPC7_QM_GLBL_ERR_ADDR_LO 0xFC8CD0 + +#define mmTPC7_QM_GLBL_ERR_ADDR_HI 0xFC8CD4 + +#define mmTPC7_QM_GLBL_ERR_WDATA 0xFC8CD8 + +#define mmTPC7_QM_GLBL_MEM_INIT_BUSY 0xFC8D00 + +#endif /* ASIC_REG_TPC7_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi.h b/drivers/misc/habanalabs/include/gaudi/gaudi.h new file mode 100644 index 000000000..f9ea897ae --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/gaudi.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2018-2020 HabanaLabs, Ltd. + * All Rights Reserved. 
+ *
+ */
+
+#ifndef GAUDI_H
+
+#define GAUDI_H
+
+#define SRAM_BAR_ID 0
+#define CFG_BAR_ID 2
+#define HBM_BAR_ID 4
+
+#define SRAM_BAR_SIZE 0x4000000ull /* 64MB */
+#define CFG_BAR_SIZE 0x8000000ull /* 128MB */
+
+#define CFG_BASE 0x7FFC000000ull
+#define CFG_SIZE 0x4000000 /* 32MB CFG + 32MB DBG*/
+
+#define SRAM_BASE_ADDR 0x7FF0000000ull
+#define SRAM_SIZE 0x1400000 /* 20MB */
+
+#define SPI_FLASH_BASE_ADDR 0x7FF8000000ull
+
+#define PSOC_SCRATCHPAD_ADDR 0x7FFBFE0000ull
+#define PSOC_SCRATCHPAD_SIZE 0x10000 /* 64KB */
+
+#define PCIE_FW_SRAM_ADDR 0x7FFBFF0000ull
+#define PCIE_FW_SRAM_SIZE 0x8000 /* 32KB */
+
+#define DRAM_PHYS_BASE 0x0ull
+
+#define HOST_PHYS_BASE 0x8000000000ull /* 0.5TB */
+#define HOST_PHYS_SIZE 0x1000000000000ull /* 0.25PB (48 bits) */
+
+#define GAUDI_MSI_ENTRIES 32
+
+#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */
+
+#define MAX_ASID 1024
+
+#define PROT_BITS_OFFS 0xF80
+
+#define MME_NUMBER_OF_MASTER_ENGINES 2
+
+#define MME_NUMBER_OF_SLAVE_ENGINES 2
+
+#define TPC_NUMBER_OF_ENGINES 8
+
+#define DMA_NUMBER_OF_CHANNELS 8
+
+#define NIC_NUMBER_OF_MACROS 5
+
+#define NIC_NUMBER_OF_ENGINES (NIC_NUMBER_OF_MACROS * 2)
+
+#define NUMBER_OF_IF 8
+
+#define DEVICE_CACHE_LINE_SIZE 128
+
+#endif /* GAUDI_H */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
new file mode 100644
index 000000000..9ccba8437
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef __GAUDI_ASYNC_EVENTS_H_
+#define __GAUDI_ASYNC_EVENTS_H_
+
+enum gaudi_async_event_id {
+	GAUDI_EVENT_PCIE_CORE_SERR = 32,
+	GAUDI_EVENT_PCIE_CORE_DERR = 33,
+	GAUDI_EVENT_PCIE_IF_SERR = 34,
+	GAUDI_EVENT_PCIE_IF_DERR = 35,
+	GAUDI_EVENT_PCIE_PHY_SERR = 36,
+	GAUDI_EVENT_PCIE_PHY_DERR = 37,
+	GAUDI_EVENT_TPC0_SERR = 38,
+	GAUDI_EVENT_TPC1_SERR = 39,
+	GAUDI_EVENT_TPC2_SERR = 40,
+	GAUDI_EVENT_TPC3_SERR = 41,
+	GAUDI_EVENT_TPC4_SERR = 42,
+	GAUDI_EVENT_TPC5_SERR = 43,
+	GAUDI_EVENT_TPC6_SERR = 44,
+	GAUDI_EVENT_TPC7_SERR = 45,
+	GAUDI_EVENT_TPC0_DERR = 46,
+	GAUDI_EVENT_TPC1_DERR = 47,
+	GAUDI_EVENT_TPC2_DERR = 48,
+	GAUDI_EVENT_TPC3_DERR = 49,
+	GAUDI_EVENT_TPC4_DERR = 50,
+	GAUDI_EVENT_TPC5_DERR = 51,
+	GAUDI_EVENT_TPC6_DERR = 52,
+	GAUDI_EVENT_TPC7_DERR = 53,
+	GAUDI_EVENT_MME0_ACC_SERR = 54,
+	GAUDI_EVENT_MME0_ACC_DERR = 55,
+	GAUDI_EVENT_MME0_SBAB_SERR = 56,
+	GAUDI_EVENT_MME0_SBAB_DERR = 57,
+	GAUDI_EVENT_MME1_ACC_SERR = 58,
+	GAUDI_EVENT_MME1_ACC_DERR = 59,
+	GAUDI_EVENT_MME1_SBAB_SERR = 60,
+	GAUDI_EVENT_MME1_SBAB_DERR = 61,
+	GAUDI_EVENT_MME2_ACC_SERR = 62,
+	GAUDI_EVENT_MME2_ACC_DERR = 63,
+	GAUDI_EVENT_MME2_SBAB_SERR = 64,
+	GAUDI_EVENT_MME2_SBAB_DERR = 65,
+	GAUDI_EVENT_MME3_ACC_SERR = 66,
+	GAUDI_EVENT_MME3_ACC_DERR = 67,
+	GAUDI_EVENT_MME3_SBAB_SERR = 68,
+	GAUDI_EVENT_MME3_SBAB_DERR = 69,
+	GAUDI_EVENT_DMA0_SERR_ECC = 70,
+	GAUDI_EVENT_DMA1_SERR_ECC = 71,
+	GAUDI_EVENT_DMA2_SERR_ECC = 72,
+	GAUDI_EVENT_DMA3_SERR_ECC = 73,
+	GAUDI_EVENT_DMA4_SERR_ECC = 74,
+	GAUDI_EVENT_DMA5_SERR_ECC = 75,
+	GAUDI_EVENT_DMA6_SERR_ECC = 76,
+	GAUDI_EVENT_DMA7_SERR_ECC = 77,
+	GAUDI_EVENT_DMA0_DERR_ECC = 78,
+	GAUDI_EVENT_DMA1_DERR_ECC = 79,
+	GAUDI_EVENT_DMA2_DERR_ECC = 80,
+	GAUDI_EVENT_DMA3_DERR_ECC = 81,
+	GAUDI_EVENT_DMA4_DERR_ECC = 82,
+	GAUDI_EVENT_DMA5_DERR_ECC = 83,
+	GAUDI_EVENT_DMA6_DERR_ECC = 84,
+	GAUDI_EVENT_DMA7_DERR_ECC = 85,
+	GAUDI_EVENT_CPU_IF_ECC_SERR = 86,
+	GAUDI_EVENT_CPU_IF_ECC_DERR = 87,
+	GAUDI_EVENT_PSOC_MEM_SERR = 88,
+	GAUDI_EVENT_PSOC_CORESIGHT_SERR = 89,
+	GAUDI_EVENT_PSOC_MEM_DERR = 90,
+	GAUDI_EVENT_PSOC_CORESIGHT_DERR = 91,
+	GAUDI_EVENT_SRAM0_SERR = 92,
+	GAUDI_EVENT_SRAM1_SERR = 93,
+	GAUDI_EVENT_SRAM2_SERR = 94,
+	GAUDI_EVENT_SRAM3_SERR = 95,
+	GAUDI_EVENT_SRAM7_SERR = 96,
+	GAUDI_EVENT_SRAM6_SERR = 97,
+	GAUDI_EVENT_SRAM5_SERR = 98,
+	GAUDI_EVENT_SRAM4_SERR = 99,
+	GAUDI_EVENT_SRAM8_SERR = 100,
+	GAUDI_EVENT_SRAM9_SERR = 101,
+	GAUDI_EVENT_SRAM10_SERR = 102,
+	GAUDI_EVENT_SRAM11_SERR = 103,
+	GAUDI_EVENT_SRAM15_SERR = 104,
+	GAUDI_EVENT_SRAM14_SERR = 105,
+	GAUDI_EVENT_SRAM13_SERR = 106,
+	GAUDI_EVENT_SRAM12_SERR = 107,
+	GAUDI_EVENT_SRAM16_SERR = 108,
+	GAUDI_EVENT_SRAM17_SERR = 109,
+	GAUDI_EVENT_SRAM18_SERR = 110,
+	GAUDI_EVENT_SRAM19_SERR = 111,
+	GAUDI_EVENT_SRAM23_SERR = 112,
+	GAUDI_EVENT_SRAM22_SERR = 113,
+	GAUDI_EVENT_SRAM21_SERR = 114,
+	GAUDI_EVENT_SRAM20_SERR = 115,
+	GAUDI_EVENT_SRAM24_SERR = 116,
+	GAUDI_EVENT_SRAM25_SERR = 117,
+	GAUDI_EVENT_SRAM26_SERR = 118,
+	GAUDI_EVENT_SRAM27_SERR = 119,
+	GAUDI_EVENT_SRAM31_SERR = 120,
+	GAUDI_EVENT_SRAM30_SERR = 121,
+	GAUDI_EVENT_SRAM29_SERR = 122,
+	GAUDI_EVENT_SRAM28_SERR = 123,
+	GAUDI_EVENT_SRAM0_DERR = 124,
+	GAUDI_EVENT_SRAM1_DERR = 125,
+	GAUDI_EVENT_SRAM2_DERR = 126,
+	GAUDI_EVENT_SRAM3_DERR = 127,
+	GAUDI_EVENT_SRAM7_DERR = 128,
+	GAUDI_EVENT_SRAM6_DERR = 129,
+	GAUDI_EVENT_SRAM5_DERR = 130,
+	GAUDI_EVENT_SRAM4_DERR = 131,
+	GAUDI_EVENT_SRAM8_DERR = 132,
+	GAUDI_EVENT_SRAM9_DERR = 133,
+	GAUDI_EVENT_SRAM10_DERR = 134,
+	GAUDI_EVENT_SRAM11_DERR = 135,
+	GAUDI_EVENT_SRAM15_DERR = 136,
+	GAUDI_EVENT_SRAM14_DERR = 137,
+	GAUDI_EVENT_SRAM13_DERR = 138,
+	GAUDI_EVENT_SRAM12_DERR = 139,
+	GAUDI_EVENT_SRAM16_DERR = 140,
+	GAUDI_EVENT_SRAM17_DERR = 141,
+	GAUDI_EVENT_SRAM18_DERR = 142,
+	GAUDI_EVENT_SRAM19_DERR = 143,
+	GAUDI_EVENT_SRAM23_DERR = 144,
+	GAUDI_EVENT_SRAM22_DERR = 145,
+	GAUDI_EVENT_SRAM21_DERR = 146,
+	GAUDI_EVENT_SRAM20_DERR = 147,
+	GAUDI_EVENT_SRAM24_DERR = 148,
+	GAUDI_EVENT_SRAM25_DERR = 149,
+	GAUDI_EVENT_SRAM26_DERR = 150,
+	GAUDI_EVENT_SRAM27_DERR = 151,
+	GAUDI_EVENT_SRAM31_DERR = 152,
+	GAUDI_EVENT_SRAM30_DERR = 153,
+	GAUDI_EVENT_SRAM29_DERR = 154,
+	GAUDI_EVENT_SRAM28_DERR = 155,
+	GAUDI_EVENT_NIC0_SERR = 156,
+	GAUDI_EVENT_NIC1_SERR = 157,
+	GAUDI_EVENT_NIC2_SERR = 158,
+	GAUDI_EVENT_NIC3_SERR = 159,
+	GAUDI_EVENT_NIC4_SERR = 160,
+	GAUDI_EVENT_NIC0_DERR = 166,
+	GAUDI_EVENT_NIC1_DERR = 167,
+	GAUDI_EVENT_NIC2_DERR = 168,
+	GAUDI_EVENT_NIC3_DERR = 169,
+	GAUDI_EVENT_NIC4_DERR = 170,
+	GAUDI_EVENT_DMA_IF0_SERR = 176,
+	GAUDI_EVENT_DMA_IF1_SERR = 177,
+	GAUDI_EVENT_DMA_IF2_SERR = 178,
+	GAUDI_EVENT_DMA_IF3_SERR = 179,
+	GAUDI_EVENT_DMA_IF0_DERR = 180,
+	GAUDI_EVENT_DMA_IF1_DERR = 181,
+	GAUDI_EVENT_DMA_IF2_DERR = 182,
+	GAUDI_EVENT_DMA_IF3_DERR = 183,
+	GAUDI_EVENT_GIC500 = 184,
+	GAUDI_EVENT_HBM_0_SERR = 185,
+	GAUDI_EVENT_HBM_1_SERR = 186,
+	GAUDI_EVENT_HBM_2_SERR = 187,
+	GAUDI_EVENT_HBM_3_SERR = 188,
+	GAUDI_EVENT_HBM_0_DERR = 189,
+	GAUDI_EVENT_HBM_1_DERR = 190,
+	GAUDI_EVENT_HBM_2_DERR = 191,
+	GAUDI_EVENT_HBM_3_DERR = 192,
+	GAUDI_EVENT_MMU_SERR = 193,
+	GAUDI_EVENT_MMU_DERR = 194,
+	GAUDI_EVENT_PCIE_DEC = 200,
+	GAUDI_EVENT_TPC0_DEC = 201,
+	GAUDI_EVENT_TPC1_DEC = 203,
+	GAUDI_EVENT_TPC2_DEC = 205,
+	GAUDI_EVENT_TPC3_DEC = 207,
+	GAUDI_EVENT_TPC4_DEC = 209,
+	GAUDI_EVENT_TPC5_DEC = 211,
+	GAUDI_EVENT_TPC6_DEC = 213,
+	GAUDI_EVENT_TPC7_DEC = 215,
+	GAUDI_EVENT_AXI_ECC = 217,
+	GAUDI_EVENT_L2_RAM_ECC = 218,
+	GAUDI_EVENT_MME0_WBC_RSP = 219,
+	GAUDI_EVENT_MME0_SBAB0_RSP = 220,
+	GAUDI_EVENT_MME1_WBC_RSP = 224,
+	GAUDI_EVENT_MME1_SBAB0_RSP = 225,
+	GAUDI_EVENT_MME2_WBC_RSP = 229,
+	GAUDI_EVENT_MME2_SBAB0_RSP = 230,
+	GAUDI_EVENT_MME3_WBC_RSP = 234,
+	GAUDI_EVENT_MME3_SBAB0_RSP = 235,
+	GAUDI_EVENT_PLL0 = 239,
+	GAUDI_EVENT_PLL1 = 240,
+	GAUDI_EVENT_PLL2 = 241,
+	GAUDI_EVENT_PLL3 = 242,
+	GAUDI_EVENT_PLL4 = 243,
+	GAUDI_EVENT_PLL5 = 244,
+	GAUDI_EVENT_PLL6 = 245,
+	GAUDI_EVENT_PLL7 = 246,
+	GAUDI_EVENT_PLL8 = 247,
+	GAUDI_EVENT_PLL9 = 248,
+	GAUDI_EVENT_PLL10 = 249,
+	GAUDI_EVENT_PLL11 = 250,
+	GAUDI_EVENT_PLL12 = 251,
+	GAUDI_EVENT_PLL13 = 252,
+	GAUDI_EVENT_PLL14 = 253,
+	GAUDI_EVENT_PLL15 = 254,
+	GAUDI_EVENT_PLL16 = 255,
+	GAUDI_EVENT_PLL17 = 256,
+	GAUDI_EVENT_CPU_AXI_SPLITTER = 257,
+	GAUDI_EVENT_PSOC_AXI_DEC = 262,
+	GAUDI_EVENT_PSOC_PRSTN_FALL = 263,
+	GAUDI_EVENT_NIC_SEI_0 = 264,
+	GAUDI_EVENT_NIC_SEI_1 = 265,
+	GAUDI_EVENT_NIC_SEI_2 = 266,
+	GAUDI_EVENT_NIC_SEI_3 = 267,
+	GAUDI_EVENT_NIC_SEI_4 = 268,
+	GAUDI_EVENT_PCIE_FLR = 290,
+	GAUDI_EVENT_TPC0_BMON_SPMU = 300,
+	GAUDI_EVENT_TPC0_KRN_ERR = 301,
+	GAUDI_EVENT_TPC1_BMON_SPMU = 306,
+	GAUDI_EVENT_TPC1_KRN_ERR = 307,
+	GAUDI_EVENT_TPC2_BMON_SPMU = 312,
+	GAUDI_EVENT_TPC2_KRN_ERR = 313,
+	GAUDI_EVENT_TPC3_BMON_SPMU = 318,
+	GAUDI_EVENT_TPC3_KRN_ERR = 319,
+	GAUDI_EVENT_TPC4_BMON_SPMU = 324,
+	GAUDI_EVENT_TPC4_KRN_ERR = 325,
+	GAUDI_EVENT_TPC5_BMON_SPMU = 330,
+	GAUDI_EVENT_TPC5_KRN_ERR = 331,
+	GAUDI_EVENT_TPC6_BMON_SPMU = 336,
+	GAUDI_EVENT_TPC6_KRN_ERR = 337,
+	GAUDI_EVENT_TPC7_BMON_SPMU = 342,
+	GAUDI_EVENT_TPC7_KRN_ERR = 343,
+	GAUDI_EVENT_MMU_PAGE_FAULT = 380,
+	GAUDI_EVENT_MMU_WR_PERM = 381,
+	GAUDI_EVENT_DMA_BM_CH0 = 383,
+	GAUDI_EVENT_DMA_BM_CH1 = 384,
+	GAUDI_EVENT_DMA_BM_CH2 = 385,
+	GAUDI_EVENT_DMA_BM_CH3 = 386,
+	GAUDI_EVENT_DMA_BM_CH4 = 387,
+	GAUDI_EVENT_DMA_BM_CH5 = 388,
+	GAUDI_EVENT_DMA_BM_CH6 = 389,
+	GAUDI_EVENT_DMA_BM_CH7 = 390,
+	GAUDI_EVENT_HBM0_SPI_0 = 395,
+	GAUDI_EVENT_HBM0_SPI_1 = 396,
+	GAUDI_EVENT_HBM1_SPI_0 = 399,
+	GAUDI_EVENT_HBM1_SPI_1 = 400,
+	GAUDI_EVENT_HBM2_SPI_0 = 403,
+	GAUDI_EVENT_HBM2_SPI_1 = 404,
+	GAUDI_EVENT_HBM3_SPI_0 = 407,
+	GAUDI_EVENT_HBM3_SPI_1 = 408,
+	GAUDI_EVENT_PSOC_GPIO_U16_0 = 421,
+	GAUDI_EVENT_PI_UPDATE = 484,
+	GAUDI_EVENT_HALT_MACHINE = 485,
+	GAUDI_EVENT_INTS_REGISTER = 486,
+	GAUDI_EVENT_SOFT_RESET = 487,
+	GAUDI_EVENT_RAZWI_OR_ADC = 548,
+	GAUDI_EVENT_TPC0_QM = 572,
+	GAUDI_EVENT_TPC1_QM = 573,
+	GAUDI_EVENT_TPC2_QM = 574,
+	GAUDI_EVENT_TPC3_QM = 575,
+	GAUDI_EVENT_TPC4_QM = 576,
+	GAUDI_EVENT_TPC5_QM = 577,
+	GAUDI_EVENT_TPC6_QM = 578,
+	GAUDI_EVENT_TPC7_QM = 579,
+	GAUDI_EVENT_MME0_QM = 581,
+	GAUDI_EVENT_MME2_QM = 582,
+	GAUDI_EVENT_DMA0_QM = 583,
+	GAUDI_EVENT_DMA1_QM = 584,
+	GAUDI_EVENT_DMA2_QM = 585,
+	GAUDI_EVENT_DMA3_QM = 586,
+	GAUDI_EVENT_DMA4_QM = 587,
+	GAUDI_EVENT_DMA5_QM = 588,
+	GAUDI_EVENT_DMA6_QM = 589,
+	GAUDI_EVENT_DMA7_QM = 590,
+	GAUDI_EVENT_NIC0_QM0 = 594,
+	GAUDI_EVENT_NIC0_QM1 = 595,
+	GAUDI_EVENT_NIC1_QM0 = 596,
+	GAUDI_EVENT_NIC1_QM1 = 597,
+	GAUDI_EVENT_NIC2_QM0 = 598,
+	GAUDI_EVENT_NIC2_QM1 = 599,
+	GAUDI_EVENT_NIC3_QM0 = 600,
+	GAUDI_EVENT_NIC3_QM1 = 601,
+	GAUDI_EVENT_NIC4_QM0 = 602,
+	GAUDI_EVENT_NIC4_QM1 = 603,
+	GAUDI_EVENT_DMA0_CORE = 604,
+	GAUDI_EVENT_DMA1_CORE = 605,
+	GAUDI_EVENT_DMA2_CORE = 606,
+	GAUDI_EVENT_DMA3_CORE = 607,
+	GAUDI_EVENT_DMA4_CORE = 608,
+	GAUDI_EVENT_DMA5_CORE = 609,
+	GAUDI_EVENT_DMA6_CORE = 610,
+	GAUDI_EVENT_DMA7_CORE = 611,
+	GAUDI_EVENT_NIC0_QP0 = 612,
+	GAUDI_EVENT_NIC0_QP1 = 613,
+	GAUDI_EVENT_NIC1_QP0 = 614,
+	GAUDI_EVENT_NIC1_QP1 = 615,
+	GAUDI_EVENT_NIC2_QP0 = 616,
+	GAUDI_EVENT_NIC2_QP1 = 617,
+	GAUDI_EVENT_NIC3_QP0 = 618,
+	GAUDI_EVENT_NIC3_QP1 = 619,
+	GAUDI_EVENT_NIC4_QP0 = 620,
+	GAUDI_EVENT_NIC4_QP1 = 621,
+	GAUDI_EVENT_FIX_POWER_ENV_S = 658,
+	GAUDI_EVENT_FIX_POWER_ENV_E = 659,
+	GAUDI_EVENT_FIX_THERMAL_ENV_S = 660,
+	GAUDI_EVENT_FIX_THERMAL_ENV_E = 661,
+	GAUDI_EVENT_RAZWI_OR_ADC_SW = 662,
+	GAUDI_EVENT_SIZE,
+};
+
+#endif /* __GAUDI_ASYNC_EVENTS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h b/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
new file mode 100644
index 000000000..737176ba0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
@@ -0,0 +1,694 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef __GAUDI_ASYNC_IDS_MAP_EVENTS_EXT_H_
+#define __GAUDI_ASYNC_IDS_MAP_EVENTS_EXT_H_
+
+struct gaudi_async_events_ids_map {
+	int fc_id;
+	int cpu_id;
+	int valid;
+	char name[64];
+};
+
+static struct gaudi_async_events_ids_map gaudi_irq_map_table[] = {
+	{ .fc_id = 0, .cpu_id = 0, .valid = 0, .name = "" },
+	{ .fc_id = 1, .cpu_id = 1, .valid = 0, .name = "" },
+	{ .fc_id = 2, .cpu_id = 2, .valid = 0, .name = "" },
+	{ .fc_id = 3, .cpu_id = 3, .valid = 0, .name = "" },
+	{ .fc_id = 4, .cpu_id = 4, .valid = 0, .name = "" },
+	{ .fc_id = 5, .cpu_id = 5, .valid = 0, .name = "" },
+	{ .fc_id = 6, .cpu_id = 6, .valid = 0, .name = "" },
+	{ .fc_id = 7, .cpu_id = 7, .valid = 0, .name = "" },
+	{ .fc_id = 8, .cpu_id = 8, .valid = 0, .name = "" },
+	{ .fc_id = 9, .cpu_id = 9, .valid = 0, .name = "" },
+	{ .fc_id = 10, .cpu_id = 10, .valid = 0, .name = "" },
+	{ .fc_id = 11, .cpu_id = 11, .valid = 0, .name = "" },
+	{ .fc_id = 12, .cpu_id = 12, .valid = 0, .name = "" },
+	{ .fc_id = 13, .cpu_id = 13, .valid = 0, .name = "" },
+	{ .fc_id = 14, .cpu_id = 14, .valid = 0, .name = "" },
+	{ .fc_id = 15, .cpu_id = 15, .valid = 0, .name = "" },
+	{ .fc_id = 16, .cpu_id = 16, .valid = 0, .name = "" },
+	{ .fc_id = 17, .cpu_id = 17, .valid = 0, .name = "" },
+	{ .fc_id = 18, .cpu_id = 18, .valid = 0, .name = "" },
+	{ .fc_id = 19, .cpu_id = 19, .valid = 0, .name = "" },
+	{ .fc_id = 20, .cpu_id = 20, .valid = 0, .name = "" },
+	{ .fc_id = 21, .cpu_id = 21, .valid = 0, .name = "" },
+	{ .fc_id = 22, .cpu_id = 22, .valid = 0, .name = "" },
+	{ .fc_id = 23, .cpu_id = 23, .valid = 0, .name = "" },
+	{ .fc_id = 24, .cpu_id = 24, .valid = 0, .name = "" },
+	{ .fc_id = 25, .cpu_id = 25, .valid = 0, .name = "" },
+	{ .fc_id = 26, .cpu_id = 26, .valid = 0, .name = "" },
+	{ .fc_id = 27, .cpu_id = 27, .valid = 0, .name = "" },
+	{ .fc_id = 28, .cpu_id = 28, .valid = 0, .name = "" },
+	{ .fc_id = 29, .cpu_id = 29, .valid = 0, .name = "" },
+	{ .fc_id = 30, .cpu_id = 30, .valid = 0, .name = "" },
+	{ .fc_id = 31, .cpu_id = 31, .valid = 0, .name = "" },
+	{ .fc_id = 32, .cpu_id = 32, .valid = 1, .name = "PCIE_CORE_SERR" },
+	{ .fc_id = 33, .cpu_id = 33, .valid = 1, .name = "PCIE_CORE_DERR" },
+	{ .fc_id = 34, .cpu_id = 34, .valid = 1, .name = "PCIE_IF_SERR" },
+	{ .fc_id = 35, .cpu_id = 35, .valid = 1, .name = "PCIE_IF_DERR" },
+	{ .fc_id = 36, .cpu_id = 36, .valid = 1, .name =
"PCIE_PHY_SERR" }, + { .fc_id = 37, .cpu_id = 37, .valid = 1, .name = "PCIE_PHY_DERR" }, + { .fc_id = 38, .cpu_id = 38, .valid = 1, .name = "TPC0_SERR" }, + { .fc_id = 39, .cpu_id = 38, .valid = 1, .name = "TPC1_SERR" }, + { .fc_id = 40, .cpu_id = 38, .valid = 1, .name = "TPC2_SERR" }, + { .fc_id = 41, .cpu_id = 38, .valid = 1, .name = "TPC3_SERR" }, + { .fc_id = 42, .cpu_id = 38, .valid = 1, .name = "TPC4_SERR" }, + { .fc_id = 43, .cpu_id = 38, .valid = 1, .name = "TPC5_SERR" }, + { .fc_id = 44, .cpu_id = 38, .valid = 1, .name = "TPC6_SERR" }, + { .fc_id = 45, .cpu_id = 38, .valid = 1, .name = "TPC7_SERR" }, + { .fc_id = 46, .cpu_id = 39, .valid = 1, .name = "TPC0_DERR" }, + { .fc_id = 47, .cpu_id = 39, .valid = 1, .name = "TPC1_DERR" }, + { .fc_id = 48, .cpu_id = 39, .valid = 1, .name = "TPC2_DERR" }, + { .fc_id = 49, .cpu_id = 39, .valid = 1, .name = "TPC3_DERR" }, + { .fc_id = 50, .cpu_id = 39, .valid = 1, .name = "TPC4_DERR" }, + { .fc_id = 51, .cpu_id = 39, .valid = 1, .name = "TPC5_DERR" }, + { .fc_id = 52, .cpu_id = 39, .valid = 1, .name = "TPC6_DERR" }, + { .fc_id = 53, .cpu_id = 39, .valid = 1, .name = "TPC7_DERR" }, + { .fc_id = 54, .cpu_id = 40, .valid = 1, .name = "MME0_ACC_SERR" }, + { .fc_id = 55, .cpu_id = 41, .valid = 1, .name = "MME0_ACC_DERR" }, + { .fc_id = 56, .cpu_id = 42, .valid = 1, .name = "MME0_SBAB_SERR" }, + { .fc_id = 57, .cpu_id = 43, .valid = 1, .name = "MME0_SBAB_DERR" }, + { .fc_id = 58, .cpu_id = 44, .valid = 1, .name = "MME1_ACC_SERR" }, + { .fc_id = 59, .cpu_id = 45, .valid = 1, .name = "MME1_ACC_DERR" }, + { .fc_id = 60, .cpu_id = 46, .valid = 1, .name = "MME1_SBAB_SERR" }, + { .fc_id = 61, .cpu_id = 47, .valid = 1, .name = "MME1_SBAB_DERR" }, + { .fc_id = 62, .cpu_id = 48, .valid = 1, .name = "MME2_ACC_SERR" }, + { .fc_id = 63, .cpu_id = 49, .valid = 1, .name = "MME2_ACC_DERR" }, + { .fc_id = 64, .cpu_id = 50, .valid = 1, .name = "MME2_SBAB_SERR" }, + { .fc_id = 65, .cpu_id = 51, .valid = 1, .name = "MME2_SBAB_DERR" }, + { .fc_id = 66, .cpu_id = 52, .valid = 1, .name = "MME3_ACC_SERR" }, + { .fc_id = 67, .cpu_id = 53, .valid = 1, .name = "MME3_ACC_DERR" }, + { .fc_id = 68, .cpu_id = 54, .valid = 1, .name = "MME3_SBAB_SERR" }, + { .fc_id = 69, .cpu_id = 55, .valid = 1, .name = "MME3_SBAB_DERR" }, + { .fc_id = 70, .cpu_id = 56, .valid = 1, .name = "DMA0_SERR_ECC" }, + { .fc_id = 71, .cpu_id = 56, .valid = 1, .name = "DMA1_SERR_ECC" }, + { .fc_id = 72, .cpu_id = 56, .valid = 1, .name = "DMA2_SERR_ECC" }, + { .fc_id = 73, .cpu_id = 56, .valid = 1, .name = "DMA3_SERR_ECC" }, + { .fc_id = 74, .cpu_id = 56, .valid = 1, .name = "DMA4_SERR_ECC" }, + { .fc_id = 75, .cpu_id = 56, .valid = 1, .name = "DMA5_SERR_ECC" }, + { .fc_id = 76, .cpu_id = 56, .valid = 1, .name = "DMA6_SERR_ECC" }, + { .fc_id = 77, .cpu_id = 56, .valid = 1, .name = "DMA7_SERR_ECC" }, + { .fc_id = 78, .cpu_id = 57, .valid = 1, .name = "DMA0_DERR_ECC" }, + { .fc_id = 79, .cpu_id = 57, .valid = 1, .name = "DMA1_DERR_ECC" }, + { .fc_id = 80, .cpu_id = 57, .valid = 1, .name = "DMA2_DERR_ECC" }, + { .fc_id = 81, .cpu_id = 57, .valid = 1, .name = "DMA3_DERR_ECC" }, + { .fc_id = 82, .cpu_id = 57, .valid = 1, .name = "DMA4_DERR_ECC" }, + { .fc_id = 83, .cpu_id = 57, .valid = 1, .name = "DMA5_DERR_ECC" }, + { .fc_id = 84, .cpu_id = 57, .valid = 1, .name = "DMA6_DERR_ECC" }, + { .fc_id = 85, .cpu_id = 57, .valid = 1, .name = "DMA7_DERR_ECC" }, + { .fc_id = 86, .cpu_id = 58, .valid = 1, .name = "CPU_IF_ECC_SERR" }, + { .fc_id = 87, .cpu_id = 59, .valid = 1, .name = "CPU_IF_ECC_DERR" }, + { .fc_id = 88, 
.cpu_id = 60, .valid = 1, .name = "PSOC_MEM_SERR" }, + { .fc_id = 89, .cpu_id = 61, .valid = 1, + .name = "PSOC_CORESIGHT_SERR" }, + { .fc_id = 90, .cpu_id = 62, .valid = 1, .name = "PSOC_MEM_DERR" }, + { .fc_id = 91, .cpu_id = 63, .valid = 1, + .name = "PSOC_CORESIGHT_DERR" }, + { .fc_id = 92, .cpu_id = 64, .valid = 1, .name = "SRAM0_SERR" }, + { .fc_id = 93, .cpu_id = 64, .valid = 1, .name = "SRAM1_SERR" }, + { .fc_id = 94, .cpu_id = 64, .valid = 1, .name = "SRAM2_SERR" }, + { .fc_id = 95, .cpu_id = 64, .valid = 1, .name = "SRAM3_SERR" }, + { .fc_id = 96, .cpu_id = 64, .valid = 1, .name = "SRAM7_SERR" }, + { .fc_id = 97, .cpu_id = 64, .valid = 1, .name = "SRAM6_SERR" }, + { .fc_id = 98, .cpu_id = 64, .valid = 1, .name = "SRAM5_SERR" }, + { .fc_id = 99, .cpu_id = 64, .valid = 1, .name = "SRAM4_SERR" }, + { .fc_id = 100, .cpu_id = 64, .valid = 1, .name = "SRAM8_SERR" }, + { .fc_id = 101, .cpu_id = 64, .valid = 1, .name = "SRAM9_SERR" }, + { .fc_id = 102, .cpu_id = 64, .valid = 1, .name = "SRAM10_SERR" }, + { .fc_id = 103, .cpu_id = 64, .valid = 1, .name = "SRAM11_SERR" }, + { .fc_id = 104, .cpu_id = 64, .valid = 1, .name = "SRAM15_SERR" }, + { .fc_id = 105, .cpu_id = 64, .valid = 1, .name = "SRAM14_SERR" }, + { .fc_id = 106, .cpu_id = 64, .valid = 1, .name = "SRAM13_SERR" }, + { .fc_id = 107, .cpu_id = 64, .valid = 1, .name = "SRAM12_SERR" }, + { .fc_id = 108, .cpu_id = 64, .valid = 1, .name = "SRAM16_SERR" }, + { .fc_id = 109, .cpu_id = 64, .valid = 1, .name = "SRAM17_SERR" }, + { .fc_id = 110, .cpu_id = 64, .valid = 1, .name = "SRAM18_SERR" }, + { .fc_id = 111, .cpu_id = 64, .valid = 1, .name = "SRAM19_SERR" }, + { .fc_id = 112, .cpu_id = 64, .valid = 1, .name = "SRAM23_SERR" }, + { .fc_id = 113, .cpu_id = 64, .valid = 1, .name = "SRAM22_SERR" }, + { .fc_id = 114, .cpu_id = 64, .valid = 1, .name = "SRAM21_SERR" }, + { .fc_id = 115, .cpu_id = 64, .valid = 1, .name = "SRAM20_SERR" }, + { .fc_id = 116, .cpu_id = 64, .valid = 1, .name = "SRAM24_SERR" }, + { .fc_id = 117, .cpu_id = 64, .valid = 1, .name = "SRAM25_SERR" }, + { .fc_id = 118, .cpu_id = 64, .valid = 1, .name = "SRAM26_SERR" }, + { .fc_id = 119, .cpu_id = 64, .valid = 1, .name = "SRAM27_SERR" }, + { .fc_id = 120, .cpu_id = 64, .valid = 1, .name = "SRAM31_SERR" }, + { .fc_id = 121, .cpu_id = 64, .valid = 1, .name = "SRAM30_SERR" }, + { .fc_id = 122, .cpu_id = 64, .valid = 1, .name = "SRAM29_SERR" }, + { .fc_id = 123, .cpu_id = 64, .valid = 1, .name = "SRAM28_SERR" }, + { .fc_id = 124, .cpu_id = 65, .valid = 1, .name = "SRAM0_DERR" }, + { .fc_id = 125, .cpu_id = 65, .valid = 1, .name = "SRAM1_DERR" }, + { .fc_id = 126, .cpu_id = 65, .valid = 1, .name = "SRAM2_DERR" }, + { .fc_id = 127, .cpu_id = 65, .valid = 1, .name = "SRAM3_DERR" }, + { .fc_id = 128, .cpu_id = 65, .valid = 1, .name = "SRAM7_DERR" }, + { .fc_id = 129, .cpu_id = 65, .valid = 1, .name = "SRAM6_DERR" }, + { .fc_id = 130, .cpu_id = 65, .valid = 1, .name = "SRAM5_DERR" }, + { .fc_id = 131, .cpu_id = 65, .valid = 1, .name = "SRAM4_DERR" }, + { .fc_id = 132, .cpu_id = 65, .valid = 1, .name = "SRAM8_DERR" }, + { .fc_id = 133, .cpu_id = 65, .valid = 1, .name = "SRAM9_DERR" }, + { .fc_id = 134, .cpu_id = 65, .valid = 1, .name = "SRAM10_DERR" }, + { .fc_id = 135, .cpu_id = 65, .valid = 1, .name = "SRAM11_DERR" }, + { .fc_id = 136, .cpu_id = 65, .valid = 1, .name = "SRAM15_DERR" }, + { .fc_id = 137, .cpu_id = 65, .valid = 1, .name = "SRAM14_DERR" }, + { .fc_id = 138, .cpu_id = 65, .valid = 1, .name = "SRAM13_DERR" }, + { .fc_id = 139, .cpu_id = 65, .valid = 1, .name = 
"SRAM12_DERR" }, + { .fc_id = 140, .cpu_id = 65, .valid = 1, .name = "SRAM16_DERR" }, + { .fc_id = 141, .cpu_id = 65, .valid = 1, .name = "SRAM17_DERR" }, + { .fc_id = 142, .cpu_id = 65, .valid = 1, .name = "SRAM18_DERR" }, + { .fc_id = 143, .cpu_id = 65, .valid = 1, .name = "SRAM19_DERR" }, + { .fc_id = 144, .cpu_id = 65, .valid = 1, .name = "SRAM23_DERR" }, + { .fc_id = 145, .cpu_id = 65, .valid = 1, .name = "SRAM22_DERR" }, + { .fc_id = 146, .cpu_id = 65, .valid = 1, .name = "SRAM21_DERR" }, + { .fc_id = 147, .cpu_id = 65, .valid = 1, .name = "SRAM20_DERR" }, + { .fc_id = 148, .cpu_id = 65, .valid = 1, .name = "SRAM24_DERR" }, + { .fc_id = 149, .cpu_id = 65, .valid = 1, .name = "SRAM25_DERR" }, + { .fc_id = 150, .cpu_id = 65, .valid = 1, .name = "SRAM26_DERR" }, + { .fc_id = 151, .cpu_id = 65, .valid = 1, .name = "SRAM27_DERR" }, + { .fc_id = 152, .cpu_id = 65, .valid = 1, .name = "SRAM31_DERR" }, + { .fc_id = 153, .cpu_id = 65, .valid = 1, .name = "SRAM30_DERR" }, + { .fc_id = 154, .cpu_id = 65, .valid = 1, .name = "SRAM29_DERR" }, + { .fc_id = 155, .cpu_id = 65, .valid = 1, .name = "SRAM28_DERR" }, + { .fc_id = 156, .cpu_id = 66, .valid = 1, .name = "NIC0_SERR" }, + { .fc_id = 157, .cpu_id = 66, .valid = 1, .name = "NIC1_SERR" }, + { .fc_id = 158, .cpu_id = 66, .valid = 1, .name = "NIC2_SERR" }, + { .fc_id = 159, .cpu_id = 66, .valid = 1, .name = "NIC3_SERR" }, + { .fc_id = 160, .cpu_id = 66, .valid = 1, .name = "NIC4_SERR" }, + { .fc_id = 161, .cpu_id = 66, .valid = 0, .name = "" }, + { .fc_id = 162, .cpu_id = 66, .valid = 0, .name = "" }, + { .fc_id = 163, .cpu_id = 66, .valid = 0, .name = "" }, + { .fc_id = 164, .cpu_id = 66, .valid = 0, .name = "" }, + { .fc_id = 165, .cpu_id = 66, .valid = 0, .name = "" }, + { .fc_id = 166, .cpu_id = 67, .valid = 1, .name = "NIC0_DERR" }, + { .fc_id = 167, .cpu_id = 67, .valid = 1, .name = "NIC1_DERR" }, + { .fc_id = 168, .cpu_id = 67, .valid = 1, .name = "NIC2_DERR" }, + { .fc_id = 169, .cpu_id = 67, .valid = 1, .name = "NIC3_DERR" }, + { .fc_id = 170, .cpu_id = 67, .valid = 1, .name = "NIC4_DERR" }, + { .fc_id = 171, .cpu_id = 67, .valid = 0, .name = "" }, + { .fc_id = 172, .cpu_id = 67, .valid = 0, .name = "" }, + { .fc_id = 173, .cpu_id = 67, .valid = 0, .name = "" }, + { .fc_id = 174, .cpu_id = 67, .valid = 0, .name = "" }, + { .fc_id = 175, .cpu_id = 67, .valid = 0, .name = "" }, + { .fc_id = 176, .cpu_id = 68, .valid = 1, .name = "DMA_IF0_SERR" }, + { .fc_id = 177, .cpu_id = 68, .valid = 1, .name = "DMA_IF1_SERR" }, + { .fc_id = 178, .cpu_id = 68, .valid = 1, .name = "DMA_IF2_SERR" }, + { .fc_id = 179, .cpu_id = 68, .valid = 1, .name = "DMA_IF3_SERR" }, + { .fc_id = 180, .cpu_id = 69, .valid = 1, .name = "DMA_IF0_DERR" }, + { .fc_id = 181, .cpu_id = 69, .valid = 1, .name = "DMA_IF1_DERR" }, + { .fc_id = 182, .cpu_id = 69, .valid = 1, .name = "DMA_IF2_DERR" }, + { .fc_id = 183, .cpu_id = 69, .valid = 1, .name = "DMA_IF3_DERR" }, + { .fc_id = 184, .cpu_id = 70, .valid = 1, .name = "GIC500" }, + { .fc_id = 185, .cpu_id = 71, .valid = 1, .name = "HBM_0_SERR" }, + { .fc_id = 186, .cpu_id = 71, .valid = 1, .name = "HBM_1_SERR" }, + { .fc_id = 187, .cpu_id = 71, .valid = 1, .name = "HBM_2_SERR" }, + { .fc_id = 188, .cpu_id = 71, .valid = 1, .name = "HBM_3_SERR" }, + { .fc_id = 189, .cpu_id = 72, .valid = 1, .name = "HBM_0_DERR" }, + { .fc_id = 190, .cpu_id = 72, .valid = 1, .name = "HBM_1_DERR" }, + { .fc_id = 191, .cpu_id = 72, .valid = 1, .name = "HBM_2_DERR" }, + { .fc_id = 192, .cpu_id = 72, .valid = 1, .name = "HBM_3_DERR" }, + { .fc_id = 
193, .cpu_id = 73, .valid = 1, .name = "MMU_SERR" }, + { .fc_id = 194, .cpu_id = 74, .valid = 1, .name = "MMU_DERR" }, + { .fc_id = 195, .cpu_id = 75, .valid = 0, .name = "" }, + { .fc_id = 196, .cpu_id = 76, .valid = 0, .name = "" }, + { .fc_id = 197, .cpu_id = 77, .valid = 0, .name = "" }, + { .fc_id = 198, .cpu_id = 78, .valid = 0, .name = "" }, + { .fc_id = 199, .cpu_id = 79, .valid = 0, .name = "" }, + { .fc_id = 200, .cpu_id = 80, .valid = 1, .name = "PCIE_DEC" }, + { .fc_id = 201, .cpu_id = 81, .valid = 1, .name = "TPC0_DEC" }, + { .fc_id = 202, .cpu_id = 82, .valid = 0, .name = "" }, + { .fc_id = 203, .cpu_id = 83, .valid = 1, .name = "TPC1_DEC" }, + { .fc_id = 204, .cpu_id = 84, .valid = 0, .name = "" }, + { .fc_id = 205, .cpu_id = 85, .valid = 1, .name = "TPC2_DEC" }, + { .fc_id = 206, .cpu_id = 86, .valid = 0, .name = "" }, + { .fc_id = 207, .cpu_id = 87, .valid = 1, .name = "TPC3_DEC" }, + { .fc_id = 208, .cpu_id = 88, .valid = 0, .name = "" }, + { .fc_id = 209, .cpu_id = 89, .valid = 1, .name = "TPC4_DEC" }, + { .fc_id = 210, .cpu_id = 90, .valid = 0, .name = "" }, + { .fc_id = 211, .cpu_id = 91, .valid = 1, .name = "TPC5_DEC" }, + { .fc_id = 212, .cpu_id = 92, .valid = 0, .name = "" }, + { .fc_id = 213, .cpu_id = 93, .valid = 1, .name = "TPC6_DEC" }, + { .fc_id = 214, .cpu_id = 94, .valid = 0, .name = "" }, + { .fc_id = 215, .cpu_id = 95, .valid = 1, .name = "TPC7_DEC" }, + { .fc_id = 216, .cpu_id = 96, .valid = 0, .name = "" }, + { .fc_id = 217, .cpu_id = 97, .valid = 1, .name = "AXI_ECC" }, + { .fc_id = 218, .cpu_id = 98, .valid = 1, .name = "L2_RAM_ECC" }, + { .fc_id = 219, .cpu_id = 99, .valid = 1, .name = "MME0_WBC_RSP" }, + { .fc_id = 220, .cpu_id = 100, .valid = 1, .name = "MME0_SBAB0_RSP" }, + { .fc_id = 221, .cpu_id = 101, .valid = 0, .name = "" }, + { .fc_id = 222, .cpu_id = 102, .valid = 0, .name = "" }, + { .fc_id = 223, .cpu_id = 103, .valid = 0, .name = "" }, + { .fc_id = 224, .cpu_id = 104, .valid = 1, .name = "MME1_WBC_RSP" }, + { .fc_id = 225, .cpu_id = 105, .valid = 1, .name = "MME1_SBAB0_RSP" }, + { .fc_id = 226, .cpu_id = 106, .valid = 0, .name = "" }, + { .fc_id = 227, .cpu_id = 107, .valid = 0, .name = "" }, + { .fc_id = 228, .cpu_id = 108, .valid = 0, .name = "" }, + { .fc_id = 229, .cpu_id = 109, .valid = 1, .name = "MME2_WBC_RSP" }, + { .fc_id = 230, .cpu_id = 110, .valid = 1, .name = "MME2_SBAB0_RSP" }, + { .fc_id = 231, .cpu_id = 111, .valid = 0, .name = "" }, + { .fc_id = 232, .cpu_id = 112, .valid = 0, .name = "" }, + { .fc_id = 233, .cpu_id = 113, .valid = 0, .name = "" }, + { .fc_id = 234, .cpu_id = 114, .valid = 1, .name = "MME3_WBC_RSP" }, + { .fc_id = 235, .cpu_id = 115, .valid = 1, .name = "MME3_SBAB0_RSP" }, + { .fc_id = 236, .cpu_id = 116, .valid = 0, .name = "" }, + { .fc_id = 237, .cpu_id = 117, .valid = 0, .name = "" }, + { .fc_id = 238, .cpu_id = 118, .valid = 0, .name = "" }, + { .fc_id = 239, .cpu_id = 119, .valid = 1, .name = "PLL0" }, + { .fc_id = 240, .cpu_id = 119, .valid = 1, .name = "PLL1" }, + { .fc_id = 241, .cpu_id = 119, .valid = 1, .name = "PLL2" }, + { .fc_id = 242, .cpu_id = 119, .valid = 1, .name = "PLL3" }, + { .fc_id = 243, .cpu_id = 119, .valid = 1, .name = "PLL4" }, + { .fc_id = 244, .cpu_id = 119, .valid = 1, .name = "PLL5" }, + { .fc_id = 245, .cpu_id = 119, .valid = 1, .name = "PLL6" }, + { .fc_id = 246, .cpu_id = 119, .valid = 1, .name = "PLL7" }, + { .fc_id = 247, .cpu_id = 119, .valid = 1, .name = "PLL8" }, + { .fc_id = 248, .cpu_id = 119, .valid = 1, .name = "PLL9" }, + { .fc_id = 249, .cpu_id = 119, .valid = 
1, .name = "PLL10" }, + { .fc_id = 250, .cpu_id = 119, .valid = 1, .name = "PLL11" }, + { .fc_id = 251, .cpu_id = 119, .valid = 1, .name = "PLL12" }, + { .fc_id = 252, .cpu_id = 119, .valid = 1, .name = "PLL13" }, + { .fc_id = 253, .cpu_id = 119, .valid = 1, .name = "PLL14" }, + { .fc_id = 254, .cpu_id = 119, .valid = 1, .name = "PLL15" }, + { .fc_id = 255, .cpu_id = 119, .valid = 1, .name = "PLL16" }, + { .fc_id = 256, .cpu_id = 119, .valid = 1, .name = "PLL17" }, + { .fc_id = 257, .cpu_id = 120, .valid = 1, + .name = "CPU_AXI_SPLITTER" }, + { .fc_id = 258, .cpu_id = 121, .valid = 0, .name = "" }, + { .fc_id = 259, .cpu_id = 122, .valid = 0, .name = "" }, + { .fc_id = 260, .cpu_id = 123, .valid = 0, .name = "" }, + { .fc_id = 261, .cpu_id = 124, .valid = 0, .name = "" }, + { .fc_id = 262, .cpu_id = 125, .valid = 1, .name = "PSOC_AXI_DEC" }, + { .fc_id = 263, .cpu_id = 126, .valid = 1, .name = "PSOC_PRSTN_FALL" }, + { .fc_id = 264, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_0" }, + { .fc_id = 265, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_1" }, + { .fc_id = 266, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_2" }, + { .fc_id = 267, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_3" }, + { .fc_id = 268, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_4" }, + { .fc_id = 269, .cpu_id = 128, .valid = 0, .name = "" }, + { .fc_id = 270, .cpu_id = 128, .valid = 0, .name = "" }, + { .fc_id = 271, .cpu_id = 128, .valid = 0, .name = "" }, + { .fc_id = 272, .cpu_id = 128, .valid = 0, .name = "" }, + { .fc_id = 273, .cpu_id = 128, .valid = 0, .name = "" }, + { .fc_id = 274, .cpu_id = 128, .valid = 0, .name = "" }, + { .fc_id = 275, .cpu_id = 128, .valid = 0, .name = "" }, + { .fc_id = 276, .cpu_id = 128, .valid = 0, .name = "" }, + { .fc_id = 277, .cpu_id = 129, .valid = 0, .name = "" }, + { .fc_id = 278, .cpu_id = 129, .valid = 0, .name = "" }, + { .fc_id = 279, .cpu_id = 129, .valid = 0, .name = "" }, + { .fc_id = 280, .cpu_id = 129, .valid = 0, .name = "" }, + { .fc_id = 281, .cpu_id = 130, .valid = 0, .name = "" }, + { .fc_id = 282, .cpu_id = 131, .valid = 0, .name = "" }, + { .fc_id = 283, .cpu_id = 132, .valid = 0, .name = "" }, + { .fc_id = 284, .cpu_id = 133, .valid = 0, .name = "" }, + { .fc_id = 285, .cpu_id = 134, .valid = 0, .name = "" }, + { .fc_id = 286, .cpu_id = 135, .valid = 0, .name = "" }, + { .fc_id = 287, .cpu_id = 136, .valid = 0, .name = "" }, + { .fc_id = 288, .cpu_id = 137, .valid = 0, .name = "" }, + { .fc_id = 289, .cpu_id = 138, .valid = 0, .name = "" }, + { .fc_id = 290, .cpu_id = 139, .valid = 1, .name = "PCIE_FLR" }, + { .fc_id = 291, .cpu_id = 140, .valid = 0, .name = "" }, + { .fc_id = 292, .cpu_id = 141, .valid = 0, .name = "" }, + { .fc_id = 293, .cpu_id = 142, .valid = 0, .name = "" }, + { .fc_id = 294, .cpu_id = 143, .valid = 0, .name = "" }, + { .fc_id = 295, .cpu_id = 144, .valid = 0, .name = "" }, + { .fc_id = 296, .cpu_id = 145, .valid = 0, .name = "" }, + { .fc_id = 297, .cpu_id = 146, .valid = 0, .name = "" }, + { .fc_id = 298, .cpu_id = 147, .valid = 0, .name = "" }, + { .fc_id = 299, .cpu_id = 148, .valid = 0, .name = "" }, + { .fc_id = 300, .cpu_id = 149, .valid = 1, .name = "TPC0_BMON_SPMU" }, + { .fc_id = 301, .cpu_id = 150, .valid = 1, .name = "TPC0_KRN_ERR" }, + { .fc_id = 302, .cpu_id = 151, .valid = 0, .name = "" }, + { .fc_id = 303, .cpu_id = 152, .valid = 0, .name = "" }, + { .fc_id = 304, .cpu_id = 153, .valid = 0, .name = "" }, + { .fc_id = 305, .cpu_id = 154, .valid = 0, .name = "" }, + { .fc_id = 306, .cpu_id = 155, .valid = 1, .name = "TPC1_BMON_SPMU" 
}, + { .fc_id = 307, .cpu_id = 156, .valid = 1, .name = "TPC1_KRN_ERR" }, + { .fc_id = 308, .cpu_id = 157, .valid = 0, .name = "" }, + { .fc_id = 309, .cpu_id = 158, .valid = 0, .name = "" }, + { .fc_id = 310, .cpu_id = 159, .valid = 0, .name = "" }, + { .fc_id = 311, .cpu_id = 160, .valid = 0, .name = "" }, + { .fc_id = 312, .cpu_id = 161, .valid = 1, .name = "TPC2_BMON_SPMU" }, + { .fc_id = 313, .cpu_id = 162, .valid = 1, .name = "TPC2_KRN_ERR" }, + { .fc_id = 314, .cpu_id = 163, .valid = 0, .name = "" }, + { .fc_id = 315, .cpu_id = 164, .valid = 0, .name = "" }, + { .fc_id = 316, .cpu_id = 165, .valid = 0, .name = "" }, + { .fc_id = 317, .cpu_id = 166, .valid = 0, .name = "" }, + { .fc_id = 318, .cpu_id = 167, .valid = 1, .name = "TPC3_BMON_SPMU" }, + { .fc_id = 319, .cpu_id = 168, .valid = 1, .name = "TPC3_KRN_ERR" }, + { .fc_id = 320, .cpu_id = 169, .valid = 0, .name = "" }, + { .fc_id = 321, .cpu_id = 170, .valid = 0, .name = "" }, + { .fc_id = 322, .cpu_id = 171, .valid = 0, .name = "" }, + { .fc_id = 323, .cpu_id = 172, .valid = 0, .name = "" }, + { .fc_id = 324, .cpu_id = 173, .valid = 1, .name = "TPC4_BMON_SPMU" }, + { .fc_id = 325, .cpu_id = 174, .valid = 1, .name = "TPC4_KRN_ERR" }, + { .fc_id = 326, .cpu_id = 175, .valid = 0, .name = "" }, + { .fc_id = 327, .cpu_id = 176, .valid = 0, .name = "" }, + { .fc_id = 328, .cpu_id = 177, .valid = 0, .name = "" }, + { .fc_id = 329, .cpu_id = 178, .valid = 0, .name = "" }, + { .fc_id = 330, .cpu_id = 179, .valid = 1, .name = "TPC5_BMON_SPMU" }, + { .fc_id = 331, .cpu_id = 180, .valid = 1, .name = "TPC5_KRN_ERR" }, + { .fc_id = 332, .cpu_id = 181, .valid = 0, .name = "" }, + { .fc_id = 333, .cpu_id = 182, .valid = 0, .name = "" }, + { .fc_id = 334, .cpu_id = 183, .valid = 0, .name = "" }, + { .fc_id = 335, .cpu_id = 184, .valid = 0, .name = "" }, + { .fc_id = 336, .cpu_id = 185, .valid = 1, .name = "TPC6_BMON_SPMU" }, + { .fc_id = 337, .cpu_id = 186, .valid = 1, .name = "TPC6_KRN_ERR" }, + { .fc_id = 338, .cpu_id = 187, .valid = 0, .name = "" }, + { .fc_id = 339, .cpu_id = 188, .valid = 0, .name = "" }, + { .fc_id = 340, .cpu_id = 189, .valid = 0, .name = "" }, + { .fc_id = 341, .cpu_id = 190, .valid = 0, .name = "" }, + { .fc_id = 342, .cpu_id = 191, .valid = 1, .name = "TPC7_BMON_SPMU" }, + { .fc_id = 343, .cpu_id = 192, .valid = 1, .name = "TPC7_KRN_ERR" }, + { .fc_id = 344, .cpu_id = 193, .valid = 0, .name = "" }, + { .fc_id = 345, .cpu_id = 194, .valid = 0, .name = "" }, + { .fc_id = 346, .cpu_id = 195, .valid = 0, .name = "" }, + { .fc_id = 347, .cpu_id = 196, .valid = 0, .name = "" }, + { .fc_id = 348, .cpu_id = 197, .valid = 0, .name = "" }, + { .fc_id = 349, .cpu_id = 198, .valid = 0, .name = "" }, + { .fc_id = 350, .cpu_id = 199, .valid = 0, .name = "" }, + { .fc_id = 351, .cpu_id = 200, .valid = 0, .name = "" }, + { .fc_id = 352, .cpu_id = 201, .valid = 0, .name = "" }, + { .fc_id = 353, .cpu_id = 202, .valid = 0, .name = "" }, + { .fc_id = 354, .cpu_id = 203, .valid = 0, .name = "" }, + { .fc_id = 355, .cpu_id = 204, .valid = 0, .name = "" }, + { .fc_id = 356, .cpu_id = 205, .valid = 0, .name = "" }, + { .fc_id = 357, .cpu_id = 206, .valid = 0, .name = "" }, + { .fc_id = 358, .cpu_id = 207, .valid = 0, .name = "" }, + { .fc_id = 359, .cpu_id = 208, .valid = 0, .name = "" }, + { .fc_id = 360, .cpu_id = 209, .valid = 0, .name = "" }, + { .fc_id = 361, .cpu_id = 210, .valid = 0, .name = "" }, + { .fc_id = 362, .cpu_id = 211, .valid = 0, .name = "" }, + { .fc_id = 363, .cpu_id = 212, .valid = 0, .name = "" }, + { .fc_id = 364, 
.cpu_id = 213, .valid = 0, .name = "" }, + { .fc_id = 365, .cpu_id = 214, .valid = 0, .name = "" }, + { .fc_id = 366, .cpu_id = 215, .valid = 0, .name = "" }, + { .fc_id = 367, .cpu_id = 216, .valid = 0, .name = "" }, + { .fc_id = 368, .cpu_id = 217, .valid = 0, .name = "" }, + { .fc_id = 369, .cpu_id = 218, .valid = 0, .name = "" }, + { .fc_id = 370, .cpu_id = 219, .valid = 0, .name = "" }, + { .fc_id = 371, .cpu_id = 220, .valid = 0, .name = "" }, + { .fc_id = 372, .cpu_id = 221, .valid = 0, .name = "" }, + { .fc_id = 373, .cpu_id = 222, .valid = 0, .name = "" }, + { .fc_id = 374, .cpu_id = 223, .valid = 0, .name = "" }, + { .fc_id = 375, .cpu_id = 224, .valid = 0, .name = "" }, + { .fc_id = 376, .cpu_id = 225, .valid = 0, .name = "" }, + { .fc_id = 377, .cpu_id = 226, .valid = 0, .name = "" }, + { .fc_id = 378, .cpu_id = 227, .valid = 0, .name = "" }, + { .fc_id = 379, .cpu_id = 228, .valid = 0, .name = "" }, + { .fc_id = 380, .cpu_id = 229, .valid = 1, .name = "MMU_PAGE_FAULT" }, + { .fc_id = 381, .cpu_id = 230, .valid = 1, .name = "MMU_WR_PERM" }, + { .fc_id = 382, .cpu_id = 231, .valid = 0, .name = "" }, + { .fc_id = 383, .cpu_id = 232, .valid = 1, .name = "DMA_BM_CH0" }, + { .fc_id = 384, .cpu_id = 233, .valid = 1, .name = "DMA_BM_CH1" }, + { .fc_id = 385, .cpu_id = 234, .valid = 1, .name = "DMA_BM_CH2" }, + { .fc_id = 386, .cpu_id = 235, .valid = 1, .name = "DMA_BM_CH3" }, + { .fc_id = 387, .cpu_id = 236, .valid = 1, .name = "DMA_BM_CH4" }, + { .fc_id = 388, .cpu_id = 237, .valid = 1, .name = "DMA_BM_CH5" }, + { .fc_id = 389, .cpu_id = 238, .valid = 1, .name = "DMA_BM_CH6" }, + { .fc_id = 390, .cpu_id = 239, .valid = 1, .name = "DMA_BM_CH7" }, + { .fc_id = 391, .cpu_id = 240, .valid = 0, .name = "" }, + { .fc_id = 392, .cpu_id = 241, .valid = 0, .name = "" }, + { .fc_id = 393, .cpu_id = 242, .valid = 0, .name = "" }, + { .fc_id = 394, .cpu_id = 243, .valid = 0, .name = "" }, + { .fc_id = 395, .cpu_id = 244, .valid = 1, .name = "HBM0_SPI_0" }, + { .fc_id = 396, .cpu_id = 245, .valid = 1, .name = "HBM0_SPI_1" }, + { .fc_id = 397, .cpu_id = 246, .valid = 0, .name = "" }, + { .fc_id = 398, .cpu_id = 247, .valid = 0, .name = "" }, + { .fc_id = 399, .cpu_id = 248, .valid = 1, .name = "HBM1_SPI_0" }, + { .fc_id = 400, .cpu_id = 249, .valid = 1, .name = "HBM1_SPI_1" }, + { .fc_id = 401, .cpu_id = 250, .valid = 0, .name = "" }, + { .fc_id = 402, .cpu_id = 251, .valid = 0, .name = "" }, + { .fc_id = 403, .cpu_id = 252, .valid = 1, .name = "HBM2_SPI_0" }, + { .fc_id = 404, .cpu_id = 253, .valid = 1, .name = "HBM2_SPI_1" }, + { .fc_id = 405, .cpu_id = 254, .valid = 0, .name = "" }, + { .fc_id = 406, .cpu_id = 255, .valid = 0, .name = "" }, + { .fc_id = 407, .cpu_id = 256, .valid = 1, .name = "HBM3_SPI_0" }, + { .fc_id = 408, .cpu_id = 257, .valid = 1, .name = "HBM3_SPI_1" }, + { .fc_id = 409, .cpu_id = 258, .valid = 0, .name = "" }, + { .fc_id = 410, .cpu_id = 259, .valid = 0, .name = "" }, + { .fc_id = 411, .cpu_id = 260, .valid = 0, .name = "" }, + { .fc_id = 412, .cpu_id = 261, .valid = 0, .name = "" }, + { .fc_id = 413, .cpu_id = 262, .valid = 0, .name = "" }, + { .fc_id = 414, .cpu_id = 263, .valid = 0, .name = "" }, + { .fc_id = 415, .cpu_id = 264, .valid = 0, .name = "" }, + { .fc_id = 416, .cpu_id = 265, .valid = 0, .name = "" }, + { .fc_id = 417, .cpu_id = 266, .valid = 0, .name = "" }, + { .fc_id = 418, .cpu_id = 267, .valid = 0, .name = "" }, + { .fc_id = 419, .cpu_id = 268, .valid = 0, .name = "" }, + { .fc_id = 420, .cpu_id = 269, .valid = 0, .name = "" }, + { .fc_id = 421, 
.cpu_id = 270, .valid = 1, .name = "PSOC_GPIO_U16_0" }, + { .fc_id = 422, .cpu_id = 271, .valid = 0, .name = "" }, + { .fc_id = 423, .cpu_id = 272, .valid = 0, .name = "" }, + { .fc_id = 424, .cpu_id = 273, .valid = 0, .name = "" }, + { .fc_id = 425, .cpu_id = 274, .valid = 0, .name = "" }, + { .fc_id = 426, .cpu_id = 275, .valid = 0, .name = "" }, + { .fc_id = 427, .cpu_id = 276, .valid = 0, .name = "" }, + { .fc_id = 428, .cpu_id = 277, .valid = 0, .name = "" }, + { .fc_id = 429, .cpu_id = 278, .valid = 0, .name = "" }, + { .fc_id = 430, .cpu_id = 279, .valid = 0, .name = "" }, + { .fc_id = 431, .cpu_id = 280, .valid = 0, .name = "" }, + { .fc_id = 432, .cpu_id = 281, .valid = 0, .name = "" }, + { .fc_id = 433, .cpu_id = 282, .valid = 0, .name = "" }, + { .fc_id = 434, .cpu_id = 283, .valid = 0, .name = "" }, + { .fc_id = 435, .cpu_id = 284, .valid = 0, .name = "" }, + { .fc_id = 436, .cpu_id = 285, .valid = 0, .name = "" }, + { .fc_id = 437, .cpu_id = 286, .valid = 0, .name = "" }, + { .fc_id = 438, .cpu_id = 287, .valid = 0, .name = "" }, + { .fc_id = 439, .cpu_id = 288, .valid = 0, .name = "" }, + { .fc_id = 440, .cpu_id = 289, .valid = 0, .name = "" }, + { .fc_id = 441, .cpu_id = 290, .valid = 0, .name = "" }, + { .fc_id = 442, .cpu_id = 291, .valid = 0, .name = "" }, + { .fc_id = 443, .cpu_id = 292, .valid = 0, .name = "" }, + { .fc_id = 444, .cpu_id = 293, .valid = 0, .name = "" }, + { .fc_id = 445, .cpu_id = 294, .valid = 0, .name = "" }, + { .fc_id = 446, .cpu_id = 295, .valid = 0, .name = "" }, + { .fc_id = 447, .cpu_id = 296, .valid = 0, .name = "" }, + { .fc_id = 448, .cpu_id = 297, .valid = 0, .name = "" }, + { .fc_id = 449, .cpu_id = 298, .valid = 0, .name = "" }, + { .fc_id = 450, .cpu_id = 299, .valid = 0, .name = "" }, + { .fc_id = 451, .cpu_id = 300, .valid = 0, .name = "" }, + { .fc_id = 452, .cpu_id = 301, .valid = 0, .name = "" }, + { .fc_id = 453, .cpu_id = 302, .valid = 0, .name = "" }, + { .fc_id = 454, .cpu_id = 303, .valid = 0, .name = "" }, + { .fc_id = 455, .cpu_id = 304, .valid = 0, .name = "" }, + { .fc_id = 456, .cpu_id = 305, .valid = 0, .name = "" }, + { .fc_id = 457, .cpu_id = 306, .valid = 0, .name = "" }, + { .fc_id = 458, .cpu_id = 307, .valid = 0, .name = "" }, + { .fc_id = 459, .cpu_id = 308, .valid = 0, .name = "" }, + { .fc_id = 460, .cpu_id = 309, .valid = 0, .name = "" }, + { .fc_id = 461, .cpu_id = 310, .valid = 0, .name = "" }, + { .fc_id = 462, .cpu_id = 311, .valid = 0, .name = "" }, + { .fc_id = 463, .cpu_id = 312, .valid = 0, .name = "" }, + { .fc_id = 464, .cpu_id = 313, .valid = 0, .name = "" }, + { .fc_id = 465, .cpu_id = 314, .valid = 0, .name = "" }, + { .fc_id = 466, .cpu_id = 315, .valid = 0, .name = "" }, + { .fc_id = 467, .cpu_id = 316, .valid = 0, .name = "" }, + { .fc_id = 468, .cpu_id = 317, .valid = 0, .name = "" }, + { .fc_id = 469, .cpu_id = 318, .valid = 0, .name = "" }, + { .fc_id = 470, .cpu_id = 319, .valid = 0, .name = "" }, + { .fc_id = 471, .cpu_id = 320, .valid = 0, .name = "" }, + { .fc_id = 472, .cpu_id = 321, .valid = 0, .name = "" }, + { .fc_id = 473, .cpu_id = 322, .valid = 0, .name = "" }, + { .fc_id = 474, .cpu_id = 323, .valid = 0, .name = "" }, + { .fc_id = 475, .cpu_id = 324, .valid = 0, .name = "" }, + { .fc_id = 476, .cpu_id = 325, .valid = 0, .name = "" }, + { .fc_id = 477, .cpu_id = 326, .valid = 0, .name = "" }, + { .fc_id = 478, .cpu_id = 327, .valid = 0, .name = "" }, + { .fc_id = 479, .cpu_id = 328, .valid = 0, .name = "" }, + { .fc_id = 480, .cpu_id = 329, .valid = 0, .name = "" }, + { .fc_id = 481, 
.cpu_id = 330, .valid = 0, .name = "" }, + { .fc_id = 482, .cpu_id = 331, .valid = 0, .name = "" }, + { .fc_id = 483, .cpu_id = 332, .valid = 0, .name = "" }, + { .fc_id = 484, .cpu_id = 333, .valid = 1, .name = "PI_UPDATE" }, + { .fc_id = 485, .cpu_id = 334, .valid = 1, .name = "HALT_MACHINE" }, + { .fc_id = 486, .cpu_id = 335, .valid = 1, .name = "INTS_REGISTER" }, + { .fc_id = 487, .cpu_id = 336, .valid = 1, .name = "SOFT_RESET" }, + { .fc_id = 488, .cpu_id = 337, .valid = 0, .name = "" }, + { .fc_id = 489, .cpu_id = 338, .valid = 0, .name = "" }, + { .fc_id = 490, .cpu_id = 339, .valid = 0, .name = "" }, + { .fc_id = 491, .cpu_id = 340, .valid = 0, .name = "" }, + { .fc_id = 492, .cpu_id = 341, .valid = 0, .name = "" }, + { .fc_id = 493, .cpu_id = 342, .valid = 0, .name = "" }, + { .fc_id = 494, .cpu_id = 343, .valid = 0, .name = "" }, + { .fc_id = 495, .cpu_id = 344, .valid = 0, .name = "" }, + { .fc_id = 496, .cpu_id = 345, .valid = 0, .name = "" }, + { .fc_id = 497, .cpu_id = 346, .valid = 0, .name = "" }, + { .fc_id = 498, .cpu_id = 347, .valid = 0, .name = "" }, + { .fc_id = 499, .cpu_id = 348, .valid = 0, .name = "" }, + { .fc_id = 500, .cpu_id = 349, .valid = 0, .name = "" }, + { .fc_id = 501, .cpu_id = 350, .valid = 0, .name = "" }, + { .fc_id = 502, .cpu_id = 351, .valid = 0, .name = "" }, + { .fc_id = 503, .cpu_id = 352, .valid = 0, .name = "" }, + { .fc_id = 504, .cpu_id = 353, .valid = 0, .name = "" }, + { .fc_id = 505, .cpu_id = 354, .valid = 0, .name = "" }, + { .fc_id = 506, .cpu_id = 355, .valid = 0, .name = "" }, + { .fc_id = 507, .cpu_id = 356, .valid = 0, .name = "" }, + { .fc_id = 508, .cpu_id = 357, .valid = 0, .name = "" }, + { .fc_id = 509, .cpu_id = 358, .valid = 0, .name = "" }, + { .fc_id = 510, .cpu_id = 359, .valid = 0, .name = "" }, + { .fc_id = 511, .cpu_id = 360, .valid = 0, .name = "" }, + { .fc_id = 512, .cpu_id = 361, .valid = 0, .name = "" }, + { .fc_id = 513, .cpu_id = 362, .valid = 0, .name = "" }, + { .fc_id = 514, .cpu_id = 363, .valid = 0, .name = "" }, + { .fc_id = 515, .cpu_id = 364, .valid = 0, .name = "" }, + { .fc_id = 516, .cpu_id = 365, .valid = 0, .name = "" }, + { .fc_id = 517, .cpu_id = 366, .valid = 0, .name = "" }, + { .fc_id = 518, .cpu_id = 367, .valid = 0, .name = "" }, + { .fc_id = 519, .cpu_id = 368, .valid = 0, .name = "" }, + { .fc_id = 520, .cpu_id = 369, .valid = 0, .name = "" }, + { .fc_id = 521, .cpu_id = 370, .valid = 0, .name = "" }, + { .fc_id = 522, .cpu_id = 371, .valid = 0, .name = "" }, + { .fc_id = 523, .cpu_id = 372, .valid = 0, .name = "" }, + { .fc_id = 524, .cpu_id = 373, .valid = 0, .name = "" }, + { .fc_id = 525, .cpu_id = 374, .valid = 0, .name = "" }, + { .fc_id = 526, .cpu_id = 375, .valid = 0, .name = "" }, + { .fc_id = 527, .cpu_id = 376, .valid = 0, .name = "" }, + { .fc_id = 528, .cpu_id = 377, .valid = 0, .name = "" }, + { .fc_id = 529, .cpu_id = 378, .valid = 0, .name = "" }, + { .fc_id = 530, .cpu_id = 379, .valid = 0, .name = "" }, + { .fc_id = 531, .cpu_id = 380, .valid = 0, .name = "" }, + { .fc_id = 532, .cpu_id = 381, .valid = 0, .name = "" }, + { .fc_id = 533, .cpu_id = 382, .valid = 0, .name = "" }, + { .fc_id = 534, .cpu_id = 383, .valid = 0, .name = "" }, + { .fc_id = 535, .cpu_id = 384, .valid = 0, .name = "" }, + { .fc_id = 536, .cpu_id = 385, .valid = 0, .name = "" }, + { .fc_id = 537, .cpu_id = 386, .valid = 0, .name = "" }, + { .fc_id = 538, .cpu_id = 387, .valid = 0, .name = "" }, + { .fc_id = 539, .cpu_id = 388, .valid = 0, .name = "" }, + { .fc_id = 540, .cpu_id = 389, .valid = 0, 
.name = "" }, + { .fc_id = 541, .cpu_id = 390, .valid = 0, .name = "" }, + { .fc_id = 542, .cpu_id = 391, .valid = 0, .name = "" }, + { .fc_id = 543, .cpu_id = 392, .valid = 0, .name = "" }, + { .fc_id = 544, .cpu_id = 393, .valid = 0, .name = "" }, + { .fc_id = 545, .cpu_id = 394, .valid = 0, .name = "" }, + { .fc_id = 546, .cpu_id = 395, .valid = 0, .name = "" }, + { .fc_id = 547, .cpu_id = 396, .valid = 0, .name = "" }, + { .fc_id = 548, .cpu_id = 397, .valid = 1, .name = "RAZWI_OR_ADC" }, + { .fc_id = 549, .cpu_id = 398, .valid = 0, .name = "" }, + { .fc_id = 550, .cpu_id = 399, .valid = 0, .name = "" }, + { .fc_id = 551, .cpu_id = 400, .valid = 0, .name = "" }, + { .fc_id = 552, .cpu_id = 401, .valid = 0, .name = "" }, + { .fc_id = 553, .cpu_id = 402, .valid = 0, .name = "" }, + { .fc_id = 554, .cpu_id = 403, .valid = 0, .name = "" }, + { .fc_id = 555, .cpu_id = 404, .valid = 0, .name = "" }, + { .fc_id = 556, .cpu_id = 405, .valid = 0, .name = "" }, + { .fc_id = 557, .cpu_id = 406, .valid = 0, .name = "" }, + { .fc_id = 558, .cpu_id = 407, .valid = 0, .name = "" }, + { .fc_id = 559, .cpu_id = 408, .valid = 0, .name = "" }, + { .fc_id = 560, .cpu_id = 409, .valid = 0, .name = "" }, + { .fc_id = 561, .cpu_id = 410, .valid = 0, .name = "" }, + { .fc_id = 562, .cpu_id = 411, .valid = 0, .name = "" }, + { .fc_id = 563, .cpu_id = 412, .valid = 0, .name = "" }, + { .fc_id = 564, .cpu_id = 413, .valid = 0, .name = "" }, + { .fc_id = 565, .cpu_id = 414, .valid = 0, .name = "" }, + { .fc_id = 566, .cpu_id = 415, .valid = 0, .name = "" }, + { .fc_id = 567, .cpu_id = 416, .valid = 0, .name = "" }, + { .fc_id = 568, .cpu_id = 417, .valid = 0, .name = "" }, + { .fc_id = 569, .cpu_id = 418, .valid = 0, .name = "" }, + { .fc_id = 570, .cpu_id = 419, .valid = 0, .name = "" }, + { .fc_id = 571, .cpu_id = 420, .valid = 0, .name = "" }, + { .fc_id = 572, .cpu_id = 421, .valid = 1, .name = "TPC0_QM" }, + { .fc_id = 573, .cpu_id = 422, .valid = 1, .name = "TPC1_QM" }, + { .fc_id = 574, .cpu_id = 423, .valid = 1, .name = "TPC2_QM" }, + { .fc_id = 575, .cpu_id = 424, .valid = 1, .name = "TPC3_QM" }, + { .fc_id = 576, .cpu_id = 425, .valid = 1, .name = "TPC4_QM" }, + { .fc_id = 577, .cpu_id = 426, .valid = 1, .name = "TPC5_QM" }, + { .fc_id = 578, .cpu_id = 427, .valid = 1, .name = "TPC6_QM" }, + { .fc_id = 579, .cpu_id = 428, .valid = 1, .name = "TPC7_QM" }, + { .fc_id = 580, .cpu_id = 429, .valid = 0, .name = "" }, + { .fc_id = 581, .cpu_id = 430, .valid = 1, .name = "MME0_QM" }, + { .fc_id = 582, .cpu_id = 431, .valid = 1, .name = "MME2_QM" }, + { .fc_id = 583, .cpu_id = 432, .valid = 1, .name = "DMA0_QM" }, + { .fc_id = 584, .cpu_id = 433, .valid = 1, .name = "DMA1_QM" }, + { .fc_id = 585, .cpu_id = 434, .valid = 1, .name = "DMA2_QM" }, + { .fc_id = 586, .cpu_id = 435, .valid = 1, .name = "DMA3_QM" }, + { .fc_id = 587, .cpu_id = 436, .valid = 1, .name = "DMA4_QM" }, + { .fc_id = 588, .cpu_id = 437, .valid = 1, .name = "DMA5_QM" }, + { .fc_id = 589, .cpu_id = 438, .valid = 1, .name = "DMA6_QM" }, + { .fc_id = 590, .cpu_id = 439, .valid = 1, .name = "DMA7_QM" }, + { .fc_id = 591, .cpu_id = 440, .valid = 0, .name = "" }, + { .fc_id = 592, .cpu_id = 441, .valid = 0, .name = "" }, + { .fc_id = 593, .cpu_id = 442, .valid = 0, .name = "" }, + { .fc_id = 594, .cpu_id = 443, .valid = 1, .name = "NIC0_QM0" }, + { .fc_id = 595, .cpu_id = 444, .valid = 1, .name = "NIC0_QM1" }, + { .fc_id = 596, .cpu_id = 445, .valid = 1, .name = "NIC1_QM0" }, + { .fc_id = 597, .cpu_id = 446, .valid = 1, .name = "NIC1_QM1" }, + { 
.fc_id = 598, .cpu_id = 447, .valid = 1, .name = "NIC2_QM0" }, + { .fc_id = 599, .cpu_id = 448, .valid = 1, .name = "NIC2_QM1" }, + { .fc_id = 600, .cpu_id = 449, .valid = 1, .name = "NIC3_QM0" }, + { .fc_id = 601, .cpu_id = 450, .valid = 1, .name = "NIC3_QM1" }, + { .fc_id = 602, .cpu_id = 451, .valid = 1, .name = "NIC4_QM0" }, + { .fc_id = 603, .cpu_id = 452, .valid = 1, .name = "NIC4_QM1" }, + { .fc_id = 604, .cpu_id = 453, .valid = 1, .name = "DMA0_CORE" }, + { .fc_id = 605, .cpu_id = 454, .valid = 1, .name = "DMA1_CORE" }, + { .fc_id = 606, .cpu_id = 455, .valid = 1, .name = "DMA2_CORE" }, + { .fc_id = 607, .cpu_id = 456, .valid = 1, .name = "DMA3_CORE" }, + { .fc_id = 608, .cpu_id = 457, .valid = 1, .name = "DMA4_CORE" }, + { .fc_id = 609, .cpu_id = 458, .valid = 1, .name = "DMA5_CORE" }, + { .fc_id = 610, .cpu_id = 459, .valid = 1, .name = "DMA6_CORE" }, + { .fc_id = 611, .cpu_id = 460, .valid = 1, .name = "DMA7_CORE" }, + { .fc_id = 612, .cpu_id = 461, .valid = 1, .name = "NIC0_QP0" }, + { .fc_id = 613, .cpu_id = 462, .valid = 1, .name = "NIC0_QP1" }, + { .fc_id = 614, .cpu_id = 463, .valid = 1, .name = "NIC1_QP0" }, + { .fc_id = 615, .cpu_id = 464, .valid = 1, .name = "NIC1_QP1" }, + { .fc_id = 616, .cpu_id = 465, .valid = 1, .name = "NIC2_QP0" }, + { .fc_id = 617, .cpu_id = 466, .valid = 1, .name = "NIC2_QP1" }, + { .fc_id = 618, .cpu_id = 467, .valid = 1, .name = "NIC3_QP0" }, + { .fc_id = 619, .cpu_id = 468, .valid = 1, .name = "NIC3_QP1" }, + { .fc_id = 620, .cpu_id = 469, .valid = 1, .name = "NIC4_QP0" }, + { .fc_id = 621, .cpu_id = 470, .valid = 1, .name = "NIC4_QP1" }, + { .fc_id = 622, .cpu_id = 471, .valid = 0, .name = "" }, + { .fc_id = 623, .cpu_id = 472, .valid = 0, .name = "" }, + { .fc_id = 624, .cpu_id = 473, .valid = 0, .name = "" }, + { .fc_id = 625, .cpu_id = 474, .valid = 0, .name = "" }, + { .fc_id = 626, .cpu_id = 475, .valid = 0, .name = "" }, + { .fc_id = 627, .cpu_id = 476, .valid = 0, .name = "" }, + { .fc_id = 628, .cpu_id = 477, .valid = 0, .name = "" }, + { .fc_id = 629, .cpu_id = 478, .valid = 0, .name = "" }, + { .fc_id = 630, .cpu_id = 479, .valid = 0, .name = "" }, + { .fc_id = 631, .cpu_id = 480, .valid = 0, .name = "" }, + { .fc_id = 632, .cpu_id = 481, .valid = 0, .name = "" }, + { .fc_id = 633, .cpu_id = 482, .valid = 0, .name = "" }, + { .fc_id = 634, .cpu_id = 483, .valid = 0, .name = "" }, + { .fc_id = 635, .cpu_id = 484, .valid = 0, .name = "" }, + { .fc_id = 636, .cpu_id = 485, .valid = 0, .name = "" }, + { .fc_id = 637, .cpu_id = 486, .valid = 0, .name = "" }, + { .fc_id = 638, .cpu_id = 487, .valid = 0, .name = "" }, + { .fc_id = 639, .cpu_id = 488, .valid = 0, .name = "" }, + { .fc_id = 640, .cpu_id = 489, .valid = 0, .name = "" }, + { .fc_id = 641, .cpu_id = 490, .valid = 0, .name = "" }, + { .fc_id = 642, .cpu_id = 491, .valid = 0, .name = "" }, + { .fc_id = 643, .cpu_id = 492, .valid = 0, .name = "" }, + { .fc_id = 644, .cpu_id = 493, .valid = 0, .name = "" }, + { .fc_id = 645, .cpu_id = 494, .valid = 0, .name = "" }, + { .fc_id = 646, .cpu_id = 495, .valid = 0, .name = "" }, + { .fc_id = 647, .cpu_id = 496, .valid = 0, .name = "" }, + { .fc_id = 648, .cpu_id = 497, .valid = 0, .name = "" }, + { .fc_id = 649, .cpu_id = 498, .valid = 0, .name = "" }, + { .fc_id = 650, .cpu_id = 499, .valid = 0, .name = "" }, + { .fc_id = 651, .cpu_id = 500, .valid = 0, .name = "" }, + { .fc_id = 652, .cpu_id = 501, .valid = 0, .name = "" }, + { .fc_id = 653, .cpu_id = 502, .valid = 0, .name = "" }, + { .fc_id = 654, .cpu_id = 503, .valid = 0, .name = 
"" }, + { .fc_id = 655, .cpu_id = 504, .valid = 0, .name = "" }, + { .fc_id = 656, .cpu_id = 505, .valid = 0, .name = "" }, + { .fc_id = 657, .cpu_id = 506, .valid = 0, .name = "" }, + { .fc_id = 658, .cpu_id = 507, .valid = 1, .name = "FIX_POWER_ENV_S" }, + { .fc_id = 659, .cpu_id = 508, .valid = 1, .name = "FIX_POWER_ENV_E" }, + { .fc_id = 660, .cpu_id = 509, .valid = 1, + .name = "FIX_THERMAL_ENV_S" }, + { .fc_id = 661, .cpu_id = 510, .valid = 1, + .name = "FIX_THERMAL_ENV_E" }, + { .fc_id = 662, .cpu_id = 511, .valid = 1, .name = "RAZWI_OR_ADC_SW" }, +}; + +#endif /* __GAUDI_ASYNC_IDS_MAP_EVENTS_EXT_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h b/drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h new file mode 100644 index 000000000..c45cc7f4d --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h @@ -0,0 +1,367 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2020 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef GAUDI_CORESIGHT_H +#define GAUDI_CORESIGHT_H + +enum gaudi_debug_stm_regs_index { + GAUDI_STM_FIRST = 0, + GAUDI_STM_MME0_ACC = GAUDI_STM_FIRST, + GAUDI_STM_MME0_SBAB, + GAUDI_STM_MME0_CTRL, + GAUDI_STM_MME1_ACC, + GAUDI_STM_MME1_SBAB, + GAUDI_STM_MME1_CTRL, + GAUDI_STM_MME2_ACC, + GAUDI_STM_MME2_SBAB, + GAUDI_STM_MME2_CTRL, + GAUDI_STM_MME3_ACC, + GAUDI_STM_MME3_SBAB, + GAUDI_STM_MME3_CTRL, + GAUDI_STM_DMA_IF_W_S, + GAUDI_STM_DMA_IF_E_S, + GAUDI_STM_DMA_IF_W_N, + GAUDI_STM_DMA_IF_E_N, + GAUDI_STM_CPU, + GAUDI_STM_DMA_CH_0_CS, + GAUDI_STM_DMA_CH_1_CS, + GAUDI_STM_DMA_CH_2_CS, + GAUDI_STM_DMA_CH_3_CS, + GAUDI_STM_DMA_CH_4_CS, + GAUDI_STM_DMA_CH_5_CS, + GAUDI_STM_DMA_CH_6_CS, + GAUDI_STM_DMA_CH_7_CS, + GAUDI_STM_PCIE, + GAUDI_STM_MMU_CS, + GAUDI_STM_PSOC, + GAUDI_STM_NIC0_0, + GAUDI_STM_NIC0_1, + GAUDI_STM_NIC1_0, + GAUDI_STM_NIC1_1, + GAUDI_STM_NIC2_0, + GAUDI_STM_NIC2_1, + GAUDI_STM_NIC3_0, + GAUDI_STM_NIC3_1, + GAUDI_STM_NIC4_0, + GAUDI_STM_NIC4_1, + GAUDI_STM_TPC0_EML, + GAUDI_STM_TPC1_EML, + GAUDI_STM_TPC2_EML, + GAUDI_STM_TPC3_EML, + GAUDI_STM_TPC4_EML, + GAUDI_STM_TPC5_EML, + GAUDI_STM_TPC6_EML, + GAUDI_STM_TPC7_EML, + GAUDI_STM_LAST = GAUDI_STM_TPC7_EML +}; + +enum gaudi_debug_etf_regs_index { + GAUDI_ETF_FIRST = 0, + GAUDI_ETF_MME0_ACC = GAUDI_ETF_FIRST, + GAUDI_ETF_MME0_SBAB, + GAUDI_ETF_MME0_CTRL, + GAUDI_ETF_MME1_ACC, + GAUDI_ETF_MME1_SBAB, + GAUDI_ETF_MME1_CTRL, + GAUDI_ETF_MME2_ACC, + GAUDI_ETF_MME2_SBAB, + GAUDI_ETF_MME2_CTRL, + GAUDI_ETF_MME3_ACC, + GAUDI_ETF_MME3_SBAB, + GAUDI_ETF_MME3_CTRL, + GAUDI_ETF_DMA_IF_W_S, + GAUDI_ETF_DMA_IF_E_S, + GAUDI_ETF_DMA_IF_W_N, + GAUDI_ETF_DMA_IF_E_N, + GAUDI_ETF_CPU_0, + GAUDI_ETF_CPU_1, + GAUDI_ETF_CPU_TRACE, + GAUDI_ETF_DMA_CH_0_CS, + GAUDI_ETF_DMA_CH_1_CS, + GAUDI_ETF_DMA_CH_2_CS, + GAUDI_ETF_DMA_CH_3_CS, + GAUDI_ETF_DMA_CH_4_CS, + GAUDI_ETF_DMA_CH_5_CS, + GAUDI_ETF_DMA_CH_6_CS, + GAUDI_ETF_DMA_CH_7_CS, + GAUDI_ETF_PCIE, + GAUDI_ETF_MMU_CS, + GAUDI_ETF_PSOC, + GAUDI_ETF_NIC0_0, + GAUDI_ETF_NIC0_1, + GAUDI_ETF_NIC1_0, + GAUDI_ETF_NIC1_1, + GAUDI_ETF_NIC2_0, + GAUDI_ETF_NIC2_1, + GAUDI_ETF_NIC3_0, + GAUDI_ETF_NIC3_1, + GAUDI_ETF_NIC4_0, + GAUDI_ETF_NIC4_1, + GAUDI_ETF_TPC0_EML, + GAUDI_ETF_TPC1_EML, + GAUDI_ETF_TPC2_EML, + GAUDI_ETF_TPC3_EML, + GAUDI_ETF_TPC4_EML, + GAUDI_ETF_TPC5_EML, + GAUDI_ETF_TPC6_EML, + GAUDI_ETF_TPC7_EML, + GAUDI_ETF_LAST = GAUDI_ETF_TPC7_EML +}; + +enum gaudi_debug_funnel_regs_index { + GAUDI_FUNNEL_FIRST = 0, + GAUDI_FUNNEL_MME0_ACC = GAUDI_FUNNEL_FIRST, + GAUDI_FUNNEL_MME1_ACC, + GAUDI_FUNNEL_MME2_ACC, + 
GAUDI_FUNNEL_MME3_ACC, + GAUDI_FUNNEL_SRAM_Y0_X0, + GAUDI_FUNNEL_SRAM_Y0_X1, + GAUDI_FUNNEL_SRAM_Y0_X2, + GAUDI_FUNNEL_SRAM_Y0_X3, + GAUDI_FUNNEL_SRAM_Y0_X4, + GAUDI_FUNNEL_SRAM_Y0_X5, + GAUDI_FUNNEL_SRAM_Y0_X6, + GAUDI_FUNNEL_SRAM_Y0_X7, + GAUDI_FUNNEL_SRAM_Y1_X0, + GAUDI_FUNNEL_SRAM_Y1_X1, + GAUDI_FUNNEL_SRAM_Y1_X2, + GAUDI_FUNNEL_SRAM_Y1_X3, + GAUDI_FUNNEL_SRAM_Y1_X4, + GAUDI_FUNNEL_SRAM_Y1_X5, + GAUDI_FUNNEL_SRAM_Y1_X6, + GAUDI_FUNNEL_SRAM_Y1_X7, + GAUDI_FUNNEL_SRAM_Y2_X0, + GAUDI_FUNNEL_SRAM_Y2_X1, + GAUDI_FUNNEL_SRAM_Y2_X2, + GAUDI_FUNNEL_SRAM_Y2_X3, + GAUDI_FUNNEL_SRAM_Y2_X4, + GAUDI_FUNNEL_SRAM_Y2_X5, + GAUDI_FUNNEL_SRAM_Y2_X6, + GAUDI_FUNNEL_SRAM_Y2_X7, + GAUDI_FUNNEL_SRAM_Y3_X0, + GAUDI_FUNNEL_SRAM_Y3_X1, + GAUDI_FUNNEL_SRAM_Y3_X2, + GAUDI_FUNNEL_SRAM_Y3_X4, + GAUDI_FUNNEL_SRAM_Y3_X3, + GAUDI_FUNNEL_SRAM_Y3_X5, + GAUDI_FUNNEL_SRAM_Y3_X6, + GAUDI_FUNNEL_SRAM_Y3_X7, + GAUDI_FUNNEL_SIF_0, + GAUDI_FUNNEL_SIF_1, + GAUDI_FUNNEL_SIF_2, + GAUDI_FUNNEL_SIF_3, + GAUDI_FUNNEL_SIF_4, + GAUDI_FUNNEL_SIF_5, + GAUDI_FUNNEL_SIF_6, + GAUDI_FUNNEL_SIF_7, + GAUDI_FUNNEL_NIF_0, + GAUDI_FUNNEL_NIF_1, + GAUDI_FUNNEL_NIF_2, + GAUDI_FUNNEL_NIF_3, + GAUDI_FUNNEL_NIF_4, + GAUDI_FUNNEL_NIF_5, + GAUDI_FUNNEL_NIF_6, + GAUDI_FUNNEL_NIF_7, + GAUDI_FUNNEL_DMA_IF_W_S, + GAUDI_FUNNEL_DMA_IF_E_S, + GAUDI_FUNNEL_DMA_IF_W_N, + GAUDI_FUNNEL_DMA_IF_E_N, + GAUDI_FUNNEL_CPU, + GAUDI_FUNNEL_NIC_TPC_W_S, + GAUDI_FUNNEL_NIC_TPC_E_S, + GAUDI_FUNNEL_NIC_TPC_W_N, + GAUDI_FUNNEL_NIC_TPC_E_N, + GAUDI_FUNNEL_PCIE, + GAUDI_FUNNEL_PSOC, + GAUDI_FUNNEL_NIC0, + GAUDI_FUNNEL_NIC1, + GAUDI_FUNNEL_NIC2, + GAUDI_FUNNEL_NIC3, + GAUDI_FUNNEL_NIC4, + GAUDI_FUNNEL_TPC0_EML, + GAUDI_FUNNEL_TPC1_EML, + GAUDI_FUNNEL_TPC2_EML, + GAUDI_FUNNEL_TPC3_EML, + GAUDI_FUNNEL_TPC4_EML, + GAUDI_FUNNEL_TPC5_EML, + GAUDI_FUNNEL_TPC6_EML, + GAUDI_FUNNEL_TPC7_EML, + GAUDI_FUNNEL_LAST = GAUDI_FUNNEL_TPC7_EML +}; + +enum gaudi_debug_bmon_regs_index { + GAUDI_BMON_FIRST = 0, + GAUDI_BMON_MME0_ACC_0 = GAUDI_BMON_FIRST, + GAUDI_BMON_MME0_SBAB_0, + GAUDI_BMON_MME0_SBAB_1, + GAUDI_BMON_MME0_CTRL_0, + GAUDI_BMON_MME0_CTRL_1, + GAUDI_BMON_MME1_ACC_0, + GAUDI_BMON_MME1_SBAB_0, + GAUDI_BMON_MME1_SBAB_1, + GAUDI_BMON_MME1_CTRL_0, + GAUDI_BMON_MME1_CTRL_1, + GAUDI_BMON_MME2_ACC_0, + GAUDI_BMON_MME2_SBAB_0, + GAUDI_BMON_MME2_SBAB_1, + GAUDI_BMON_MME2_CTRL_0, + GAUDI_BMON_MME2_CTRL_1, + GAUDI_BMON_MME3_ACC_0, + GAUDI_BMON_MME3_SBAB_0, + GAUDI_BMON_MME3_SBAB_1, + GAUDI_BMON_MME3_CTRL_0, + GAUDI_BMON_MME3_CTRL_1, + GAUDI_BMON_DMA_IF_W_S_SOB_WR, + GAUDI_BMON_DMA_IF_W_S_0_WR, + GAUDI_BMON_DMA_IF_W_S_0_RD, + GAUDI_BMON_DMA_IF_W_S_1_WR, + GAUDI_BMON_DMA_IF_W_S_1_RD, + GAUDI_BMON_DMA_IF_E_S_SOB_WR, + GAUDI_BMON_DMA_IF_E_S_0_WR, + GAUDI_BMON_DMA_IF_E_S_0_RD, + GAUDI_BMON_DMA_IF_E_S_1_WR, + GAUDI_BMON_DMA_IF_E_S_1_RD, + GAUDI_BMON_DMA_IF_W_N_SOB_WR, + GAUDI_BMON_DMA_IF_W_N_HBM0_WR, + GAUDI_BMON_DMA_IF_W_N_HBM0_RD, + GAUDI_BMON_DMA_IF_W_N_HBM1_WR, + GAUDI_BMON_DMA_IF_W_N_HBM1_RD, + GAUDI_BMON_DMA_IF_E_N_SOB_WR, + GAUDI_BMON_DMA_IF_E_N_HBM0_WR, + GAUDI_BMON_DMA_IF_E_N_HBM0_RD, + GAUDI_BMON_DMA_IF_E_N_HBM1_WR, + GAUDI_BMON_DMA_IF_E_N_HBM1_RD, + GAUDI_BMON_CPU_WR, + GAUDI_BMON_CPU_RD, + GAUDI_BMON_DMA_CH_0_0, + GAUDI_BMON_DMA_CH_0_1, + GAUDI_BMON_DMA_CH_1_0, + GAUDI_BMON_DMA_CH_1_1, + GAUDI_BMON_DMA_CH_2_0, + GAUDI_BMON_DMA_CH_2_1, + GAUDI_BMON_DMA_CH_3_0, + GAUDI_BMON_DMA_CH_3_1, + GAUDI_BMON_DMA_CH_4_0, + GAUDI_BMON_DMA_CH_4_1, + GAUDI_BMON_DMA_CH_5_0, + GAUDI_BMON_DMA_CH_5_1, + GAUDI_BMON_DMA_CH_6_0, + GAUDI_BMON_DMA_CH_6_1, + GAUDI_BMON_DMA_CH_7_0, + GAUDI_BMON_DMA_CH_7_1, + 
GAUDI_BMON_PCIE_MSTR_WR, + GAUDI_BMON_PCIE_MSTR_RD, + GAUDI_BMON_PCIE_SLV_WR, + GAUDI_BMON_PCIE_SLV_RD, + GAUDI_BMON_MMU_0, + GAUDI_BMON_MMU_1, + GAUDI_BMON_NIC0_0, + GAUDI_BMON_NIC0_1, + GAUDI_BMON_NIC0_2, + GAUDI_BMON_NIC0_3, + GAUDI_BMON_NIC0_4, + GAUDI_BMON_NIC1_0, + GAUDI_BMON_NIC1_1, + GAUDI_BMON_NIC1_2, + GAUDI_BMON_NIC1_3, + GAUDI_BMON_NIC1_4, + GAUDI_BMON_NIC2_0, + GAUDI_BMON_NIC2_1, + GAUDI_BMON_NIC2_2, + GAUDI_BMON_NIC2_3, + GAUDI_BMON_NIC2_4, + GAUDI_BMON_NIC3_0, + GAUDI_BMON_NIC3_1, + GAUDI_BMON_NIC3_2, + GAUDI_BMON_NIC3_3, + GAUDI_BMON_NIC3_4, + GAUDI_BMON_NIC4_0, + GAUDI_BMON_NIC4_1, + GAUDI_BMON_NIC4_2, + GAUDI_BMON_NIC4_3, + GAUDI_BMON_NIC4_4, + GAUDI_BMON_TPC0_EML_0, + GAUDI_BMON_TPC0_EML_1, + GAUDI_BMON_TPC0_EML_2, + GAUDI_BMON_TPC0_EML_3, + GAUDI_BMON_TPC1_EML_0, + GAUDI_BMON_TPC1_EML_1, + GAUDI_BMON_TPC1_EML_2, + GAUDI_BMON_TPC1_EML_3, + GAUDI_BMON_TPC2_EML_0, + GAUDI_BMON_TPC2_EML_1, + GAUDI_BMON_TPC2_EML_2, + GAUDI_BMON_TPC2_EML_3, + GAUDI_BMON_TPC3_EML_0, + GAUDI_BMON_TPC3_EML_1, + GAUDI_BMON_TPC3_EML_2, + GAUDI_BMON_TPC3_EML_3, + GAUDI_BMON_TPC4_EML_0, + GAUDI_BMON_TPC4_EML_1, + GAUDI_BMON_TPC4_EML_2, + GAUDI_BMON_TPC4_EML_3, + GAUDI_BMON_TPC5_EML_0, + GAUDI_BMON_TPC5_EML_1, + GAUDI_BMON_TPC5_EML_2, + GAUDI_BMON_TPC5_EML_3, + GAUDI_BMON_TPC6_EML_0, + GAUDI_BMON_TPC6_EML_1, + GAUDI_BMON_TPC6_EML_2, + GAUDI_BMON_TPC6_EML_3, + GAUDI_BMON_TPC7_EML_0, + GAUDI_BMON_TPC7_EML_1, + GAUDI_BMON_TPC7_EML_2, + GAUDI_BMON_TPC7_EML_3, + GAUDI_BMON_LAST = GAUDI_BMON_TPC7_EML_3 +}; + +enum gaudi_debug_spmu_regs_index { + GAUDI_SPMU_FIRST = 0, + GAUDI_SPMU_MME0_ACC = GAUDI_SPMU_FIRST, + GAUDI_SPMU_MME0_SBAB, + GAUDI_SPMU_MME0_CTRL, + GAUDI_SPMU_MME1_ACC, + GAUDI_SPMU_MME1_SBAB, + GAUDI_SPMU_MME1_CTRL, + GAUDI_SPMU_MME2_MME2_ACC, + GAUDI_SPMU_MME2_SBAB, + GAUDI_SPMU_MME2_CTRL, + GAUDI_SPMU_MME3_ACC, + GAUDI_SPMU_MME3_SBAB, + GAUDI_SPMU_MME3_CTRL, + GAUDI_SPMU_DMA_CH_0_CS, + GAUDI_SPMU_DMA_CH_1_CS, + GAUDI_SPMU_DMA_CH_2_CS, + GAUDI_SPMU_DMA_CH_3_CS, + GAUDI_SPMU_DMA_CH_4_CS, + GAUDI_SPMU_DMA_CH_5_CS, + GAUDI_SPMU_DMA_CH_6_CS, + GAUDI_SPMU_DMA_CH_7_CS, + GAUDI_SPMU_PCIE, + GAUDI_SPMU_MMU_CS, + GAUDI_SPMU_NIC0_0, + GAUDI_SPMU_NIC0_1, + GAUDI_SPMU_NIC1_0, + GAUDI_SPMU_NIC1_1, + GAUDI_SPMU_NIC2_0, + GAUDI_SPMU_NIC2_1, + GAUDI_SPMU_NIC3_0, + GAUDI_SPMU_NIC3_1, + GAUDI_SPMU_NIC4_0, + GAUDI_SPMU_NIC4_1, + GAUDI_SPMU_TPC0_EML, + GAUDI_SPMU_TPC1_EML, + GAUDI_SPMU_TPC2_EML, + GAUDI_SPMU_TPC3_EML, + GAUDI_SPMU_TPC4_EML, + GAUDI_SPMU_TPC5_EML, + GAUDI_SPMU_TPC6_EML, + GAUDI_SPMU_TPC7_EML, + GAUDI_SPMU_LAST = GAUDI_SPMU_TPC7_EML +}; + +#endif /* GAUDI_CORESIGHT_H */ diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h new file mode 100644 index 000000000..8aadc6357 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2019-2020 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef GAUDI_FW_IF_H +#define GAUDI_FW_IF_H + +#define GAUDI_EVENT_QUEUE_MSI_IDX 8 +#define GAUDI_NIC_PORT1_MSI_IDX 10 +#define GAUDI_NIC_PORT3_MSI_IDX 12 +#define GAUDI_NIC_PORT5_MSI_IDX 14 +#define GAUDI_NIC_PORT7_MSI_IDX 16 +#define GAUDI_NIC_PORT9_MSI_IDX 18 + +#define UBOOT_FW_OFFSET 0x100000 /* 1MB in SRAM */ +#define LINUX_FW_OFFSET 0x800000 /* 8MB in HBM */ + +enum gaudi_pll_index { + CPU_PLL = 0, + PCI_PLL, + SRAM_PLL, + HBM_PLL, + NIC_PLL, + DMA_PLL, + MESH_PLL, + MME_PLL, + TPC_PLL, + IF_PLL +}; + +#define GAUDI_PLL_FREQ_LOW 200000000 /* 200 MHz */ + +#endif /* GAUDI_FW_IF_H */ diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h new file mode 100644 index 000000000..46aed13f1 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h @@ -0,0 +1,429 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2020 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef GAUDI_MASKS_H_ +#define GAUDI_MASKS_H_ + +#include "asic_reg/gaudi_regs.h" + +/* Useful masks for bits in various registers */ +#define PCI_DMA_QMAN_ENABLE (\ + (FIELD_PREP(DMA0_QM_GLBL_CFG0_PQF_EN_MASK, 0xF)) | \ + (FIELD_PREP(DMA0_QM_GLBL_CFG0_CQF_EN_MASK, 0xF)) | \ + (FIELD_PREP(DMA0_QM_GLBL_CFG0_CP_EN_MASK, 0xF))) + +#define QMAN_EXTERNAL_MAKE_TRUSTED (\ + (FIELD_PREP(DMA0_QM_GLBL_PROT_PQF_MASK, 0xF)) | \ + (FIELD_PREP(DMA0_QM_GLBL_PROT_CQF_MASK, 0xF)) | \ + (FIELD_PREP(DMA0_QM_GLBL_PROT_CP_MASK, 0xF)) | \ + (FIELD_PREP(DMA0_QM_GLBL_PROT_ERR_MASK, 0x1))) + +#define QMAN_INTERNAL_MAKE_TRUSTED (\ + (FIELD_PREP(DMA0_QM_GLBL_PROT_PQF_MASK, 0xF)) | \ + (FIELD_PREP(DMA0_QM_GLBL_PROT_ERR_MASK, 0x1))) + +#define HBM_DMA_QMAN_ENABLE (\ + (FIELD_PREP(DMA0_QM_GLBL_CFG0_PQF_EN_MASK, 0xF)) | \ + (FIELD_PREP(DMA0_QM_GLBL_CFG0_CQF_EN_MASK, 0x1F)) | \ + (FIELD_PREP(DMA0_QM_GLBL_CFG0_CP_EN_MASK, 0x1F))) + +#define QMAN_MME_ENABLE (\ + (FIELD_PREP(MME0_QM_GLBL_CFG0_PQF_EN_MASK, 0xF)) | \ + (FIELD_PREP(MME0_QM_GLBL_CFG0_CQF_EN_MASK, 0x1F)) | \ + (FIELD_PREP(MME0_QM_GLBL_CFG0_CP_EN_MASK, 0x1F))) + +#define QMAN_TPC_ENABLE (\ + (FIELD_PREP(TPC0_QM_GLBL_CFG0_PQF_EN_MASK, 0xF)) | \ + (FIELD_PREP(TPC0_QM_GLBL_CFG0_CQF_EN_MASK, 0x1F)) | \ + (FIELD_PREP(TPC0_QM_GLBL_CFG0_CP_EN_MASK, 0x1F))) + +#define QMAN_UPPER_CP_CGM_PWR_GATE_EN (\ + (FIELD_PREP(DMA0_QM_CGM_CFG_IDLE_TH_MASK, 0x20)) | \ + (FIELD_PREP(DMA0_QM_CGM_CFG_G2F_TH_MASK, 0xA)) | \ + (FIELD_PREP(DMA0_QM_CGM_CFG_CP_IDLE_MASK_MASK, 0x10)) | \ + (FIELD_PREP(DMA0_QM_CGM_CFG_EN_MASK, 0x1))) + +#define QMAN_COMMON_CP_CGM_PWR_GATE_EN (\ + (FIELD_PREP(DMA0_QM_CGM_CFG_IDLE_TH_MASK, 0x20)) | \ + (FIELD_PREP(DMA0_QM_CGM_CFG_G2F_TH_MASK, 0xA)) | \ + (FIELD_PREP(DMA0_QM_CGM_CFG_CP_IDLE_MASK_MASK, 0xF)) | \ + (FIELD_PREP(DMA0_QM_CGM_CFG_EN_MASK, 0x1))) + +#define PCI_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\ + (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \ + (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK, 0xF)) | \ + (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK, 0xF))) + +#define PCI_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\ + (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \ + (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0xF)) | \ + (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0xF))) + +#define HBM_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\ + (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \ + (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK, 0x1F)) | \ + (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK, 0x1F))) + 
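The QMAN enable constants above are composed with FIELD_PREP() from <linux/bitfield.h>: each call places an enable value (0xF for the four PCI-DMA pipes, 0x1F where a fifth CQF/CP pipe exists, as in HBM_DMA_QMAN_ENABLE) into its field of the GLBL_CFG0 register, so a whole queue manager can be switched on with a single 32-bit write. A minimal sketch of that write, assuming the WREG32() helper and struct hl_device from the wider habanalabs driver; the function name is illustrative only, and the real initialization lives in the gaudi driver proper, not in this header:

	/* Sketch only: enable all PQF/CQF/CP pipes of the PCI DMA QMAN */
	static void gaudi_enable_pci_dma_qman_sketch(struct hl_device *hdev)
	{
		/* PCI_DMA_QMAN_ENABLE already packs 0xF into each enable field */
		WREG32(mmDMA0_QM_GLBL_CFG0, PCI_DMA_QMAN_ENABLE);
	}
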
+#define HBM_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\ + (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \ + (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0x1F)) | \ + (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F))) + +#define TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\ + (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \ + (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK, 0x1F)) | \ + (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK, 0x1F))) + +#define TPC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\ + (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \ + (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0x1F)) | \ + (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F))) + +#define MME_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\ + (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \ + (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK, 0x1F)) | \ + (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK, 0x1F))) + +#define MME_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\ + (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \ + (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0x1F)) | \ + (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F))) + +#define QMAN_CGM1_PWR_GATE_EN (FIELD_PREP(DMA0_QM_CGM_CFG1_MASK_TH_MASK, 0xA)) + +/* RESET registers configuration */ +#define CFG_RST_L_PSOC_MASK BIT_MASK(0) +#define CFG_RST_L_PCIE_MASK BIT_MASK(1) +#define CFG_RST_L_PCIE_IF_MASK BIT_MASK(2) +#define CFG_RST_L_HBM_S_PLL_MASK BIT_MASK(3) +#define CFG_RST_L_TPC_S_PLL_MASK BIT_MASK(4) +#define CFG_RST_L_MME_S_PLL_MASK BIT_MASK(5) +#define CFG_RST_L_CPU_PLL_MASK BIT_MASK(6) +#define CFG_RST_L_PCIE_PLL_MASK BIT_MASK(7) +#define CFG_RST_L_NIC_S_PLL_MASK BIT_MASK(8) +#define CFG_RST_L_HBM_N_PLL_MASK BIT_MASK(9) +#define CFG_RST_L_TPC_N_PLL_MASK BIT_MASK(10) +#define CFG_RST_L_MME_N_PLL_MASK BIT_MASK(11) +#define CFG_RST_L_NIC_N_PLL_MASK BIT_MASK(12) +#define CFG_RST_L_DMA_W_PLL_MASK BIT_MASK(13) +#define CFG_RST_L_SIF_W_PLL_MASK BIT_MASK(14) +#define CFG_RST_L_MESH_W_PLL_MASK BIT_MASK(15) +#define CFG_RST_L_SRAM_W_PLL_MASK BIT_MASK(16) +#define CFG_RST_L_DMA_E_PLL_MASK BIT_MASK(17) +#define CFG_RST_L_SIF_E_PLL_MASK BIT_MASK(18) +#define CFG_RST_L_MESH_E_PLL_MASK BIT_MASK(19) +#define CFG_RST_L_SRAM_E_PLL_MASK BIT_MASK(20) + +#define CFG_RST_L_IF_1_MASK BIT_MASK(21) +#define CFG_RST_L_IF_0_MASK BIT_MASK(22) +#define CFG_RST_L_IF_2_MASK BIT_MASK(23) +#define CFG_RST_L_IF_3_MASK BIT_MASK(24) +#define CFG_RST_L_IF_MASK GENMASK(24, 21) + +#define CFG_RST_L_TPC_0_MASK BIT_MASK(25) +#define CFG_RST_L_TPC_1_MASK BIT_MASK(26) +#define CFG_RST_L_TPC_2_MASK BIT_MASK(27) +#define CFG_RST_L_TPC_3_MASK BIT_MASK(28) +#define CFG_RST_L_TPC_4_MASK BIT_MASK(29) +#define CFG_RST_L_TPC_5_MASK BIT_MASK(30) +#define CFG_RST_L_TPC_6_MASK BIT_MASK(31) +#define CFG_RST_L_TPC_MASK GENMASK(31, 25) + +#define CFG_RST_H_TPC_7_MASK BIT_MASK(0) + +#define CFG_RST_H_MME_0_MASK BIT_MASK(1) +#define CFG_RST_H_MME_1_MASK BIT_MASK(2) +#define CFG_RST_H_MME_2_MASK BIT_MASK(3) +#define CFG_RST_H_MME_3_MASK BIT_MASK(4) +#define CFG_RST_H_MME_MASK GENMASK(4, 1) + +#define CFG_RST_H_HBM_0_MASK BIT_MASK(5) +#define CFG_RST_H_HBM_1_MASK BIT_MASK(6) +#define CFG_RST_H_HBM_2_MASK BIT_MASK(7) +#define CFG_RST_H_HBM_3_MASK BIT_MASK(8) +#define CFG_RST_H_HBM_MASK GENMASK(8, 5) + +#define CFG_RST_H_NIC_0_MASK BIT_MASK(9) +#define CFG_RST_H_NIC_1_MASK BIT_MASK(10) +#define CFG_RST_H_NIC_2_MASK BIT_MASK(11) +#define CFG_RST_H_NIC_3_MASK BIT_MASK(12) +#define 
CFG_RST_H_NIC_4_MASK BIT_MASK(13) +#define CFG_RST_H_NIC_MASK GENMASK(13, 9) + +#define CFG_RST_H_SM_0_MASK BIT_MASK(14) +#define CFG_RST_H_SM_1_MASK BIT_MASK(15) +#define CFG_RST_H_SM_2_MASK BIT_MASK(16) +#define CFG_RST_H_SM_3_MASK BIT_MASK(17) +#define CFG_RST_H_SM_MASK GENMASK(17, 14) + +#define CFG_RST_H_DMA_0_MASK BIT_MASK(18) +#define CFG_RST_H_DMA_1_MASK BIT_MASK(19) +#define CFG_RST_H_DMA_MASK GENMASK(19, 18) + +#define CFG_RST_H_CPU_MASK BIT_MASK(20) +#define CFG_RST_H_MMU_MASK BIT_MASK(21) + +#define UNIT_RST_L_PSOC_SHIFT 0 +#define UNIT_RST_L_PCIE_SHIFT 1 +#define UNIT_RST_L_PCIE_IF_SHIFT 2 +#define UNIT_RST_L_HBM_S_PLL_SHIFT 3 +#define UNIT_RST_L_TPC_S_PLL_SHIFT 4 +#define UNIT_RST_L_MME_S_PLL_SHIFT 5 +#define UNIT_RST_L_CPU_PLL_SHIFT 6 +#define UNIT_RST_L_PCIE_PLL_SHIFT 7 +#define UNIT_RST_L_NIC_S_PLL_SHIFT 8 +#define UNIT_RST_L_HBM_N_PLL_SHIFT 9 +#define UNIT_RST_L_TPC_N_PLL_SHIFT 10 +#define UNIT_RST_L_MME_N_PLL_SHIFT 11 +#define UNIT_RST_L_NIC_N_PLL_SHIFT 12 +#define UNIT_RST_L_DMA_W_PLL_SHIFT 13 +#define UNIT_RST_L_SIF_W_PLL_SHIFT 14 +#define UNIT_RST_L_MESH_W_PLL_SHIFT 15 +#define UNIT_RST_L_SRAM_W_PLL_SHIFT 16 +#define UNIT_RST_L_DMA_E_PLL_SHIFT 17 +#define UNIT_RST_L_SIF_E_PLL_SHIFT 18 +#define UNIT_RST_L_MESH_E_PLL_SHIFT 19 +#define UNIT_RST_L_SRAM_E_PLL_SHIFT 20 +#define UNIT_RST_L_TPC_0_SHIFT 21 +#define UNIT_RST_L_TPC_1_SHIFT 22 +#define UNIT_RST_L_TPC_2_SHIFT 23 +#define UNIT_RST_L_TPC_3_SHIFT 24 +#define UNIT_RST_L_TPC_4_SHIFT 25 +#define UNIT_RST_L_TPC_5_SHIFT 26 +#define UNIT_RST_L_TPC_6_SHIFT 27 +#define UNIT_RST_L_TPC_7_SHIFT 28 +#define UNIT_RST_L_MME_0_SHIFT 29 +#define UNIT_RST_L_MME_1_SHIFT 30 +#define UNIT_RST_L_MME_2_SHIFT 31 + +#define UNIT_RST_H_MME_3_SHIFT 0 +#define UNIT_RST_H_HBM_0_SHIFT 1 +#define UNIT_RST_H_HBM_1_SHIFT 2 +#define UNIT_RST_H_HBM_2_SHIFT 3 +#define UNIT_RST_H_HBM_3_SHIFT 4 +#define UNIT_RST_H_NIC_0_SHIFT 5 +#define UNIT_RST_H_NIC_1_SHIFT 6 +#define UNIT_RST_H_NIC_2_SHIFT 7 +#define UNIT_RST_H_NIC_3_SHIFT 8 +#define UNIT_RST_H_NIC_4_SHIFT 9 +#define UNIT_RST_H_SM_0_SHIFT 10 +#define UNIT_RST_H_SM_1_SHIFT 11 +#define UNIT_RST_H_SM_2_SHIFT 12 +#define UNIT_RST_H_SM_3_SHIFT 13 +#define UNIT_RST_H_IF_0_SHIFT 14 +#define UNIT_RST_H_IF_1_SHIFT 15 +#define UNIT_RST_H_IF_2_SHIFT 16 +#define UNIT_RST_H_IF_3_SHIFT 17 +#define UNIT_RST_H_DMA_0_SHIFT 18 +#define UNIT_RST_H_DMA_1_SHIFT 19 +#define UNIT_RST_H_CPU_SHIFT 20 +#define UNIT_RST_H_MMU_SHIFT 21 + +#define UNIT_RST_H_HBM_MASK ((1 << UNIT_RST_H_HBM_0_SHIFT) | \ + (1 << UNIT_RST_H_HBM_1_SHIFT) | \ + (1 << UNIT_RST_H_HBM_2_SHIFT) | \ + (1 << UNIT_RST_H_HBM_3_SHIFT)) + +#define UNIT_RST_H_NIC_MASK ((1 << UNIT_RST_H_NIC_0_SHIFT) | \ + (1 << UNIT_RST_H_NIC_1_SHIFT) | \ + (1 << UNIT_RST_H_NIC_2_SHIFT) | \ + (1 << UNIT_RST_H_NIC_3_SHIFT) | \ + (1 << UNIT_RST_H_NIC_4_SHIFT)) + +#define UNIT_RST_H_SM_MASK ((1 << UNIT_RST_H_SM_0_SHIFT) | \ + (1 << UNIT_RST_H_SM_1_SHIFT) | \ + (1 << UNIT_RST_H_SM_2_SHIFT) | \ + (1 << UNIT_RST_H_SM_3_SHIFT)) + +#define UNIT_RST_H_MME_MASK ((1 << UNIT_RST_H_MME_0_SHIFT) | \ + (1 << UNIT_RST_H_MME_1_SHIFT) | \ + (1 << UNIT_RST_H_MME_2_SHIFT)) + +#define UNIT_RST_L_MME_MASK (1 << UNIT_RST_L_MME_3_SHIFT) + +#define UNIT_RST_L_IF_MASK ((1 << UNIT_RST_L_IF_0_SHIFT) | \ + (1 << UNIT_RST_L_IF_1_SHIFT) | \ + (1 << UNIT_RST_L_IF_2_SHIFT) | \ + (1 << UNIT_RST_L_IF_3_SHIFT)) + +#define UNIT_RST_L_TPC_MASK ((1 << UNIT_RST_L_TPC_0_SHIFT) | \ + (1 << UNIT_RST_L_TPC_1_SHIFT) | \ + (1 << UNIT_RST_L_TPC_2_SHIFT) | \ + (1 << UNIT_RST_L_TPC_3_SHIFT) | \ + (1 << UNIT_RST_L_TPC_4_SHIFT) | 
\ + (1 << UNIT_RST_L_TPC_5_SHIFT) | \ + (1 << UNIT_RST_L_TPC_6_SHIFT) | \ + (1 << UNIT_RST_L_TPC_7_SHIFT)) + +/* CPU_CA53_CFG_ARM_RST_CONTROL */ +#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT 0 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_MASK 0x3 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT 4 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_MASK 0x30 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT 8 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_MASK 0x100 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_SHIFT 12 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_MASK 0x1000 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT 16 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_MASK 0x10000 +#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_SHIFT 20 +#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_MASK 0x300000 + +#define CPU_RESET_ASSERT (\ + 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT) + +#define CPU_RESET_CORE0_DEASSERT (\ + 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT |\ + 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT |\ + 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT |\ + 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT) + +/* QM_IDLE_MASK is valid for all engines QM idle check */ +#define QM_IDLE_MASK (DMA0_QM_GLBL_STS0_PQF_IDLE_MASK | \ + DMA0_QM_GLBL_STS0_CQF_IDLE_MASK | \ + DMA0_QM_GLBL_STS0_CP_IDLE_MASK) + +/* CGM_IDLE_MASK is valid for all engines CGM idle check */ +#define CGM_IDLE_MASK DMA0_QM_CGM_STS_AGENT_IDLE_MASK + +#define TPC_IDLE_MASK ((1 << TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_SHIFT) | \ + (1 << TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_SHIFT) | \ + (1 << TPC0_CFG_STATUS_IQ_EMPTY_SHIFT) | \ + (1 << TPC0_CFG_STATUS_SB_EMPTY_SHIFT) | \ + (1 << TPC0_CFG_STATUS_QM_IDLE_SHIFT) | \ + (1 << TPC0_CFG_STATUS_QM_RDY_SHIFT)) + +#define MME0_CTRL_ARCH_STATUS_SB_A_EMPTY_MASK 0x80 +#define MME0_CTRL_ARCH_STATUS_SB_B_EMPTY_MASK 0x100 +#define MME0_CTRL_ARCH_STATUS_WBC_AXI_IDLE_MASK 0x1000 + +#define MME_ARCH_IDLE_MASK (MME0_CTRL_ARCH_STATUS_SB_A_EMPTY_MASK | \ + MME0_CTRL_ARCH_STATUS_SB_B_EMPTY_MASK | \ + MME0_CTRL_ARCH_STATUS_WBC_AXI_IDLE_MASK) + +#define IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) \ + ((((qm_glbl_sts0) & QM_IDLE_MASK) == QM_IDLE_MASK) && \ + (((qm_cgm_sts) & CGM_IDLE_MASK) == CGM_IDLE_MASK)) + +#define IS_DMA_IDLE(dma_core_sts0) \ + !(dma_core_sts0 & DMA0_CORE_STS0_BUSY_MASK) + +#define IS_TPC_IDLE(tpc_cfg_sts) \ + (((tpc_cfg_sts) & TPC_IDLE_MASK) == TPC_IDLE_MASK) + +#define IS_MME_IDLE(mme_arch_sts) \ + (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK) + +enum axi_id { + AXI_ID_MME, + AXI_ID_TPC, + AXI_ID_DMA, + AXI_ID_NIC, /* Local NIC */ + AXI_ID_PCI, + AXI_ID_CPU, + AXI_ID_PSOC, + AXI_ID_MMU, + AXI_ID_NIC_FT /* Feed-Through NIC */ +}; + +/* RAZWI initiator ID is built from the location in the chip and the AXI ID */ + +#define RAZWI_INITIATOR_AXI_ID_SHIFT 20 +#define RAZWI_INITIATOR_AXI_ID_MASK 0xF +#define RAZWI_INITIATOR_X_SHIFT 24 +#define RAZWI_INITIATOR_X_MASK 0xF +#define RAZWI_INITIATOR_Y_SHIFT 28 +#define RAZWI_INITIATOR_Y_MASK 0x7 + +#define RAZWI_INITIATOR_ID_AXI_ID(axi_id) \ + (((axi_id) & RAZWI_INITIATOR_AXI_ID_MASK) << \ + RAZWI_INITIATOR_AXI_ID_SHIFT) + +#define RAZWI_INITIATOR_ID_X_Y(x, y) \ + ((((y) & RAZWI_INITIATOR_Y_MASK) << RAZWI_INITIATOR_Y_SHIFT) | \ + (((x) & RAZWI_INITIATOR_X_MASK) << RAZWI_INITIATOR_X_SHIFT)) + +#define RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0 RAZWI_INITIATOR_ID_X_Y(1, 1) +#define RAZWI_INITIATOR_ID_X_Y_TPC1 RAZWI_INITIATOR_ID_X_Y(2, 1) +#define RAZWI_INITIATOR_ID_X_Y_MME0_0 
RAZWI_INITIATOR_ID_X_Y(3, 1) +#define RAZWI_INITIATOR_ID_X_Y_MME0_1 RAZWI_INITIATOR_ID_X_Y(4, 1) +#define RAZWI_INITIATOR_ID_X_Y_MME1_0 RAZWI_INITIATOR_ID_X_Y(5, 1) +#define RAZWI_INITIATOR_ID_X_Y_MME1_1 RAZWI_INITIATOR_ID_X_Y(6, 1) +#define RAZWI_INITIATOR_ID_X_Y_TPC2 RAZWI_INITIATOR_ID_X_Y(7, 1) +#define RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC \ + RAZWI_INITIATOR_ID_X_Y(8, 1) +#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0 RAZWI_INITIATOR_ID_X_Y(0, 1) +#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0 RAZWI_INITIATOR_ID_X_Y(9, 1) +#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1 RAZWI_INITIATOR_ID_X_Y(0, 2) +#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1 RAZWI_INITIATOR_ID_X_Y(9, 2) +#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0 RAZWI_INITIATOR_ID_X_Y(0, 3) +#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0 RAZWI_INITIATOR_ID_X_Y(9, 3) +#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1 RAZWI_INITIATOR_ID_X_Y(0, 4) +#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1 RAZWI_INITIATOR_ID_X_Y(9, 4) +#define RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2 RAZWI_INITIATOR_ID_X_Y(1, 6) +#define RAZWI_INITIATOR_ID_X_Y_TPC5 RAZWI_INITIATOR_ID_X_Y(2, 6) +#define RAZWI_INITIATOR_ID_X_Y_MME2_0 RAZWI_INITIATOR_ID_X_Y(3, 6) +#define RAZWI_INITIATOR_ID_X_Y_MME2_1 RAZWI_INITIATOR_ID_X_Y(4, 6) +#define RAZWI_INITIATOR_ID_X_Y_MME3_0 RAZWI_INITIATOR_ID_X_Y(5, 6) +#define RAZWI_INITIATOR_ID_X_Y_MME3_1 RAZWI_INITIATOR_ID_X_Y(6, 6) +#define RAZWI_INITIATOR_ID_X_Y_TPC6 RAZWI_INITIATOR_ID_X_Y(7, 6) +#define RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5 RAZWI_INITIATOR_ID_X_Y(8, 6) + +#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1 + +/* STLB_CACHE_INV */ +#define STLB_CACHE_INV_PRODUCER_INDEX_SHIFT 0 +#define STLB_CACHE_INV_PRODUCER_INDEX_MASK 0xFF +#define STLB_CACHE_INV_INDEX_MASK_SHIFT 8 +#define STLB_CACHE_INV_INDEX_MASK_MASK 0xFF00 + +#define MME_ACC_ACC_STALL_R_SHIFT 0 +#define MME_SBAB_SB_STALL_R_SHIFT 0 + +#define PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK 0x700 +#define PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK 0x7000 + +#define PCIE_WRAP_LBW_DRAIN_CFG_EN_SHIFT 0 +#define PCIE_WRAP_HBW_DRAIN_CFG_EN_SHIFT 0 + +/* DMA_IF_HBM_CRED_EN */ +#define DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT 0 +#define DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_MASK 0x1 +#define DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT 1 +#define DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_MASK 0x2 + +#define DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT 0 +#define DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT 0 +#define DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT 0 +#define DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT 0 + +#define IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT 0 +#define IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT 0 + +#define IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT 0 +#define IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT 0 + +/* MMU_UP_PAGE_ERROR_CAPTURE */ +#define MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF +#define MMU_UP_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000 + +/* MMU_UP_ACCESS_ERROR_CAPTURE */ +#define MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF +#define MMU_UP_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000 + +#define QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1 +#define QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2 +#define QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4 + +#define QM_ARB_ERR_MSG_EN_MASK (\ + QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK |\ + QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK) + +#define PCIE_AUX_FLR_CTRL_HW_CTRL_MASK 0x1 +#define PCIE_AUX_FLR_CTRL_INT_MASK_MASK 0x2 + +#endif /* GAUDI_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h new file mode 100644 index 000000000..f30f2c045 --- /dev/null +++ 
b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2017-2020 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef GAUDI_PACKETS_H +#define GAUDI_PACKETS_H + +#include <linux/types.h> + +#define PACKET_HEADER_PACKET_ID_SHIFT 56 +#define PACKET_HEADER_PACKET_ID_MASK 0x1F00000000000000ull + +enum packet_id { + PACKET_WREG_32 = 0x1, + PACKET_WREG_BULK = 0x2, + PACKET_MSG_LONG = 0x3, + PACKET_MSG_SHORT = 0x4, + PACKET_CP_DMA = 0x5, + PACKET_REPEAT = 0x6, + PACKET_MSG_PROT = 0x7, + PACKET_FENCE = 0x8, + PACKET_LIN_DMA = 0x9, + PACKET_NOP = 0xA, + PACKET_STOP = 0xB, + PACKET_ARB_POINT = 0xC, + PACKET_WAIT = 0xD, + PACKET_LOAD_AND_EXE = 0xF, + MAX_PACKET_ID = (PACKET_HEADER_PACKET_ID_MASK >> + PACKET_HEADER_PACKET_ID_SHIFT) + 1 +}; + +#define GAUDI_PKT_CTL_OPCODE_SHIFT 24 +#define GAUDI_PKT_CTL_OPCODE_MASK 0x1F000000 + +#define GAUDI_PKT_CTL_EB_SHIFT 29 +#define GAUDI_PKT_CTL_EB_MASK 0x20000000 + +#define GAUDI_PKT_CTL_RB_SHIFT 30 +#define GAUDI_PKT_CTL_RB_MASK 0x40000000 + +#define GAUDI_PKT_CTL_MB_SHIFT 31 +#define GAUDI_PKT_CTL_MB_MASK 0x80000000 + +/* All packets have, at least, an 8-byte header, which contains + * the packet type. The kernel driver uses the packet header for packet + * validation and to perform any necessary required preparation before + * sending them off to the hardware. + */ +struct gaudi_packet { + __le64 header; + /* The rest of the packet data follows. Use the corresponding + * packet_XXX struct to deference the data, based on packet type + */ + u8 contents[0]; +}; + +struct packet_nop { + __le32 reserved; + __le32 ctl; +}; + +struct packet_stop { + __le32 reserved; + __le32 ctl; +}; + +struct packet_wreg32 { + __le32 value; + __le32 ctl; +}; + +struct packet_wreg_bulk { + __le32 size64; + __le32 ctl; + __le64 values[0]; /* data starts here */ +}; + +struct packet_msg_long { + __le32 value; + __le32 ctl; + __le64 addr; +}; + +#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_SHIFT 0 +#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK 0x00007FFF + +#define GAUDI_PKT_SHORT_VAL_SOB_MOD_SHIFT 31 +#define GAUDI_PKT_SHORT_VAL_SOB_MOD_MASK 0x80000000 + +#define GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_SHIFT 0 +#define GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK 0x000000FF + +#define GAUDI_PKT_SHORT_VAL_MON_MASK_SHIFT 8 +#define GAUDI_PKT_SHORT_VAL_MON_MASK_MASK 0x0000FF00 + +#define GAUDI_PKT_SHORT_VAL_MON_MODE_SHIFT 16 +#define GAUDI_PKT_SHORT_VAL_MON_MODE_MASK 0x00010000 + +#define GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_SHIFT 17 +#define GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_MASK 0xFFFE0000 + +#define GAUDI_PKT_SHORT_CTL_ADDR_SHIFT 0 +#define GAUDI_PKT_SHORT_CTL_ADDR_MASK 0x0000FFFF + +#define GAUDI_PKT_SHORT_CTL_OP_SHIFT 20 +#define GAUDI_PKT_SHORT_CTL_OP_MASK 0x00300000 + +#define GAUDI_PKT_SHORT_CTL_BASE_SHIFT 22 +#define GAUDI_PKT_SHORT_CTL_BASE_MASK 0x00C00000 + +#define GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT 24 +#define GAUDI_PKT_SHORT_CTL_OPCODE_MASK 0x1F000000 + +#define GAUDI_PKT_SHORT_CTL_EB_SHIFT 29 +#define GAUDI_PKT_SHORT_CTL_EB_MASK 0x20000000 + +#define GAUDI_PKT_SHORT_CTL_RB_SHIFT 30 +#define GAUDI_PKT_SHORT_CTL_RB_MASK 0x40000000 + +#define GAUDI_PKT_SHORT_CTL_MB_SHIFT 31 +#define GAUDI_PKT_SHORT_CTL_MB_MASK 0x80000000 + +struct packet_msg_short { + __le32 value; + __le32 ctl; +}; + +struct packet_msg_prot { + __le32 value; + __le32 ctl; + __le64 addr; +}; + +#define GAUDI_PKT_FENCE_CFG_DEC_VAL_SHIFT 0 +#define GAUDI_PKT_FENCE_CFG_DEC_VAL_MASK 0x0000000F + +#define GAUDI_PKT_FENCE_CFG_TARGET_VAL_SHIFT 16 +#define 
GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK 0x00FF0000 + +#define GAUDI_PKT_FENCE_CFG_ID_SHIFT 30 +#define GAUDI_PKT_FENCE_CFG_ID_MASK 0xC0000000 + +#define GAUDI_PKT_FENCE_CTL_PRED_SHIFT 0 +#define GAUDI_PKT_FENCE_CTL_PRED_MASK 0x0000001F + +#define GAUDI_PKT_FENCE_CTL_OPCODE_SHIFT 24 +#define GAUDI_PKT_FENCE_CTL_OPCODE_MASK 0x1F000000 + +#define GAUDI_PKT_FENCE_CTL_EB_SHIFT 29 +#define GAUDI_PKT_FENCE_CTL_EB_MASK 0x20000000 + +#define GAUDI_PKT_FENCE_CTL_RB_SHIFT 30 +#define GAUDI_PKT_FENCE_CTL_RB_MASK 0x40000000 + +#define GAUDI_PKT_FENCE_CTL_MB_SHIFT 31 +#define GAUDI_PKT_FENCE_CTL_MB_MASK 0x80000000 + +struct packet_fence { + __le32 cfg; + __le32 ctl; +}; + +#define GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_SHIFT 0 +#define GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK 0x00000001 + +#define GAUDI_PKT_LIN_DMA_CTL_LIN_SHIFT 3 +#define GAUDI_PKT_LIN_DMA_CTL_LIN_MASK 0x00000008 + +#define GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT 4 +#define GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK 0x00000010 + +#define GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT 0 +#define GAUDI_PKT_LIN_DMA_DST_ADDR_MASK 0x00FFFFFFFFFFFFFFull + +struct packet_lin_dma { + __le32 tsize; + __le32 ctl; + __le64 src_addr; + __le64 dst_addr; +}; + +struct packet_arb_point { + __le32 cfg; + __le32 ctl; +}; + +struct packet_repeat { + __le32 cfg; + __le32 ctl; +}; + +struct packet_wait { + __le32 cfg; + __le32 ctl; +}; + +#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_SHIFT 0 +#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK 0x00000001 + +struct packet_load_and_exe { + __le32 cfg; + __le32 ctl; + __le64 src_addr; +}; + +struct packet_cp_dma { + __le32 tsize; + __le32 ctl; + __le64 src_addr; +}; + +#endif /* GAUDI_PACKETS_H */ diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h new file mode 100644 index 000000000..977fb341a --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2019-2020 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef GAUDI_REG_MAP_H_ +#define GAUDI_REG_MAP_H_ + +/* + * PSOC scratch-pad registers + */ +#define mmHW_STATE mmPSOC_GLOBAL_CONF_SCRATCHPAD_0 +#define mmFUSE_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 +#define mmCPU_CMD_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 +#define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 +#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25 +#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26 +#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27 +#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28 +#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29 +#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30 +#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31 +#define mmPREBOOT_PCIE_EN mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_1 +#define mmUPD_PENDING_STS mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_3 + +#endif /* GAUDI_REG_MAP_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h new file mode 100644 index 000000000..4e0dbbbbd --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_CPU_CA53_CFG_MASKS_H_ +#define ASIC_REG_CPU_CA53_CFG_MASKS_H_ + +/* + ***************************************** + * CPU_CA53_CFG (Prototype: CA53_CFG) + ***************************************** + */ + +/* CPU_CA53_CFG_ARM_CFG */ +#define CPU_CA53_CFG_ARM_CFG_AA64NAA32_SHIFT 0 +#define CPU_CA53_CFG_ARM_CFG_AA64NAA32_MASK 0x3 +#define CPU_CA53_CFG_ARM_CFG_END_SHIFT 4 +#define CPU_CA53_CFG_ARM_CFG_END_MASK 0x30 +#define CPU_CA53_CFG_ARM_CFG_TE_SHIFT 8 +#define CPU_CA53_CFG_ARM_CFG_TE_MASK 0x300 +#define CPU_CA53_CFG_ARM_CFG_VINITHI_SHIFT 12 +#define CPU_CA53_CFG_ARM_CFG_VINITHI_MASK 0x3000 + +/* CPU_CA53_CFG_RST_ADDR_LSB */ +#define CPU_CA53_CFG_RST_ADDR_LSB_VECTOR_SHIFT 0 +#define CPU_CA53_CFG_RST_ADDR_LSB_VECTOR_MASK 0xFFFFFFFF + +/* CPU_CA53_CFG_RST_ADDR_MSB */ +#define CPU_CA53_CFG_RST_ADDR_MSB_VECTOR_SHIFT 0 +#define CPU_CA53_CFG_RST_ADDR_MSB_VECTOR_MASK 0xFF + +/* CPU_CA53_CFG_ARM_RST_CONTROL */ +#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT 0 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_MASK 0x3 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT 4 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_MASK 0x30 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT 8 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_MASK 0x100 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_SHIFT 12 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_MASK 0x1000 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT 16 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_MASK 0x10000 +#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_SHIFT 20 +#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_MASK 0x300000 + +/* CPU_CA53_CFG_ARM_AFFINITY */ +#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_1_SHIFT 0 +#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_1_MASK 0xFF +#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_2_SHIFT 8 +#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_2_MASK 0xFF00 + +/* CPU_CA53_CFG_ARM_DISABLE */ +#define CPU_CA53_CFG_ARM_DISABLE_CP15S_SHIFT 0 +#define CPU_CA53_CFG_ARM_DISABLE_CP15S_MASK 0x3 +#define CPU_CA53_CFG_ARM_DISABLE_CRYPTO_SHIFT 4 +#define CPU_CA53_CFG_ARM_DISABLE_CRYPTO_MASK 0x30 +#define CPU_CA53_CFG_ARM_DISABLE_L2_RST_SHIFT 8 +#define CPU_CA53_CFG_ARM_DISABLE_L2_RST_MASK 0x100 +#define CPU_CA53_CFG_ARM_DISABLE_DBG_L1_RST_SHIFT 9 +#define CPU_CA53_CFG_ARM_DISABLE_DBG_L1_RST_MASK 0x200 + +/* CPU_CA53_CFG_ARM_GIC_PERIPHBASE */ +#define CPU_CA53_CFG_ARM_GIC_PERIPHBASE_PERIPHBASE_SHIFT 0 +#define CPU_CA53_CFG_ARM_GIC_PERIPHBASE_PERIPHBASE_MASK 0x3FFFFF + +/* CPU_CA53_CFG_ARM_GIC_IRQ_CFG */ +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NREI_SHIFT 0 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NREI_MASK 0x3 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NSEI_SHIFT 4 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NSEI_MASK 0x30 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NIRQ_SHIFT 8 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NIRQ_MASK 0x300 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NFIQ_SHIFT 12 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NFIQ_MASK 0x3000 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVFIQ_SHIFT 16 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVFIQ_MASK 0x30000 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVIRQ_SHIFT 20 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVIRQ_MASK 0x300000 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVSEI_SHIFT 24 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVSEI_MASK 0x3000000 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_GIC_EN_SHIFT 31 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_GIC_EN_MASK 0x80000000 
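These auto-generated headers describe every register field twice, as a bit offset (_SHIFT) and a bit mask (_MASK), and consumers combine the pair to decode a raw register value. A hedged sketch of the usual idiom, assuming the RREG32() read helper and struct hl_device from the wider habanalabs driver and the mmCPU_CA53_CFG_ARM_CFG offset from the companion cpu_ca53_cfg_regs.h further below; the function name is illustrative only:

	/* Sketch only: extract the END field of ARM_CFG with its SHIFT/MASK pair */
	static u32 goya_ca53_cfg_end_sketch(struct hl_device *hdev)
	{
		u32 arm_cfg = RREG32(mmCPU_CA53_CFG_ARM_CFG);

		return (arm_cfg & CPU_CA53_CFG_ARM_CFG_END_MASK) >>
				CPU_CA53_CFG_ARM_CFG_END_SHIFT;
	}
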
+ +/* CPU_CA53_CFG_ARM_PWR_MNG */ +#define CPU_CA53_CFG_ARM_PWR_MNG_CLREXMONREQ_SHIFT 0 +#define CPU_CA53_CFG_ARM_PWR_MNG_CLREXMONREQ_MASK 0x1 +#define CPU_CA53_CFG_ARM_PWR_MNG_EVENTI_SHIFT 1 +#define CPU_CA53_CFG_ARM_PWR_MNG_EVENTI_MASK 0x2 +#define CPU_CA53_CFG_ARM_PWR_MNG_L2FLUSHREQ_SHIFT 2 +#define CPU_CA53_CFG_ARM_PWR_MNG_L2FLUSHREQ_MASK 0x4 +#define CPU_CA53_CFG_ARM_PWR_MNG_L2QREQN_SHIFT 3 +#define CPU_CA53_CFG_ARM_PWR_MNG_L2QREQN_MASK 0x8 +#define CPU_CA53_CFG_ARM_PWR_MNG_CPUQREQN_SHIFT 4 +#define CPU_CA53_CFG_ARM_PWR_MNG_CPUQREQN_MASK 0x30 +#define CPU_CA53_CFG_ARM_PWR_MNG_NEONQREQN_SHIFT 8 +#define CPU_CA53_CFG_ARM_PWR_MNG_NEONQREQN_MASK 0x300 +#define CPU_CA53_CFG_ARM_PWR_MNG_DBGPWRDUP_SHIFT 12 +#define CPU_CA53_CFG_ARM_PWR_MNG_DBGPWRDUP_MASK 0x3000 + +/* CPU_CA53_CFG_ARB_DBG_ROM_ADDR */ +#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_SHIFT 0 +#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_MASK 0xFFFFFFF +#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_VALID_SHIFT 31 +#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_VALID_MASK 0x80000000 + +/* CPU_CA53_CFG_ARM_DBG_MODES */ +#define CPU_CA53_CFG_ARM_DBG_MODES_EDBGRQ_SHIFT 0 +#define CPU_CA53_CFG_ARM_DBG_MODES_EDBGRQ_MASK 0x3 +#define CPU_CA53_CFG_ARM_DBG_MODES_DBGEN_SHIFT 4 +#define CPU_CA53_CFG_ARM_DBG_MODES_DBGEN_MASK 0x30 +#define CPU_CA53_CFG_ARM_DBG_MODES_NIDEN_SHIFT 8 +#define CPU_CA53_CFG_ARM_DBG_MODES_NIDEN_MASK 0x300 +#define CPU_CA53_CFG_ARM_DBG_MODES_SPIDEN_SHIFT 12 +#define CPU_CA53_CFG_ARM_DBG_MODES_SPIDEN_MASK 0x3000 +#define CPU_CA53_CFG_ARM_DBG_MODES_SPNIDEN_SHIFT 16 +#define CPU_CA53_CFG_ARM_DBG_MODES_SPNIDEN_MASK 0x30000 + +/* CPU_CA53_CFG_ARM_PWR_STAT_0 */ +#define CPU_CA53_CFG_ARM_PWR_STAT_0_CLREXMONACK_SHIFT 0 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_CLREXMONACK_MASK 0x1 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_EVENTO_SHIFT 1 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_EVENTO_MASK 0x2 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFI_SHIFT 4 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFI_MASK 0x30 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFE_SHIFT 8 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFE_MASK 0x300 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFIL2_SHIFT 12 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFIL2_MASK 0x1000 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_L2FLUSHDONE_SHIFT 13 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_L2FLUSHDONE_MASK 0x2000 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_SMPEN_SHIFT 16 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_SMPEN_MASK 0x30000 + +/* CPU_CA53_CFG_ARM_PWR_STAT_1 */ +#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACTIVE_SHIFT 0 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACTIVE_MASK 0x3 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQDENY_SHIFT 4 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQDENY_MASK 0x30 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACCEPTN_SHIFT 8 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACCEPTN_MASK 0x300 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACTIVE_SHIFT 12 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACTIVE_MASK 0x3000 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQDENY_SHIFT 16 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQDENY_MASK 0x30000 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACCEPTN_SHIFT 20 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACCEPTN_MASK 0x300000 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACTIVE_SHIFT 24 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACTIVE_MASK 0x1000000 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QDENY_SHIFT 25 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QDENY_MASK 0x2000000 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACCEPTN_SHIFT 26 +#define 
CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACCEPTN_MASK 0x4000000 + +/* CPU_CA53_CFG_ARM_DBG_STATUS */ +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGACK_SHIFT 0 +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGACK_MASK 0x3 +#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMRX_SHIFT 4 +#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMRX_MASK 0x30 +#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMTX_SHIFT 8 +#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMTX_MASK 0x300 +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGRSTREQ_SHIFT 12 +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGRSTREQ_MASK 0x3000 +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGNOPWRDWN_SHIFT 16 +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGNOPWRDWN_MASK 0x30000 +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGPWRUPREQ_SHIFT 20 +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGPWRUPREQ_MASK 0x300000 + +/* CPU_CA53_CFG_ARM_MEM_ATTR */ +#define CPU_CA53_CFG_ARM_MEM_ATTR_RDMEMATTR_SHIFT 0 +#define CPU_CA53_CFG_ARM_MEM_ATTR_RDMEMATTR_MASK 0xFF +#define CPU_CA53_CFG_ARM_MEM_ATTR_WRMEMATTR_SHIFT 8 +#define CPU_CA53_CFG_ARM_MEM_ATTR_WRMEMATTR_MASK 0xFF00 +#define CPU_CA53_CFG_ARM_MEM_ATTR_RACKM_SHIFT 16 +#define CPU_CA53_CFG_ARM_MEM_ATTR_RACKM_MASK 0x10000 +#define CPU_CA53_CFG_ARM_MEM_ATTR_WACKM_SHIFT 20 +#define CPU_CA53_CFG_ARM_MEM_ATTR_WACKM_MASK 0x100000 + +/* CPU_CA53_CFG_ARM_PMU */ +#define CPU_CA53_CFG_ARM_PMU_EVENT_SHIFT 0 +#define CPU_CA53_CFG_ARM_PMU_EVENT_MASK 0x3FFFFFFF + +#endif /* ASIC_REG_CPU_CA53_CFG_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h new file mode 100644 index 000000000..f3faf1aad --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_CPU_CA53_CFG_REGS_H_ +#define ASIC_REG_CPU_CA53_CFG_REGS_H_ + +/* + ***************************************** + * CPU_CA53_CFG (Prototype: CA53_CFG) + ***************************************** + */ + +#define mmCPU_CA53_CFG_ARM_CFG 0x441100 + +#define mmCPU_CA53_CFG_RST_ADDR_LSB_0 0x441104 + +#define mmCPU_CA53_CFG_RST_ADDR_LSB_1 0x441108 + +#define mmCPU_CA53_CFG_RST_ADDR_MSB_0 0x441114 + +#define mmCPU_CA53_CFG_RST_ADDR_MSB_1 0x441118 + +#define mmCPU_CA53_CFG_ARM_RST_CONTROL 0x441124 + +#define mmCPU_CA53_CFG_ARM_AFFINITY 0x441128 + +#define mmCPU_CA53_CFG_ARM_DISABLE 0x44112C + +#define mmCPU_CA53_CFG_ARM_GIC_PERIPHBASE 0x441130 + +#define mmCPU_CA53_CFG_ARM_GIC_IRQ_CFG 0x441134 + +#define mmCPU_CA53_CFG_ARM_PWR_MNG 0x441138 + +#define mmCPU_CA53_CFG_ARB_DBG_ROM_ADDR 0x44113C + +#define mmCPU_CA53_CFG_ARM_DBG_MODES 0x441140 + +#define mmCPU_CA53_CFG_ARM_PWR_STAT_0 0x441200 + +#define mmCPU_CA53_CFG_ARM_PWR_STAT_1 0x441204 + +#define mmCPU_CA53_CFG_ARM_DBG_STATUS 0x441208 + +#define mmCPU_CA53_CFG_ARM_MEM_ATTR 0x44120C + +#define mmCPU_CA53_CFG_ARM_PMU_0 0x441210 + +#define mmCPU_CA53_CFG_ARM_PMU_1 0x441214 + +#endif /* ASIC_REG_CPU_CA53_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h new file mode 100644 index 000000000..cf6579189 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_CPU_IF_REGS_H_ +#define ASIC_REG_CPU_IF_REGS_H_ + +/* + ***************************************** + * CPU_IF (Prototype: CPU_IF) + ***************************************** + */ + +#define mmCPU_IF_PF_PQ_PI 0x442100 + +#define mmCPU_IF_ARUSER_OVR 0x442104 + +#define mmCPU_IF_ARUSER_OVR_EN 0x442108 + +#define mmCPU_IF_AWUSER_OVR 0x44210C + +#define mmCPU_IF_AWUSER_OVR_EN 0x442110 + +#define mmCPU_IF_AXCACHE_OVR 0x442114 + +#define mmCPU_IF_LOCK_OVR 0x442118 + +#define mmCPU_IF_PROT_OVR 0x44211C + +#define mmCPU_IF_MAX_OUTSTANDING 0x442120 + +#define mmCPU_IF_EARLY_BRESP_EN 0x442124 + +#define mmCPU_IF_FORCE_RSP_OK 0x442128 + +#define mmCPU_IF_CPU_MSB_ADDR 0x44212C + +#define mmCPU_IF_AXI_SPLIT_INTR 0x442130 + +#endif /* ASIC_REG_CPU_IF_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h new file mode 100644 index 000000000..8c8f9726d --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_CPU_PLL_REGS_H_ +#define ASIC_REG_CPU_PLL_REGS_H_ + +/* + ***************************************** + * CPU_PLL (Prototype: PLL) + ***************************************** + */ + +#define mmCPU_PLL_NR 0x4A2100 + +#define mmCPU_PLL_NF 0x4A2104 + +#define mmCPU_PLL_OD 0x4A2108 + +#define mmCPU_PLL_NB 0x4A210C + +#define mmCPU_PLL_CFG 0x4A2110 + +#define mmCPU_PLL_LOSE_MASK 0x4A2120 + +#define mmCPU_PLL_LOCK_INTR 0x4A2128 + +#define mmCPU_PLL_LOCK_BYPASS 0x4A212C + +#define mmCPU_PLL_DATA_CHNG 0x4A2130 + +#define mmCPU_PLL_RST 0x4A2134 + +#define mmCPU_PLL_SLIP_WD_CNTR 0x4A2150 + +#define mmCPU_PLL_DIV_FACTOR_0 0x4A2200 + +#define mmCPU_PLL_DIV_FACTOR_1 0x4A2204 + +#define mmCPU_PLL_DIV_FACTOR_2 0x4A2208 + +#define mmCPU_PLL_DIV_FACTOR_3 0x4A220C + +#define mmCPU_PLL_DIV_FACTOR_CMD_0 0x4A2220 + +#define mmCPU_PLL_DIV_FACTOR_CMD_1 0x4A2224 + +#define mmCPU_PLL_DIV_FACTOR_CMD_2 0x4A2228 + +#define mmCPU_PLL_DIV_FACTOR_CMD_3 0x4A222C + +#define mmCPU_PLL_DIV_SEL_0 0x4A2280 + +#define mmCPU_PLL_DIV_SEL_1 0x4A2284 + +#define mmCPU_PLL_DIV_SEL_2 0x4A2288 + +#define mmCPU_PLL_DIV_SEL_3 0x4A228C + +#define mmCPU_PLL_DIV_EN_0 0x4A22A0 + +#define mmCPU_PLL_DIV_EN_1 0x4A22A4 + +#define mmCPU_PLL_DIV_EN_2 0x4A22A8 + +#define mmCPU_PLL_DIV_EN_3 0x4A22AC + +#define mmCPU_PLL_DIV_FACTOR_BUSY_0 0x4A22C0 + +#define mmCPU_PLL_DIV_FACTOR_BUSY_1 0x4A22C4 + +#define mmCPU_PLL_DIV_FACTOR_BUSY_2 0x4A22C8 + +#define mmCPU_PLL_DIV_FACTOR_BUSY_3 0x4A22CC + +#define mmCPU_PLL_CLK_GATER 0x4A2300 + +#define mmCPU_PLL_CLK_RLX_0 0x4A2310 + +#define mmCPU_PLL_CLK_RLX_1 0x4A2314 + +#define mmCPU_PLL_CLK_RLX_2 0x4A2318 + +#define mmCPU_PLL_CLK_RLX_3 0x4A231C + +#define mmCPU_PLL_REF_CNTR_PERIOD 0x4A2400 + +#define mmCPU_PLL_REF_LOW_THRESHOLD 0x4A2410 + +#define mmCPU_PLL_REF_HIGH_THRESHOLD 0x4A2420 + +#define mmCPU_PLL_PLL_NOT_STABLE 0x4A2430 + +#define mmCPU_PLL_FREQ_CALC_EN 0x4A2440 + +#endif /* ASIC_REG_CPU_PLL_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h 
b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h new file mode 100644 index 000000000..028143408 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h @@ -0,0 +1,418 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_CH_0_MASKS_H_ +#define ASIC_REG_DMA_CH_0_MASKS_H_ + +/* + ***************************************** + * DMA_CH_0 (Prototype: DMA_CH) + ***************************************** + */ + +/* DMA_CH_0_CFG0 */ +#define DMA_CH_0_CFG0_RD_MAX_OUTSTAND_SHIFT 0 +#define DMA_CH_0_CFG0_RD_MAX_OUTSTAND_MASK 0x3FF +#define DMA_CH_0_CFG0_WR_MAX_OUTSTAND_SHIFT 16 +#define DMA_CH_0_CFG0_WR_MAX_OUTSTAND_MASK 0xFFF0000 + +/* DMA_CH_0_CFG1 */ +#define DMA_CH_0_CFG1_RD_BUF_MAX_SIZE_SHIFT 0 +#define DMA_CH_0_CFG1_RD_BUF_MAX_SIZE_MASK 0x3FF + +/* DMA_CH_0_ERRMSG_ADDR_LO */ +#define DMA_CH_0_ERRMSG_ADDR_LO_VAL_SHIFT 0 +#define DMA_CH_0_ERRMSG_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_ERRMSG_ADDR_HI */ +#define DMA_CH_0_ERRMSG_ADDR_HI_VAL_SHIFT 0 +#define DMA_CH_0_ERRMSG_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_ERRMSG_WDATA */ +#define DMA_CH_0_ERRMSG_WDATA_VAL_SHIFT 0 +#define DMA_CH_0_ERRMSG_WDATA_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_RD_COMP_ADDR_LO */ +#define DMA_CH_0_RD_COMP_ADDR_LO_VAL_SHIFT 0 +#define DMA_CH_0_RD_COMP_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_RD_COMP_ADDR_HI */ +#define DMA_CH_0_RD_COMP_ADDR_HI_VAL_SHIFT 0 +#define DMA_CH_0_RD_COMP_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_RD_COMP_WDATA */ +#define DMA_CH_0_RD_COMP_WDATA_VAL_SHIFT 0 +#define DMA_CH_0_RD_COMP_WDATA_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_WR_COMP_ADDR_LO */ +#define DMA_CH_0_WR_COMP_ADDR_LO_VAL_SHIFT 0 +#define DMA_CH_0_WR_COMP_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_WR_COMP_ADDR_HI */ +#define DMA_CH_0_WR_COMP_ADDR_HI_VAL_SHIFT 0 +#define DMA_CH_0_WR_COMP_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_WR_COMP_WDATA */ +#define DMA_CH_0_WR_COMP_WDATA_VAL_SHIFT 0 +#define DMA_CH_0_WR_COMP_WDATA_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_LDMA_SRC_ADDR_LO */ +#define DMA_CH_0_LDMA_SRC_ADDR_LO_VAL_SHIFT 0 +#define DMA_CH_0_LDMA_SRC_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_LDMA_SRC_ADDR_HI */ +#define DMA_CH_0_LDMA_SRC_ADDR_HI_VAL_SHIFT 0 +#define DMA_CH_0_LDMA_SRC_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_LDMA_DST_ADDR_LO */ +#define DMA_CH_0_LDMA_DST_ADDR_LO_VAL_SHIFT 0 +#define DMA_CH_0_LDMA_DST_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_LDMA_DST_ADDR_HI */ +#define DMA_CH_0_LDMA_DST_ADDR_HI_VAL_SHIFT 0 +#define DMA_CH_0_LDMA_DST_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_LDMA_TSIZE */ +#define DMA_CH_0_LDMA_TSIZE_VAL_SHIFT 0 +#define DMA_CH_0_LDMA_TSIZE_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_COMIT_TRANSFER */ +#define DMA_CH_0_COMIT_TRANSFER_PCI_UPS_WKORDR_SHIFT 0 +#define DMA_CH_0_COMIT_TRANSFER_PCI_UPS_WKORDR_MASK 0x1 +#define DMA_CH_0_COMIT_TRANSFER_RD_COMP_EN_SHIFT 1 +#define DMA_CH_0_COMIT_TRANSFER_RD_COMP_EN_MASK 0x2 +#define DMA_CH_0_COMIT_TRANSFER_WR_COMP_EN_SHIFT 2 +#define DMA_CH_0_COMIT_TRANSFER_WR_COMP_EN_MASK 0x4 +#define DMA_CH_0_COMIT_TRANSFER_NOSNOOP_SHIFT 3 +#define DMA_CH_0_COMIT_TRANSFER_NOSNOOP_MASK 0x8 +#define DMA_CH_0_COMIT_TRANSFER_SRC_ADDR_INC_DIS_SHIFT 4 +#define DMA_CH_0_COMIT_TRANSFER_SRC_ADDR_INC_DIS_MASK 0x10 +#define DMA_CH_0_COMIT_TRANSFER_DST_ADDR_INC_DIS_SHIFT 5 +#define 
DMA_CH_0_COMIT_TRANSFER_DST_ADDR_INC_DIS_MASK 0x20 +#define DMA_CH_0_COMIT_TRANSFER_MEM_SET_SHIFT 6 +#define DMA_CH_0_COMIT_TRANSFER_MEM_SET_MASK 0x40 +#define DMA_CH_0_COMIT_TRANSFER_MOD_TENSOR_SHIFT 15 +#define DMA_CH_0_COMIT_TRANSFER_MOD_TENSOR_MASK 0x8000 +#define DMA_CH_0_COMIT_TRANSFER_CTL_SHIFT 16 +#define DMA_CH_0_COMIT_TRANSFER_CTL_MASK 0xFFFF0000 + +/* DMA_CH_0_STS0 */ +#define DMA_CH_0_STS0_DMA_BUSY_SHIFT 0 +#define DMA_CH_0_STS0_DMA_BUSY_MASK 0x1 +#define DMA_CH_0_STS0_RD_STS_CTX_FULL_SHIFT 1 +#define DMA_CH_0_STS0_RD_STS_CTX_FULL_MASK 0x2 +#define DMA_CH_0_STS0_WR_STS_CTX_FULL_SHIFT 2 +#define DMA_CH_0_STS0_WR_STS_CTX_FULL_MASK 0x4 + +/* DMA_CH_0_STS1 */ +#define DMA_CH_0_STS1_RD_STS_CTX_CNT_SHIFT 0 +#define DMA_CH_0_STS1_RD_STS_CTX_CNT_MASK 0xFFFFFFFF + +/* DMA_CH_0_STS2 */ +#define DMA_CH_0_STS2_WR_STS_CTX_CNT_SHIFT 0 +#define DMA_CH_0_STS2_WR_STS_CTX_CNT_MASK 0xFFFFFFFF + +/* DMA_CH_0_STS3 */ +#define DMA_CH_0_STS3_RD_STS_TRN_CNT_SHIFT 0 +#define DMA_CH_0_STS3_RD_STS_TRN_CNT_MASK 0xFFFFFFFF + +/* DMA_CH_0_STS4 */ +#define DMA_CH_0_STS4_WR_STS_TRN_CNT_SHIFT 0 +#define DMA_CH_0_STS4_WR_STS_TRN_CNT_MASK 0xFFFFFFFF + +/* DMA_CH_0_SRC_ADDR_LO_STS */ +#define DMA_CH_0_SRC_ADDR_LO_STS_VAL_SHIFT 0 +#define DMA_CH_0_SRC_ADDR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_SRC_ADDR_HI_STS */ +#define DMA_CH_0_SRC_ADDR_HI_STS_VAL_SHIFT 0 +#define DMA_CH_0_SRC_ADDR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_SRC_TSIZE_STS */ +#define DMA_CH_0_SRC_TSIZE_STS_VAL_SHIFT 0 +#define DMA_CH_0_SRC_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_DST_ADDR_LO_STS */ +#define DMA_CH_0_DST_ADDR_LO_STS_VAL_SHIFT 0 +#define DMA_CH_0_DST_ADDR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_DST_ADDR_HI_STS */ +#define DMA_CH_0_DST_ADDR_HI_STS_VAL_SHIFT 0 +#define DMA_CH_0_DST_ADDR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_DST_TSIZE_STS */ +#define DMA_CH_0_DST_TSIZE_STS_VAL_SHIFT 0 +#define DMA_CH_0_DST_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_RD_RATE_LIM_EN */ +#define DMA_CH_0_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define DMA_CH_0_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* DMA_CH_0_RD_RATE_LIM_RST_TOKEN */ +#define DMA_CH_0_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define DMA_CH_0_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* DMA_CH_0_RD_RATE_LIM_SAT */ +#define DMA_CH_0_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define DMA_CH_0_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* DMA_CH_0_RD_RATE_LIM_TOUT */ +#define DMA_CH_0_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define DMA_CH_0_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* DMA_CH_0_WR_RATE_LIM_EN */ +#define DMA_CH_0_WR_RATE_LIM_EN_VAL_SHIFT 0 +#define DMA_CH_0_WR_RATE_LIM_EN_VAL_MASK 0x1 + +/* DMA_CH_0_WR_RATE_LIM_RST_TOKEN */ +#define DMA_CH_0_WR_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define DMA_CH_0_WR_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* DMA_CH_0_WR_RATE_LIM_SAT */ +#define DMA_CH_0_WR_RATE_LIM_SAT_VAL_SHIFT 0 +#define DMA_CH_0_WR_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* DMA_CH_0_WR_RATE_LIM_TOUT */ +#define DMA_CH_0_WR_RATE_LIM_TOUT_VAL_SHIFT 0 +#define DMA_CH_0_WR_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* DMA_CH_0_CFG2 */ +#define DMA_CH_0_CFG2_FORCE_WORD_SHIFT 0 +#define DMA_CH_0_CFG2_FORCE_WORD_MASK 0x1 + +/* DMA_CH_0_TDMA_CTL */ +#define DMA_CH_0_TDMA_CTL_DTYPE_SHIFT 0 +#define DMA_CH_0_TDMA_CTL_DTYPE_MASK 0x7 + +/* DMA_CH_0_TDMA_SRC_BASE_ADDR_LO */ +#define DMA_CH_0_TDMA_SRC_BASE_ADDR_LO_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_BASE_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_BASE_ADDR_HI */ +#define DMA_CH_0_TDMA_SRC_BASE_ADDR_HI_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_BASE_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* 
DMA_CH_0_TDMA_SRC_ROI_BASE_0 */ +#define DMA_CH_0_TDMA_SRC_ROI_BASE_0_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_ROI_BASE_0_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_ROI_SIZE_0 */ +#define DMA_CH_0_TDMA_SRC_ROI_SIZE_0_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_ROI_SIZE_0_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_0 */ +#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_0_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_0_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_START_OFFSET_0 */ +#define DMA_CH_0_TDMA_SRC_START_OFFSET_0_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_START_OFFSET_0_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_STRIDE_0 */ +#define DMA_CH_0_TDMA_SRC_STRIDE_0_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_STRIDE_0_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_ROI_BASE_1 */ +#define DMA_CH_0_TDMA_SRC_ROI_BASE_1_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_ROI_BASE_1_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_ROI_SIZE_1 */ +#define DMA_CH_0_TDMA_SRC_ROI_SIZE_1_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_ROI_SIZE_1_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_1 */ +#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_1_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_1_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_START_OFFSET_1 */ +#define DMA_CH_0_TDMA_SRC_START_OFFSET_1_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_START_OFFSET_1_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_STRIDE_1 */ +#define DMA_CH_0_TDMA_SRC_STRIDE_1_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_STRIDE_1_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_ROI_BASE_2 */ +#define DMA_CH_0_TDMA_SRC_ROI_BASE_2_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_ROI_BASE_2_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_ROI_SIZE_2 */ +#define DMA_CH_0_TDMA_SRC_ROI_SIZE_2_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_ROI_SIZE_2_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_2 */ +#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_2_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_2_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_START_OFFSET_2 */ +#define DMA_CH_0_TDMA_SRC_START_OFFSET_2_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_START_OFFSET_2_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_STRIDE_2 */ +#define DMA_CH_0_TDMA_SRC_STRIDE_2_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_STRIDE_2_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_ROI_BASE_3 */ +#define DMA_CH_0_TDMA_SRC_ROI_BASE_3_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_ROI_BASE_3_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_ROI_SIZE_3 */ +#define DMA_CH_0_TDMA_SRC_ROI_SIZE_3_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_ROI_SIZE_3_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_3 */ +#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_3_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_3_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_START_OFFSET_3 */ +#define DMA_CH_0_TDMA_SRC_START_OFFSET_3_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_START_OFFSET_3_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_STRIDE_3 */ +#define DMA_CH_0_TDMA_SRC_STRIDE_3_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_STRIDE_3_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_ROI_BASE_4 */ +#define DMA_CH_0_TDMA_SRC_ROI_BASE_4_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_ROI_BASE_4_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_ROI_SIZE_4 */ +#define DMA_CH_0_TDMA_SRC_ROI_SIZE_4_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_ROI_SIZE_4_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_4 */ +#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_4_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_VALID_ELEMENTS_4_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_START_OFFSET_4 */ +#define 
DMA_CH_0_TDMA_SRC_START_OFFSET_4_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_START_OFFSET_4_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_SRC_STRIDE_4 */ +#define DMA_CH_0_TDMA_SRC_STRIDE_4_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_SRC_STRIDE_4_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_BASE_ADDR_LO */ +#define DMA_CH_0_TDMA_DST_BASE_ADDR_LO_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_BASE_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_BASE_ADDR_HI */ +#define DMA_CH_0_TDMA_DST_BASE_ADDR_HI_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_BASE_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_ROI_BASE_0 */ +#define DMA_CH_0_TDMA_DST_ROI_BASE_0_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_ROI_BASE_0_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_ROI_SIZE_0 */ +#define DMA_CH_0_TDMA_DST_ROI_SIZE_0_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_ROI_SIZE_0_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_VALID_ELEMENTS_0 */ +#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_0_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_0_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_START_OFFSET_0 */ +#define DMA_CH_0_TDMA_DST_START_OFFSET_0_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_START_OFFSET_0_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_STRIDE_0 */ +#define DMA_CH_0_TDMA_DST_STRIDE_0_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_STRIDE_0_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_ROI_BASE_1 */ +#define DMA_CH_0_TDMA_DST_ROI_BASE_1_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_ROI_BASE_1_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_ROI_SIZE_1 */ +#define DMA_CH_0_TDMA_DST_ROI_SIZE_1_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_ROI_SIZE_1_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_VALID_ELEMENTS_1 */ +#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_1_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_1_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_START_OFFSET_1 */ +#define DMA_CH_0_TDMA_DST_START_OFFSET_1_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_START_OFFSET_1_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_STRIDE_1 */ +#define DMA_CH_0_TDMA_DST_STRIDE_1_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_STRIDE_1_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_ROI_BASE_2 */ +#define DMA_CH_0_TDMA_DST_ROI_BASE_2_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_ROI_BASE_2_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_ROI_SIZE_2 */ +#define DMA_CH_0_TDMA_DST_ROI_SIZE_2_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_ROI_SIZE_2_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_VALID_ELEMENTS_2 */ +#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_2_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_2_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_START_OFFSET_2 */ +#define DMA_CH_0_TDMA_DST_START_OFFSET_2_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_START_OFFSET_2_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_STRIDE_2 */ +#define DMA_CH_0_TDMA_DST_STRIDE_2_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_STRIDE_2_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_ROI_BASE_3 */ +#define DMA_CH_0_TDMA_DST_ROI_BASE_3_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_ROI_BASE_3_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_ROI_SIZE_3 */ +#define DMA_CH_0_TDMA_DST_ROI_SIZE_3_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_ROI_SIZE_3_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_VALID_ELEMENTS_3 */ +#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_3_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_3_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_START_OFFSET_3 */ +#define DMA_CH_0_TDMA_DST_START_OFFSET_3_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_START_OFFSET_3_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_STRIDE_3 */ +#define DMA_CH_0_TDMA_DST_STRIDE_3_VAL_SHIFT 0 +#define 
DMA_CH_0_TDMA_DST_STRIDE_3_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_ROI_BASE_4 */ +#define DMA_CH_0_TDMA_DST_ROI_BASE_4_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_ROI_BASE_4_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_ROI_SIZE_4 */ +#define DMA_CH_0_TDMA_DST_ROI_SIZE_4_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_ROI_SIZE_4_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_VALID_ELEMENTS_4 */ +#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_4_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_VALID_ELEMENTS_4_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_START_OFFSET_4 */ +#define DMA_CH_0_TDMA_DST_START_OFFSET_4_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_START_OFFSET_4_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_TDMA_DST_STRIDE_4 */ +#define DMA_CH_0_TDMA_DST_STRIDE_4_VAL_SHIFT 0 +#define DMA_CH_0_TDMA_DST_STRIDE_4_VAL_MASK 0xFFFFFFFF + +/* DMA_CH_0_MEM_INIT_BUSY */ +#define DMA_CH_0_MEM_INIT_BUSY_SBC_DATA_SHIFT 0 +#define DMA_CH_0_MEM_INIT_BUSY_SBC_DATA_MASK 0xFF +#define DMA_CH_0_MEM_INIT_BUSY_SBC_MD_SHIFT 8 +#define DMA_CH_0_MEM_INIT_BUSY_SBC_MD_MASK 0x100 + +#endif /* ASIC_REG_DMA_CH_0_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h new file mode 100644 index 000000000..0b246fe6a --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_CH_0_REGS_H_ +#define ASIC_REG_DMA_CH_0_REGS_H_ + +/* + ***************************************** + * DMA_CH_0 (Prototype: DMA_CH) + ***************************************** + */ + +#define mmDMA_CH_0_CFG0 0x401000 + +#define mmDMA_CH_0_CFG1 0x401004 + +#define mmDMA_CH_0_ERRMSG_ADDR_LO 0x401008 + +#define mmDMA_CH_0_ERRMSG_ADDR_HI 0x40100C + +#define mmDMA_CH_0_ERRMSG_WDATA 0x401010 + +#define mmDMA_CH_0_RD_COMP_ADDR_LO 0x401014 + +#define mmDMA_CH_0_RD_COMP_ADDR_HI 0x401018 + +#define mmDMA_CH_0_RD_COMP_WDATA 0x40101C + +#define mmDMA_CH_0_WR_COMP_ADDR_LO 0x401020 + +#define mmDMA_CH_0_WR_COMP_ADDR_HI 0x401024 + +#define mmDMA_CH_0_WR_COMP_WDATA 0x401028 + +#define mmDMA_CH_0_LDMA_SRC_ADDR_LO 0x40102C + +#define mmDMA_CH_0_LDMA_SRC_ADDR_HI 0x401030 + +#define mmDMA_CH_0_LDMA_DST_ADDR_LO 0x401034 + +#define mmDMA_CH_0_LDMA_DST_ADDR_HI 0x401038 + +#define mmDMA_CH_0_LDMA_TSIZE 0x40103C + +#define mmDMA_CH_0_COMIT_TRANSFER 0x401040 + +#define mmDMA_CH_0_STS0 0x401044 + +#define mmDMA_CH_0_STS1 0x401048 + +#define mmDMA_CH_0_STS2 0x40104C + +#define mmDMA_CH_0_STS3 0x401050 + +#define mmDMA_CH_0_STS4 0x401054 + +#define mmDMA_CH_0_SRC_ADDR_LO_STS 0x401058 + +#define mmDMA_CH_0_SRC_ADDR_HI_STS 0x40105C + +#define mmDMA_CH_0_SRC_TSIZE_STS 0x401060 + +#define mmDMA_CH_0_DST_ADDR_LO_STS 0x401064 + +#define mmDMA_CH_0_DST_ADDR_HI_STS 0x401068 + +#define mmDMA_CH_0_DST_TSIZE_STS 0x40106C + +#define mmDMA_CH_0_RD_RATE_LIM_EN 0x401070 + +#define mmDMA_CH_0_RD_RATE_LIM_RST_TOKEN 0x401074 + +#define mmDMA_CH_0_RD_RATE_LIM_SAT 0x401078 + +#define mmDMA_CH_0_RD_RATE_LIM_TOUT 0x40107C + +#define mmDMA_CH_0_WR_RATE_LIM_EN 0x401080 + +#define mmDMA_CH_0_WR_RATE_LIM_RST_TOKEN 0x401084 + +#define mmDMA_CH_0_WR_RATE_LIM_SAT 0x401088 + +#define mmDMA_CH_0_WR_RATE_LIM_TOUT 0x40108C + +#define mmDMA_CH_0_CFG2 0x401090 + +#define mmDMA_CH_0_TDMA_CTL 0x401100 + +#define 
mmDMA_CH_0_TDMA_SRC_BASE_ADDR_LO 0x401104 + +#define mmDMA_CH_0_TDMA_SRC_BASE_ADDR_HI 0x401108 + +#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_0 0x40110C + +#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_0 0x401110 + +#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_0 0x401114 + +#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_0 0x401118 + +#define mmDMA_CH_0_TDMA_SRC_STRIDE_0 0x40111C + +#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_1 0x401120 + +#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_1 0x401124 + +#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_1 0x401128 + +#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_1 0x40112C + +#define mmDMA_CH_0_TDMA_SRC_STRIDE_1 0x401130 + +#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_2 0x401134 + +#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_2 0x401138 + +#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_2 0x40113C + +#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_2 0x401140 + +#define mmDMA_CH_0_TDMA_SRC_STRIDE_2 0x401144 + +#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_3 0x401148 + +#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_3 0x40114C + +#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_3 0x401150 + +#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_3 0x401154 + +#define mmDMA_CH_0_TDMA_SRC_STRIDE_3 0x401158 + +#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_4 0x40115C + +#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_4 0x401160 + +#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_4 0x401164 + +#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_4 0x401168 + +#define mmDMA_CH_0_TDMA_SRC_STRIDE_4 0x40116C + +#define mmDMA_CH_0_TDMA_DST_BASE_ADDR_LO 0x401170 + +#define mmDMA_CH_0_TDMA_DST_BASE_ADDR_HI 0x401174 + +#define mmDMA_CH_0_TDMA_DST_ROI_BASE_0 0x401178 + +#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_0 0x40117C + +#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_0 0x401180 + +#define mmDMA_CH_0_TDMA_DST_START_OFFSET_0 0x401184 + +#define mmDMA_CH_0_TDMA_DST_STRIDE_0 0x401188 + +#define mmDMA_CH_0_TDMA_DST_ROI_BASE_1 0x40118C + +#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_1 0x401190 + +#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_1 0x401194 + +#define mmDMA_CH_0_TDMA_DST_START_OFFSET_1 0x401198 + +#define mmDMA_CH_0_TDMA_DST_STRIDE_1 0x40119C + +#define mmDMA_CH_0_TDMA_DST_ROI_BASE_2 0x4011A0 + +#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_2 0x4011A4 + +#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_2 0x4011A8 + +#define mmDMA_CH_0_TDMA_DST_START_OFFSET_2 0x4011AC + +#define mmDMA_CH_0_TDMA_DST_STRIDE_2 0x4011B0 + +#define mmDMA_CH_0_TDMA_DST_ROI_BASE_3 0x4011B4 + +#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_3 0x4011B8 + +#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_3 0x4011BC + +#define mmDMA_CH_0_TDMA_DST_START_OFFSET_3 0x4011C0 + +#define mmDMA_CH_0_TDMA_DST_STRIDE_3 0x4011C4 + +#define mmDMA_CH_0_TDMA_DST_ROI_BASE_4 0x4011C8 + +#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_4 0x4011CC + +#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_4 0x4011D0 + +#define mmDMA_CH_0_TDMA_DST_START_OFFSET_4 0x4011D4 + +#define mmDMA_CH_0_TDMA_DST_STRIDE_4 0x4011D8 + +#define mmDMA_CH_0_MEM_INIT_BUSY 0x4011FC + +#endif /* ASIC_REG_DMA_CH_0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h new file mode 100644 index 000000000..544903172 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_CH_1_REGS_H_ +#define ASIC_REG_DMA_CH_1_REGS_H_ + +/* + ***************************************** + * DMA_CH_1 (Prototype: DMA_CH) + ***************************************** + */ + +#define mmDMA_CH_1_CFG0 0x409000 + +#define mmDMA_CH_1_CFG1 0x409004 + +#define mmDMA_CH_1_ERRMSG_ADDR_LO 0x409008 + +#define mmDMA_CH_1_ERRMSG_ADDR_HI 0x40900C + +#define mmDMA_CH_1_ERRMSG_WDATA 0x409010 + +#define mmDMA_CH_1_RD_COMP_ADDR_LO 0x409014 + +#define mmDMA_CH_1_RD_COMP_ADDR_HI 0x409018 + +#define mmDMA_CH_1_RD_COMP_WDATA 0x40901C + +#define mmDMA_CH_1_WR_COMP_ADDR_LO 0x409020 + +#define mmDMA_CH_1_WR_COMP_ADDR_HI 0x409024 + +#define mmDMA_CH_1_WR_COMP_WDATA 0x409028 + +#define mmDMA_CH_1_LDMA_SRC_ADDR_LO 0x40902C + +#define mmDMA_CH_1_LDMA_SRC_ADDR_HI 0x409030 + +#define mmDMA_CH_1_LDMA_DST_ADDR_LO 0x409034 + +#define mmDMA_CH_1_LDMA_DST_ADDR_HI 0x409038 + +#define mmDMA_CH_1_LDMA_TSIZE 0x40903C + +#define mmDMA_CH_1_COMIT_TRANSFER 0x409040 + +#define mmDMA_CH_1_STS0 0x409044 + +#define mmDMA_CH_1_STS1 0x409048 + +#define mmDMA_CH_1_STS2 0x40904C + +#define mmDMA_CH_1_STS3 0x409050 + +#define mmDMA_CH_1_STS4 0x409054 + +#define mmDMA_CH_1_SRC_ADDR_LO_STS 0x409058 + +#define mmDMA_CH_1_SRC_ADDR_HI_STS 0x40905C + +#define mmDMA_CH_1_SRC_TSIZE_STS 0x409060 + +#define mmDMA_CH_1_DST_ADDR_LO_STS 0x409064 + +#define mmDMA_CH_1_DST_ADDR_HI_STS 0x409068 + +#define mmDMA_CH_1_DST_TSIZE_STS 0x40906C + +#define mmDMA_CH_1_RD_RATE_LIM_EN 0x409070 + +#define mmDMA_CH_1_RD_RATE_LIM_RST_TOKEN 0x409074 + +#define mmDMA_CH_1_RD_RATE_LIM_SAT 0x409078 + +#define mmDMA_CH_1_RD_RATE_LIM_TOUT 0x40907C + +#define mmDMA_CH_1_WR_RATE_LIM_EN 0x409080 + +#define mmDMA_CH_1_WR_RATE_LIM_RST_TOKEN 0x409084 + +#define mmDMA_CH_1_WR_RATE_LIM_SAT 0x409088 + +#define mmDMA_CH_1_WR_RATE_LIM_TOUT 0x40908C + +#define mmDMA_CH_1_CFG2 0x409090 + +#define mmDMA_CH_1_TDMA_CTL 0x409100 + +#define mmDMA_CH_1_TDMA_SRC_BASE_ADDR_LO 0x409104 + +#define mmDMA_CH_1_TDMA_SRC_BASE_ADDR_HI 0x409108 + +#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_0 0x40910C + +#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_0 0x409110 + +#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_0 0x409114 + +#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_0 0x409118 + +#define mmDMA_CH_1_TDMA_SRC_STRIDE_0 0x40911C + +#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_1 0x409120 + +#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_1 0x409124 + +#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_1 0x409128 + +#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_1 0x40912C + +#define mmDMA_CH_1_TDMA_SRC_STRIDE_1 0x409130 + +#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_2 0x409134 + +#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_2 0x409138 + +#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_2 0x40913C + +#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_2 0x409140 + +#define mmDMA_CH_1_TDMA_SRC_STRIDE_2 0x409144 + +#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_3 0x409148 + +#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_3 0x40914C + +#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_3 0x409150 + +#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_3 0x409154 + +#define mmDMA_CH_1_TDMA_SRC_STRIDE_3 0x409158 + +#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_4 0x40915C + +#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_4 0x409160 + +#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_4 0x409164 + +#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_4 0x409168 + +#define mmDMA_CH_1_TDMA_SRC_STRIDE_4 0x40916C + +#define mmDMA_CH_1_TDMA_DST_BASE_ADDR_LO 0x409170 + +#define 
mmDMA_CH_1_TDMA_DST_BASE_ADDR_HI 0x409174 + +#define mmDMA_CH_1_TDMA_DST_ROI_BASE_0 0x409178 + +#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_0 0x40917C + +#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_0 0x409180 + +#define mmDMA_CH_1_TDMA_DST_START_OFFSET_0 0x409184 + +#define mmDMA_CH_1_TDMA_DST_STRIDE_0 0x409188 + +#define mmDMA_CH_1_TDMA_DST_ROI_BASE_1 0x40918C + +#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_1 0x409190 + +#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_1 0x409194 + +#define mmDMA_CH_1_TDMA_DST_START_OFFSET_1 0x409198 + +#define mmDMA_CH_1_TDMA_DST_STRIDE_1 0x40919C + +#define mmDMA_CH_1_TDMA_DST_ROI_BASE_2 0x4091A0 + +#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_2 0x4091A4 + +#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_2 0x4091A8 + +#define mmDMA_CH_1_TDMA_DST_START_OFFSET_2 0x4091AC + +#define mmDMA_CH_1_TDMA_DST_STRIDE_2 0x4091B0 + +#define mmDMA_CH_1_TDMA_DST_ROI_BASE_3 0x4091B4 + +#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_3 0x4091B8 + +#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_3 0x4091BC + +#define mmDMA_CH_1_TDMA_DST_START_OFFSET_3 0x4091C0 + +#define mmDMA_CH_1_TDMA_DST_STRIDE_3 0x4091C4 + +#define mmDMA_CH_1_TDMA_DST_ROI_BASE_4 0x4091C8 + +#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_4 0x4091CC + +#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_4 0x4091D0 + +#define mmDMA_CH_1_TDMA_DST_START_OFFSET_4 0x4091D4 + +#define mmDMA_CH_1_TDMA_DST_STRIDE_4 0x4091D8 + +#define mmDMA_CH_1_MEM_INIT_BUSY 0x4091FC + +#endif /* ASIC_REG_DMA_CH_1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h new file mode 100644 index 000000000..a4768521d --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_CH_2_REGS_H_ +#define ASIC_REG_DMA_CH_2_REGS_H_ + +/* + ***************************************** + * DMA_CH_2 (Prototype: DMA_CH) + ***************************************** + */ + +#define mmDMA_CH_2_CFG0 0x411000 + +#define mmDMA_CH_2_CFG1 0x411004 + +#define mmDMA_CH_2_ERRMSG_ADDR_LO 0x411008 + +#define mmDMA_CH_2_ERRMSG_ADDR_HI 0x41100C + +#define mmDMA_CH_2_ERRMSG_WDATA 0x411010 + +#define mmDMA_CH_2_RD_COMP_ADDR_LO 0x411014 + +#define mmDMA_CH_2_RD_COMP_ADDR_HI 0x411018 + +#define mmDMA_CH_2_RD_COMP_WDATA 0x41101C + +#define mmDMA_CH_2_WR_COMP_ADDR_LO 0x411020 + +#define mmDMA_CH_2_WR_COMP_ADDR_HI 0x411024 + +#define mmDMA_CH_2_WR_COMP_WDATA 0x411028 + +#define mmDMA_CH_2_LDMA_SRC_ADDR_LO 0x41102C + +#define mmDMA_CH_2_LDMA_SRC_ADDR_HI 0x411030 + +#define mmDMA_CH_2_LDMA_DST_ADDR_LO 0x411034 + +#define mmDMA_CH_2_LDMA_DST_ADDR_HI 0x411038 + +#define mmDMA_CH_2_LDMA_TSIZE 0x41103C + +#define mmDMA_CH_2_COMIT_TRANSFER 0x411040 + +#define mmDMA_CH_2_STS0 0x411044 + +#define mmDMA_CH_2_STS1 0x411048 + +#define mmDMA_CH_2_STS2 0x41104C + +#define mmDMA_CH_2_STS3 0x411050 + +#define mmDMA_CH_2_STS4 0x411054 + +#define mmDMA_CH_2_SRC_ADDR_LO_STS 0x411058 + +#define mmDMA_CH_2_SRC_ADDR_HI_STS 0x41105C + +#define mmDMA_CH_2_SRC_TSIZE_STS 0x411060 + +#define mmDMA_CH_2_DST_ADDR_LO_STS 0x411064 + +#define mmDMA_CH_2_DST_ADDR_HI_STS 0x411068 + +#define mmDMA_CH_2_DST_TSIZE_STS 0x41106C + +#define mmDMA_CH_2_RD_RATE_LIM_EN 0x411070 + +#define mmDMA_CH_2_RD_RATE_LIM_RST_TOKEN 0x411074 + +#define mmDMA_CH_2_RD_RATE_LIM_SAT 0x411078 + +#define mmDMA_CH_2_RD_RATE_LIM_TOUT 0x41107C + +#define mmDMA_CH_2_WR_RATE_LIM_EN 0x411080 + +#define mmDMA_CH_2_WR_RATE_LIM_RST_TOKEN 0x411084 + +#define mmDMA_CH_2_WR_RATE_LIM_SAT 0x411088 + +#define mmDMA_CH_2_WR_RATE_LIM_TOUT 0x41108C + +#define mmDMA_CH_2_CFG2 0x411090 + +#define mmDMA_CH_2_TDMA_CTL 0x411100 + +#define mmDMA_CH_2_TDMA_SRC_BASE_ADDR_LO 0x411104 + +#define mmDMA_CH_2_TDMA_SRC_BASE_ADDR_HI 0x411108 + +#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_0 0x41110C + +#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_0 0x411110 + +#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_0 0x411114 + +#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_0 0x411118 + +#define mmDMA_CH_2_TDMA_SRC_STRIDE_0 0x41111C + +#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_1 0x411120 + +#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_1 0x411124 + +#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_1 0x411128 + +#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_1 0x41112C + +#define mmDMA_CH_2_TDMA_SRC_STRIDE_1 0x411130 + +#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_2 0x411134 + +#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_2 0x411138 + +#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_2 0x41113C + +#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_2 0x411140 + +#define mmDMA_CH_2_TDMA_SRC_STRIDE_2 0x411144 + +#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_3 0x411148 + +#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_3 0x41114C + +#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_3 0x411150 + +#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_3 0x411154 + +#define mmDMA_CH_2_TDMA_SRC_STRIDE_3 0x411158 + +#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_4 0x41115C + +#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_4 0x411160 + +#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_4 0x411164 + +#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_4 0x411168 + +#define mmDMA_CH_2_TDMA_SRC_STRIDE_4 0x41116C + +#define mmDMA_CH_2_TDMA_DST_BASE_ADDR_LO 0x411170 + +#define 
mmDMA_CH_2_TDMA_DST_BASE_ADDR_HI 0x411174 + +#define mmDMA_CH_2_TDMA_DST_ROI_BASE_0 0x411178 + +#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_0 0x41117C + +#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_0 0x411180 + +#define mmDMA_CH_2_TDMA_DST_START_OFFSET_0 0x411184 + +#define mmDMA_CH_2_TDMA_DST_STRIDE_0 0x411188 + +#define mmDMA_CH_2_TDMA_DST_ROI_BASE_1 0x41118C + +#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_1 0x411190 + +#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_1 0x411194 + +#define mmDMA_CH_2_TDMA_DST_START_OFFSET_1 0x411198 + +#define mmDMA_CH_2_TDMA_DST_STRIDE_1 0x41119C + +#define mmDMA_CH_2_TDMA_DST_ROI_BASE_2 0x4111A0 + +#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_2 0x4111A4 + +#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_2 0x4111A8 + +#define mmDMA_CH_2_TDMA_DST_START_OFFSET_2 0x4111AC + +#define mmDMA_CH_2_TDMA_DST_STRIDE_2 0x4111B0 + +#define mmDMA_CH_2_TDMA_DST_ROI_BASE_3 0x4111B4 + +#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_3 0x4111B8 + +#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_3 0x4111BC + +#define mmDMA_CH_2_TDMA_DST_START_OFFSET_3 0x4111C0 + +#define mmDMA_CH_2_TDMA_DST_STRIDE_3 0x4111C4 + +#define mmDMA_CH_2_TDMA_DST_ROI_BASE_4 0x4111C8 + +#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_4 0x4111CC + +#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_4 0x4111D0 + +#define mmDMA_CH_2_TDMA_DST_START_OFFSET_4 0x4111D4 + +#define mmDMA_CH_2_TDMA_DST_STRIDE_4 0x4111D8 + +#define mmDMA_CH_2_MEM_INIT_BUSY 0x4111FC + +#endif /* ASIC_REG_DMA_CH_2_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h new file mode 100644 index 000000000..619d01897 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_CH_3_REGS_H_ +#define ASIC_REG_DMA_CH_3_REGS_H_ + +/* + ***************************************** + * DMA_CH_3 (Prototype: DMA_CH) + ***************************************** + */ + +#define mmDMA_CH_3_CFG0 0x419000 + +#define mmDMA_CH_3_CFG1 0x419004 + +#define mmDMA_CH_3_ERRMSG_ADDR_LO 0x419008 + +#define mmDMA_CH_3_ERRMSG_ADDR_HI 0x41900C + +#define mmDMA_CH_3_ERRMSG_WDATA 0x419010 + +#define mmDMA_CH_3_RD_COMP_ADDR_LO 0x419014 + +#define mmDMA_CH_3_RD_COMP_ADDR_HI 0x419018 + +#define mmDMA_CH_3_RD_COMP_WDATA 0x41901C + +#define mmDMA_CH_3_WR_COMP_ADDR_LO 0x419020 + +#define mmDMA_CH_3_WR_COMP_ADDR_HI 0x419024 + +#define mmDMA_CH_3_WR_COMP_WDATA 0x419028 + +#define mmDMA_CH_3_LDMA_SRC_ADDR_LO 0x41902C + +#define mmDMA_CH_3_LDMA_SRC_ADDR_HI 0x419030 + +#define mmDMA_CH_3_LDMA_DST_ADDR_LO 0x419034 + +#define mmDMA_CH_3_LDMA_DST_ADDR_HI 0x419038 + +#define mmDMA_CH_3_LDMA_TSIZE 0x41903C + +#define mmDMA_CH_3_COMIT_TRANSFER 0x419040 + +#define mmDMA_CH_3_STS0 0x419044 + +#define mmDMA_CH_3_STS1 0x419048 + +#define mmDMA_CH_3_STS2 0x41904C + +#define mmDMA_CH_3_STS3 0x419050 + +#define mmDMA_CH_3_STS4 0x419054 + +#define mmDMA_CH_3_SRC_ADDR_LO_STS 0x419058 + +#define mmDMA_CH_3_SRC_ADDR_HI_STS 0x41905C + +#define mmDMA_CH_3_SRC_TSIZE_STS 0x419060 + +#define mmDMA_CH_3_DST_ADDR_LO_STS 0x419064 + +#define mmDMA_CH_3_DST_ADDR_HI_STS 0x419068 + +#define mmDMA_CH_3_DST_TSIZE_STS 0x41906C + +#define mmDMA_CH_3_RD_RATE_LIM_EN 0x419070 + +#define mmDMA_CH_3_RD_RATE_LIM_RST_TOKEN 0x419074 + +#define mmDMA_CH_3_RD_RATE_LIM_SAT 0x419078 + +#define mmDMA_CH_3_RD_RATE_LIM_TOUT 0x41907C + +#define mmDMA_CH_3_WR_RATE_LIM_EN 0x419080 + +#define mmDMA_CH_3_WR_RATE_LIM_RST_TOKEN 0x419084 + +#define mmDMA_CH_3_WR_RATE_LIM_SAT 0x419088 + +#define mmDMA_CH_3_WR_RATE_LIM_TOUT 0x41908C + +#define mmDMA_CH_3_CFG2 0x419090 + +#define mmDMA_CH_3_TDMA_CTL 0x419100 + +#define mmDMA_CH_3_TDMA_SRC_BASE_ADDR_LO 0x419104 + +#define mmDMA_CH_3_TDMA_SRC_BASE_ADDR_HI 0x419108 + +#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_0 0x41910C + +#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_0 0x419110 + +#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_0 0x419114 + +#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_0 0x419118 + +#define mmDMA_CH_3_TDMA_SRC_STRIDE_0 0x41911C + +#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_1 0x419120 + +#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_1 0x419124 + +#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_1 0x419128 + +#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_1 0x41912C + +#define mmDMA_CH_3_TDMA_SRC_STRIDE_1 0x419130 + +#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_2 0x419134 + +#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_2 0x419138 + +#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_2 0x41913C + +#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_2 0x419140 + +#define mmDMA_CH_3_TDMA_SRC_STRIDE_2 0x419144 + +#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_3 0x419148 + +#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_3 0x41914C + +#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_3 0x419150 + +#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_3 0x419154 + +#define mmDMA_CH_3_TDMA_SRC_STRIDE_3 0x419158 + +#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_4 0x41915C + +#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_4 0x419160 + +#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_4 0x419164 + +#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_4 0x419168 + +#define mmDMA_CH_3_TDMA_SRC_STRIDE_4 0x41916C + +#define mmDMA_CH_3_TDMA_DST_BASE_ADDR_LO 0x419170 + +#define 
mmDMA_CH_3_TDMA_DST_BASE_ADDR_HI 0x419174 + +#define mmDMA_CH_3_TDMA_DST_ROI_BASE_0 0x419178 + +#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_0 0x41917C + +#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_0 0x419180 + +#define mmDMA_CH_3_TDMA_DST_START_OFFSET_0 0x419184 + +#define mmDMA_CH_3_TDMA_DST_STRIDE_0 0x419188 + +#define mmDMA_CH_3_TDMA_DST_ROI_BASE_1 0x41918C + +#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_1 0x419190 + +#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_1 0x419194 + +#define mmDMA_CH_3_TDMA_DST_START_OFFSET_1 0x419198 + +#define mmDMA_CH_3_TDMA_DST_STRIDE_1 0x41919C + +#define mmDMA_CH_3_TDMA_DST_ROI_BASE_2 0x4191A0 + +#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_2 0x4191A4 + +#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_2 0x4191A8 + +#define mmDMA_CH_3_TDMA_DST_START_OFFSET_2 0x4191AC + +#define mmDMA_CH_3_TDMA_DST_STRIDE_2 0x4191B0 + +#define mmDMA_CH_3_TDMA_DST_ROI_BASE_3 0x4191B4 + +#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_3 0x4191B8 + +#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_3 0x4191BC + +#define mmDMA_CH_3_TDMA_DST_START_OFFSET_3 0x4191C0 + +#define mmDMA_CH_3_TDMA_DST_STRIDE_3 0x4191C4 + +#define mmDMA_CH_3_TDMA_DST_ROI_BASE_4 0x4191C8 + +#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_4 0x4191CC + +#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_4 0x4191D0 + +#define mmDMA_CH_3_TDMA_DST_START_OFFSET_4 0x4191D4 + +#define mmDMA_CH_3_TDMA_DST_STRIDE_4 0x4191D8 + +#define mmDMA_CH_3_MEM_INIT_BUSY 0x4191FC + +#endif /* ASIC_REG_DMA_CH_3_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h new file mode 100644 index 000000000..038617e16 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_CH_4_REGS_H_ +#define ASIC_REG_DMA_CH_4_REGS_H_ + +/* + ***************************************** + * DMA_CH_4 (Prototype: DMA_CH) + ***************************************** + */ + +#define mmDMA_CH_4_CFG0 0x421000 + +#define mmDMA_CH_4_CFG1 0x421004 + +#define mmDMA_CH_4_ERRMSG_ADDR_LO 0x421008 + +#define mmDMA_CH_4_ERRMSG_ADDR_HI 0x42100C + +#define mmDMA_CH_4_ERRMSG_WDATA 0x421010 + +#define mmDMA_CH_4_RD_COMP_ADDR_LO 0x421014 + +#define mmDMA_CH_4_RD_COMP_ADDR_HI 0x421018 + +#define mmDMA_CH_4_RD_COMP_WDATA 0x42101C + +#define mmDMA_CH_4_WR_COMP_ADDR_LO 0x421020 + +#define mmDMA_CH_4_WR_COMP_ADDR_HI 0x421024 + +#define mmDMA_CH_4_WR_COMP_WDATA 0x421028 + +#define mmDMA_CH_4_LDMA_SRC_ADDR_LO 0x42102C + +#define mmDMA_CH_4_LDMA_SRC_ADDR_HI 0x421030 + +#define mmDMA_CH_4_LDMA_DST_ADDR_LO 0x421034 + +#define mmDMA_CH_4_LDMA_DST_ADDR_HI 0x421038 + +#define mmDMA_CH_4_LDMA_TSIZE 0x42103C + +#define mmDMA_CH_4_COMIT_TRANSFER 0x421040 + +#define mmDMA_CH_4_STS0 0x421044 + +#define mmDMA_CH_4_STS1 0x421048 + +#define mmDMA_CH_4_STS2 0x42104C + +#define mmDMA_CH_4_STS3 0x421050 + +#define mmDMA_CH_4_STS4 0x421054 + +#define mmDMA_CH_4_SRC_ADDR_LO_STS 0x421058 + +#define mmDMA_CH_4_SRC_ADDR_HI_STS 0x42105C + +#define mmDMA_CH_4_SRC_TSIZE_STS 0x421060 + +#define mmDMA_CH_4_DST_ADDR_LO_STS 0x421064 + +#define mmDMA_CH_4_DST_ADDR_HI_STS 0x421068 + +#define mmDMA_CH_4_DST_TSIZE_STS 0x42106C + +#define mmDMA_CH_4_RD_RATE_LIM_EN 0x421070 + +#define mmDMA_CH_4_RD_RATE_LIM_RST_TOKEN 0x421074 + +#define mmDMA_CH_4_RD_RATE_LIM_SAT 0x421078 + +#define mmDMA_CH_4_RD_RATE_LIM_TOUT 0x42107C + +#define mmDMA_CH_4_WR_RATE_LIM_EN 0x421080 + +#define mmDMA_CH_4_WR_RATE_LIM_RST_TOKEN 0x421084 + +#define mmDMA_CH_4_WR_RATE_LIM_SAT 0x421088 + +#define mmDMA_CH_4_WR_RATE_LIM_TOUT 0x42108C + +#define mmDMA_CH_4_CFG2 0x421090 + +#define mmDMA_CH_4_TDMA_CTL 0x421100 + +#define mmDMA_CH_4_TDMA_SRC_BASE_ADDR_LO 0x421104 + +#define mmDMA_CH_4_TDMA_SRC_BASE_ADDR_HI 0x421108 + +#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_0 0x42110C + +#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_0 0x421110 + +#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_0 0x421114 + +#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_0 0x421118 + +#define mmDMA_CH_4_TDMA_SRC_STRIDE_0 0x42111C + +#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_1 0x421120 + +#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_1 0x421124 + +#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_1 0x421128 + +#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_1 0x42112C + +#define mmDMA_CH_4_TDMA_SRC_STRIDE_1 0x421130 + +#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_2 0x421134 + +#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_2 0x421138 + +#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_2 0x42113C + +#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_2 0x421140 + +#define mmDMA_CH_4_TDMA_SRC_STRIDE_2 0x421144 + +#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_3 0x421148 + +#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_3 0x42114C + +#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_3 0x421150 + +#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_3 0x421154 + +#define mmDMA_CH_4_TDMA_SRC_STRIDE_3 0x421158 + +#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_4 0x42115C + +#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_4 0x421160 + +#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_4 0x421164 + +#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_4 0x421168 + +#define mmDMA_CH_4_TDMA_SRC_STRIDE_4 0x42116C + +#define mmDMA_CH_4_TDMA_DST_BASE_ADDR_LO 0x421170 + +#define 
mmDMA_CH_4_TDMA_DST_BASE_ADDR_HI 0x421174 + +#define mmDMA_CH_4_TDMA_DST_ROI_BASE_0 0x421178 + +#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_0 0x42117C + +#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_0 0x421180 + +#define mmDMA_CH_4_TDMA_DST_START_OFFSET_0 0x421184 + +#define mmDMA_CH_4_TDMA_DST_STRIDE_0 0x421188 + +#define mmDMA_CH_4_TDMA_DST_ROI_BASE_1 0x42118C + +#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_1 0x421190 + +#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_1 0x421194 + +#define mmDMA_CH_4_TDMA_DST_START_OFFSET_1 0x421198 + +#define mmDMA_CH_4_TDMA_DST_STRIDE_1 0x42119C + +#define mmDMA_CH_4_TDMA_DST_ROI_BASE_2 0x4211A0 + +#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_2 0x4211A4 + +#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_2 0x4211A8 + +#define mmDMA_CH_4_TDMA_DST_START_OFFSET_2 0x4211AC + +#define mmDMA_CH_4_TDMA_DST_STRIDE_2 0x4211B0 + +#define mmDMA_CH_4_TDMA_DST_ROI_BASE_3 0x4211B4 + +#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_3 0x4211B8 + +#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_3 0x4211BC + +#define mmDMA_CH_4_TDMA_DST_START_OFFSET_3 0x4211C0 + +#define mmDMA_CH_4_TDMA_DST_STRIDE_3 0x4211C4 + +#define mmDMA_CH_4_TDMA_DST_ROI_BASE_4 0x4211C8 + +#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_4 0x4211CC + +#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_4 0x4211D0 + +#define mmDMA_CH_4_TDMA_DST_START_OFFSET_4 0x4211D4 + +#define mmDMA_CH_4_TDMA_DST_STRIDE_4 0x4211D8 + +#define mmDMA_CH_4_MEM_INIT_BUSY 0x4211FC + +#endif /* ASIC_REG_DMA_CH_4_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h new file mode 100644 index 000000000..f43b564af --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_MACRO_MASKS_H_ +#define ASIC_REG_DMA_MACRO_MASKS_H_ + +/* + ***************************************** + * DMA_MACRO (Prototype: DMA_MACRO) + ***************************************** + */ + +/* DMA_MACRO_LBW_RANGE_HIT_BLOCK */ +#define DMA_MACRO_LBW_RANGE_HIT_BLOCK_R_SHIFT 0 +#define DMA_MACRO_LBW_RANGE_HIT_BLOCK_R_MASK 0xFFFF + +/* DMA_MACRO_LBW_RANGE_MASK */ +#define DMA_MACRO_LBW_RANGE_MASK_R_SHIFT 0 +#define DMA_MACRO_LBW_RANGE_MASK_R_MASK 0x3FFFFFF + +/* DMA_MACRO_LBW_RANGE_BASE */ +#define DMA_MACRO_LBW_RANGE_BASE_R_SHIFT 0 +#define DMA_MACRO_LBW_RANGE_BASE_R_MASK 0x3FFFFFF + +/* DMA_MACRO_HBW_RANGE_HIT_BLOCK */ +#define DMA_MACRO_HBW_RANGE_HIT_BLOCK_R_SHIFT 0 +#define DMA_MACRO_HBW_RANGE_HIT_BLOCK_R_MASK 0xFF + +/* DMA_MACRO_HBW_RANGE_MASK_49_32 */ +#define DMA_MACRO_HBW_RANGE_MASK_49_32_R_SHIFT 0 +#define DMA_MACRO_HBW_RANGE_MASK_49_32_R_MASK 0x3FFFF + +/* DMA_MACRO_HBW_RANGE_MASK_31_0 */ +#define DMA_MACRO_HBW_RANGE_MASK_31_0_R_SHIFT 0 +#define DMA_MACRO_HBW_RANGE_MASK_31_0_R_MASK 0xFFFFFFFF + +/* DMA_MACRO_HBW_RANGE_BASE_49_32 */ +#define DMA_MACRO_HBW_RANGE_BASE_49_32_R_SHIFT 0 +#define DMA_MACRO_HBW_RANGE_BASE_49_32_R_MASK 0x3FFFF + +/* DMA_MACRO_HBW_RANGE_BASE_31_0 */ +#define DMA_MACRO_HBW_RANGE_BASE_31_0_R_SHIFT 0 +#define DMA_MACRO_HBW_RANGE_BASE_31_0_R_MASK 0xFFFFFFFF + +/* DMA_MACRO_WRITE_EN */ +#define DMA_MACRO_WRITE_EN_R_SHIFT 0 +#define DMA_MACRO_WRITE_EN_R_MASK 0x1 + +/* DMA_MACRO_WRITE_CREDIT */ +#define DMA_MACRO_WRITE_CREDIT_R_SHIFT 0 +#define DMA_MACRO_WRITE_CREDIT_R_MASK 0x3FF + +/* DMA_MACRO_READ_EN */ +#define DMA_MACRO_READ_EN_R_SHIFT 0 +#define DMA_MACRO_READ_EN_R_MASK 0x1 + +/* DMA_MACRO_READ_CREDIT */ +#define DMA_MACRO_READ_CREDIT_R_SHIFT 0 +#define DMA_MACRO_READ_CREDIT_R_MASK 0x3FF + +/* DMA_MACRO_SRAM_BUSY */ + +/* DMA_MACRO_RAZWI_LBW_WT_VLD */ +#define DMA_MACRO_RAZWI_LBW_WT_VLD_R_SHIFT 0 +#define DMA_MACRO_RAZWI_LBW_WT_VLD_R_MASK 0x1 + +/* DMA_MACRO_RAZWI_LBW_WT_ID */ +#define DMA_MACRO_RAZWI_LBW_WT_ID_R_SHIFT 0 +#define DMA_MACRO_RAZWI_LBW_WT_ID_R_MASK 0x7FFF + +/* DMA_MACRO_RAZWI_LBW_RD_VLD */ +#define DMA_MACRO_RAZWI_LBW_RD_VLD_R_SHIFT 0 +#define DMA_MACRO_RAZWI_LBW_RD_VLD_R_MASK 0x1 + +/* DMA_MACRO_RAZWI_LBW_RD_ID */ +#define DMA_MACRO_RAZWI_LBW_RD_ID_R_SHIFT 0 +#define DMA_MACRO_RAZWI_LBW_RD_ID_R_MASK 0x7FFF + +/* DMA_MACRO_RAZWI_HBW_WT_VLD */ +#define DMA_MACRO_RAZWI_HBW_WT_VLD_R_SHIFT 0 +#define DMA_MACRO_RAZWI_HBW_WT_VLD_R_MASK 0x1 + +/* DMA_MACRO_RAZWI_HBW_WT_ID */ +#define DMA_MACRO_RAZWI_HBW_WT_ID_R_SHIFT 0 +#define DMA_MACRO_RAZWI_HBW_WT_ID_R_MASK 0x1FFFFFFF + +/* DMA_MACRO_RAZWI_HBW_RD_VLD */ +#define DMA_MACRO_RAZWI_HBW_RD_VLD_R_SHIFT 0 +#define DMA_MACRO_RAZWI_HBW_RD_VLD_R_MASK 0x1 + +/* DMA_MACRO_RAZWI_HBW_RD_ID */ +#define DMA_MACRO_RAZWI_HBW_RD_ID_R_SHIFT 0 +#define DMA_MACRO_RAZWI_HBW_RD_ID_R_MASK 0x1FFFFFFF + +#endif /* ASIC_REG_DMA_MACRO_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h new file mode 100644 index 000000000..c3bfc1b8e --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_MACRO_REGS_H_ +#define ASIC_REG_DMA_MACRO_REGS_H_ + +/* + ***************************************** + * DMA_MACRO (Prototype: DMA_MACRO) + ***************************************** + */ + +#define mmDMA_MACRO_LBW_RANGE_HIT_BLOCK 0x4B0000 + +#define mmDMA_MACRO_LBW_RANGE_MASK_0 0x4B0004 + +#define mmDMA_MACRO_LBW_RANGE_MASK_1 0x4B0008 + +#define mmDMA_MACRO_LBW_RANGE_MASK_2 0x4B000C + +#define mmDMA_MACRO_LBW_RANGE_MASK_3 0x4B0010 + +#define mmDMA_MACRO_LBW_RANGE_MASK_4 0x4B0014 + +#define mmDMA_MACRO_LBW_RANGE_MASK_5 0x4B0018 + +#define mmDMA_MACRO_LBW_RANGE_MASK_6 0x4B001C + +#define mmDMA_MACRO_LBW_RANGE_MASK_7 0x4B0020 + +#define mmDMA_MACRO_LBW_RANGE_MASK_8 0x4B0024 + +#define mmDMA_MACRO_LBW_RANGE_MASK_9 0x4B0028 + +#define mmDMA_MACRO_LBW_RANGE_MASK_10 0x4B002C + +#define mmDMA_MACRO_LBW_RANGE_MASK_11 0x4B0030 + +#define mmDMA_MACRO_LBW_RANGE_MASK_12 0x4B0034 + +#define mmDMA_MACRO_LBW_RANGE_MASK_13 0x4B0038 + +#define mmDMA_MACRO_LBW_RANGE_MASK_14 0x4B003C + +#define mmDMA_MACRO_LBW_RANGE_MASK_15 0x4B0040 + +#define mmDMA_MACRO_LBW_RANGE_BASE_0 0x4B0044 + +#define mmDMA_MACRO_LBW_RANGE_BASE_1 0x4B0048 + +#define mmDMA_MACRO_LBW_RANGE_BASE_2 0x4B004C + +#define mmDMA_MACRO_LBW_RANGE_BASE_3 0x4B0050 + +#define mmDMA_MACRO_LBW_RANGE_BASE_4 0x4B0054 + +#define mmDMA_MACRO_LBW_RANGE_BASE_5 0x4B0058 + +#define mmDMA_MACRO_LBW_RANGE_BASE_6 0x4B005C + +#define mmDMA_MACRO_LBW_RANGE_BASE_7 0x4B0060 + +#define mmDMA_MACRO_LBW_RANGE_BASE_8 0x4B0064 + +#define mmDMA_MACRO_LBW_RANGE_BASE_9 0x4B0068 + +#define mmDMA_MACRO_LBW_RANGE_BASE_10 0x4B006C + +#define mmDMA_MACRO_LBW_RANGE_BASE_11 0x4B0070 + +#define mmDMA_MACRO_LBW_RANGE_BASE_12 0x4B0074 + +#define mmDMA_MACRO_LBW_RANGE_BASE_13 0x4B0078 + +#define mmDMA_MACRO_LBW_RANGE_BASE_14 0x4B007C + +#define mmDMA_MACRO_LBW_RANGE_BASE_15 0x4B0080 + +#define mmDMA_MACRO_HBW_RANGE_HIT_BLOCK 0x4B0084 + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_0 0x4B00A8 + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_1 0x4B00AC + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_2 0x4B00B0 + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_3 0x4B00B4 + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_4 0x4B00B8 + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_5 0x4B00BC + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_6 0x4B00C0 + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_7 0x4B00C4 + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_0 0x4B00C8 + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_1 0x4B00CC + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_2 0x4B00D0 + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_3 0x4B00D4 + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_4 0x4B00D8 + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_5 0x4B00DC + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_6 0x4B00E0 + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_7 0x4B00E4 + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_0 0x4B00E8 + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_1 0x4B00EC + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_2 0x4B00F0 + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_3 0x4B00F4 + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_4 0x4B00F8 + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_5 0x4B00FC + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_6 0x4B0100 + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_7 0x4B0104 + +#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_0 0x4B0108 + +#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_1 0x4B010C + +#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_2 0x4B0110 + +#define 
mmDMA_MACRO_HBW_RANGE_BASE_31_0_3 0x4B0114 + +#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_4 0x4B0118 + +#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_5 0x4B011C + +#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_6 0x4B0120 + +#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_7 0x4B0124 + +#define mmDMA_MACRO_WRITE_EN 0x4B0128 + +#define mmDMA_MACRO_WRITE_CREDIT 0x4B012C + +#define mmDMA_MACRO_READ_EN 0x4B0130 + +#define mmDMA_MACRO_READ_CREDIT 0x4B0134 + +#define mmDMA_MACRO_SRAM_BUSY 0x4B0138 + +#define mmDMA_MACRO_RAZWI_LBW_WT_VLD 0x4B013C + +#define mmDMA_MACRO_RAZWI_LBW_WT_ID 0x4B0140 + +#define mmDMA_MACRO_RAZWI_LBW_RD_VLD 0x4B0144 + +#define mmDMA_MACRO_RAZWI_LBW_RD_ID 0x4B0148 + +#define mmDMA_MACRO_RAZWI_HBW_WT_VLD 0x4B014C + +#define mmDMA_MACRO_RAZWI_HBW_WT_ID 0x4B0150 + +#define mmDMA_MACRO_RAZWI_HBW_RD_VLD 0x4B0154 + +#define mmDMA_MACRO_RAZWI_HBW_RD_ID 0x4B0158 + +#endif /* ASIC_REG_DMA_MACRO_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h new file mode 100644 index 000000000..bc977488c --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_NRTR_MASKS_H_ +#define ASIC_REG_DMA_NRTR_MASKS_H_ + +/* + ***************************************** + * DMA_NRTR (Prototype: IF_NRTR) + ***************************************** + */ + +/* DMA_NRTR_HBW_MAX_CRED */ +#define DMA_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0 +#define DMA_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F +#define DMA_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8 +#define DMA_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00 +#define DMA_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16 +#define DMA_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000 +#define DMA_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24 +#define DMA_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000 + +/* DMA_NRTR_LBW_MAX_CRED */ +#define DMA_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0 +#define DMA_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F +#define DMA_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8 +#define DMA_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00 +#define DMA_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16 +#define DMA_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000 +#define DMA_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24 +#define DMA_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000 + +/* DMA_NRTR_DBG_E_ARB */ +#define DMA_NRTR_DBG_E_ARB_W_SHIFT 0 +#define DMA_NRTR_DBG_E_ARB_W_MASK 0x7 +#define DMA_NRTR_DBG_E_ARB_S_SHIFT 8 +#define DMA_NRTR_DBG_E_ARB_S_MASK 0x700 +#define DMA_NRTR_DBG_E_ARB_N_SHIFT 16 +#define DMA_NRTR_DBG_E_ARB_N_MASK 0x70000 +#define DMA_NRTR_DBG_E_ARB_L_SHIFT 24 +#define DMA_NRTR_DBG_E_ARB_L_MASK 0x7000000 + +/* DMA_NRTR_DBG_W_ARB */ +#define DMA_NRTR_DBG_W_ARB_E_SHIFT 0 +#define DMA_NRTR_DBG_W_ARB_E_MASK 0x7 +#define DMA_NRTR_DBG_W_ARB_S_SHIFT 8 +#define DMA_NRTR_DBG_W_ARB_S_MASK 0x700 +#define DMA_NRTR_DBG_W_ARB_N_SHIFT 16 +#define DMA_NRTR_DBG_W_ARB_N_MASK 0x70000 +#define DMA_NRTR_DBG_W_ARB_L_SHIFT 24 +#define DMA_NRTR_DBG_W_ARB_L_MASK 0x7000000 + +/* DMA_NRTR_DBG_N_ARB */ +#define DMA_NRTR_DBG_N_ARB_W_SHIFT 0 +#define DMA_NRTR_DBG_N_ARB_W_MASK 0x7 +#define DMA_NRTR_DBG_N_ARB_E_SHIFT 8 +#define DMA_NRTR_DBG_N_ARB_E_MASK 0x700 +#define DMA_NRTR_DBG_N_ARB_S_SHIFT 16 +#define DMA_NRTR_DBG_N_ARB_S_MASK 0x70000 +#define DMA_NRTR_DBG_N_ARB_L_SHIFT 24 +#define DMA_NRTR_DBG_N_ARB_L_MASK 0x7000000 + +/* 
DMA_NRTR_DBG_S_ARB */ +#define DMA_NRTR_DBG_S_ARB_W_SHIFT 0 +#define DMA_NRTR_DBG_S_ARB_W_MASK 0x7 +#define DMA_NRTR_DBG_S_ARB_E_SHIFT 8 +#define DMA_NRTR_DBG_S_ARB_E_MASK 0x700 +#define DMA_NRTR_DBG_S_ARB_N_SHIFT 16 +#define DMA_NRTR_DBG_S_ARB_N_MASK 0x70000 +#define DMA_NRTR_DBG_S_ARB_L_SHIFT 24 +#define DMA_NRTR_DBG_S_ARB_L_MASK 0x7000000 + +/* DMA_NRTR_DBG_L_ARB */ +#define DMA_NRTR_DBG_L_ARB_W_SHIFT 0 +#define DMA_NRTR_DBG_L_ARB_W_MASK 0x7 +#define DMA_NRTR_DBG_L_ARB_E_SHIFT 8 +#define DMA_NRTR_DBG_L_ARB_E_MASK 0x700 +#define DMA_NRTR_DBG_L_ARB_S_SHIFT 16 +#define DMA_NRTR_DBG_L_ARB_S_MASK 0x70000 +#define DMA_NRTR_DBG_L_ARB_N_SHIFT 24 +#define DMA_NRTR_DBG_L_ARB_N_MASK 0x7000000 + +/* DMA_NRTR_DBG_E_ARB_MAX */ +#define DMA_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0 +#define DMA_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F + +/* DMA_NRTR_DBG_W_ARB_MAX */ +#define DMA_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0 +#define DMA_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F + +/* DMA_NRTR_DBG_N_ARB_MAX */ +#define DMA_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0 +#define DMA_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F + +/* DMA_NRTR_DBG_S_ARB_MAX */ +#define DMA_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0 +#define DMA_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F + +/* DMA_NRTR_DBG_L_ARB_MAX */ +#define DMA_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0 +#define DMA_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F + +/* DMA_NRTR_SPLIT_COEF */ +#define DMA_NRTR_SPLIT_COEF_VAL_SHIFT 0 +#define DMA_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF + +/* DMA_NRTR_SPLIT_CFG */ +#define DMA_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0 +#define DMA_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1 +#define DMA_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1 +#define DMA_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2 +#define DMA_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2 +#define DMA_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC +#define DMA_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4 +#define DMA_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10 +#define DMA_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5 +#define DMA_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20 +#define DMA_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6 +#define DMA_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0 + +/* DMA_NRTR_SPLIT_RD_SAT */ +#define DMA_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0 +#define DMA_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF + +/* DMA_NRTR_SPLIT_RD_RST_TOKEN */ +#define DMA_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0 +#define DMA_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF + +/* DMA_NRTR_SPLIT_RD_TIMEOUT */ +#define DMA_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0 +#define DMA_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* DMA_NRTR_SPLIT_WR_SAT */ +#define DMA_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0 +#define DMA_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF + +/* DMA_NRTR_WPLIT_WR_TST_TOLEN */ +#define DMA_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0 +#define DMA_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF + +/* DMA_NRTR_SPLIT_WR_TIMEOUT */ +#define DMA_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0 +#define DMA_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* DMA_NRTR_HBW_RANGE_HIT */ +#define DMA_NRTR_HBW_RANGE_HIT_IND_SHIFT 0 +#define DMA_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF + +/* DMA_NRTR_HBW_RANGE_MASK_L */ +#define DMA_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0 +#define DMA_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF + +/* DMA_NRTR_HBW_RANGE_MASK_H */ +#define DMA_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0 +#define DMA_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF + +/* DMA_NRTR_HBW_RANGE_BASE_L */ +#define DMA_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0 +#define DMA_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF + +/* DMA_NRTR_HBW_RANGE_BASE_H */ +#define DMA_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0 +#define DMA_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF + +/* 
DMA_NRTR_LBW_RANGE_HIT */ +#define DMA_NRTR_LBW_RANGE_HIT_IND_SHIFT 0 +#define DMA_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF + +/* DMA_NRTR_LBW_RANGE_MASK */ +#define DMA_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0 +#define DMA_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF + +/* DMA_NRTR_LBW_RANGE_BASE */ +#define DMA_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0 +#define DMA_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF + +/* DMA_NRTR_RGLTR */ +#define DMA_NRTR_RGLTR_WR_EN_SHIFT 0 +#define DMA_NRTR_RGLTR_WR_EN_MASK 0x1 +#define DMA_NRTR_RGLTR_RD_EN_SHIFT 4 +#define DMA_NRTR_RGLTR_RD_EN_MASK 0x10 + +/* DMA_NRTR_RGLTR_WR_RESULT */ +#define DMA_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0 +#define DMA_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF + +/* DMA_NRTR_RGLTR_RD_RESULT */ +#define DMA_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0 +#define DMA_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF + +/* DMA_NRTR_SCRAMB_EN */ +#define DMA_NRTR_SCRAMB_EN_VAL_SHIFT 0 +#define DMA_NRTR_SCRAMB_EN_VAL_MASK 0x1 + +/* DMA_NRTR_NON_LIN_SCRAMB */ +#define DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0 +#define DMA_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1 + +#endif /* ASIC_REG_DMA_NRTR_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h new file mode 100644 index 000000000..c4abc7ff1 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_NRTR_REGS_H_ +#define ASIC_REG_DMA_NRTR_REGS_H_ + +/* + ***************************************** + * DMA_NRTR (Prototype: IF_NRTR) + ***************************************** + */ + +#define mmDMA_NRTR_HBW_MAX_CRED 0x1C0100 + +#define mmDMA_NRTR_LBW_MAX_CRED 0x1C0120 + +#define mmDMA_NRTR_DBG_E_ARB 0x1C0300 + +#define mmDMA_NRTR_DBG_W_ARB 0x1C0304 + +#define mmDMA_NRTR_DBG_N_ARB 0x1C0308 + +#define mmDMA_NRTR_DBG_S_ARB 0x1C030C + +#define mmDMA_NRTR_DBG_L_ARB 0x1C0310 + +#define mmDMA_NRTR_DBG_E_ARB_MAX 0x1C0320 + +#define mmDMA_NRTR_DBG_W_ARB_MAX 0x1C0324 + +#define mmDMA_NRTR_DBG_N_ARB_MAX 0x1C0328 + +#define mmDMA_NRTR_DBG_S_ARB_MAX 0x1C032C + +#define mmDMA_NRTR_DBG_L_ARB_MAX 0x1C0330 + +#define mmDMA_NRTR_SPLIT_COEF_0 0x1C0400 + +#define mmDMA_NRTR_SPLIT_COEF_1 0x1C0404 + +#define mmDMA_NRTR_SPLIT_COEF_2 0x1C0408 + +#define mmDMA_NRTR_SPLIT_COEF_3 0x1C040C + +#define mmDMA_NRTR_SPLIT_COEF_4 0x1C0410 + +#define mmDMA_NRTR_SPLIT_COEF_5 0x1C0414 + +#define mmDMA_NRTR_SPLIT_COEF_6 0x1C0418 + +#define mmDMA_NRTR_SPLIT_COEF_7 0x1C041C + +#define mmDMA_NRTR_SPLIT_COEF_8 0x1C0420 + +#define mmDMA_NRTR_SPLIT_COEF_9 0x1C0424 + +#define mmDMA_NRTR_SPLIT_CFG 0x1C0440 + +#define mmDMA_NRTR_SPLIT_RD_SAT 0x1C0444 + +#define mmDMA_NRTR_SPLIT_RD_RST_TOKEN 0x1C0448 + +#define mmDMA_NRTR_SPLIT_RD_TIMEOUT_0 0x1C044C + +#define mmDMA_NRTR_SPLIT_RD_TIMEOUT_1 0x1C0450 + +#define mmDMA_NRTR_SPLIT_WR_SAT 0x1C0454 + +#define mmDMA_NRTR_WPLIT_WR_TST_TOLEN 0x1C0458 + +#define mmDMA_NRTR_SPLIT_WR_TIMEOUT_0 0x1C045C + +#define mmDMA_NRTR_SPLIT_WR_TIMEOUT_1 0x1C0460 + +#define mmDMA_NRTR_HBW_RANGE_HIT 0x1C0470 + +#define mmDMA_NRTR_HBW_RANGE_MASK_L_0 0x1C0480 + +#define mmDMA_NRTR_HBW_RANGE_MASK_L_1 0x1C0484 + +#define mmDMA_NRTR_HBW_RANGE_MASK_L_2 0x1C0488 + +#define mmDMA_NRTR_HBW_RANGE_MASK_L_3 0x1C048C + +#define mmDMA_NRTR_HBW_RANGE_MASK_L_4 0x1C0490 + +#define 
mmDMA_NRTR_HBW_RANGE_MASK_L_5 0x1C0494 + +#define mmDMA_NRTR_HBW_RANGE_MASK_L_6 0x1C0498 + +#define mmDMA_NRTR_HBW_RANGE_MASK_L_7 0x1C049C + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_0 0x1C04A0 + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_1 0x1C04A4 + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_2 0x1C04A8 + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_3 0x1C04AC + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_4 0x1C04B0 + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_5 0x1C04B4 + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_6 0x1C04B8 + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_7 0x1C04BC + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_0 0x1C04C0 + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_1 0x1C04C4 + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_2 0x1C04C8 + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_3 0x1C04CC + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_4 0x1C04D0 + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_5 0x1C04D4 + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_6 0x1C04D8 + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_7 0x1C04DC + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_0 0x1C04E0 + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_1 0x1C04E4 + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_2 0x1C04E8 + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_3 0x1C04EC + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_4 0x1C04F0 + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_5 0x1C04F4 + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_6 0x1C04F8 + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_7 0x1C04FC + +#define mmDMA_NRTR_LBW_RANGE_HIT 0x1C0500 + +#define mmDMA_NRTR_LBW_RANGE_MASK_0 0x1C0510 + +#define mmDMA_NRTR_LBW_RANGE_MASK_1 0x1C0514 + +#define mmDMA_NRTR_LBW_RANGE_MASK_2 0x1C0518 + +#define mmDMA_NRTR_LBW_RANGE_MASK_3 0x1C051C + +#define mmDMA_NRTR_LBW_RANGE_MASK_4 0x1C0520 + +#define mmDMA_NRTR_LBW_RANGE_MASK_5 0x1C0524 + +#define mmDMA_NRTR_LBW_RANGE_MASK_6 0x1C0528 + +#define mmDMA_NRTR_LBW_RANGE_MASK_7 0x1C052C + +#define mmDMA_NRTR_LBW_RANGE_MASK_8 0x1C0530 + +#define mmDMA_NRTR_LBW_RANGE_MASK_9 0x1C0534 + +#define mmDMA_NRTR_LBW_RANGE_MASK_10 0x1C0538 + +#define mmDMA_NRTR_LBW_RANGE_MASK_11 0x1C053C + +#define mmDMA_NRTR_LBW_RANGE_MASK_12 0x1C0540 + +#define mmDMA_NRTR_LBW_RANGE_MASK_13 0x1C0544 + +#define mmDMA_NRTR_LBW_RANGE_MASK_14 0x1C0548 + +#define mmDMA_NRTR_LBW_RANGE_MASK_15 0x1C054C + +#define mmDMA_NRTR_LBW_RANGE_BASE_0 0x1C0550 + +#define mmDMA_NRTR_LBW_RANGE_BASE_1 0x1C0554 + +#define mmDMA_NRTR_LBW_RANGE_BASE_2 0x1C0558 + +#define mmDMA_NRTR_LBW_RANGE_BASE_3 0x1C055C + +#define mmDMA_NRTR_LBW_RANGE_BASE_4 0x1C0560 + +#define mmDMA_NRTR_LBW_RANGE_BASE_5 0x1C0564 + +#define mmDMA_NRTR_LBW_RANGE_BASE_6 0x1C0568 + +#define mmDMA_NRTR_LBW_RANGE_BASE_7 0x1C056C + +#define mmDMA_NRTR_LBW_RANGE_BASE_8 0x1C0570 + +#define mmDMA_NRTR_LBW_RANGE_BASE_9 0x1C0574 + +#define mmDMA_NRTR_LBW_RANGE_BASE_10 0x1C0578 + +#define mmDMA_NRTR_LBW_RANGE_BASE_11 0x1C057C + +#define mmDMA_NRTR_LBW_RANGE_BASE_12 0x1C0580 + +#define mmDMA_NRTR_LBW_RANGE_BASE_13 0x1C0584 + +#define mmDMA_NRTR_LBW_RANGE_BASE_14 0x1C0588 + +#define mmDMA_NRTR_LBW_RANGE_BASE_15 0x1C058C + +#define mmDMA_NRTR_RGLTR 0x1C0590 + +#define mmDMA_NRTR_RGLTR_WR_RESULT 0x1C0594 + +#define mmDMA_NRTR_RGLTR_RD_RESULT 0x1C0598 + +#define mmDMA_NRTR_SCRAMB_EN 0x1C0600 + +#define mmDMA_NRTR_NON_LIN_SCRAMB 0x1C0604 + +#endif /* ASIC_REG_DMA_NRTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h new file mode 100644 index 000000000..b17f72c31 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h @@ -0,0 +1,464 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 
HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_QM_0_MASKS_H_ +#define ASIC_REG_DMA_QM_0_MASKS_H_ + +/* + ***************************************** + * DMA_QM_0 (Prototype: QMAN) + ***************************************** + */ + +/* DMA_QM_0_GLBL_CFG0 */ +#define DMA_QM_0_GLBL_CFG0_PQF_EN_SHIFT 0 +#define DMA_QM_0_GLBL_CFG0_PQF_EN_MASK 0x1 +#define DMA_QM_0_GLBL_CFG0_CQF_EN_SHIFT 1 +#define DMA_QM_0_GLBL_CFG0_CQF_EN_MASK 0x2 +#define DMA_QM_0_GLBL_CFG0_CP_EN_SHIFT 2 +#define DMA_QM_0_GLBL_CFG0_CP_EN_MASK 0x4 +#define DMA_QM_0_GLBL_CFG0_DMA_EN_SHIFT 3 +#define DMA_QM_0_GLBL_CFG0_DMA_EN_MASK 0x8 + +/* DMA_QM_0_GLBL_CFG1 */ +#define DMA_QM_0_GLBL_CFG1_PQF_STOP_SHIFT 0 +#define DMA_QM_0_GLBL_CFG1_PQF_STOP_MASK 0x1 +#define DMA_QM_0_GLBL_CFG1_CQF_STOP_SHIFT 1 +#define DMA_QM_0_GLBL_CFG1_CQF_STOP_MASK 0x2 +#define DMA_QM_0_GLBL_CFG1_CP_STOP_SHIFT 2 +#define DMA_QM_0_GLBL_CFG1_CP_STOP_MASK 0x4 +#define DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT 3 +#define DMA_QM_0_GLBL_CFG1_DMA_STOP_MASK 0x8 +#define DMA_QM_0_GLBL_CFG1_PQF_FLUSH_SHIFT 8 +#define DMA_QM_0_GLBL_CFG1_PQF_FLUSH_MASK 0x100 +#define DMA_QM_0_GLBL_CFG1_CQF_FLUSH_SHIFT 9 +#define DMA_QM_0_GLBL_CFG1_CQF_FLUSH_MASK 0x200 +#define DMA_QM_0_GLBL_CFG1_CP_FLUSH_SHIFT 10 +#define DMA_QM_0_GLBL_CFG1_CP_FLUSH_MASK 0x400 +#define DMA_QM_0_GLBL_CFG1_DMA_FLUSH_SHIFT 11 +#define DMA_QM_0_GLBL_CFG1_DMA_FLUSH_MASK 0x800 + +/* DMA_QM_0_GLBL_PROT */ +#define DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT 0 +#define DMA_QM_0_GLBL_PROT_PQF_PROT_MASK 0x1 +#define DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT 1 +#define DMA_QM_0_GLBL_PROT_CQF_PROT_MASK 0x2 +#define DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT 2 +#define DMA_QM_0_GLBL_PROT_CP_PROT_MASK 0x4 +#define DMA_QM_0_GLBL_PROT_DMA_PROT_SHIFT 3 +#define DMA_QM_0_GLBL_PROT_DMA_PROT_MASK 0x8 +#define DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT 4 +#define DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_MASK 0x10 +#define DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT 5 +#define DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_MASK 0x20 +#define DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT 6 +#define DMA_QM_0_GLBL_PROT_CP_ERR_PROT_MASK 0x40 +#define DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT 7 +#define DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_MASK 0x80 + +/* DMA_QM_0_GLBL_ERR_CFG */ +#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0 +#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1 +#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1 +#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2 +#define DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2 +#define DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4 +#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3 +#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8 +#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4 +#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10 +#define DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5 +#define DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20 +#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6 +#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40 +#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7 +#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80 +#define DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8 +#define DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100 +#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9 +#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200 +#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10 +#define 
DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400 +#define DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11 +#define DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800 + +/* DMA_QM_0_GLBL_ERR_ADDR_LO */ +#define DMA_QM_0_GLBL_ERR_ADDR_LO_VAL_SHIFT 0 +#define DMA_QM_0_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_GLBL_ERR_ADDR_HI */ +#define DMA_QM_0_GLBL_ERR_ADDR_HI_VAL_SHIFT 0 +#define DMA_QM_0_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_GLBL_ERR_WDATA */ +#define DMA_QM_0_GLBL_ERR_WDATA_VAL_SHIFT 0 +#define DMA_QM_0_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_GLBL_SECURE_PROPS */ +#define DMA_QM_0_GLBL_SECURE_PROPS_ASID_SHIFT 0 +#define DMA_QM_0_GLBL_SECURE_PROPS_ASID_MASK 0x3FF +#define DMA_QM_0_GLBL_SECURE_PROPS_MMBP_SHIFT 10 +#define DMA_QM_0_GLBL_SECURE_PROPS_MMBP_MASK 0x400 + +/* DMA_QM_0_GLBL_NON_SECURE_PROPS */ +#define DMA_QM_0_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0 +#define DMA_QM_0_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF +#define DMA_QM_0_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10 +#define DMA_QM_0_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400 + +/* DMA_QM_0_GLBL_STS0 */ +#define DMA_QM_0_GLBL_STS0_PQF_IDLE_SHIFT 0 +#define DMA_QM_0_GLBL_STS0_PQF_IDLE_MASK 0x1 +#define DMA_QM_0_GLBL_STS0_CQF_IDLE_SHIFT 1 +#define DMA_QM_0_GLBL_STS0_CQF_IDLE_MASK 0x2 +#define DMA_QM_0_GLBL_STS0_CP_IDLE_SHIFT 2 +#define DMA_QM_0_GLBL_STS0_CP_IDLE_MASK 0x4 +#define DMA_QM_0_GLBL_STS0_DMA_IDLE_SHIFT 3 +#define DMA_QM_0_GLBL_STS0_DMA_IDLE_MASK 0x8 +#define DMA_QM_0_GLBL_STS0_PQF_IS_STOP_SHIFT 4 +#define DMA_QM_0_GLBL_STS0_PQF_IS_STOP_MASK 0x10 +#define DMA_QM_0_GLBL_STS0_CQF_IS_STOP_SHIFT 5 +#define DMA_QM_0_GLBL_STS0_CQF_IS_STOP_MASK 0x20 +#define DMA_QM_0_GLBL_STS0_CP_IS_STOP_SHIFT 6 +#define DMA_QM_0_GLBL_STS0_CP_IS_STOP_MASK 0x40 +#define DMA_QM_0_GLBL_STS0_DMA_IS_STOP_SHIFT 7 +#define DMA_QM_0_GLBL_STS0_DMA_IS_STOP_MASK 0x80 + +/* DMA_QM_0_GLBL_STS1 */ +#define DMA_QM_0_GLBL_STS1_PQF_RD_ERR_SHIFT 0 +#define DMA_QM_0_GLBL_STS1_PQF_RD_ERR_MASK 0x1 +#define DMA_QM_0_GLBL_STS1_CQF_RD_ERR_SHIFT 1 +#define DMA_QM_0_GLBL_STS1_CQF_RD_ERR_MASK 0x2 +#define DMA_QM_0_GLBL_STS1_CP_RD_ERR_SHIFT 2 +#define DMA_QM_0_GLBL_STS1_CP_RD_ERR_MASK 0x4 +#define DMA_QM_0_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3 +#define DMA_QM_0_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8 +#define DMA_QM_0_GLBL_STS1_CP_STOP_OP_SHIFT 4 +#define DMA_QM_0_GLBL_STS1_CP_STOP_OP_MASK 0x10 +#define DMA_QM_0_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5 +#define DMA_QM_0_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20 +#define DMA_QM_0_GLBL_STS1_DMA_RD_ERR_SHIFT 8 +#define DMA_QM_0_GLBL_STS1_DMA_RD_ERR_MASK 0x100 +#define DMA_QM_0_GLBL_STS1_DMA_WR_ERR_SHIFT 9 +#define DMA_QM_0_GLBL_STS1_DMA_WR_ERR_MASK 0x200 +#define DMA_QM_0_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10 +#define DMA_QM_0_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400 +#define DMA_QM_0_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11 +#define DMA_QM_0_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800 + +/* DMA_QM_0_PQ_BASE_LO */ +#define DMA_QM_0_PQ_BASE_LO_VAL_SHIFT 0 +#define DMA_QM_0_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_BASE_HI */ +#define DMA_QM_0_PQ_BASE_HI_VAL_SHIFT 0 +#define DMA_QM_0_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_SIZE */ +#define DMA_QM_0_PQ_SIZE_VAL_SHIFT 0 +#define DMA_QM_0_PQ_SIZE_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_PI */ +#define DMA_QM_0_PQ_PI_VAL_SHIFT 0 +#define DMA_QM_0_PQ_PI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_CI */ +#define DMA_QM_0_PQ_CI_VAL_SHIFT 0 +#define DMA_QM_0_PQ_CI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_CFG0 */ +#define DMA_QM_0_PQ_CFG0_RESERVED_SHIFT 0 +#define DMA_QM_0_PQ_CFG0_RESERVED_MASK 0x1 + +/* 
DMA_QM_0_PQ_CFG1 */ +#define DMA_QM_0_PQ_CFG1_CREDIT_LIM_SHIFT 0 +#define DMA_QM_0_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define DMA_QM_0_PQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define DMA_QM_0_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* DMA_QM_0_PQ_ARUSER */ +#define DMA_QM_0_PQ_ARUSER_NOSNOOP_SHIFT 0 +#define DMA_QM_0_PQ_ARUSER_NOSNOOP_MASK 0x1 +#define DMA_QM_0_PQ_ARUSER_WORD_SHIFT 1 +#define DMA_QM_0_PQ_ARUSER_WORD_MASK 0x2 + +/* DMA_QM_0_PQ_PUSH0 */ +#define DMA_QM_0_PQ_PUSH0_PTR_LO_SHIFT 0 +#define DMA_QM_0_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_PUSH1 */ +#define DMA_QM_0_PQ_PUSH1_PTR_HI_SHIFT 0 +#define DMA_QM_0_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_PUSH2 */ +#define DMA_QM_0_PQ_PUSH2_TSIZE_SHIFT 0 +#define DMA_QM_0_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_PUSH3 */ +#define DMA_QM_0_PQ_PUSH3_RPT_SHIFT 0 +#define DMA_QM_0_PQ_PUSH3_RPT_MASK 0xFFFF +#define DMA_QM_0_PQ_PUSH3_CTL_SHIFT 16 +#define DMA_QM_0_PQ_PUSH3_CTL_MASK 0xFFFF0000 + +/* DMA_QM_0_PQ_STS0 */ +#define DMA_QM_0_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0 +#define DMA_QM_0_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF +#define DMA_QM_0_PQ_STS0_PQ_FREE_CNT_SHIFT 16 +#define DMA_QM_0_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000 + +/* DMA_QM_0_PQ_STS1 */ +#define DMA_QM_0_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0 +#define DMA_QM_0_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF +#define DMA_QM_0_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30 +#define DMA_QM_0_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000 +#define DMA_QM_0_PQ_STS1_PQ_BUSY_SHIFT 31 +#define DMA_QM_0_PQ_STS1_PQ_BUSY_MASK 0x80000000 + +/* DMA_QM_0_PQ_RD_RATE_LIM_EN */ +#define DMA_QM_0_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define DMA_QM_0_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN */ +#define DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* DMA_QM_0_PQ_RD_RATE_LIM_SAT */ +#define DMA_QM_0_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define DMA_QM_0_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* DMA_QM_0_PQ_RD_RATE_LIM_TOUT */ +#define DMA_QM_0_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define DMA_QM_0_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* DMA_QM_0_CQ_CFG0 */ +#define DMA_QM_0_CQ_CFG0_RESERVED_SHIFT 0 +#define DMA_QM_0_CQ_CFG0_RESERVED_MASK 0x1 + +/* DMA_QM_0_CQ_CFG1 */ +#define DMA_QM_0_CQ_CFG1_CREDIT_LIM_SHIFT 0 +#define DMA_QM_0_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define DMA_QM_0_CQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define DMA_QM_0_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* DMA_QM_0_CQ_ARUSER */ +#define DMA_QM_0_CQ_ARUSER_NOSNOOP_SHIFT 0 +#define DMA_QM_0_CQ_ARUSER_NOSNOOP_MASK 0x1 +#define DMA_QM_0_CQ_ARUSER_WORD_SHIFT 1 +#define DMA_QM_0_CQ_ARUSER_WORD_MASK 0x2 + +/* DMA_QM_0_CQ_PTR_LO */ +#define DMA_QM_0_CQ_PTR_LO_VAL_SHIFT 0 +#define DMA_QM_0_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CQ_PTR_HI */ +#define DMA_QM_0_CQ_PTR_HI_VAL_SHIFT 0 +#define DMA_QM_0_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CQ_TSIZE */ +#define DMA_QM_0_CQ_TSIZE_VAL_SHIFT 0 +#define DMA_QM_0_CQ_TSIZE_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CQ_CTL */ +#define DMA_QM_0_CQ_CTL_RPT_SHIFT 0 +#define DMA_QM_0_CQ_CTL_RPT_MASK 0xFFFF +#define DMA_QM_0_CQ_CTL_CTL_SHIFT 16 +#define DMA_QM_0_CQ_CTL_CTL_MASK 0xFFFF0000 + +/* DMA_QM_0_CQ_PTR_LO_STS */ +#define DMA_QM_0_CQ_PTR_LO_STS_VAL_SHIFT 0 +#define DMA_QM_0_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CQ_PTR_HI_STS */ +#define DMA_QM_0_CQ_PTR_HI_STS_VAL_SHIFT 0 +#define DMA_QM_0_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CQ_TSIZE_STS */ +#define DMA_QM_0_CQ_TSIZE_STS_VAL_SHIFT 0 +#define DMA_QM_0_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* 
DMA_QM_0_CQ_CTL_STS */ +#define DMA_QM_0_CQ_CTL_STS_RPT_SHIFT 0 +#define DMA_QM_0_CQ_CTL_STS_RPT_MASK 0xFFFF +#define DMA_QM_0_CQ_CTL_STS_CTL_SHIFT 16 +#define DMA_QM_0_CQ_CTL_STS_CTL_MASK 0xFFFF0000 + +/* DMA_QM_0_CQ_STS0 */ +#define DMA_QM_0_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0 +#define DMA_QM_0_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF +#define DMA_QM_0_CQ_STS0_CQ_FREE_CNT_SHIFT 16 +#define DMA_QM_0_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000 + +/* DMA_QM_0_CQ_STS1 */ +#define DMA_QM_0_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0 +#define DMA_QM_0_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF +#define DMA_QM_0_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30 +#define DMA_QM_0_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000 +#define DMA_QM_0_CQ_STS1_CQ_BUSY_SHIFT 31 +#define DMA_QM_0_CQ_STS1_CQ_BUSY_MASK 0x80000000 + +/* DMA_QM_0_CQ_RD_RATE_LIM_EN */ +#define DMA_QM_0_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define DMA_QM_0_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN */ +#define DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* DMA_QM_0_CQ_RD_RATE_LIM_SAT */ +#define DMA_QM_0_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define DMA_QM_0_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* DMA_QM_0_CQ_RD_RATE_LIM_TOUT */ +#define DMA_QM_0_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define DMA_QM_0_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* DMA_QM_0_CQ_IFIFO_CNT */ +#define DMA_QM_0_CQ_IFIFO_CNT_VAL_SHIFT 0 +#define DMA_QM_0_CQ_IFIFO_CNT_VAL_MASK 0x3 + +/* DMA_QM_0_CP_MSG_BASE0_ADDR_LO */ +#define DMA_QM_0_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_MSG_BASE0_ADDR_HI */ +#define DMA_QM_0_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_MSG_BASE1_ADDR_LO */ +#define DMA_QM_0_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_MSG_BASE1_ADDR_HI */ +#define DMA_QM_0_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_MSG_BASE2_ADDR_LO */ +#define DMA_QM_0_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_MSG_BASE2_ADDR_HI */ +#define DMA_QM_0_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_MSG_BASE3_ADDR_LO */ +#define DMA_QM_0_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_MSG_BASE3_ADDR_HI */ +#define DMA_QM_0_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_LDMA_TSIZE_OFFSET */ +#define DMA_QM_0_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0 +#define DMA_QM_0_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET */ +#define DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0 +#define DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET */ +#define DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0 +#define DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET */ +#define DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0 +#define DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET */ +#define DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0 +#define DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_LDMA_COMMIT_OFFSET */ +#define DMA_QM_0_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0 
+#define DMA_QM_0_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_FENCE0_RDATA */ +#define DMA_QM_0_CP_FENCE0_RDATA_INC_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE0_RDATA_INC_VAL_MASK 0xF + +/* DMA_QM_0_CP_FENCE1_RDATA */ +#define DMA_QM_0_CP_FENCE1_RDATA_INC_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE1_RDATA_INC_VAL_MASK 0xF + +/* DMA_QM_0_CP_FENCE2_RDATA */ +#define DMA_QM_0_CP_FENCE2_RDATA_INC_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE2_RDATA_INC_VAL_MASK 0xF + +/* DMA_QM_0_CP_FENCE3_RDATA */ +#define DMA_QM_0_CP_FENCE3_RDATA_INC_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE3_RDATA_INC_VAL_MASK 0xF + +/* DMA_QM_0_CP_FENCE0_CNT */ +#define DMA_QM_0_CP_FENCE0_CNT_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE0_CNT_VAL_MASK 0xFF + +/* DMA_QM_0_CP_FENCE1_CNT */ +#define DMA_QM_0_CP_FENCE1_CNT_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE1_CNT_VAL_MASK 0xFF + +/* DMA_QM_0_CP_FENCE2_CNT */ +#define DMA_QM_0_CP_FENCE2_CNT_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE2_CNT_VAL_MASK 0xFF + +/* DMA_QM_0_CP_FENCE3_CNT */ +#define DMA_QM_0_CP_FENCE3_CNT_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE3_CNT_VAL_MASK 0xFF + +/* DMA_QM_0_CP_STS */ +#define DMA_QM_0_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0 +#define DMA_QM_0_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF +#define DMA_QM_0_CP_STS_ERDY_SHIFT 16 +#define DMA_QM_0_CP_STS_ERDY_MASK 0x10000 +#define DMA_QM_0_CP_STS_RRDY_SHIFT 17 +#define DMA_QM_0_CP_STS_RRDY_MASK 0x20000 +#define DMA_QM_0_CP_STS_MRDY_SHIFT 18 +#define DMA_QM_0_CP_STS_MRDY_MASK 0x40000 +#define DMA_QM_0_CP_STS_SW_STOP_SHIFT 19 +#define DMA_QM_0_CP_STS_SW_STOP_MASK 0x80000 +#define DMA_QM_0_CP_STS_FENCE_ID_SHIFT 20 +#define DMA_QM_0_CP_STS_FENCE_ID_MASK 0x300000 +#define DMA_QM_0_CP_STS_FENCE_IN_PROGRESS_SHIFT 22 +#define DMA_QM_0_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000 + +/* DMA_QM_0_CP_CURRENT_INST_LO */ +#define DMA_QM_0_CP_CURRENT_INST_LO_VAL_SHIFT 0 +#define DMA_QM_0_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_CURRENT_INST_HI */ +#define DMA_QM_0_CP_CURRENT_INST_HI_VAL_SHIFT 0 +#define DMA_QM_0_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_BARRIER_CFG */ +#define DMA_QM_0_CP_BARRIER_CFG_EBGUARD_SHIFT 0 +#define DMA_QM_0_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF + +/* DMA_QM_0_CP_DBG_0 */ +#define DMA_QM_0_CP_DBG_0_VAL_SHIFT 0 +#define DMA_QM_0_CP_DBG_0_VAL_MASK 0xFF + +/* DMA_QM_0_PQ_BUF_ADDR */ +#define DMA_QM_0_PQ_BUF_ADDR_VAL_SHIFT 0 +#define DMA_QM_0_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_BUF_RDATA */ +#define DMA_QM_0_PQ_BUF_RDATA_VAL_SHIFT 0 +#define DMA_QM_0_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CQ_BUF_ADDR */ +#define DMA_QM_0_CQ_BUF_ADDR_VAL_SHIFT 0 +#define DMA_QM_0_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CQ_BUF_RDATA */ +#define DMA_QM_0_CQ_BUF_RDATA_VAL_SHIFT 0 +#define DMA_QM_0_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +#endif /* ASIC_REG_DMA_QM_0_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h new file mode 100644 index 000000000..bf360b301 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_QM_0_REGS_H_ +#define ASIC_REG_DMA_QM_0_REGS_H_ + +/* + ***************************************** + * DMA_QM_0 (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA_QM_0_GLBL_CFG0 0x400000 + +#define mmDMA_QM_0_GLBL_CFG1 0x400004 + +#define mmDMA_QM_0_GLBL_PROT 0x400008 + +#define mmDMA_QM_0_GLBL_ERR_CFG 0x40000C + +#define mmDMA_QM_0_GLBL_ERR_ADDR_LO 0x400010 + +#define mmDMA_QM_0_GLBL_ERR_ADDR_HI 0x400014 + +#define mmDMA_QM_0_GLBL_ERR_WDATA 0x400018 + +#define mmDMA_QM_0_GLBL_SECURE_PROPS 0x40001C + +#define mmDMA_QM_0_GLBL_NON_SECURE_PROPS 0x400020 + +#define mmDMA_QM_0_GLBL_STS0 0x400024 + +#define mmDMA_QM_0_GLBL_STS1 0x400028 + +#define mmDMA_QM_0_PQ_BASE_LO 0x400060 + +#define mmDMA_QM_0_PQ_BASE_HI 0x400064 + +#define mmDMA_QM_0_PQ_SIZE 0x400068 + +#define mmDMA_QM_0_PQ_PI 0x40006C + +#define mmDMA_QM_0_PQ_CI 0x400070 + +#define mmDMA_QM_0_PQ_CFG0 0x400074 + +#define mmDMA_QM_0_PQ_CFG1 0x400078 + +#define mmDMA_QM_0_PQ_ARUSER 0x40007C + +#define mmDMA_QM_0_PQ_PUSH0 0x400080 + +#define mmDMA_QM_0_PQ_PUSH1 0x400084 + +#define mmDMA_QM_0_PQ_PUSH2 0x400088 + +#define mmDMA_QM_0_PQ_PUSH3 0x40008C + +#define mmDMA_QM_0_PQ_STS0 0x400090 + +#define mmDMA_QM_0_PQ_STS1 0x400094 + +#define mmDMA_QM_0_PQ_RD_RATE_LIM_EN 0x4000A0 + +#define mmDMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN 0x4000A4 + +#define mmDMA_QM_0_PQ_RD_RATE_LIM_SAT 0x4000A8 + +#define mmDMA_QM_0_PQ_RD_RATE_LIM_TOUT 0x4000AC + +#define mmDMA_QM_0_CQ_CFG0 0x4000B0 + +#define mmDMA_QM_0_CQ_CFG1 0x4000B4 + +#define mmDMA_QM_0_CQ_ARUSER 0x4000B8 + +#define mmDMA_QM_0_CQ_PTR_LO 0x4000C0 + +#define mmDMA_QM_0_CQ_PTR_HI 0x4000C4 + +#define mmDMA_QM_0_CQ_TSIZE 0x4000C8 + +#define mmDMA_QM_0_CQ_CTL 0x4000CC + +#define mmDMA_QM_0_CQ_PTR_LO_STS 0x4000D4 + +#define mmDMA_QM_0_CQ_PTR_HI_STS 0x4000D8 + +#define mmDMA_QM_0_CQ_TSIZE_STS 0x4000DC + +#define mmDMA_QM_0_CQ_CTL_STS 0x4000E0 + +#define mmDMA_QM_0_CQ_STS0 0x4000E4 + +#define mmDMA_QM_0_CQ_STS1 0x4000E8 + +#define mmDMA_QM_0_CQ_RD_RATE_LIM_EN 0x4000F0 + +#define mmDMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN 0x4000F4 + +#define mmDMA_QM_0_CQ_RD_RATE_LIM_SAT 0x4000F8 + +#define mmDMA_QM_0_CQ_RD_RATE_LIM_TOUT 0x4000FC + +#define mmDMA_QM_0_CQ_IFIFO_CNT 0x400108 + +#define mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO 0x400120 + +#define mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI 0x400124 + +#define mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO 0x400128 + +#define mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI 0x40012C + +#define mmDMA_QM_0_CP_MSG_BASE2_ADDR_LO 0x400130 + +#define mmDMA_QM_0_CP_MSG_BASE2_ADDR_HI 0x400134 + +#define mmDMA_QM_0_CP_MSG_BASE3_ADDR_LO 0x400138 + +#define mmDMA_QM_0_CP_MSG_BASE3_ADDR_HI 0x40013C + +#define mmDMA_QM_0_CP_LDMA_TSIZE_OFFSET 0x400140 + +#define mmDMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET 0x400144 + +#define mmDMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET 0x400148 + +#define mmDMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET 0x40014C + +#define mmDMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET 0x400150 + +#define mmDMA_QM_0_CP_LDMA_COMMIT_OFFSET 0x400154 + +#define mmDMA_QM_0_CP_FENCE0_RDATA 0x400158 + +#define mmDMA_QM_0_CP_FENCE1_RDATA 0x40015C + +#define mmDMA_QM_0_CP_FENCE2_RDATA 0x400160 + +#define mmDMA_QM_0_CP_FENCE3_RDATA 0x400164 + +#define mmDMA_QM_0_CP_FENCE0_CNT 0x400168 + +#define mmDMA_QM_0_CP_FENCE1_CNT 0x40016C + +#define mmDMA_QM_0_CP_FENCE2_CNT 0x400170 + +#define mmDMA_QM_0_CP_FENCE3_CNT 0x400174 + +#define mmDMA_QM_0_CP_STS 0x400178 + 
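/*
 * Illustrative sketch only: the mm* offsets in this file are typically
 * consumed together with the SHIFT/MASK pairs from dma_qm_0_masks.h
 * above. Assuming <linux/types.h> for u32, a read-modify-write of the
 * PQF_EN field of GLBL_CFG0 could look as follows;
 * dma_qm_0_cfg0_set_pqf_en() is a hypothetical helper name, not a
 * symbol defined by this header.
 */
static inline u32 dma_qm_0_cfg0_set_pqf_en(u32 cfg0, u32 en)
{
	/* Clear the PQF_EN field, then place 'en' at its bit position. */
	cfg0 &= ~DMA_QM_0_GLBL_CFG0_PQF_EN_MASK;
	cfg0 |= (en << DMA_QM_0_GLBL_CFG0_PQF_EN_SHIFT) &
		DMA_QM_0_GLBL_CFG0_PQF_EN_MASK;
	return cfg0;
}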
+#define mmDMA_QM_0_CP_CURRENT_INST_LO 0x40017C + +#define mmDMA_QM_0_CP_CURRENT_INST_HI 0x400180 + +#define mmDMA_QM_0_CP_BARRIER_CFG 0x400184 + +#define mmDMA_QM_0_CP_DBG_0 0x400188 + +#define mmDMA_QM_0_PQ_BUF_ADDR 0x400300 + +#define mmDMA_QM_0_PQ_BUF_RDATA 0x400304 + +#define mmDMA_QM_0_CQ_BUF_ADDR 0x400308 + +#define mmDMA_QM_0_CQ_BUF_RDATA 0x40030C + +#endif /* ASIC_REG_DMA_QM_0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h new file mode 100644 index 000000000..51d432d05 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_QM_1_REGS_H_ +#define ASIC_REG_DMA_QM_1_REGS_H_ + +/* + ***************************************** + * DMA_QM_1 (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA_QM_1_GLBL_CFG0 0x408000 + +#define mmDMA_QM_1_GLBL_CFG1 0x408004 + +#define mmDMA_QM_1_GLBL_PROT 0x408008 + +#define mmDMA_QM_1_GLBL_ERR_CFG 0x40800C + +#define mmDMA_QM_1_GLBL_ERR_ADDR_LO 0x408010 + +#define mmDMA_QM_1_GLBL_ERR_ADDR_HI 0x408014 + +#define mmDMA_QM_1_GLBL_ERR_WDATA 0x408018 + +#define mmDMA_QM_1_GLBL_SECURE_PROPS 0x40801C + +#define mmDMA_QM_1_GLBL_NON_SECURE_PROPS 0x408020 + +#define mmDMA_QM_1_GLBL_STS0 0x408024 + +#define mmDMA_QM_1_GLBL_STS1 0x408028 + +#define mmDMA_QM_1_PQ_BASE_LO 0x408060 + +#define mmDMA_QM_1_PQ_BASE_HI 0x408064 + +#define mmDMA_QM_1_PQ_SIZE 0x408068 + +#define mmDMA_QM_1_PQ_PI 0x40806C + +#define mmDMA_QM_1_PQ_CI 0x408070 + +#define mmDMA_QM_1_PQ_CFG0 0x408074 + +#define mmDMA_QM_1_PQ_CFG1 0x408078 + +#define mmDMA_QM_1_PQ_ARUSER 0x40807C + +#define mmDMA_QM_1_PQ_PUSH0 0x408080 + +#define mmDMA_QM_1_PQ_PUSH1 0x408084 + +#define mmDMA_QM_1_PQ_PUSH2 0x408088 + +#define mmDMA_QM_1_PQ_PUSH3 0x40808C + +#define mmDMA_QM_1_PQ_STS0 0x408090 + +#define mmDMA_QM_1_PQ_STS1 0x408094 + +#define mmDMA_QM_1_PQ_RD_RATE_LIM_EN 0x4080A0 + +#define mmDMA_QM_1_PQ_RD_RATE_LIM_RST_TOKEN 0x4080A4 + +#define mmDMA_QM_1_PQ_RD_RATE_LIM_SAT 0x4080A8 + +#define mmDMA_QM_1_PQ_RD_RATE_LIM_TOUT 0x4080AC + +#define mmDMA_QM_1_CQ_CFG0 0x4080B0 + +#define mmDMA_QM_1_CQ_CFG1 0x4080B4 + +#define mmDMA_QM_1_CQ_ARUSER 0x4080B8 + +#define mmDMA_QM_1_CQ_PTR_LO 0x4080C0 + +#define mmDMA_QM_1_CQ_PTR_HI 0x4080C4 + +#define mmDMA_QM_1_CQ_TSIZE 0x4080C8 + +#define mmDMA_QM_1_CQ_CTL 0x4080CC + +#define mmDMA_QM_1_CQ_PTR_LO_STS 0x4080D4 + +#define mmDMA_QM_1_CQ_PTR_HI_STS 0x4080D8 + +#define mmDMA_QM_1_CQ_TSIZE_STS 0x4080DC + +#define mmDMA_QM_1_CQ_CTL_STS 0x4080E0 + +#define mmDMA_QM_1_CQ_STS0 0x4080E4 + +#define mmDMA_QM_1_CQ_STS1 0x4080E8 + +#define mmDMA_QM_1_CQ_RD_RATE_LIM_EN 0x4080F0 + +#define mmDMA_QM_1_CQ_RD_RATE_LIM_RST_TOKEN 0x4080F4 + +#define mmDMA_QM_1_CQ_RD_RATE_LIM_SAT 0x4080F8 + +#define mmDMA_QM_1_CQ_RD_RATE_LIM_TOUT 0x4080FC + +#define mmDMA_QM_1_CQ_IFIFO_CNT 0x408108 + +#define mmDMA_QM_1_CP_MSG_BASE0_ADDR_LO 0x408120 + +#define mmDMA_QM_1_CP_MSG_BASE0_ADDR_HI 0x408124 + +#define mmDMA_QM_1_CP_MSG_BASE1_ADDR_LO 0x408128 + +#define mmDMA_QM_1_CP_MSG_BASE1_ADDR_HI 0x40812C + +#define mmDMA_QM_1_CP_MSG_BASE2_ADDR_LO 0x408130 + +#define mmDMA_QM_1_CP_MSG_BASE2_ADDR_HI 0x408134 + +#define mmDMA_QM_1_CP_MSG_BASE3_ADDR_LO 0x408138 + 
+#define mmDMA_QM_1_CP_MSG_BASE3_ADDR_HI 0x40813C + +#define mmDMA_QM_1_CP_LDMA_TSIZE_OFFSET 0x408140 + +#define mmDMA_QM_1_CP_LDMA_SRC_BASE_LO_OFFSET 0x408144 + +#define mmDMA_QM_1_CP_LDMA_SRC_BASE_HI_OFFSET 0x408148 + +#define mmDMA_QM_1_CP_LDMA_DST_BASE_LO_OFFSET 0x40814C + +#define mmDMA_QM_1_CP_LDMA_DST_BASE_HI_OFFSET 0x408150 + +#define mmDMA_QM_1_CP_LDMA_COMMIT_OFFSET 0x408154 + +#define mmDMA_QM_1_CP_FENCE0_RDATA 0x408158 + +#define mmDMA_QM_1_CP_FENCE1_RDATA 0x40815C + +#define mmDMA_QM_1_CP_FENCE2_RDATA 0x408160 + +#define mmDMA_QM_1_CP_FENCE3_RDATA 0x408164 + +#define mmDMA_QM_1_CP_FENCE0_CNT 0x408168 + +#define mmDMA_QM_1_CP_FENCE1_CNT 0x40816C + +#define mmDMA_QM_1_CP_FENCE2_CNT 0x408170 + +#define mmDMA_QM_1_CP_FENCE3_CNT 0x408174 + +#define mmDMA_QM_1_CP_STS 0x408178 + +#define mmDMA_QM_1_CP_CURRENT_INST_LO 0x40817C + +#define mmDMA_QM_1_CP_CURRENT_INST_HI 0x408180 + +#define mmDMA_QM_1_CP_BARRIER_CFG 0x408184 + +#define mmDMA_QM_1_CP_DBG_0 0x408188 + +#define mmDMA_QM_1_PQ_BUF_ADDR 0x408300 + +#define mmDMA_QM_1_PQ_BUF_RDATA 0x408304 + +#define mmDMA_QM_1_CQ_BUF_ADDR 0x408308 + +#define mmDMA_QM_1_CQ_BUF_RDATA 0x40830C + +#endif /* ASIC_REG_DMA_QM_1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h new file mode 100644 index 000000000..18fc0c2b6 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_QM_2_REGS_H_ +#define ASIC_REG_DMA_QM_2_REGS_H_ + +/* + ***************************************** + * DMA_QM_2 (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA_QM_2_GLBL_CFG0 0x410000 + +#define mmDMA_QM_2_GLBL_CFG1 0x410004 + +#define mmDMA_QM_2_GLBL_PROT 0x410008 + +#define mmDMA_QM_2_GLBL_ERR_CFG 0x41000C + +#define mmDMA_QM_2_GLBL_ERR_ADDR_LO 0x410010 + +#define mmDMA_QM_2_GLBL_ERR_ADDR_HI 0x410014 + +#define mmDMA_QM_2_GLBL_ERR_WDATA 0x410018 + +#define mmDMA_QM_2_GLBL_SECURE_PROPS 0x41001C + +#define mmDMA_QM_2_GLBL_NON_SECURE_PROPS 0x410020 + +#define mmDMA_QM_2_GLBL_STS0 0x410024 + +#define mmDMA_QM_2_GLBL_STS1 0x410028 + +#define mmDMA_QM_2_PQ_BASE_LO 0x410060 + +#define mmDMA_QM_2_PQ_BASE_HI 0x410064 + +#define mmDMA_QM_2_PQ_SIZE 0x410068 + +#define mmDMA_QM_2_PQ_PI 0x41006C + +#define mmDMA_QM_2_PQ_CI 0x410070 + +#define mmDMA_QM_2_PQ_CFG0 0x410074 + +#define mmDMA_QM_2_PQ_CFG1 0x410078 + +#define mmDMA_QM_2_PQ_ARUSER 0x41007C + +#define mmDMA_QM_2_PQ_PUSH0 0x410080 + +#define mmDMA_QM_2_PQ_PUSH1 0x410084 + +#define mmDMA_QM_2_PQ_PUSH2 0x410088 + +#define mmDMA_QM_2_PQ_PUSH3 0x41008C + +#define mmDMA_QM_2_PQ_STS0 0x410090 + +#define mmDMA_QM_2_PQ_STS1 0x410094 + +#define mmDMA_QM_2_PQ_RD_RATE_LIM_EN 0x4100A0 + +#define mmDMA_QM_2_PQ_RD_RATE_LIM_RST_TOKEN 0x4100A4 + +#define mmDMA_QM_2_PQ_RD_RATE_LIM_SAT 0x4100A8 + +#define mmDMA_QM_2_PQ_RD_RATE_LIM_TOUT 0x4100AC + +#define mmDMA_QM_2_CQ_CFG0 0x4100B0 + +#define mmDMA_QM_2_CQ_CFG1 0x4100B4 + +#define mmDMA_QM_2_CQ_ARUSER 0x4100B8 + +#define mmDMA_QM_2_CQ_PTR_LO 0x4100C0 + +#define mmDMA_QM_2_CQ_PTR_HI 0x4100C4 + +#define mmDMA_QM_2_CQ_TSIZE 0x4100C8 + +#define mmDMA_QM_2_CQ_CTL 0x4100CC + +#define mmDMA_QM_2_CQ_PTR_LO_STS 0x4100D4 + +#define 
mmDMA_QM_2_CQ_PTR_HI_STS 0x4100D8 + +#define mmDMA_QM_2_CQ_TSIZE_STS 0x4100DC + +#define mmDMA_QM_2_CQ_CTL_STS 0x4100E0 + +#define mmDMA_QM_2_CQ_STS0 0x4100E4 + +#define mmDMA_QM_2_CQ_STS1 0x4100E8 + +#define mmDMA_QM_2_CQ_RD_RATE_LIM_EN 0x4100F0 + +#define mmDMA_QM_2_CQ_RD_RATE_LIM_RST_TOKEN 0x4100F4 + +#define mmDMA_QM_2_CQ_RD_RATE_LIM_SAT 0x4100F8 + +#define mmDMA_QM_2_CQ_RD_RATE_LIM_TOUT 0x4100FC + +#define mmDMA_QM_2_CQ_IFIFO_CNT 0x410108 + +#define mmDMA_QM_2_CP_MSG_BASE0_ADDR_LO 0x410120 + +#define mmDMA_QM_2_CP_MSG_BASE0_ADDR_HI 0x410124 + +#define mmDMA_QM_2_CP_MSG_BASE1_ADDR_LO 0x410128 + +#define mmDMA_QM_2_CP_MSG_BASE1_ADDR_HI 0x41012C + +#define mmDMA_QM_2_CP_MSG_BASE2_ADDR_LO 0x410130 + +#define mmDMA_QM_2_CP_MSG_BASE2_ADDR_HI 0x410134 + +#define mmDMA_QM_2_CP_MSG_BASE3_ADDR_LO 0x410138 + +#define mmDMA_QM_2_CP_MSG_BASE3_ADDR_HI 0x41013C + +#define mmDMA_QM_2_CP_LDMA_TSIZE_OFFSET 0x410140 + +#define mmDMA_QM_2_CP_LDMA_SRC_BASE_LO_OFFSET 0x410144 + +#define mmDMA_QM_2_CP_LDMA_SRC_BASE_HI_OFFSET 0x410148 + +#define mmDMA_QM_2_CP_LDMA_DST_BASE_LO_OFFSET 0x41014C + +#define mmDMA_QM_2_CP_LDMA_DST_BASE_HI_OFFSET 0x410150 + +#define mmDMA_QM_2_CP_LDMA_COMMIT_OFFSET 0x410154 + +#define mmDMA_QM_2_CP_FENCE0_RDATA 0x410158 + +#define mmDMA_QM_2_CP_FENCE1_RDATA 0x41015C + +#define mmDMA_QM_2_CP_FENCE2_RDATA 0x410160 + +#define mmDMA_QM_2_CP_FENCE3_RDATA 0x410164 + +#define mmDMA_QM_2_CP_FENCE0_CNT 0x410168 + +#define mmDMA_QM_2_CP_FENCE1_CNT 0x41016C + +#define mmDMA_QM_2_CP_FENCE2_CNT 0x410170 + +#define mmDMA_QM_2_CP_FENCE3_CNT 0x410174 + +#define mmDMA_QM_2_CP_STS 0x410178 + +#define mmDMA_QM_2_CP_CURRENT_INST_LO 0x41017C + +#define mmDMA_QM_2_CP_CURRENT_INST_HI 0x410180 + +#define mmDMA_QM_2_CP_BARRIER_CFG 0x410184 + +#define mmDMA_QM_2_CP_DBG_0 0x410188 + +#define mmDMA_QM_2_PQ_BUF_ADDR 0x410300 + +#define mmDMA_QM_2_PQ_BUF_RDATA 0x410304 + +#define mmDMA_QM_2_CQ_BUF_ADDR 0x410308 + +#define mmDMA_QM_2_CQ_BUF_RDATA 0x41030C + +#endif /* ASIC_REG_DMA_QM_2_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h new file mode 100644 index 000000000..6cf7204bf --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_QM_3_REGS_H_ +#define ASIC_REG_DMA_QM_3_REGS_H_ + +/* + ***************************************** + * DMA_QM_3 (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA_QM_3_GLBL_CFG0 0x418000 + +#define mmDMA_QM_3_GLBL_CFG1 0x418004 + +#define mmDMA_QM_3_GLBL_PROT 0x418008 + +#define mmDMA_QM_3_GLBL_ERR_CFG 0x41800C + +#define mmDMA_QM_3_GLBL_ERR_ADDR_LO 0x418010 + +#define mmDMA_QM_3_GLBL_ERR_ADDR_HI 0x418014 + +#define mmDMA_QM_3_GLBL_ERR_WDATA 0x418018 + +#define mmDMA_QM_3_GLBL_SECURE_PROPS 0x41801C + +#define mmDMA_QM_3_GLBL_NON_SECURE_PROPS 0x418020 + +#define mmDMA_QM_3_GLBL_STS0 0x418024 + +#define mmDMA_QM_3_GLBL_STS1 0x418028 + +#define mmDMA_QM_3_PQ_BASE_LO 0x418060 + +#define mmDMA_QM_3_PQ_BASE_HI 0x418064 + +#define mmDMA_QM_3_PQ_SIZE 0x418068 + +#define mmDMA_QM_3_PQ_PI 0x41806C + +#define mmDMA_QM_3_PQ_CI 0x418070 + +#define mmDMA_QM_3_PQ_CFG0 0x418074 + +#define mmDMA_QM_3_PQ_CFG1 0x418078 + +#define mmDMA_QM_3_PQ_ARUSER 0x41807C + +#define mmDMA_QM_3_PQ_PUSH0 0x418080 + +#define mmDMA_QM_3_PQ_PUSH1 0x418084 + +#define mmDMA_QM_3_PQ_PUSH2 0x418088 + +#define mmDMA_QM_3_PQ_PUSH3 0x41808C + +#define mmDMA_QM_3_PQ_STS0 0x418090 + +#define mmDMA_QM_3_PQ_STS1 0x418094 + +#define mmDMA_QM_3_PQ_RD_RATE_LIM_EN 0x4180A0 + +#define mmDMA_QM_3_PQ_RD_RATE_LIM_RST_TOKEN 0x4180A4 + +#define mmDMA_QM_3_PQ_RD_RATE_LIM_SAT 0x4180A8 + +#define mmDMA_QM_3_PQ_RD_RATE_LIM_TOUT 0x4180AC + +#define mmDMA_QM_3_CQ_CFG0 0x4180B0 + +#define mmDMA_QM_3_CQ_CFG1 0x4180B4 + +#define mmDMA_QM_3_CQ_ARUSER 0x4180B8 + +#define mmDMA_QM_3_CQ_PTR_LO 0x4180C0 + +#define mmDMA_QM_3_CQ_PTR_HI 0x4180C4 + +#define mmDMA_QM_3_CQ_TSIZE 0x4180C8 + +#define mmDMA_QM_3_CQ_CTL 0x4180CC + +#define mmDMA_QM_3_CQ_PTR_LO_STS 0x4180D4 + +#define mmDMA_QM_3_CQ_PTR_HI_STS 0x4180D8 + +#define mmDMA_QM_3_CQ_TSIZE_STS 0x4180DC + +#define mmDMA_QM_3_CQ_CTL_STS 0x4180E0 + +#define mmDMA_QM_3_CQ_STS0 0x4180E4 + +#define mmDMA_QM_3_CQ_STS1 0x4180E8 + +#define mmDMA_QM_3_CQ_RD_RATE_LIM_EN 0x4180F0 + +#define mmDMA_QM_3_CQ_RD_RATE_LIM_RST_TOKEN 0x4180F4 + +#define mmDMA_QM_3_CQ_RD_RATE_LIM_SAT 0x4180F8 + +#define mmDMA_QM_3_CQ_RD_RATE_LIM_TOUT 0x4180FC + +#define mmDMA_QM_3_CQ_IFIFO_CNT 0x418108 + +#define mmDMA_QM_3_CP_MSG_BASE0_ADDR_LO 0x418120 + +#define mmDMA_QM_3_CP_MSG_BASE0_ADDR_HI 0x418124 + +#define mmDMA_QM_3_CP_MSG_BASE1_ADDR_LO 0x418128 + +#define mmDMA_QM_3_CP_MSG_BASE1_ADDR_HI 0x41812C + +#define mmDMA_QM_3_CP_MSG_BASE2_ADDR_LO 0x418130 + +#define mmDMA_QM_3_CP_MSG_BASE2_ADDR_HI 0x418134 + +#define mmDMA_QM_3_CP_MSG_BASE3_ADDR_LO 0x418138 + +#define mmDMA_QM_3_CP_MSG_BASE3_ADDR_HI 0x41813C + +#define mmDMA_QM_3_CP_LDMA_TSIZE_OFFSET 0x418140 + +#define mmDMA_QM_3_CP_LDMA_SRC_BASE_LO_OFFSET 0x418144 + +#define mmDMA_QM_3_CP_LDMA_SRC_BASE_HI_OFFSET 0x418148 + +#define mmDMA_QM_3_CP_LDMA_DST_BASE_LO_OFFSET 0x41814C + +#define mmDMA_QM_3_CP_LDMA_DST_BASE_HI_OFFSET 0x418150 + +#define mmDMA_QM_3_CP_LDMA_COMMIT_OFFSET 0x418154 + +#define mmDMA_QM_3_CP_FENCE0_RDATA 0x418158 + +#define mmDMA_QM_3_CP_FENCE1_RDATA 0x41815C + +#define mmDMA_QM_3_CP_FENCE2_RDATA 0x418160 + +#define mmDMA_QM_3_CP_FENCE3_RDATA 0x418164 + +#define mmDMA_QM_3_CP_FENCE0_CNT 0x418168 + +#define mmDMA_QM_3_CP_FENCE1_CNT 0x41816C + +#define mmDMA_QM_3_CP_FENCE2_CNT 0x418170 + +#define mmDMA_QM_3_CP_FENCE3_CNT 0x418174 + +#define mmDMA_QM_3_CP_STS 0x418178 + 
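/*
 * Illustrative sketch only: the DMA_QM_<n> register files repeat at a
 * fixed 0x8000 stride (0x400000, 0x408000, 0x410000, 0x418000,
 * 0x420000), so any DMA_QM_0 offset can be rebased for another queue.
 * DMA_QM_STRIDE and dma_qm_reg() are hypothetical names inferred from
 * the listings, not definitions made by these headers.
 */
#define DMA_QM_STRIDE 0x8000
static inline u32 dma_qm_reg(u32 dma_qm_0_reg, u32 qid)
{
	/* e.g. dma_qm_reg(mmDMA_QM_0_CP_STS, 3) == mmDMA_QM_3_CP_STS */
	return dma_qm_0_reg + qid * DMA_QM_STRIDE;
}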
+#define mmDMA_QM_3_CP_CURRENT_INST_LO 0x41817C + +#define mmDMA_QM_3_CP_CURRENT_INST_HI 0x418180 + +#define mmDMA_QM_3_CP_BARRIER_CFG 0x418184 + +#define mmDMA_QM_3_CP_DBG_0 0x418188 + +#define mmDMA_QM_3_PQ_BUF_ADDR 0x418300 + +#define mmDMA_QM_3_PQ_BUF_RDATA 0x418304 + +#define mmDMA_QM_3_CQ_BUF_ADDR 0x418308 + +#define mmDMA_QM_3_CQ_BUF_RDATA 0x41830C + +#endif /* ASIC_REG_DMA_QM_3_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h new file mode 100644 index 000000000..36fef2682 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_QM_4_REGS_H_ +#define ASIC_REG_DMA_QM_4_REGS_H_ + +/* + ***************************************** + * DMA_QM_4 (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA_QM_4_GLBL_CFG0 0x420000 + +#define mmDMA_QM_4_GLBL_CFG1 0x420004 + +#define mmDMA_QM_4_GLBL_PROT 0x420008 + +#define mmDMA_QM_4_GLBL_ERR_CFG 0x42000C + +#define mmDMA_QM_4_GLBL_ERR_ADDR_LO 0x420010 + +#define mmDMA_QM_4_GLBL_ERR_ADDR_HI 0x420014 + +#define mmDMA_QM_4_GLBL_ERR_WDATA 0x420018 + +#define mmDMA_QM_4_GLBL_SECURE_PROPS 0x42001C + +#define mmDMA_QM_4_GLBL_NON_SECURE_PROPS 0x420020 + +#define mmDMA_QM_4_GLBL_STS0 0x420024 + +#define mmDMA_QM_4_GLBL_STS1 0x420028 + +#define mmDMA_QM_4_PQ_BASE_LO 0x420060 + +#define mmDMA_QM_4_PQ_BASE_HI 0x420064 + +#define mmDMA_QM_4_PQ_SIZE 0x420068 + +#define mmDMA_QM_4_PQ_PI 0x42006C + +#define mmDMA_QM_4_PQ_CI 0x420070 + +#define mmDMA_QM_4_PQ_CFG0 0x420074 + +#define mmDMA_QM_4_PQ_CFG1 0x420078 + +#define mmDMA_QM_4_PQ_ARUSER 0x42007C + +#define mmDMA_QM_4_PQ_PUSH0 0x420080 + +#define mmDMA_QM_4_PQ_PUSH1 0x420084 + +#define mmDMA_QM_4_PQ_PUSH2 0x420088 + +#define mmDMA_QM_4_PQ_PUSH3 0x42008C + +#define mmDMA_QM_4_PQ_STS0 0x420090 + +#define mmDMA_QM_4_PQ_STS1 0x420094 + +#define mmDMA_QM_4_PQ_RD_RATE_LIM_EN 0x4200A0 + +#define mmDMA_QM_4_PQ_RD_RATE_LIM_RST_TOKEN 0x4200A4 + +#define mmDMA_QM_4_PQ_RD_RATE_LIM_SAT 0x4200A8 + +#define mmDMA_QM_4_PQ_RD_RATE_LIM_TOUT 0x4200AC + +#define mmDMA_QM_4_CQ_CFG0 0x4200B0 + +#define mmDMA_QM_4_CQ_CFG1 0x4200B4 + +#define mmDMA_QM_4_CQ_ARUSER 0x4200B8 + +#define mmDMA_QM_4_CQ_PTR_LO 0x4200C0 + +#define mmDMA_QM_4_CQ_PTR_HI 0x4200C4 + +#define mmDMA_QM_4_CQ_TSIZE 0x4200C8 + +#define mmDMA_QM_4_CQ_CTL 0x4200CC + +#define mmDMA_QM_4_CQ_PTR_LO_STS 0x4200D4 + +#define mmDMA_QM_4_CQ_PTR_HI_STS 0x4200D8 + +#define mmDMA_QM_4_CQ_TSIZE_STS 0x4200DC + +#define mmDMA_QM_4_CQ_CTL_STS 0x4200E0 + +#define mmDMA_QM_4_CQ_STS0 0x4200E4 + +#define mmDMA_QM_4_CQ_STS1 0x4200E8 + +#define mmDMA_QM_4_CQ_RD_RATE_LIM_EN 0x4200F0 + +#define mmDMA_QM_4_CQ_RD_RATE_LIM_RST_TOKEN 0x4200F4 + +#define mmDMA_QM_4_CQ_RD_RATE_LIM_SAT 0x4200F8 + +#define mmDMA_QM_4_CQ_RD_RATE_LIM_TOUT 0x4200FC + +#define mmDMA_QM_4_CQ_IFIFO_CNT 0x420108 + +#define mmDMA_QM_4_CP_MSG_BASE0_ADDR_LO 0x420120 + +#define mmDMA_QM_4_CP_MSG_BASE0_ADDR_HI 0x420124 + +#define mmDMA_QM_4_CP_MSG_BASE1_ADDR_LO 0x420128 + +#define mmDMA_QM_4_CP_MSG_BASE1_ADDR_HI 0x42012C + +#define mmDMA_QM_4_CP_MSG_BASE2_ADDR_LO 0x420130 + +#define mmDMA_QM_4_CP_MSG_BASE2_ADDR_HI 0x420134 + +#define mmDMA_QM_4_CP_MSG_BASE3_ADDR_LO 0x420138 + 
+#define mmDMA_QM_4_CP_MSG_BASE3_ADDR_HI 0x42013C + +#define mmDMA_QM_4_CP_LDMA_TSIZE_OFFSET 0x420140 + +#define mmDMA_QM_4_CP_LDMA_SRC_BASE_LO_OFFSET 0x420144 + +#define mmDMA_QM_4_CP_LDMA_SRC_BASE_HI_OFFSET 0x420148 + +#define mmDMA_QM_4_CP_LDMA_DST_BASE_LO_OFFSET 0x42014C + +#define mmDMA_QM_4_CP_LDMA_DST_BASE_HI_OFFSET 0x420150 + +#define mmDMA_QM_4_CP_LDMA_COMMIT_OFFSET 0x420154 + +#define mmDMA_QM_4_CP_FENCE0_RDATA 0x420158 + +#define mmDMA_QM_4_CP_FENCE1_RDATA 0x42015C + +#define mmDMA_QM_4_CP_FENCE2_RDATA 0x420160 + +#define mmDMA_QM_4_CP_FENCE3_RDATA 0x420164 + +#define mmDMA_QM_4_CP_FENCE0_CNT 0x420168 + +#define mmDMA_QM_4_CP_FENCE1_CNT 0x42016C + +#define mmDMA_QM_4_CP_FENCE2_CNT 0x420170 + +#define mmDMA_QM_4_CP_FENCE3_CNT 0x420174 + +#define mmDMA_QM_4_CP_STS 0x420178 + +#define mmDMA_QM_4_CP_CURRENT_INST_LO 0x42017C + +#define mmDMA_QM_4_CP_CURRENT_INST_HI 0x420180 + +#define mmDMA_QM_4_CP_BARRIER_CFG 0x420184 + +#define mmDMA_QM_4_CP_DBG_0 0x420188 + +#define mmDMA_QM_4_PQ_BUF_ADDR 0x420300 + +#define mmDMA_QM_4_PQ_BUF_RDATA 0x420304 + +#define mmDMA_QM_4_CQ_BUF_ADDR 0x420308 + +#define mmDMA_QM_4_CQ_BUF_RDATA 0x42030C + +#endif /* ASIC_REG_DMA_QM_4_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h new file mode 100644 index 000000000..85b15010c --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h @@ -0,0 +1,1372 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef GOYA_BLOCKS_H_ +#define GOYA_BLOCKS_H_ + +#define mmPCI_NRTR_BASE 0x7FFC000000ull +#define PCI_NRTR_MAX_OFFSET 0x608 +#define PCI_NRTR_SECTION 0x4000 +#define mmPCI_RD_REGULATOR_BASE 0x7FFC004000ull +#define PCI_RD_REGULATOR_MAX_OFFSET 0x74 +#define PCI_RD_REGULATOR_SECTION 0x1000 +#define mmPCI_WR_REGULATOR_BASE 0x7FFC005000ull +#define PCI_WR_REGULATOR_MAX_OFFSET 0x74 +#define PCI_WR_REGULATOR_SECTION 0x3B000 +#define mmMME1_RTR_BASE 0x7FFC040000ull +#define MME1_RTR_MAX_OFFSET 0x608 +#define MME1_RTR_SECTION 0x4000 +#define mmMME1_RD_REGULATOR_BASE 0x7FFC044000ull +#define MME1_RD_REGULATOR_MAX_OFFSET 0x74 +#define MME1_RD_REGULATOR_SECTION 0x1000 +#define mmMME1_WR_REGULATOR_BASE 0x7FFC045000ull +#define MME1_WR_REGULATOR_MAX_OFFSET 0x74 +#define MME1_WR_REGULATOR_SECTION 0x3B000 +#define mmMME2_RTR_BASE 0x7FFC080000ull +#define MME2_RTR_MAX_OFFSET 0x608 +#define MME2_RTR_SECTION 0x4000 +#define mmMME2_RD_REGULATOR_BASE 0x7FFC084000ull +#define MME2_RD_REGULATOR_MAX_OFFSET 0x74 +#define MME2_RD_REGULATOR_SECTION 0x1000 +#define mmMME2_WR_REGULATOR_BASE 0x7FFC085000ull +#define MME2_WR_REGULATOR_MAX_OFFSET 0x74 +#define MME2_WR_REGULATOR_SECTION 0x3B000 +#define mmMME3_RTR_BASE 0x7FFC0C0000ull +#define MME3_RTR_MAX_OFFSET 0x608 +#define MME3_RTR_SECTION 0x4000 +#define mmMME3_RD_REGULATOR_BASE 0x7FFC0C4000ull +#define MME3_RD_REGULATOR_MAX_OFFSET 0x74 +#define MME3_RD_REGULATOR_SECTION 0x1000 +#define mmMME3_WR_REGULATOR_BASE 0x7FFC0C5000ull +#define MME3_WR_REGULATOR_MAX_OFFSET 0x74 +#define MME3_WR_REGULATOR_SECTION 0xB000 +#define mmMME_BASE 0x7FFC0D0000ull +#define MME_MAX_OFFSET 0xBB0 +#define MME_SECTION 0x8000 +#define mmMME_QM_BASE 0x7FFC0D8000ull +#define MME_QM_MAX_OFFSET 0x310 +#define MME_QM_SECTION 0x1000 +#define mmMME_CMDQ_BASE 0x7FFC0D9000ull +#define 
MME_CMDQ_MAX_OFFSET 0x310 +#define MME_CMDQ_SECTION 0x1000 +#define mmACC_MS_ECC_MEM_0_BASE 0x7FFC0DA000ull +#define ACC_MS_ECC_MEM_0_MAX_OFFSET 0x0 +#define ACC_MS_ECC_MEM_0_SECTION 0x1000 +#define mmACC_MS_ECC_MEM_1_BASE 0x7FFC0DB000ull +#define ACC_MS_ECC_MEM_1_MAX_OFFSET 0x0 +#define ACC_MS_ECC_MEM_1_SECTION 0x1000 +#define mmACC_MS_ECC_MEM_2_BASE 0x7FFC0DC000ull +#define ACC_MS_ECC_MEM_2_MAX_OFFSET 0x0 +#define ACC_MS_ECC_MEM_2_SECTION 0x1000 +#define mmACC_MS_ECC_MEM_3_BASE 0x7FFC0DD000ull +#define ACC_MS_ECC_MEM_3_MAX_OFFSET 0x0 +#define ACC_MS_ECC_MEM_3_SECTION 0x1000 +#define mmSBA_ECC_MEM_BASE 0x7FFC0DE000ull +#define SBA_ECC_MEM_MAX_OFFSET 0x0 +#define SBA_ECC_MEM_SECTION 0x1000 +#define mmSBB_ECC_MEM_BASE 0x7FFC0DF000ull +#define SBB_ECC_MEM_MAX_OFFSET 0x0 +#define SBB_ECC_MEM_SECTION 0x21000 +#define mmMME4_RTR_BASE 0x7FFC100000ull +#define MME4_RTR_MAX_OFFSET 0x608 +#define MME4_RTR_SECTION 0x4000 +#define mmMME4_RD_REGULATOR_BASE 0x7FFC104000ull +#define MME4_RD_REGULATOR_MAX_OFFSET 0x74 +#define MME4_RD_REGULATOR_SECTION 0x1000 +#define mmMME4_WR_REGULATOR_BASE 0x7FFC105000ull +#define MME4_WR_REGULATOR_MAX_OFFSET 0x74 +#define MME4_WR_REGULATOR_SECTION 0xB000 +#define mmSYNC_MNGR_BASE 0x7FFC110000ull +#define SYNC_MNGR_MAX_OFFSET 0x4400 +#define SYNC_MNGR_SECTION 0x30000 +#define mmMME5_RTR_BASE 0x7FFC140000ull +#define MME5_RTR_MAX_OFFSET 0x608 +#define MME5_RTR_SECTION 0x4000 +#define mmMME5_RD_REGULATOR_BASE 0x7FFC144000ull +#define MME5_RD_REGULATOR_MAX_OFFSET 0x74 +#define MME5_RD_REGULATOR_SECTION 0x1000 +#define mmMME5_WR_REGULATOR_BASE 0x7FFC145000ull +#define MME5_WR_REGULATOR_MAX_OFFSET 0x74 +#define MME5_WR_REGULATOR_SECTION 0x3B000 +#define mmMME6_RTR_BASE 0x7FFC180000ull +#define MME6_RTR_MAX_OFFSET 0x608 +#define MME6_RTR_SECTION 0x4000 +#define mmMME6_RD_REGULATOR_BASE 0x7FFC184000ull +#define MME6_RD_REGULATOR_MAX_OFFSET 0x74 +#define MME6_RD_REGULATOR_SECTION 0x1000 +#define mmMME6_WR_REGULATOR_BASE 0x7FFC185000ull +#define MME6_WR_REGULATOR_MAX_OFFSET 0x74 +#define MME6_WR_REGULATOR_SECTION 0x3B000 +#define mmDMA_NRTR_BASE 0x7FFC1C0000ull +#define DMA_NRTR_MAX_OFFSET 0x608 +#define DMA_NRTR_SECTION 0x4000 +#define mmDMA_RD_REGULATOR_BASE 0x7FFC1C4000ull +#define DMA_RD_REGULATOR_MAX_OFFSET 0x74 +#define DMA_RD_REGULATOR_SECTION 0x1000 +#define mmDMA_WR_REGULATOR_BASE 0x7FFC1C5000ull +#define DMA_WR_REGULATOR_MAX_OFFSET 0x74 +#define DMA_WR_REGULATOR_SECTION 0x3B000 +#define mmSRAM_Y0_X0_BANK_BASE 0x7FFC200000ull +#define SRAM_Y0_X0_BANK_MAX_OFFSET 0x4 +#define SRAM_Y0_X0_BANK_SECTION 0x1000 +#define mmSRAM_Y0_X0_RTR_BASE 0x7FFC201000ull +#define SRAM_Y0_X0_RTR_MAX_OFFSET 0x334 +#define SRAM_Y0_X0_RTR_SECTION 0x3000 +#define mmSRAM_Y0_X1_BANK_BASE 0x7FFC204000ull +#define SRAM_Y0_X1_BANK_MAX_OFFSET 0x4 +#define SRAM_Y0_X1_BANK_SECTION 0x1000 +#define mmSRAM_Y0_X1_RTR_BASE 0x7FFC205000ull +#define SRAM_Y0_X1_RTR_MAX_OFFSET 0x334 +#define SRAM_Y0_X1_RTR_SECTION 0x3000 +#define mmSRAM_Y0_X2_BANK_BASE 0x7FFC208000ull +#define SRAM_Y0_X2_BANK_MAX_OFFSET 0x4 +#define SRAM_Y0_X2_BANK_SECTION 0x1000 +#define mmSRAM_Y0_X2_RTR_BASE 0x7FFC209000ull +#define SRAM_Y0_X2_RTR_MAX_OFFSET 0x334 +#define SRAM_Y0_X2_RTR_SECTION 0x3000 +#define mmSRAM_Y0_X3_BANK_BASE 0x7FFC20C000ull +#define SRAM_Y0_X3_BANK_MAX_OFFSET 0x4 +#define SRAM_Y0_X3_BANK_SECTION 0x1000 +#define mmSRAM_Y0_X3_RTR_BASE 0x7FFC20D000ull +#define SRAM_Y0_X3_RTR_MAX_OFFSET 0x334 +#define SRAM_Y0_X3_RTR_SECTION 0x3000 +#define mmSRAM_Y0_X4_BANK_BASE 0x7FFC210000ull +#define SRAM_Y0_X4_BANK_MAX_OFFSET 0x4 
+#define SRAM_Y0_X4_BANK_SECTION 0x1000 +#define mmSRAM_Y0_X4_RTR_BASE 0x7FFC211000ull +#define SRAM_Y0_X4_RTR_MAX_OFFSET 0x334 +#define SRAM_Y0_X4_RTR_SECTION 0xF000 +#define mmSRAM_Y1_X0_BANK_BASE 0x7FFC220000ull +#define SRAM_Y1_X0_BANK_MAX_OFFSET 0x4 +#define SRAM_Y1_X0_BANK_SECTION 0x1000 +#define mmSRAM_Y1_X0_RTR_BASE 0x7FFC221000ull +#define SRAM_Y1_X0_RTR_MAX_OFFSET 0x334 +#define SRAM_Y1_X0_RTR_SECTION 0x3000 +#define mmSRAM_Y1_X1_BANK_BASE 0x7FFC224000ull +#define SRAM_Y1_X1_BANK_MAX_OFFSET 0x4 +#define SRAM_Y1_X1_BANK_SECTION 0x1000 +#define mmSRAM_Y1_X1_RTR_BASE 0x7FFC225000ull +#define SRAM_Y1_X1_RTR_MAX_OFFSET 0x334 +#define SRAM_Y1_X1_RTR_SECTION 0x3000 +#define mmSRAM_Y1_X2_BANK_BASE 0x7FFC228000ull +#define SRAM_Y1_X2_BANK_MAX_OFFSET 0x4 +#define SRAM_Y1_X2_BANK_SECTION 0x1000 +#define mmSRAM_Y1_X2_RTR_BASE 0x7FFC229000ull +#define SRAM_Y1_X2_RTR_MAX_OFFSET 0x334 +#define SRAM_Y1_X2_RTR_SECTION 0x3000 +#define mmSRAM_Y1_X3_BANK_BASE 0x7FFC22C000ull +#define SRAM_Y1_X3_BANK_MAX_OFFSET 0x4 +#define SRAM_Y1_X3_BANK_SECTION 0x1000 +#define mmSRAM_Y1_X3_RTR_BASE 0x7FFC22D000ull +#define SRAM_Y1_X3_RTR_MAX_OFFSET 0x334 +#define SRAM_Y1_X3_RTR_SECTION 0x3000 +#define mmSRAM_Y1_X4_BANK_BASE 0x7FFC230000ull +#define SRAM_Y1_X4_BANK_MAX_OFFSET 0x4 +#define SRAM_Y1_X4_BANK_SECTION 0x1000 +#define mmSRAM_Y1_X4_RTR_BASE 0x7FFC231000ull +#define SRAM_Y1_X4_RTR_MAX_OFFSET 0x334 +#define SRAM_Y1_X4_RTR_SECTION 0xF000 +#define mmSRAM_Y2_X0_BANK_BASE 0x7FFC240000ull +#define SRAM_Y2_X0_BANK_MAX_OFFSET 0x4 +#define SRAM_Y2_X0_BANK_SECTION 0x1000 +#define mmSRAM_Y2_X0_RTR_BASE 0x7FFC241000ull +#define SRAM_Y2_X0_RTR_MAX_OFFSET 0x334 +#define SRAM_Y2_X0_RTR_SECTION 0x3000 +#define mmSRAM_Y2_X1_BANK_BASE 0x7FFC244000ull +#define SRAM_Y2_X1_BANK_MAX_OFFSET 0x4 +#define SRAM_Y2_X1_BANK_SECTION 0x1000 +#define mmSRAM_Y2_X1_RTR_BASE 0x7FFC245000ull +#define SRAM_Y2_X1_RTR_MAX_OFFSET 0x334 +#define SRAM_Y2_X1_RTR_SECTION 0x3000 +#define mmSRAM_Y2_X2_BANK_BASE 0x7FFC248000ull +#define SRAM_Y2_X2_BANK_MAX_OFFSET 0x4 +#define SRAM_Y2_X2_BANK_SECTION 0x1000 +#define mmSRAM_Y2_X2_RTR_BASE 0x7FFC249000ull +#define SRAM_Y2_X2_RTR_MAX_OFFSET 0x334 +#define SRAM_Y2_X2_RTR_SECTION 0x3000 +#define mmSRAM_Y2_X3_BANK_BASE 0x7FFC24C000ull +#define SRAM_Y2_X3_BANK_MAX_OFFSET 0x4 +#define SRAM_Y2_X3_BANK_SECTION 0x1000 +#define mmSRAM_Y2_X3_RTR_BASE 0x7FFC24D000ull +#define SRAM_Y2_X3_RTR_MAX_OFFSET 0x334 +#define SRAM_Y2_X3_RTR_SECTION 0x3000 +#define mmSRAM_Y2_X4_BANK_BASE 0x7FFC250000ull +#define SRAM_Y2_X4_BANK_MAX_OFFSET 0x4 +#define SRAM_Y2_X4_BANK_SECTION 0x1000 +#define mmSRAM_Y2_X4_RTR_BASE 0x7FFC251000ull +#define SRAM_Y2_X4_RTR_MAX_OFFSET 0x334 +#define SRAM_Y2_X4_RTR_SECTION 0xF000 +#define mmSRAM_Y3_X0_BANK_BASE 0x7FFC260000ull +#define SRAM_Y3_X0_BANK_MAX_OFFSET 0x4 +#define SRAM_Y3_X0_BANK_SECTION 0x1000 +#define mmSRAM_Y3_X0_RTR_BASE 0x7FFC261000ull +#define SRAM_Y3_X0_RTR_MAX_OFFSET 0x334 +#define SRAM_Y3_X0_RTR_SECTION 0x3000 +#define mmSRAM_Y3_X1_BANK_BASE 0x7FFC264000ull +#define SRAM_Y3_X1_BANK_MAX_OFFSET 0x4 +#define SRAM_Y3_X1_BANK_SECTION 0x1000 +#define mmSRAM_Y3_X1_RTR_BASE 0x7FFC265000ull +#define SRAM_Y3_X1_RTR_MAX_OFFSET 0x334 +#define SRAM_Y3_X1_RTR_SECTION 0x3000 +#define mmSRAM_Y3_X2_BANK_BASE 0x7FFC268000ull +#define SRAM_Y3_X2_BANK_MAX_OFFSET 0x4 +#define SRAM_Y3_X2_BANK_SECTION 0x1000 +#define mmSRAM_Y3_X2_RTR_BASE 0x7FFC269000ull +#define SRAM_Y3_X2_RTR_MAX_OFFSET 0x334 +#define SRAM_Y3_X2_RTR_SECTION 0x3000 +#define mmSRAM_Y3_X3_BANK_BASE 0x7FFC26C000ull +#define 
SRAM_Y3_X3_BANK_MAX_OFFSET 0x4 +#define SRAM_Y3_X3_BANK_SECTION 0x1000 +#define mmSRAM_Y3_X3_RTR_BASE 0x7FFC26D000ull +#define SRAM_Y3_X3_RTR_MAX_OFFSET 0x334 +#define SRAM_Y3_X3_RTR_SECTION 0x3000 +#define mmSRAM_Y3_X4_BANK_BASE 0x7FFC270000ull +#define SRAM_Y3_X4_BANK_MAX_OFFSET 0x4 +#define SRAM_Y3_X4_BANK_SECTION 0x1000 +#define mmSRAM_Y3_X4_RTR_BASE 0x7FFC271000ull +#define SRAM_Y3_X4_RTR_MAX_OFFSET 0x334 +#define SRAM_Y3_X4_RTR_SECTION 0xF000 +#define mmSRAM_Y4_X0_BANK_BASE 0x7FFC280000ull +#define SRAM_Y4_X0_BANK_MAX_OFFSET 0x4 +#define SRAM_Y4_X0_BANK_SECTION 0x1000 +#define mmSRAM_Y4_X0_RTR_BASE 0x7FFC281000ull +#define SRAM_Y4_X0_RTR_MAX_OFFSET 0x334 +#define SRAM_Y4_X0_RTR_SECTION 0x3000 +#define mmSRAM_Y4_X1_BANK_BASE 0x7FFC284000ull +#define SRAM_Y4_X1_BANK_MAX_OFFSET 0x4 +#define SRAM_Y4_X1_BANK_SECTION 0x1000 +#define mmSRAM_Y4_X1_RTR_BASE 0x7FFC285000ull +#define SRAM_Y4_X1_RTR_MAX_OFFSET 0x334 +#define SRAM_Y4_X1_RTR_SECTION 0x3000 +#define mmSRAM_Y4_X2_BANK_BASE 0x7FFC288000ull +#define SRAM_Y4_X2_BANK_MAX_OFFSET 0x4 +#define SRAM_Y4_X2_BANK_SECTION 0x1000 +#define mmSRAM_Y4_X2_RTR_BASE 0x7FFC289000ull +#define SRAM_Y4_X2_RTR_MAX_OFFSET 0x334 +#define SRAM_Y4_X2_RTR_SECTION 0x3000 +#define mmSRAM_Y4_X3_BANK_BASE 0x7FFC28C000ull +#define SRAM_Y4_X3_BANK_MAX_OFFSET 0x4 +#define SRAM_Y4_X3_BANK_SECTION 0x1000 +#define mmSRAM_Y4_X3_RTR_BASE 0x7FFC28D000ull +#define SRAM_Y4_X3_RTR_MAX_OFFSET 0x334 +#define SRAM_Y4_X3_RTR_SECTION 0x3000 +#define mmSRAM_Y4_X4_BANK_BASE 0x7FFC290000ull +#define SRAM_Y4_X4_BANK_MAX_OFFSET 0x4 +#define SRAM_Y4_X4_BANK_SECTION 0x1000 +#define mmSRAM_Y4_X4_RTR_BASE 0x7FFC291000ull +#define SRAM_Y4_X4_RTR_MAX_OFFSET 0x334 +#define SRAM_Y4_X4_RTR_SECTION 0xF000 +#define mmSRAM_Y5_X0_BANK_BASE 0x7FFC2A0000ull +#define SRAM_Y5_X0_BANK_MAX_OFFSET 0x4 +#define SRAM_Y5_X0_BANK_SECTION 0x1000 +#define mmSRAM_Y5_X0_RTR_BASE 0x7FFC2A1000ull +#define SRAM_Y5_X0_RTR_MAX_OFFSET 0x334 +#define SRAM_Y5_X0_RTR_SECTION 0x3000 +#define mmSRAM_Y5_X1_BANK_BASE 0x7FFC2A4000ull +#define SRAM_Y5_X1_BANK_MAX_OFFSET 0x4 +#define SRAM_Y5_X1_BANK_SECTION 0x1000 +#define mmSRAM_Y5_X1_RTR_BASE 0x7FFC2A5000ull +#define SRAM_Y5_X1_RTR_MAX_OFFSET 0x334 +#define SRAM_Y5_X1_RTR_SECTION 0x3000 +#define mmSRAM_Y5_X2_BANK_BASE 0x7FFC2A8000ull +#define SRAM_Y5_X2_BANK_MAX_OFFSET 0x4 +#define SRAM_Y5_X2_BANK_SECTION 0x1000 +#define mmSRAM_Y5_X2_RTR_BASE 0x7FFC2A9000ull +#define SRAM_Y5_X2_RTR_MAX_OFFSET 0x334 +#define SRAM_Y5_X2_RTR_SECTION 0x3000 +#define mmSRAM_Y5_X3_BANK_BASE 0x7FFC2AC000ull +#define SRAM_Y5_X3_BANK_MAX_OFFSET 0x4 +#define SRAM_Y5_X3_BANK_SECTION 0x1000 +#define mmSRAM_Y5_X3_RTR_BASE 0x7FFC2AD000ull +#define SRAM_Y5_X3_RTR_MAX_OFFSET 0x334 +#define SRAM_Y5_X3_RTR_SECTION 0x3000 +#define mmSRAM_Y5_X4_BANK_BASE 0x7FFC2B0000ull +#define SRAM_Y5_X4_BANK_MAX_OFFSET 0x4 +#define SRAM_Y5_X4_BANK_SECTION 0x1000 +#define mmSRAM_Y5_X4_RTR_BASE 0x7FFC2B1000ull +#define SRAM_Y5_X4_RTR_MAX_OFFSET 0x334 +#define SRAM_Y5_X4_RTR_SECTION 0x14F000 +#define mmDMA_QM_0_BASE 0x7FFC400000ull +#define DMA_QM_0_MAX_OFFSET 0x310 +#define DMA_QM_0_SECTION 0x1000 +#define mmDMA_CH_0_BASE 0x7FFC401000ull +#define DMA_CH_0_MAX_OFFSET 0x200 +#define DMA_CH_0_SECTION 0x7000 +#define mmDMA_QM_1_BASE 0x7FFC408000ull +#define DMA_QM_1_MAX_OFFSET 0x310 +#define DMA_QM_1_SECTION 0x1000 +#define mmDMA_CH_1_BASE 0x7FFC409000ull +#define DMA_CH_1_MAX_OFFSET 0x200 +#define DMA_CH_1_SECTION 0x7000 +#define mmDMA_QM_2_BASE 0x7FFC410000ull +#define DMA_QM_2_MAX_OFFSET 0x310 +#define DMA_QM_2_SECTION 0x1000 
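/*
 * Illustrative note (not part of the auto-generated register map): each
 * hardware block above is described by a BASE/MAX_OFFSET/SECTION triple.
 * <BLOCK>_BASE is the address of the block's first register,
 * <BLOCK>_MAX_OFFSET appears to be the extent of the registers defined
 * inside the block, and <BLOCK>_SECTION is the stride from this block's
 * base to the next block's base. The values are self-consistent, e.g.:
 *
 *   mmDMA_QM_0_BASE + DMA_QM_0_SECTION == mmDMA_CH_0_BASE
 *   (0x7FFC400000ull + 0x1000 == 0x7FFC401000ull)
 *
 * A hypothetical helper for the five DMA engines, whose QM blocks sit at
 * a fixed 0x8000 stride (mmDMA_QM_0_BASE..mmDMA_QM_4_BASE), could
 * therefore look like:
 *
 *   #define DMA_QM_BASE(i) (mmDMA_QM_0_BASE + (i) * 0x8000ull)
 *
 * DMA_QM_BASE is not defined by this file; it is shown only to make the
 * addressing convention concrete.
 */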
+#define mmDMA_CH_2_BASE 0x7FFC411000ull +#define DMA_CH_2_MAX_OFFSET 0x200 +#define DMA_CH_2_SECTION 0x7000 +#define mmDMA_QM_3_BASE 0x7FFC418000ull +#define DMA_QM_3_MAX_OFFSET 0x310 +#define DMA_QM_3_SECTION 0x1000 +#define mmDMA_CH_3_BASE 0x7FFC419000ull +#define DMA_CH_3_MAX_OFFSET 0x200 +#define DMA_CH_3_SECTION 0x7000 +#define mmDMA_QM_4_BASE 0x7FFC420000ull +#define DMA_QM_4_MAX_OFFSET 0x310 +#define DMA_QM_4_SECTION 0x1000 +#define mmDMA_CH_4_BASE 0x7FFC421000ull +#define DMA_CH_4_MAX_OFFSET 0x200 +#define DMA_CH_4_SECTION 0x20000 +#define mmCPU_CA53_CFG_BASE 0x7FFC441000ull +#define CPU_CA53_CFG_MAX_OFFSET 0x218 +#define CPU_CA53_CFG_SECTION 0x1000 +#define mmCPU_IF_BASE 0x7FFC442000ull +#define CPU_IF_MAX_OFFSET 0x134 +#define CPU_IF_SECTION 0x2000 +#define mmCPU_TIMESTAMP_BASE 0x7FFC444000ull +#define CPU_TIMESTAMP_MAX_OFFSET 0x1000 +#define CPU_TIMESTAMP_SECTION 0x3C000 +#define mmMMU_BASE 0x7FFC480000ull +#define MMU_MAX_OFFSET 0x44 +#define MMU_SECTION 0x10000 +#define mmSTLB_BASE 0x7FFC490000ull +#define STLB_MAX_OFFSET 0x50 +#define STLB_SECTION 0x10000 +#define mmNORTH_THERMAL_SENSOR_BASE 0x7FFC4A0000ull +#define NORTH_THERMAL_SENSOR_MAX_OFFSET 0xE64 +#define NORTH_THERMAL_SENSOR_SECTION 0x1000 +#define mmMC_PLL_BASE 0x7FFC4A1000ull +#define MC_PLL_MAX_OFFSET 0x444 +#define MC_PLL_SECTION 0x1000 +#define mmCPU_PLL_BASE 0x7FFC4A2000ull +#define CPU_PLL_MAX_OFFSET 0x444 +#define CPU_PLL_SECTION 0x1000 +#define mmIC_PLL_BASE 0x7FFC4A3000ull +#define IC_PLL_MAX_OFFSET 0x444 +#define IC_PLL_SECTION 0x1000 +#define mmDMA_PROCESS_MON_BASE 0x7FFC4A4000ull +#define DMA_PROCESS_MON_MAX_OFFSET 0x4 +#define DMA_PROCESS_MON_SECTION 0xC000 +#define mmDMA_MACRO_BASE 0x7FFC4B0000ull +#define DMA_MACRO_MAX_OFFSET 0x15C +#define DMA_MACRO_SECTION 0x150000 +#define mmDDR_PHY_CH0_BASE 0x7FFC600000ull +#define DDR_PHY_CH0_MAX_OFFSET 0x0 +#define DDR_PHY_CH0_SECTION 0x40000 +#define mmDDR_MC_CH0_BASE 0x7FFC640000ull +#define DDR_MC_CH0_MAX_OFFSET 0xF34 +#define DDR_MC_CH0_SECTION 0x8000 +#define mmDDR_MISC_CH0_BASE 0x7FFC648000ull +#define DDR_MISC_CH0_MAX_OFFSET 0x204 +#define DDR_MISC_CH0_SECTION 0xB8000 +#define mmDDR_PHY_CH1_BASE 0x7FFC700000ull +#define DDR_PHY_CH1_MAX_OFFSET 0x0 +#define DDR_PHY_CH1_SECTION 0x40000 +#define mmDDR_MC_CH1_BASE 0x7FFC740000ull +#define DDR_MC_CH1_MAX_OFFSET 0xF34 +#define DDR_MC_CH1_SECTION 0x8000 +#define mmDDR_MISC_CH1_BASE 0x7FFC748000ull +#define DDR_MISC_CH1_MAX_OFFSET 0x204 +#define DDR_MISC_CH1_SECTION 0xB8000 +#define mmGIC_BASE 0x7FFC800000ull +#define GIC_MAX_OFFSET 0x10000 +#define GIC_SECTION 0x401000 +#define mmPCIE_WRAP_BASE 0x7FFCC01000ull +#define PCIE_WRAP_MAX_OFFSET 0xDF4 +#define PCIE_WRAP_SECTION 0x1000 +#define mmPCIE_DBI_BASE 0x7FFCC02000ull +#define PCIE_DBI_MAX_OFFSET 0xC04 +#define PCIE_DBI_SECTION 0x2000 +#define mmPCIE_CORE_BASE 0x7FFCC04000ull +#define PCIE_CORE_MAX_OFFSET 0x9B8 +#define PCIE_CORE_SECTION 0x1000 +#define mmPCIE_DB_CFG_BASE 0x7FFCC05000ull +#define PCIE_DB_CFG_MAX_OFFSET 0xE34 +#define PCIE_DB_CFG_SECTION 0x1000 +#define mmPCIE_DB_CMD_BASE 0x7FFCC06000ull +#define PCIE_DB_CMD_MAX_OFFSET 0x810 +#define PCIE_DB_CMD_SECTION 0x1000 +#define mmPCIE_AUX_BASE 0x7FFCC07000ull +#define PCIE_AUX_MAX_OFFSET 0x9BC +#define PCIE_AUX_SECTION 0x1000 +#define mmPCIE_DB_RSV_BASE 0x7FFCC08000ull +#define PCIE_DB_RSV_MAX_OFFSET 0x800 +#define PCIE_DB_RSV_SECTION 0x8000 +#define mmPCIE_PHY_BASE 0x7FFCC10000ull +#define PCIE_PHY_MAX_OFFSET 0x924 +#define PCIE_PHY_SECTION 0x30000 +#define mmPSOC_I2C_M0_BASE 0x7FFCC40000ull +#define 
PSOC_I2C_M0_MAX_OFFSET 0x100 +#define PSOC_I2C_M0_SECTION 0x1000 +#define mmPSOC_I2C_M1_BASE 0x7FFCC41000ull +#define PSOC_I2C_M1_MAX_OFFSET 0x100 +#define PSOC_I2C_M1_SECTION 0x1000 +#define mmPSOC_I2C_S_BASE 0x7FFCC42000ull +#define PSOC_I2C_S_MAX_OFFSET 0x100 +#define PSOC_I2C_S_SECTION 0x1000 +#define mmPSOC_SPI_BASE 0x7FFCC43000ull +#define PSOC_SPI_MAX_OFFSET 0x100 +#define PSOC_SPI_SECTION 0x1000 +#define mmPSOC_EMMC_BASE 0x7FFCC44000ull +#define PSOC_EMMC_MAX_OFFSET 0xF70 +#define PSOC_EMMC_SECTION 0x1000 +#define mmPSOC_UART_0_BASE 0x7FFCC45000ull +#define PSOC_UART_0_MAX_OFFSET 0x1000 +#define PSOC_UART_0_SECTION 0x1000 +#define mmPSOC_UART_1_BASE 0x7FFCC46000ull +#define PSOC_UART_1_MAX_OFFSET 0x1000 +#define PSOC_UART_1_SECTION 0x1000 +#define mmPSOC_TIMER_BASE 0x7FFCC47000ull +#define PSOC_TIMER_MAX_OFFSET 0x1000 +#define PSOC_TIMER_SECTION 0x1000 +#define mmPSOC_WDOG_BASE 0x7FFCC48000ull +#define PSOC_WDOG_MAX_OFFSET 0x1000 +#define PSOC_WDOG_SECTION 0x1000 +#define mmPSOC_TIMESTAMP_BASE 0x7FFCC49000ull +#define PSOC_TIMESTAMP_MAX_OFFSET 0x1000 +#define PSOC_TIMESTAMP_SECTION 0x1000 +#define mmPSOC_EFUSE_BASE 0x7FFCC4A000ull +#define PSOC_EFUSE_MAX_OFFSET 0x10C +#define PSOC_EFUSE_SECTION 0x1000 +#define mmPSOC_GLOBAL_CONF_BASE 0x7FFCC4B000ull +#define PSOC_GLOBAL_CONF_MAX_OFFSET 0xA48 +#define PSOC_GLOBAL_CONF_SECTION 0x1000 +#define mmPSOC_GPIO0_BASE 0x7FFCC4C000ull +#define PSOC_GPIO0_MAX_OFFSET 0x1000 +#define PSOC_GPIO0_SECTION 0x1000 +#define mmPSOC_GPIO1_BASE 0x7FFCC4D000ull +#define PSOC_GPIO1_MAX_OFFSET 0x1000 +#define PSOC_GPIO1_SECTION 0x1000 +#define mmPSOC_BTL_BASE 0x7FFCC4E000ull +#define PSOC_BTL_MAX_OFFSET 0x124 +#define PSOC_BTL_SECTION 0x1000 +#define mmPSOC_CS_TRACE_BASE 0x7FFCC4F000ull +#define PSOC_CS_TRACE_MAX_OFFSET 0x0 +#define PSOC_CS_TRACE_SECTION 0x1000 +#define mmPSOC_GPIO2_BASE 0x7FFCC50000ull +#define PSOC_GPIO2_MAX_OFFSET 0x1000 +#define PSOC_GPIO2_SECTION 0x1000 +#define mmPSOC_GPIO3_BASE 0x7FFCC51000ull +#define PSOC_GPIO3_MAX_OFFSET 0x1000 +#define PSOC_GPIO3_SECTION 0x1000 +#define mmPSOC_GPIO4_BASE 0x7FFCC52000ull +#define PSOC_GPIO4_MAX_OFFSET 0x1000 +#define PSOC_GPIO4_SECTION 0x1000 +#define mmPSOC_DFT_EFUSE_BASE 0x7FFCC53000ull +#define PSOC_DFT_EFUSE_MAX_OFFSET 0x10C +#define PSOC_DFT_EFUSE_SECTION 0x1000 +#define mmPSOC_PM_BASE 0x7FFCC54000ull +#define PSOC_PM_MAX_OFFSET 0x4 +#define PSOC_PM_SECTION 0x1000 +#define mmPSOC_TS_BASE 0x7FFCC55000ull +#define PSOC_TS_MAX_OFFSET 0xE64 +#define PSOC_TS_SECTION 0xB000 +#define mmPSOC_MII_BASE 0x7FFCC60000ull +#define PSOC_MII_MAX_OFFSET 0x105C +#define PSOC_MII_SECTION 0x10000 +#define mmPSOC_EMMC_PLL_BASE 0x7FFCC70000ull +#define PSOC_EMMC_PLL_MAX_OFFSET 0x444 +#define PSOC_EMMC_PLL_SECTION 0x1000 +#define mmPSOC_MME_PLL_BASE 0x7FFCC71000ull +#define PSOC_MME_PLL_MAX_OFFSET 0x444 +#define PSOC_MME_PLL_SECTION 0x1000 +#define mmPSOC_PCI_PLL_BASE 0x7FFCC72000ull +#define PSOC_PCI_PLL_MAX_OFFSET 0x444 +#define PSOC_PCI_PLL_SECTION 0x6000 +#define mmPSOC_PWM0_BASE 0x7FFCC78000ull +#define PSOC_PWM0_MAX_OFFSET 0x58 +#define PSOC_PWM0_SECTION 0x1000 +#define mmPSOC_PWM1_BASE 0x7FFCC79000ull +#define PSOC_PWM1_MAX_OFFSET 0x58 +#define PSOC_PWM1_SECTION 0x1000 +#define mmPSOC_PWM2_BASE 0x7FFCC7A000ull +#define PSOC_PWM2_MAX_OFFSET 0x58 +#define PSOC_PWM2_SECTION 0x1000 +#define mmPSOC_PWM3_BASE 0x7FFCC7B000ull +#define PSOC_PWM3_MAX_OFFSET 0x58 +#define PSOC_PWM3_SECTION 0x185000 +#define mmTPC0_NRTR_BASE 0x7FFCE00000ull +#define TPC0_NRTR_MAX_OFFSET 0x608 +#define TPC0_NRTR_SECTION 0x1000 +#define 
mmTPC_PLL_BASE 0x7FFCE01000ull +#define TPC_PLL_MAX_OFFSET 0x444 +#define TPC_PLL_SECTION 0x1000 +#define mmTPC_THEMAL_SENSOR_BASE 0x7FFCE02000ull +#define TPC_THEMAL_SENSOR_MAX_OFFSET 0xE64 +#define TPC_THEMAL_SENSOR_SECTION 0x1000 +#define mmTPC_PROCESS_MON_BASE 0x7FFCE03000ull +#define TPC_PROCESS_MON_MAX_OFFSET 0x4 +#define TPC_PROCESS_MON_SECTION 0x1000 +#define mmTPC0_RD_REGULATOR_BASE 0x7FFCE04000ull +#define TPC0_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC0_RD_REGULATOR_SECTION 0x1000 +#define mmTPC0_WR_REGULATOR_BASE 0x7FFCE05000ull +#define TPC0_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC0_WR_REGULATOR_SECTION 0x1000 +#define mmTPC0_CFG_BASE 0x7FFCE06000ull +#define TPC0_CFG_MAX_OFFSET 0xE30 +#define TPC0_CFG_SECTION 0x2000 +#define mmTPC0_QM_BASE 0x7FFCE08000ull +#define TPC0_QM_MAX_OFFSET 0x310 +#define TPC0_QM_SECTION 0x1000 +#define mmTPC0_CMDQ_BASE 0x7FFCE09000ull +#define TPC0_CMDQ_MAX_OFFSET 0x310 +#define TPC0_CMDQ_SECTION 0x37000 +#define mmTPC1_RTR_BASE 0x7FFCE40000ull +#define TPC1_RTR_MAX_OFFSET 0x608 +#define TPC1_RTR_SECTION 0x4000 +#define mmTPC1_WR_REGULATOR_BASE 0x7FFCE44000ull +#define TPC1_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC1_WR_REGULATOR_SECTION 0x1000 +#define mmTPC1_RD_REGULATOR_BASE 0x7FFCE45000ull +#define TPC1_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC1_RD_REGULATOR_SECTION 0x1000 +#define mmTPC1_CFG_BASE 0x7FFCE46000ull +#define TPC1_CFG_MAX_OFFSET 0xE30 +#define TPC1_CFG_SECTION 0x2000 +#define mmTPC1_QM_BASE 0x7FFCE48000ull +#define TPC1_QM_MAX_OFFSET 0x310 +#define TPC1_QM_SECTION 0x1000 +#define mmTPC1_CMDQ_BASE 0x7FFCE49000ull +#define TPC1_CMDQ_MAX_OFFSET 0x310 +#define TPC1_CMDQ_SECTION 0x37000 +#define mmTPC2_RTR_BASE 0x7FFCE80000ull +#define TPC2_RTR_MAX_OFFSET 0x608 +#define TPC2_RTR_SECTION 0x4000 +#define mmTPC2_RD_REGULATOR_BASE 0x7FFCE84000ull +#define TPC2_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC2_RD_REGULATOR_SECTION 0x1000 +#define mmTPC2_WR_REGULATOR_BASE 0x7FFCE85000ull +#define TPC2_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC2_WR_REGULATOR_SECTION 0x1000 +#define mmTPC2_CFG_BASE 0x7FFCE86000ull +#define TPC2_CFG_MAX_OFFSET 0xE30 +#define TPC2_CFG_SECTION 0x2000 +#define mmTPC2_QM_BASE 0x7FFCE88000ull +#define TPC2_QM_MAX_OFFSET 0x310 +#define TPC2_QM_SECTION 0x1000 +#define mmTPC2_CMDQ_BASE 0x7FFCE89000ull +#define TPC2_CMDQ_MAX_OFFSET 0x310 +#define TPC2_CMDQ_SECTION 0x37000 +#define mmTPC3_RTR_BASE 0x7FFCEC0000ull +#define TPC3_RTR_MAX_OFFSET 0x608 +#define TPC3_RTR_SECTION 0x4000 +#define mmTPC3_RD_REGULATOR_BASE 0x7FFCEC4000ull +#define TPC3_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC3_RD_REGULATOR_SECTION 0x1000 +#define mmTPC3_WR_REGULATOR_BASE 0x7FFCEC5000ull +#define TPC3_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC3_WR_REGULATOR_SECTION 0x1000 +#define mmTPC3_CFG_BASE 0x7FFCEC6000ull +#define TPC3_CFG_MAX_OFFSET 0xE30 +#define TPC3_CFG_SECTION 0x2000 +#define mmTPC3_QM_BASE 0x7FFCEC8000ull +#define TPC3_QM_MAX_OFFSET 0x310 +#define TPC3_QM_SECTION 0x1000 +#define mmTPC3_CMDQ_BASE 0x7FFCEC9000ull +#define TPC3_CMDQ_MAX_OFFSET 0x310 +#define TPC3_CMDQ_SECTION 0x37000 +#define mmTPC4_RTR_BASE 0x7FFCF00000ull +#define TPC4_RTR_MAX_OFFSET 0x608 +#define TPC4_RTR_SECTION 0x4000 +#define mmTPC4_RD_REGULATOR_BASE 0x7FFCF04000ull +#define TPC4_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC4_RD_REGULATOR_SECTION 0x1000 +#define mmTPC4_WR_REGULATOR_BASE 0x7FFCF05000ull +#define TPC4_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC4_WR_REGULATOR_SECTION 0x1000 +#define mmTPC4_CFG_BASE 0x7FFCF06000ull +#define TPC4_CFG_MAX_OFFSET 0xE30 +#define 
TPC4_CFG_SECTION 0x2000 +#define mmTPC4_QM_BASE 0x7FFCF08000ull +#define TPC4_QM_MAX_OFFSET 0x310 +#define TPC4_QM_SECTION 0x1000 +#define mmTPC4_CMDQ_BASE 0x7FFCF09000ull +#define TPC4_CMDQ_MAX_OFFSET 0x310 +#define TPC4_CMDQ_SECTION 0x37000 +#define mmTPC5_RTR_BASE 0x7FFCF40000ull +#define TPC5_RTR_MAX_OFFSET 0x608 +#define TPC5_RTR_SECTION 0x4000 +#define mmTPC5_RD_REGULATOR_BASE 0x7FFCF44000ull +#define TPC5_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC5_RD_REGULATOR_SECTION 0x1000 +#define mmTPC5_WR_REGULATOR_BASE 0x7FFCF45000ull +#define TPC5_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC5_WR_REGULATOR_SECTION 0x1000 +#define mmTPC5_CFG_BASE 0x7FFCF46000ull +#define TPC5_CFG_MAX_OFFSET 0xE30 +#define TPC5_CFG_SECTION 0x2000 +#define mmTPC5_QM_BASE 0x7FFCF48000ull +#define TPC5_QM_MAX_OFFSET 0x310 +#define TPC5_QM_SECTION 0x1000 +#define mmTPC5_CMDQ_BASE 0x7FFCF49000ull +#define TPC5_CMDQ_MAX_OFFSET 0x310 +#define TPC5_CMDQ_SECTION 0x37000 +#define mmTPC6_RTR_BASE 0x7FFCF80000ull +#define TPC6_RTR_MAX_OFFSET 0x608 +#define TPC6_RTR_SECTION 0x4000 +#define mmTPC6_RD_REGULATOR_BASE 0x7FFCF84000ull +#define TPC6_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC6_RD_REGULATOR_SECTION 0x1000 +#define mmTPC6_WR_REGULATOR_BASE 0x7FFCF85000ull +#define TPC6_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC6_WR_REGULATOR_SECTION 0x1000 +#define mmTPC6_CFG_BASE 0x7FFCF86000ull +#define TPC6_CFG_MAX_OFFSET 0xE30 +#define TPC6_CFG_SECTION 0x2000 +#define mmTPC6_QM_BASE 0x7FFCF88000ull +#define TPC6_QM_MAX_OFFSET 0x310 +#define TPC6_QM_SECTION 0x1000 +#define mmTPC6_CMDQ_BASE 0x7FFCF89000ull +#define TPC6_CMDQ_MAX_OFFSET 0x310 +#define TPC6_CMDQ_SECTION 0x37000 +#define mmTPC7_NRTR_BASE 0x7FFCFC0000ull +#define TPC7_NRTR_MAX_OFFSET 0x608 +#define TPC7_NRTR_SECTION 0x4000 +#define mmTPC7_RD_REGULATOR_BASE 0x7FFCFC4000ull +#define TPC7_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC7_RD_REGULATOR_SECTION 0x1000 +#define mmTPC7_WR_REGULATOR_BASE 0x7FFCFC5000ull +#define TPC7_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC7_WR_REGULATOR_SECTION 0x1000 +#define mmTPC7_CFG_BASE 0x7FFCFC6000ull +#define TPC7_CFG_MAX_OFFSET 0xE30 +#define TPC7_CFG_SECTION 0x2000 +#define mmTPC7_QM_BASE 0x7FFCFC8000ull +#define TPC7_QM_MAX_OFFSET 0x310 +#define TPC7_QM_SECTION 0x1000 +#define mmTPC7_CMDQ_BASE 0x7FFCFC9000ull +#define TPC7_CMDQ_MAX_OFFSET 0x310 +#define TPC7_CMDQ_SECTION 0x1037000 +#define mmMME_TOP_TABLE_BASE 0x7FFE000000ull +#define MME_TOP_TABLE_MAX_OFFSET 0x1000 +#define MME_TOP_TABLE_SECTION 0x1000 +#define mmMME0_RTR_FUNNEL_BASE 0x7FFE001000ull +#define MME0_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define MME0_RTR_FUNNEL_SECTION 0x40000 +#define mmMME1_RTR_FUNNEL_BASE 0x7FFE041000ull +#define MME1_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define MME1_RTR_FUNNEL_SECTION 0x1000 +#define mmMME1_SBA_STM_BASE 0x7FFE042000ull +#define MME1_SBA_STM_MAX_OFFSET 0x1000 +#define MME1_SBA_STM_SECTION 0x1000 +#define mmMME1_SBA_CTI_BASE 0x7FFE043000ull +#define MME1_SBA_CTI_MAX_OFFSET 0x1000 +#define MME1_SBA_CTI_SECTION 0x1000 +#define mmMME1_SBA_ETF_BASE 0x7FFE044000ull +#define MME1_SBA_ETF_MAX_OFFSET 0x1000 +#define MME1_SBA_ETF_SECTION 0x1000 +#define mmMME1_SBA_SPMU_BASE 0x7FFE045000ull +#define MME1_SBA_SPMU_MAX_OFFSET 0x1000 +#define MME1_SBA_SPMU_SECTION 0x1000 +#define mmMME1_SBA_CTI0_BASE 0x7FFE046000ull +#define MME1_SBA_CTI0_MAX_OFFSET 0x1000 +#define MME1_SBA_CTI0_SECTION 0x1000 +#define mmMME1_SBA_CTI1_BASE 0x7FFE047000ull +#define MME1_SBA_CTI1_MAX_OFFSET 0x1000 +#define MME1_SBA_CTI1_SECTION 0x1000 +#define mmMME1_SBA_BMON0_BASE 0x7FFE048000ull +#define 
MME1_SBA_BMON0_MAX_OFFSET 0x1000 +#define MME1_SBA_BMON0_SECTION 0x1000 +#define mmMME1_SBA_BMON1_BASE 0x7FFE049000ull +#define MME1_SBA_BMON1_MAX_OFFSET 0x1000 +#define MME1_SBA_BMON1_SECTION 0x38000 +#define mmMME2_RTR_FUNNEL_BASE 0x7FFE081000ull +#define MME2_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define MME2_RTR_FUNNEL_SECTION 0x40000 +#define mmMME3_RTR_FUNNEL_BASE 0x7FFE0C1000ull +#define MME3_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define MME3_RTR_FUNNEL_SECTION 0x1000 +#define mmMME3_SBB_STM_BASE 0x7FFE0C2000ull +#define MME3_SBB_STM_MAX_OFFSET 0x1000 +#define MME3_SBB_STM_SECTION 0x1000 +#define mmMME3_SBB_CTI_BASE 0x7FFE0C3000ull +#define MME3_SBB_CTI_MAX_OFFSET 0x1000 +#define MME3_SBB_CTI_SECTION 0x1000 +#define mmMME3_SBB_ETF_BASE 0x7FFE0C4000ull +#define MME3_SBB_ETF_MAX_OFFSET 0x1000 +#define MME3_SBB_ETF_SECTION 0x1000 +#define mmMME3_SBB_SPMU_BASE 0x7FFE0C5000ull +#define MME3_SBB_SPMU_MAX_OFFSET 0x1000 +#define MME3_SBB_SPMU_SECTION 0x1000 +#define mmMME3_SBB_CTI0_BASE 0x7FFE0C6000ull +#define MME3_SBB_CTI0_MAX_OFFSET 0x1000 +#define MME3_SBB_CTI0_SECTION 0x1000 +#define mmMME3_SBB_CTI1_BASE 0x7FFE0C7000ull +#define MME3_SBB_CTI1_MAX_OFFSET 0x1000 +#define MME3_SBB_CTI1_SECTION 0x1000 +#define mmMME3_SBB_BMON0_BASE 0x7FFE0C8000ull +#define MME3_SBB_BMON0_MAX_OFFSET 0x1000 +#define MME3_SBB_BMON0_SECTION 0x1000 +#define mmMME3_SBB_BMON1_BASE 0x7FFE0C9000ull +#define MME3_SBB_BMON1_MAX_OFFSET 0x1000 +#define MME3_SBB_BMON1_SECTION 0x38000 +#define mmMME4_RTR_FUNNEL_BASE 0x7FFE101000ull +#define MME4_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define MME4_RTR_FUNNEL_SECTION 0x1000 +#define mmMME4_WACS_STM_BASE 0x7FFE102000ull +#define MME4_WACS_STM_MAX_OFFSET 0x1000 +#define MME4_WACS_STM_SECTION 0x1000 +#define mmMME4_WACS_CTI_BASE 0x7FFE103000ull +#define MME4_WACS_CTI_MAX_OFFSET 0x1000 +#define MME4_WACS_CTI_SECTION 0x1000 +#define mmMME4_WACS_ETF_BASE 0x7FFE104000ull +#define MME4_WACS_ETF_MAX_OFFSET 0x1000 +#define MME4_WACS_ETF_SECTION 0x1000 +#define mmMME4_WACS_SPMU_BASE 0x7FFE105000ull +#define MME4_WACS_SPMU_MAX_OFFSET 0x1000 +#define MME4_WACS_SPMU_SECTION 0x1000 +#define mmMME4_WACS_CTI0_BASE 0x7FFE106000ull +#define MME4_WACS_CTI0_MAX_OFFSET 0x1000 +#define MME4_WACS_CTI0_SECTION 0x1000 +#define mmMME4_WACS_CTI1_BASE 0x7FFE107000ull +#define MME4_WACS_CTI1_MAX_OFFSET 0x1000 +#define MME4_WACS_CTI1_SECTION 0x1000 +#define mmMME4_WACS_BMON0_BASE 0x7FFE108000ull +#define MME4_WACS_BMON0_MAX_OFFSET 0x1000 +#define MME4_WACS_BMON0_SECTION 0x1000 +#define mmMME4_WACS_BMON1_BASE 0x7FFE109000ull +#define MME4_WACS_BMON1_MAX_OFFSET 0x1000 +#define MME4_WACS_BMON1_SECTION 0x1000 +#define mmMME4_WACS_BMON2_BASE 0x7FFE10A000ull +#define MME4_WACS_BMON2_MAX_OFFSET 0x1000 +#define MME4_WACS_BMON2_SECTION 0x1000 +#define mmMME4_WACS_BMON3_BASE 0x7FFE10B000ull +#define MME4_WACS_BMON3_MAX_OFFSET 0x1000 +#define MME4_WACS_BMON3_SECTION 0x1000 +#define mmMME4_WACS_BMON4_BASE 0x7FFE10C000ull +#define MME4_WACS_BMON4_MAX_OFFSET 0x1000 +#define MME4_WACS_BMON4_SECTION 0x1000 +#define mmMME4_WACS_BMON5_BASE 0x7FFE10D000ull +#define MME4_WACS_BMON5_MAX_OFFSET 0x1000 +#define MME4_WACS_BMON5_SECTION 0x1000 +#define mmMME4_WACS_BMON6_BASE 0x7FFE10E000ull +#define MME4_WACS_BMON6_MAX_OFFSET 0x1000 +#define MME4_WACS_BMON6_SECTION 0x4000 +#define mmMME4_WACS2_STM_BASE 0x7FFE112000ull +#define MME4_WACS2_STM_MAX_OFFSET 0x1000 +#define MME4_WACS2_STM_SECTION 0x1000 +#define mmMME4_WACS2_CTI_BASE 0x7FFE113000ull +#define MME4_WACS2_CTI_MAX_OFFSET 0x1000 +#define MME4_WACS2_CTI_SECTION 0x1000 +#define 
mmMME4_WACS2_ETF_BASE 0x7FFE114000ull +#define MME4_WACS2_ETF_MAX_OFFSET 0x1000 +#define MME4_WACS2_ETF_SECTION 0x1000 +#define mmMME4_WACS2_SPMU_BASE 0x7FFE115000ull +#define MME4_WACS2_SPMU_MAX_OFFSET 0x1000 +#define MME4_WACS2_SPMU_SECTION 0x1000 +#define mmMME4_WACS2_CTI0_BASE 0x7FFE116000ull +#define MME4_WACS2_CTI0_MAX_OFFSET 0x1000 +#define MME4_WACS2_CTI0_SECTION 0x1000 +#define mmMME4_WACS2_CTI1_BASE 0x7FFE117000ull +#define MME4_WACS2_CTI1_MAX_OFFSET 0x1000 +#define MME4_WACS2_CTI1_SECTION 0x1000 +#define mmMME4_WACS2_BMON0_BASE 0x7FFE118000ull +#define MME4_WACS2_BMON0_MAX_OFFSET 0x1000 +#define MME4_WACS2_BMON0_SECTION 0x1000 +#define mmMME4_WACS2_BMON1_BASE 0x7FFE119000ull +#define MME4_WACS2_BMON1_MAX_OFFSET 0x1000 +#define MME4_WACS2_BMON1_SECTION 0x1000 +#define mmMME4_WACS2_BMON2_BASE 0x7FFE11A000ull +#define MME4_WACS2_BMON2_MAX_OFFSET 0x1000 +#define MME4_WACS2_BMON2_SECTION 0x27000 +#define mmMME5_RTR_FUNNEL_BASE 0x7FFE141000ull +#define MME5_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define MME5_RTR_FUNNEL_SECTION 0x2BF000 +#define mmDMA_ROM_TABLE_BASE 0x7FFE400000ull +#define DMA_ROM_TABLE_MAX_OFFSET 0x1000 +#define DMA_ROM_TABLE_SECTION 0x1000 +#define mmDMA_CH_0_CS_STM_BASE 0x7FFE401000ull +#define DMA_CH_0_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_0_CS_STM_SECTION 0x1000 +#define mmDMA_CH_0_CS_CTI_BASE 0x7FFE402000ull +#define DMA_CH_0_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_0_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_0_CS_ETF_BASE 0x7FFE403000ull +#define DMA_CH_0_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_0_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_0_CS_SPMU_BASE 0x7FFE404000ull +#define DMA_CH_0_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_0_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_0_BMON_CTI_BASE 0x7FFE405000ull +#define DMA_CH_0_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_0_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_0_USER_CTI_BASE 0x7FFE406000ull +#define DMA_CH_0_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_0_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_0_BMON_0_BASE 0x7FFE407000ull +#define DMA_CH_0_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_0_BMON_0_SECTION 0x1000 +#define mmDMA_CH_0_BMON_1_BASE 0x7FFE408000ull +#define DMA_CH_0_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_0_BMON_1_SECTION 0x9000 +#define mmDMA_CH_1_CS_STM_BASE 0x7FFE411000ull +#define DMA_CH_1_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_1_CS_STM_SECTION 0x1000 +#define mmDMA_CH_1_CS_CTI_BASE 0x7FFE412000ull +#define DMA_CH_1_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_1_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_1_CS_ETF_BASE 0x7FFE413000ull +#define DMA_CH_1_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_1_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_1_CS_SPMU_BASE 0x7FFE414000ull +#define DMA_CH_1_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_1_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_1_BMON_CTI_BASE 0x7FFE415000ull +#define DMA_CH_1_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_1_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_1_USER_CTI_BASE 0x7FFE416000ull +#define DMA_CH_1_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_1_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_1_BMON_0_BASE 0x7FFE417000ull +#define DMA_CH_1_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_1_BMON_0_SECTION 0x1000 +#define mmDMA_CH_1_BMON_1_BASE 0x7FFE418000ull +#define DMA_CH_1_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_1_BMON_1_SECTION 0x9000 +#define mmDMA_CH_2_CS_STM_BASE 0x7FFE421000ull +#define DMA_CH_2_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_2_CS_STM_SECTION 0x1000 +#define mmDMA_CH_2_CS_CTI_BASE 0x7FFE422000ull +#define DMA_CH_2_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_2_CS_CTI_SECTION 
0x1000 +#define mmDMA_CH_2_CS_ETF_BASE 0x7FFE423000ull +#define DMA_CH_2_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_2_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_2_CS_SPMU_BASE 0x7FFE424000ull +#define DMA_CH_2_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_2_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_2_BMON_CTI_BASE 0x7FFE425000ull +#define DMA_CH_2_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_2_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_2_USER_CTI_BASE 0x7FFE426000ull +#define DMA_CH_2_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_2_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_2_BMON_0_BASE 0x7FFE427000ull +#define DMA_CH_2_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_2_BMON_0_SECTION 0x1000 +#define mmDMA_CH_2_BMON_1_BASE 0x7FFE428000ull +#define DMA_CH_2_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_2_BMON_1_SECTION 0x9000 +#define mmDMA_CH_3_CS_STM_BASE 0x7FFE431000ull +#define DMA_CH_3_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_3_CS_STM_SECTION 0x1000 +#define mmDMA_CH_3_CS_CTI_BASE 0x7FFE432000ull +#define DMA_CH_3_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_3_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_3_CS_ETF_BASE 0x7FFE433000ull +#define DMA_CH_3_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_3_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_3_CS_SPMU_BASE 0x7FFE434000ull +#define DMA_CH_3_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_3_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_3_BMON_CTI_BASE 0x7FFE435000ull +#define DMA_CH_3_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_3_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_3_USER_CTI_BASE 0x7FFE436000ull +#define DMA_CH_3_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_3_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_3_BMON_0_BASE 0x7FFE437000ull +#define DMA_CH_3_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_3_BMON_0_SECTION 0x1000 +#define mmDMA_CH_3_BMON_1_BASE 0x7FFE438000ull +#define DMA_CH_3_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_3_BMON_1_SECTION 0x9000 +#define mmDMA_CH_4_CS_STM_BASE 0x7FFE441000ull +#define DMA_CH_4_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_4_CS_STM_SECTION 0x1000 +#define mmDMA_CH_4_CS_CTI_BASE 0x7FFE442000ull +#define DMA_CH_4_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_4_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_4_CS_ETF_BASE 0x7FFE443000ull +#define DMA_CH_4_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_4_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_4_CS_SPMU_BASE 0x7FFE444000ull +#define DMA_CH_4_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_4_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_4_BMON_CTI_BASE 0x7FFE445000ull +#define DMA_CH_4_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_4_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_4_USER_CTI_BASE 0x7FFE446000ull +#define DMA_CH_4_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_4_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_4_BMON_0_BASE 0x7FFE447000ull +#define DMA_CH_4_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_4_BMON_0_SECTION 0x1000 +#define mmDMA_CH_4_BMON_1_BASE 0x7FFE448000ull +#define DMA_CH_4_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_4_BMON_1_SECTION 0x8000 +#define mmDMA_CH_FUNNEL_6_1_BASE 0x7FFE450000ull +#define DMA_CH_FUNNEL_6_1_MAX_OFFSET 0x1000 +#define DMA_CH_FUNNEL_6_1_SECTION 0x11000 +#define mmDMA_MACRO_CS_STM_BASE 0x7FFE461000ull +#define DMA_MACRO_CS_STM_MAX_OFFSET 0x1000 +#define DMA_MACRO_CS_STM_SECTION 0x1000 +#define mmDMA_MACRO_CS_CTI_BASE 0x7FFE462000ull +#define DMA_MACRO_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_MACRO_CS_CTI_SECTION 0x1000 +#define mmDMA_MACRO_CS_ETF_BASE 0x7FFE463000ull +#define DMA_MACRO_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_MACRO_CS_ETF_SECTION 0x1000 +#define mmDMA_MACRO_CS_SPMU_BASE 0x7FFE464000ull +#define 
DMA_MACRO_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_MACRO_CS_SPMU_SECTION 0x1000 +#define mmDMA_MACRO_BMON_CTI_BASE 0x7FFE465000ull +#define DMA_MACRO_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_CTI_SECTION 0x1000 +#define mmDMA_MACRO_USER_CTI_BASE 0x7FFE466000ull +#define DMA_MACRO_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_MACRO_USER_CTI_SECTION 0x1000 +#define mmDMA_MACRO_BMON_0_BASE 0x7FFE467000ull +#define DMA_MACRO_BMON_0_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_0_SECTION 0x1000 +#define mmDMA_MACRO_BMON_1_BASE 0x7FFE468000ull +#define DMA_MACRO_BMON_1_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_1_SECTION 0x1000 +#define mmDMA_MACRO_BMON_2_BASE 0x7FFE469000ull +#define DMA_MACRO_BMON_2_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_2_SECTION 0x1000 +#define mmDMA_MACRO_BMON_3_BASE 0x7FFE46A000ull +#define DMA_MACRO_BMON_3_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_3_SECTION 0x1000 +#define mmDMA_MACRO_BMON_4_BASE 0x7FFE46B000ull +#define DMA_MACRO_BMON_4_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_4_SECTION 0x1000 +#define mmDMA_MACRO_BMON_5_BASE 0x7FFE46C000ull +#define DMA_MACRO_BMON_5_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_5_SECTION 0x1000 +#define mmDMA_MACRO_BMON_6_BASE 0x7FFE46D000ull +#define DMA_MACRO_BMON_6_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_6_SECTION 0x1000 +#define mmDMA_MACRO_BMON_7_BASE 0x7FFE46E000ull +#define DMA_MACRO_BMON_7_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_7_SECTION 0x2000 +#define mmDMA_MACRO_FUNNEL_3_1_BASE 0x7FFE470000ull +#define DMA_MACRO_FUNNEL_3_1_MAX_OFFSET 0x1000 +#define DMA_MACRO_FUNNEL_3_1_SECTION 0x10000 +#define mmCPU_ROM_TABLE_BASE 0x7FFE480000ull +#define CPU_ROM_TABLE_MAX_OFFSET 0x1000 +#define CPU_ROM_TABLE_SECTION 0x1000 +#define mmCPU_ETF_0_BASE 0x7FFE481000ull +#define CPU_ETF_0_MAX_OFFSET 0x1000 +#define CPU_ETF_0_SECTION 0x1000 +#define mmCPU_ETF_1_BASE 0x7FFE482000ull +#define CPU_ETF_1_MAX_OFFSET 0x1000 +#define CPU_ETF_1_SECTION 0x2000 +#define mmCPU_CTI_BASE 0x7FFE484000ull +#define CPU_CTI_MAX_OFFSET 0x1000 +#define CPU_CTI_SECTION 0x1000 +#define mmCPU_FUNNEL_BASE 0x7FFE485000ull +#define CPU_FUNNEL_MAX_OFFSET 0x1000 +#define CPU_FUNNEL_SECTION 0x1000 +#define mmCPU_STM_BASE 0x7FFE486000ull +#define CPU_STM_MAX_OFFSET 0x1000 +#define CPU_STM_SECTION 0x1000 +#define mmCPU_CTI_TRACE_BASE 0x7FFE487000ull +#define CPU_CTI_TRACE_MAX_OFFSET 0x1000 +#define CPU_CTI_TRACE_SECTION 0x1000 +#define mmCPU_ETF_TRACE_BASE 0x7FFE488000ull +#define CPU_ETF_TRACE_MAX_OFFSET 0x1000 +#define CPU_ETF_TRACE_SECTION 0x1000 +#define mmCPU_WR_BMON_BASE 0x7FFE489000ull +#define CPU_WR_BMON_MAX_OFFSET 0x1000 +#define CPU_WR_BMON_SECTION 0x1000 +#define mmCPU_RD_BMON_BASE 0x7FFE48A000ull +#define CPU_RD_BMON_MAX_OFFSET 0x1000 +#define CPU_RD_BMON_SECTION 0x37000 +#define mmMMU_CS_STM_BASE 0x7FFE4C1000ull +#define MMU_CS_STM_MAX_OFFSET 0x1000 +#define MMU_CS_STM_SECTION 0x1000 +#define mmMMU_CS_CTI_BASE 0x7FFE4C2000ull +#define MMU_CS_CTI_MAX_OFFSET 0x1000 +#define MMU_CS_CTI_SECTION 0x1000 +#define mmMMU_CS_ETF_BASE 0x7FFE4C3000ull +#define MMU_CS_ETF_MAX_OFFSET 0x1000 +#define MMU_CS_ETF_SECTION 0x1000 +#define mmMMU_CS_SPMU_BASE 0x7FFE4C4000ull +#define MMU_CS_SPMU_MAX_OFFSET 0x1000 +#define MMU_CS_SPMU_SECTION 0x1000 +#define mmMMU_BMON_CTI_BASE 0x7FFE4C5000ull +#define MMU_BMON_CTI_MAX_OFFSET 0x1000 +#define MMU_BMON_CTI_SECTION 0x1000 +#define mmMMU_USER_CTI_BASE 0x7FFE4C6000ull +#define MMU_USER_CTI_MAX_OFFSET 0x1000 +#define MMU_USER_CTI_SECTION 0x1000 +#define mmMMU_BMON_0_BASE 0x7FFE4C7000ull +#define MMU_BMON_0_MAX_OFFSET 0x1000 +#define 
MMU_BMON_0_SECTION 0x1000 +#define mmMMU_BMON_1_BASE 0x7FFE4C8000ull +#define MMU_BMON_1_MAX_OFFSET 0x1000 +#define MMU_BMON_1_SECTION 0x338000 +#define mmCA53_BASE 0x7FFE800000ull +#define CA53_MAX_OFFSET 0x1000 +#define CA53_SECTION 0x400000 +#define mmPCI_ROM_TABLE_BASE 0x7FFEC00000ull +#define PCI_ROM_TABLE_MAX_OFFSET 0x1000 +#define PCI_ROM_TABLE_SECTION 0x1000 +#define mmPCIE_STM_BASE 0x7FFEC01000ull +#define PCIE_STM_MAX_OFFSET 0x1000 +#define PCIE_STM_SECTION 0x1000 +#define mmPCIE_ETF_BASE 0x7FFEC02000ull +#define PCIE_ETF_MAX_OFFSET 0x1000 +#define PCIE_ETF_SECTION 0x1000 +#define mmPCIE_CTI_0_BASE 0x7FFEC03000ull +#define PCIE_CTI_0_MAX_OFFSET 0x1000 +#define PCIE_CTI_0_SECTION 0x1000 +#define mmPCIE_SPMU_BASE 0x7FFEC04000ull +#define PCIE_SPMU_MAX_OFFSET 0x1000 +#define PCIE_SPMU_SECTION 0x1000 +#define mmPCIE_CTI_1_BASE 0x7FFEC05000ull +#define PCIE_CTI_1_MAX_OFFSET 0x1000 +#define PCIE_CTI_1_SECTION 0x1000 +#define mmPCIE_FUNNEL_BASE 0x7FFEC06000ull +#define PCIE_FUNNEL_MAX_OFFSET 0x1000 +#define PCIE_FUNNEL_SECTION 0x1000 +#define mmPCIE_BMON_MSTR_WR_BASE 0x7FFEC07000ull +#define PCIE_BMON_MSTR_WR_MAX_OFFSET 0x1000 +#define PCIE_BMON_MSTR_WR_SECTION 0x1000 +#define mmPCIE_BMON_MSTR_RD_BASE 0x7FFEC08000ull +#define PCIE_BMON_MSTR_RD_MAX_OFFSET 0x1000 +#define PCIE_BMON_MSTR_RD_SECTION 0x1000 +#define mmPCIE_BMON_SLV_WR_BASE 0x7FFEC09000ull +#define PCIE_BMON_SLV_WR_MAX_OFFSET 0x1000 +#define PCIE_BMON_SLV_WR_SECTION 0x1000 +#define mmPCIE_BMON_SLV_RD_BASE 0x7FFEC0A000ull +#define PCIE_BMON_SLV_RD_MAX_OFFSET 0x1000 +#define PCIE_BMON_SLV_RD_SECTION 0x36000 +#define mmPSOC_CTI_BASE 0x7FFEC40000ull +#define PSOC_CTI_MAX_OFFSET 0x1000 +#define PSOC_CTI_SECTION 0x1000 +#define mmPSOC_STM_BASE 0x7FFEC41000ull +#define PSOC_STM_MAX_OFFSET 0x1000 +#define PSOC_STM_SECTION 0x1000 +#define mmPSOC_FUNNEL_BASE 0x7FFEC42000ull +#define PSOC_FUNNEL_MAX_OFFSET 0x1000 +#define PSOC_FUNNEL_SECTION 0x1000 +#define mmPSOC_ETR_BASE 0x7FFEC43000ull +#define PSOC_ETR_MAX_OFFSET 0x1000 +#define PSOC_ETR_SECTION 0x1000 +#define mmPSOC_ETF_BASE 0x7FFEC44000ull +#define PSOC_ETF_MAX_OFFSET 0x1000 +#define PSOC_ETF_SECTION 0x1000 +#define mmPSOC_TS_CTI_BASE 0x7FFEC45000ull +#define PSOC_TS_CTI_MAX_OFFSET 0x1000 +#define PSOC_TS_CTI_SECTION 0xB000 +#define mmTOP_ROM_TABLE_BASE 0x7FFEC50000ull +#define TOP_ROM_TABLE_MAX_OFFSET 0x1000 +#define TOP_ROM_TABLE_SECTION 0x1F0000 +#define mmTPC1_RTR_FUNNEL_BASE 0x7FFEE40000ull +#define TPC1_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define TPC1_RTR_FUNNEL_SECTION 0x40000 +#define mmTPC2_RTR_FUNNEL_BASE 0x7FFEE80000ull +#define TPC2_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define TPC2_RTR_FUNNEL_SECTION 0x40000 +#define mmTPC3_RTR_FUNNEL_BASE 0x7FFEEC0000ull +#define TPC3_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define TPC3_RTR_FUNNEL_SECTION 0x40000 +#define mmTPC4_RTR_FUNNEL_BASE 0x7FFEF00000ull +#define TPC4_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define TPC4_RTR_FUNNEL_SECTION 0x40000 +#define mmTPC5_RTR_FUNNEL_BASE 0x7FFEF40000ull +#define TPC5_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define TPC5_RTR_FUNNEL_SECTION 0x40000 +#define mmTPC6_RTR_FUNNEL_BASE 0x7FFEF80000ull +#define TPC6_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define TPC6_RTR_FUNNEL_SECTION 0x81000 +#define mmTPC0_EML_SPMU_BASE 0x7FFF001000ull +#define TPC0_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC0_EML_SPMU_SECTION 0x1000 +#define mmTPC0_EML_ETF_BASE 0x7FFF002000ull +#define TPC0_EML_ETF_MAX_OFFSET 0x1000 +#define TPC0_EML_ETF_SECTION 0x1000 +#define mmTPC0_EML_STM_BASE 0x7FFF003000ull +#define TPC0_EML_STM_MAX_OFFSET 0x1000 +#define TPC0_EML_STM_SECTION 
0x1000 +#define mmTPC0_EML_ETM_R4_BASE 0x7FFF004000ull +#define TPC0_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC0_EML_ETM_R4_SECTION 0x1000 +#define mmTPC0_EML_CTI_BASE 0x7FFF005000ull +#define TPC0_EML_CTI_MAX_OFFSET 0x1000 +#define TPC0_EML_CTI_SECTION 0x1000 +#define mmTPC0_EML_FUNNEL_BASE 0x7FFF006000ull +#define TPC0_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC0_EML_FUNNEL_SECTION 0x1000 +#define mmTPC0_EML_BUSMON_0_BASE 0x7FFF007000ull +#define TPC0_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC0_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC0_EML_BUSMON_1_BASE 0x7FFF008000ull +#define TPC0_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC0_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC0_EML_BUSMON_2_BASE 0x7FFF009000ull +#define TPC0_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC0_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC0_EML_BUSMON_3_BASE 0x7FFF00A000ull +#define TPC0_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC0_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC0_EML_CFG_BASE 0x7FFF040000ull +#define TPC0_EML_CFG_MAX_OFFSET 0x338 +#define TPC0_EML_CFG_SECTION 0x1BF000 +#define mmTPC0_EML_CS_BASE 0x7FFF1FF000ull +#define TPC0_EML_CS_MAX_OFFSET 0x1000 +#define TPC0_EML_CS_SECTION 0x2000 +#define mmTPC1_EML_SPMU_BASE 0x7FFF201000ull +#define TPC1_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC1_EML_SPMU_SECTION 0x1000 +#define mmTPC1_EML_ETF_BASE 0x7FFF202000ull +#define TPC1_EML_ETF_MAX_OFFSET 0x1000 +#define TPC1_EML_ETF_SECTION 0x1000 +#define mmTPC1_EML_STM_BASE 0x7FFF203000ull +#define TPC1_EML_STM_MAX_OFFSET 0x1000 +#define TPC1_EML_STM_SECTION 0x1000 +#define mmTPC1_EML_ETM_R4_BASE 0x7FFF204000ull +#define TPC1_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC1_EML_ETM_R4_SECTION 0x1000 +#define mmTPC1_EML_CTI_BASE 0x7FFF205000ull +#define TPC1_EML_CTI_MAX_OFFSET 0x1000 +#define TPC1_EML_CTI_SECTION 0x1000 +#define mmTPC1_EML_FUNNEL_BASE 0x7FFF206000ull +#define TPC1_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC1_EML_FUNNEL_SECTION 0x1000 +#define mmTPC1_EML_BUSMON_0_BASE 0x7FFF207000ull +#define TPC1_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC1_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC1_EML_BUSMON_1_BASE 0x7FFF208000ull +#define TPC1_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC1_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC1_EML_BUSMON_2_BASE 0x7FFF209000ull +#define TPC1_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC1_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC1_EML_BUSMON_3_BASE 0x7FFF20A000ull +#define TPC1_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC1_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC1_EML_CFG_BASE 0x7FFF240000ull +#define TPC1_EML_CFG_MAX_OFFSET 0x338 +#define TPC1_EML_CFG_SECTION 0x1BF000 +#define mmTPC1_EML_CS_BASE 0x7FFF3FF000ull +#define TPC1_EML_CS_MAX_OFFSET 0x1000 +#define TPC1_EML_CS_SECTION 0x2000 +#define mmTPC2_EML_SPMU_BASE 0x7FFF401000ull +#define TPC2_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC2_EML_SPMU_SECTION 0x1000 +#define mmTPC2_EML_ETF_BASE 0x7FFF402000ull +#define TPC2_EML_ETF_MAX_OFFSET 0x1000 +#define TPC2_EML_ETF_SECTION 0x1000 +#define mmTPC2_EML_STM_BASE 0x7FFF403000ull +#define TPC2_EML_STM_MAX_OFFSET 0x1000 +#define TPC2_EML_STM_SECTION 0x1000 +#define mmTPC2_EML_ETM_R4_BASE 0x7FFF404000ull +#define TPC2_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC2_EML_ETM_R4_SECTION 0x1000 +#define mmTPC2_EML_CTI_BASE 0x7FFF405000ull +#define TPC2_EML_CTI_MAX_OFFSET 0x1000 +#define TPC2_EML_CTI_SECTION 0x1000 +#define mmTPC2_EML_FUNNEL_BASE 0x7FFF406000ull +#define TPC2_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC2_EML_FUNNEL_SECTION 0x1000 +#define mmTPC2_EML_BUSMON_0_BASE 0x7FFF407000ull +#define 
TPC2_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC2_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC2_EML_BUSMON_1_BASE 0x7FFF408000ull +#define TPC2_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC2_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC2_EML_BUSMON_2_BASE 0x7FFF409000ull +#define TPC2_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC2_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC2_EML_BUSMON_3_BASE 0x7FFF40A000ull +#define TPC2_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC2_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC2_EML_CFG_BASE 0x7FFF440000ull +#define TPC2_EML_CFG_MAX_OFFSET 0x338 +#define TPC2_EML_CFG_SECTION 0x1BF000 +#define mmTPC2_EML_CS_BASE 0x7FFF5FF000ull +#define TPC2_EML_CS_MAX_OFFSET 0x1000 +#define TPC2_EML_CS_SECTION 0x2000 +#define mmTPC3_EML_SPMU_BASE 0x7FFF601000ull +#define TPC3_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC3_EML_SPMU_SECTION 0x1000 +#define mmTPC3_EML_ETF_BASE 0x7FFF602000ull +#define TPC3_EML_ETF_MAX_OFFSET 0x1000 +#define TPC3_EML_ETF_SECTION 0x1000 +#define mmTPC3_EML_STM_BASE 0x7FFF603000ull +#define TPC3_EML_STM_MAX_OFFSET 0x1000 +#define TPC3_EML_STM_SECTION 0x1000 +#define mmTPC3_EML_ETM_R4_BASE 0x7FFF604000ull +#define TPC3_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC3_EML_ETM_R4_SECTION 0x1000 +#define mmTPC3_EML_CTI_BASE 0x7FFF605000ull +#define TPC3_EML_CTI_MAX_OFFSET 0x1000 +#define TPC3_EML_CTI_SECTION 0x1000 +#define mmTPC3_EML_FUNNEL_BASE 0x7FFF606000ull +#define TPC3_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC3_EML_FUNNEL_SECTION 0x1000 +#define mmTPC3_EML_BUSMON_0_BASE 0x7FFF607000ull +#define TPC3_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC3_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC3_EML_BUSMON_1_BASE 0x7FFF608000ull +#define TPC3_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC3_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC3_EML_BUSMON_2_BASE 0x7FFF609000ull +#define TPC3_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC3_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC3_EML_BUSMON_3_BASE 0x7FFF60A000ull +#define TPC3_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC3_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC3_EML_CFG_BASE 0x7FFF640000ull +#define TPC3_EML_CFG_MAX_OFFSET 0x338 +#define TPC3_EML_CFG_SECTION 0x1BF000 +#define mmTPC3_EML_CS_BASE 0x7FFF7FF000ull +#define TPC3_EML_CS_MAX_OFFSET 0x1000 +#define TPC3_EML_CS_SECTION 0x2000 +#define mmTPC4_EML_SPMU_BASE 0x7FFF801000ull +#define TPC4_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC4_EML_SPMU_SECTION 0x1000 +#define mmTPC4_EML_ETF_BASE 0x7FFF802000ull +#define TPC4_EML_ETF_MAX_OFFSET 0x1000 +#define TPC4_EML_ETF_SECTION 0x1000 +#define mmTPC4_EML_STM_BASE 0x7FFF803000ull +#define TPC4_EML_STM_MAX_OFFSET 0x1000 +#define TPC4_EML_STM_SECTION 0x1000 +#define mmTPC4_EML_ETM_R4_BASE 0x7FFF804000ull +#define TPC4_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC4_EML_ETM_R4_SECTION 0x1000 +#define mmTPC4_EML_CTI_BASE 0x7FFF805000ull +#define TPC4_EML_CTI_MAX_OFFSET 0x1000 +#define TPC4_EML_CTI_SECTION 0x1000 +#define mmTPC4_EML_FUNNEL_BASE 0x7FFF806000ull +#define TPC4_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC4_EML_FUNNEL_SECTION 0x1000 +#define mmTPC4_EML_BUSMON_0_BASE 0x7FFF807000ull +#define TPC4_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC4_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC4_EML_BUSMON_1_BASE 0x7FFF808000ull +#define TPC4_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC4_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC4_EML_BUSMON_2_BASE 0x7FFF809000ull +#define TPC4_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC4_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC4_EML_BUSMON_3_BASE 0x7FFF80A000ull +#define TPC4_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define 
TPC4_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC4_EML_CFG_BASE 0x7FFF840000ull +#define TPC4_EML_CFG_MAX_OFFSET 0x338 +#define TPC4_EML_CFG_SECTION 0x1BF000 +#define mmTPC4_EML_CS_BASE 0x7FFF9FF000ull +#define TPC4_EML_CS_MAX_OFFSET 0x1000 +#define TPC4_EML_CS_SECTION 0x2000 +#define mmTPC5_EML_SPMU_BASE 0x7FFFA01000ull +#define TPC5_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC5_EML_SPMU_SECTION 0x1000 +#define mmTPC5_EML_ETF_BASE 0x7FFFA02000ull +#define TPC5_EML_ETF_MAX_OFFSET 0x1000 +#define TPC5_EML_ETF_SECTION 0x1000 +#define mmTPC5_EML_STM_BASE 0x7FFFA03000ull +#define TPC5_EML_STM_MAX_OFFSET 0x1000 +#define TPC5_EML_STM_SECTION 0x1000 +#define mmTPC5_EML_ETM_R4_BASE 0x7FFFA04000ull +#define TPC5_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC5_EML_ETM_R4_SECTION 0x1000 +#define mmTPC5_EML_CTI_BASE 0x7FFFA05000ull +#define TPC5_EML_CTI_MAX_OFFSET 0x1000 +#define TPC5_EML_CTI_SECTION 0x1000 +#define mmTPC5_EML_FUNNEL_BASE 0x7FFFA06000ull +#define TPC5_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC5_EML_FUNNEL_SECTION 0x1000 +#define mmTPC5_EML_BUSMON_0_BASE 0x7FFFA07000ull +#define TPC5_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC5_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC5_EML_BUSMON_1_BASE 0x7FFFA08000ull +#define TPC5_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC5_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC5_EML_BUSMON_2_BASE 0x7FFFA09000ull +#define TPC5_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC5_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC5_EML_BUSMON_3_BASE 0x7FFFA0A000ull +#define TPC5_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC5_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC5_EML_CFG_BASE 0x7FFFA40000ull +#define TPC5_EML_CFG_MAX_OFFSET 0x338 +#define TPC5_EML_CFG_SECTION 0x1BF000 +#define mmTPC5_EML_CS_BASE 0x7FFFBFF000ull +#define TPC5_EML_CS_MAX_OFFSET 0x1000 +#define TPC5_EML_CS_SECTION 0x2000 +#define mmTPC6_EML_SPMU_BASE 0x7FFFC01000ull +#define TPC6_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC6_EML_SPMU_SECTION 0x1000 +#define mmTPC6_EML_ETF_BASE 0x7FFFC02000ull +#define TPC6_EML_ETF_MAX_OFFSET 0x1000 +#define TPC6_EML_ETF_SECTION 0x1000 +#define mmTPC6_EML_STM_BASE 0x7FFFC03000ull +#define TPC6_EML_STM_MAX_OFFSET 0x1000 +#define TPC6_EML_STM_SECTION 0x1000 +#define mmTPC6_EML_ETM_R4_BASE 0x7FFFC04000ull +#define TPC6_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC6_EML_ETM_R4_SECTION 0x1000 +#define mmTPC6_EML_CTI_BASE 0x7FFFC05000ull +#define TPC6_EML_CTI_MAX_OFFSET 0x1000 +#define TPC6_EML_CTI_SECTION 0x1000 +#define mmTPC6_EML_FUNNEL_BASE 0x7FFFC06000ull +#define TPC6_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC6_EML_FUNNEL_SECTION 0x1000 +#define mmTPC6_EML_BUSMON_0_BASE 0x7FFFC07000ull +#define TPC6_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC6_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC6_EML_BUSMON_1_BASE 0x7FFFC08000ull +#define TPC6_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC6_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC6_EML_BUSMON_2_BASE 0x7FFFC09000ull +#define TPC6_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC6_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC6_EML_BUSMON_3_BASE 0x7FFFC0A000ull +#define TPC6_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC6_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC6_EML_CFG_BASE 0x7FFFC40000ull +#define TPC6_EML_CFG_MAX_OFFSET 0x338 +#define TPC6_EML_CFG_SECTION 0x1BF000 +#define mmTPC6_EML_CS_BASE 0x7FFFDFF000ull +#define TPC6_EML_CS_MAX_OFFSET 0x1000 +#define TPC6_EML_CS_SECTION 0x2000 +#define mmTPC7_EML_SPMU_BASE 0x7FFFE01000ull +#define TPC7_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC7_EML_SPMU_SECTION 0x1000 +#define mmTPC7_EML_ETF_BASE 0x7FFFE02000ull +#define 
TPC7_EML_ETF_MAX_OFFSET 0x1000 +#define TPC7_EML_ETF_SECTION 0x1000 +#define mmTPC7_EML_STM_BASE 0x7FFFE03000ull +#define TPC7_EML_STM_MAX_OFFSET 0x1000 +#define TPC7_EML_STM_SECTION 0x1000 +#define mmTPC7_EML_ETM_R4_BASE 0x7FFFE04000ull +#define TPC7_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC7_EML_ETM_R4_SECTION 0x1000 +#define mmTPC7_EML_CTI_BASE 0x7FFFE05000ull +#define TPC7_EML_CTI_MAX_OFFSET 0x1000 +#define TPC7_EML_CTI_SECTION 0x1000 +#define mmTPC7_EML_FUNNEL_BASE 0x7FFFE06000ull +#define TPC7_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC7_EML_FUNNEL_SECTION 0x1000 +#define mmTPC7_EML_BUSMON_0_BASE 0x7FFFE07000ull +#define TPC7_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC7_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC7_EML_BUSMON_1_BASE 0x7FFFE08000ull +#define TPC7_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC7_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC7_EML_BUSMON_2_BASE 0x7FFFE09000ull +#define TPC7_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC7_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC7_EML_BUSMON_3_BASE 0x7FFFE0A000ull +#define TPC7_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC7_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC7_EML_CFG_BASE 0x7FFFE40000ull +#define TPC7_EML_CFG_MAX_OFFSET 0x338 +#define TPC7_EML_CFG_SECTION 0x1BF000 +#define mmTPC7_EML_CS_BASE 0x7FFFFFF000ull +#define TPC7_EML_CS_MAX_OFFSET 0x1000 + +#endif /* GOYA_BLOCKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h new file mode 100644 index 000000000..067489bd0 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h @@ -0,0 +1,264 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef ASIC_REG_GOYA_MASKS_H_ +#define ASIC_REG_GOYA_MASKS_H_ + +#include "goya_regs.h" + +/* Useful masks for bits in various registers */ +#define QMAN_DMA_ENABLE (\ + (1 << DMA_QM_0_GLBL_CFG0_PQF_EN_SHIFT) | \ + (1 << DMA_QM_0_GLBL_CFG0_CQF_EN_SHIFT) | \ + (1 << DMA_QM_0_GLBL_CFG0_CP_EN_SHIFT) | \ + (1 << DMA_QM_0_GLBL_CFG0_DMA_EN_SHIFT)) + +#define QMAN_DMA_FULLY_TRUSTED (\ + (1 << DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_DMA_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT)) + +#define QMAN_DMA_PARTLY_TRUSTED (\ + (1 << DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT)) + +#define QMAN_DMA_STOP (\ + (1 << DMA_QM_0_GLBL_CFG1_PQF_STOP_SHIFT) | \ + (1 << DMA_QM_0_GLBL_CFG1_CQF_STOP_SHIFT) | \ + (1 << DMA_QM_0_GLBL_CFG1_CP_STOP_SHIFT) | \ + (1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT)) + +#define QMAN_DMA_IS_STOPPED (\ + (1 << DMA_QM_0_GLBL_STS0_PQF_IS_STOP_SHIFT) | \ + (1 << DMA_QM_0_GLBL_STS0_CQF_IS_STOP_SHIFT) | \ + (1 << DMA_QM_0_GLBL_STS0_CP_IS_STOP_SHIFT) | \ + (1 << DMA_QM_0_GLBL_STS0_DMA_IS_STOP_SHIFT)) + +#define QMAN_DMA_ERR_MSG_EN (\ + (1 << DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \ + (1 << DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \ + (1 << DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \ + (1 << 
DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \ + (1 << DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \ + (1 << DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \ + (1 << DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT)) + +#define QMAN_MME_ENABLE (\ + (1 << MME_QM_GLBL_CFG0_PQF_EN_SHIFT) | \ + (1 << MME_QM_GLBL_CFG0_CQF_EN_SHIFT) | \ + (1 << MME_QM_GLBL_CFG0_CP_EN_SHIFT)) + +#define CMDQ_MME_ENABLE (\ + (1 << MME_CMDQ_GLBL_CFG0_CQF_EN_SHIFT) | \ + (1 << MME_CMDQ_GLBL_CFG0_CP_EN_SHIFT)) + +#define QMAN_MME_STOP (\ + (1 << MME_QM_GLBL_CFG1_PQF_STOP_SHIFT) | \ + (1 << MME_QM_GLBL_CFG1_CQF_STOP_SHIFT) | \ + (1 << MME_QM_GLBL_CFG1_CP_STOP_SHIFT)) + +#define CMDQ_MME_STOP (\ + (1 << MME_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT) | \ + (1 << MME_CMDQ_GLBL_CFG1_CP_STOP_SHIFT)) + +#define QMAN_MME_ERR_MSG_EN (\ + (1 << MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \ + (1 << MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \ + (1 << MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \ + (1 << MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \ + (1 << MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \ + (1 << MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \ + (1 << MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \ + (1 << MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT)) + +#define CMDQ_MME_ERR_MSG_EN (\ + (1 << MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \ + (1 << MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \ + (1 << MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \ + (1 << MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \ + (1 << MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \ + (1 << MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \ + (1 << MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \ + (1 << MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT)) + +#define QMAN_MME_ERR_PROT (\ + (1 << MME_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \ + (1 << MME_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \ + (1 << MME_QM_GLBL_PROT_CP_ERR_PROT_SHIFT) | \ + (1 << MME_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT)) + +#define CMDQ_MME_ERR_PROT (\ + (1 << MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \ + (1 << MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \ + (1 << MME_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT) | \ + (1 << MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT)) + +#define QMAN_TPC_ENABLE (\ + (1 << TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \ + (1 << TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \ + (1 << TPC0_QM_GLBL_CFG0_CP_EN_SHIFT)) + +#define CMDQ_TPC_ENABLE (\ + (1 << TPC0_CMDQ_GLBL_CFG0_CQF_EN_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_CFG0_CP_EN_SHIFT)) + +#define QMAN_TPC_STOP (\ + (1 << TPC0_QM_GLBL_CFG1_PQF_STOP_SHIFT) | \ + (1 << TPC0_QM_GLBL_CFG1_CQF_STOP_SHIFT) | \ + (1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT)) + +#define CMDQ_TPC_STOP (\ + (1 << TPC0_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_CFG1_CP_STOP_SHIFT)) + +#define QMAN_TPC_ERR_MSG_EN (\ + (1 << TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \ + (1 << TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \ + (1 << TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \ + (1 << TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT)) + +#define CMDQ_TPC_ERR_MSG_EN (\ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \ + (1 << 
TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT)) + +#define QMAN_TPC_ERR_PROT (\ + (1 << TPC0_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \ + (1 << TPC0_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \ + (1 << TPC0_QM_GLBL_PROT_CP_ERR_PROT_SHIFT) | \ + (1 << TPC0_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT)) + +#define CMDQ_TPC_ERR_PROT (\ + (1 << TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT)) + +/* RESETS */ +#define DMA_MME_TPC_RESET (\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT) + +#define RESET_ALL (\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_SHIFT |\ + PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_MASK |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_SHIFT) + +#define CA53_RESET (\ + (~\ + (1 << PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_SHIFT)\ + ) & 0x7FFFFF) + +#define CPU_RESET_ASSERT (\ + 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT) + +#define CPU_RESET_CORE0_DEASSERT (\ + 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT |\ + 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT |\ + 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT |\ + 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT) + +#define GOYA_IRQ_HBW_ID_MASK 0x1FFF +#define GOYA_IRQ_HBW_ID_SHIFT 0 +#define GOYA_IRQ_HBW_INTERNAL_ID_MASK 0xE000 +#define GOYA_IRQ_HBW_INTERNAL_ID_SHIFT 13 +#define GOYA_IRQ_HBW_AGENT_ID_MASK 0x1F0000 +#define GOYA_IRQ_HBW_AGENT_ID_SHIFT 16 +#define GOYA_IRQ_HBW_Y_MASK 0xE00000 +#define GOYA_IRQ_HBW_Y_SHIFT 21 +#define GOYA_IRQ_HBW_X_MASK 0x7000000 +#define GOYA_IRQ_HBW_X_SHIFT 24 +#define GOYA_IRQ_LBW_ID_MASK 0xFF +#define GOYA_IRQ_LBW_ID_SHIFT 0 +#define GOYA_IRQ_LBW_INTERNAL_ID_MASK 0x700 +#define GOYA_IRQ_LBW_INTERNAL_ID_SHIFT 8 +#define GOYA_IRQ_LBW_AGENT_ID_MASK 0xF800 +#define GOYA_IRQ_LBW_AGENT_ID_SHIFT 11 +#define GOYA_IRQ_LBW_Y_MASK 0x70000 +#define GOYA_IRQ_LBW_Y_SHIFT 16 +#define GOYA_IRQ_LBW_X_MASK 0x380000 +#define GOYA_IRQ_LBW_X_SHIFT 19 + +#define DMA_QM_IDLE_MASK (DMA_QM_0_GLBL_STS0_PQF_IDLE_MASK | \ + DMA_QM_0_GLBL_STS0_CQF_IDLE_MASK | \ + DMA_QM_0_GLBL_STS0_CP_IDLE_MASK | \ + DMA_QM_0_GLBL_STS0_DMA_IDLE_MASK) + +#define TPC_QM_IDLE_MASK (TPC0_QM_GLBL_STS0_PQF_IDLE_MASK | \ + TPC0_QM_GLBL_STS0_CQF_IDLE_MASK | \ + TPC0_QM_GLBL_STS0_CP_IDLE_MASK) + +#define TPC_CMDQ_IDLE_MASK (TPC0_CMDQ_GLBL_STS0_CQF_IDLE_MASK | \ + TPC0_CMDQ_GLBL_STS0_CP_IDLE_MASK) + +#define TPC_CFG_IDLE_MASK (TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK | \ + TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK | \ + TPC0_CFG_STATUS_IQ_EMPTY_MASK | \ + TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_MASK) + +#define MME_QM_IDLE_MASK (MME_QM_GLBL_STS0_PQF_IDLE_MASK | \ + MME_QM_GLBL_STS0_CQF_IDLE_MASK | \ + MME_QM_GLBL_STS0_CP_IDLE_MASK) + +#define MME_CMDQ_IDLE_MASK (MME_CMDQ_GLBL_STS0_CQF_IDLE_MASK | \ + MME_CMDQ_GLBL_STS0_CP_IDLE_MASK) + +#define MME_ARCH_IDLE_MASK (MME_ARCH_STATUS_SB_A_EMPTY_MASK | \ + MME_ARCH_STATUS_SB_B_EMPTY_MASK | \ + MME_ARCH_STATUS_SB_CIN_EMPTY_MASK | \ + 
MME_ARCH_STATUS_SB_COUT_EMPTY_MASK) + +#define MME_SHADOW_IDLE_MASK (MME_SHADOW_0_STATUS_A_MASK | \ + MME_SHADOW_0_STATUS_B_MASK | \ + MME_SHADOW_0_STATUS_CIN_MASK | \ + MME_SHADOW_0_STATUS_COUT_MASK | \ + MME_SHADOW_0_STATUS_TE_MASK | \ + MME_SHADOW_0_STATUS_LD_MASK | \ + MME_SHADOW_0_STATUS_ST_MASK) + +#define TPC1_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT +#define TPC2_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT +#define TPC3_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT +#define TPC4_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT +#define TPC5_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT +#define TPC6_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT +#define TPC7_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT + +#define DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT +#define DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT +#define DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT +#define DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT + +#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1 + +#endif /* ASIC_REG_GOYA_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h new file mode 100644 index 000000000..ce65c9da5 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef ASIC_REG_GOYA_REGS_H_ +#define ASIC_REG_GOYA_REGS_H_ + +#include "goya_blocks.h" +#include "stlb_regs.h" +#include "mmu_regs.h" +#include "pcie_aux_regs.h" +#include "pcie_wrap_regs.h" +#include "psoc_global_conf_regs.h" +#include "psoc_spi_regs.h" +#include "psoc_mme_pll_regs.h" +#include "psoc_pci_pll_regs.h" +#include "psoc_emmc_pll_regs.h" +#include "psoc_timestamp_regs.h" +#include "cpu_if_regs.h" +#include "cpu_ca53_cfg_regs.h" +#include "cpu_pll_regs.h" +#include "ic_pll_regs.h" +#include "mc_pll_regs.h" +#include "tpc_pll_regs.h" +#include "dma_qm_0_regs.h" +#include "dma_qm_1_regs.h" +#include "dma_qm_2_regs.h" +#include "dma_qm_3_regs.h" +#include "dma_qm_4_regs.h" +#include "dma_ch_0_regs.h" +#include "dma_ch_1_regs.h" +#include "dma_ch_2_regs.h" +#include "dma_ch_3_regs.h" +#include "dma_ch_4_regs.h" +#include "dma_macro_regs.h" +#include "dma_nrtr_regs.h" +#include "pci_nrtr_regs.h" +#include "sram_y0_x0_rtr_regs.h" +#include "sram_y0_x1_rtr_regs.h" +#include "sram_y0_x2_rtr_regs.h" +#include "sram_y0_x3_rtr_regs.h" +#include "sram_y0_x4_rtr_regs.h" +#include "mme_regs.h" +#include "mme_qm_regs.h" +#include "mme_cmdq_regs.h" +#include "mme1_rtr_regs.h" +#include "mme2_rtr_regs.h" +#include "mme3_rtr_regs.h" +#include "mme4_rtr_regs.h" +#include "mme5_rtr_regs.h" +#include "mme6_rtr_regs.h" +#include "tpc0_cfg_regs.h" +#include "tpc1_cfg_regs.h" +#include "tpc2_cfg_regs.h" +#include "tpc3_cfg_regs.h" +#include "tpc4_cfg_regs.h" +#include "tpc5_cfg_regs.h" +#include "tpc6_cfg_regs.h" +#include "tpc7_cfg_regs.h" +#include "tpc0_qm_regs.h" +#include "tpc1_qm_regs.h" +#include "tpc2_qm_regs.h" +#include "tpc3_qm_regs.h" +#include "tpc4_qm_regs.h" +#include "tpc5_qm_regs.h" +#include "tpc6_qm_regs.h" +#include "tpc7_qm_regs.h" +#include "tpc0_cmdq_regs.h" +#include "tpc1_cmdq_regs.h" +#include "tpc2_cmdq_regs.h" +#include "tpc3_cmdq_regs.h" +#include "tpc4_cmdq_regs.h" +#include "tpc5_cmdq_regs.h" +#include "tpc6_cmdq_regs.h" +#include "tpc7_cmdq_regs.h" +#include 
"tpc0_nrtr_regs.h" +#include "tpc1_rtr_regs.h" +#include "tpc2_rtr_regs.h" +#include "tpc3_rtr_regs.h" +#include "tpc4_rtr_regs.h" +#include "tpc5_rtr_regs.h" +#include "tpc6_rtr_regs.h" +#include "tpc7_nrtr_regs.h" +#include "tpc0_eml_cfg_regs.h" +#include "psoc_etr_regs.h" + +#include "psoc_global_conf_masks.h" +#include "dma_macro_masks.h" +#include "dma_qm_0_masks.h" +#include "dma_ch_0_masks.h" +#include "tpc0_qm_masks.h" +#include "tpc0_cmdq_masks.h" +#include "mme_qm_masks.h" +#include "mme_cmdq_masks.h" +#include "tpc0_cfg_masks.h" +#include "tpc0_eml_cfg_masks.h" +#include "mme1_rtr_masks.h" +#include "tpc0_nrtr_masks.h" +#include "dma_nrtr_masks.h" +#include "pci_nrtr_masks.h" +#include "stlb_masks.h" +#include "cpu_ca53_cfg_masks.h" +#include "mmu_masks.h" +#include "mme_masks.h" + +#define mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG 0xC02000 +#define mmPCIE_DBI_MSIX_DOORBELL_OFF 0xC02948 + +#define mmSYNC_MNGR_MON_PAY_ADDRL_0 0x113000 +#define mmSYNC_MNGR_SOB_OBJ_0 0x112000 +#define mmSYNC_MNGR_SOB_OBJ_1000 0x112FA0 +#define mmSYNC_MNGR_SOB_OBJ_1007 0x112FBC +#define mmSYNC_MNGR_SOB_OBJ_1023 0x112FFC +#define mmSYNC_MNGR_MON_STATUS_0 0x114000 +#define mmSYNC_MNGR_MON_STATUS_255 0x1143FC + +#define mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR 0x800040 + +#endif /* ASIC_REG_GOYA_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h new file mode 100644 index 000000000..4ae7fed8b --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_IC_PLL_REGS_H_ +#define ASIC_REG_IC_PLL_REGS_H_ + +/* + ***************************************** + * IC_PLL (Prototype: PLL) + ***************************************** + */ + +#define mmIC_PLL_NR 0x4A3100 + +#define mmIC_PLL_NF 0x4A3104 + +#define mmIC_PLL_OD 0x4A3108 + +#define mmIC_PLL_NB 0x4A310C + +#define mmIC_PLL_CFG 0x4A3110 + +#define mmIC_PLL_LOSE_MASK 0x4A3120 + +#define mmIC_PLL_LOCK_INTR 0x4A3128 + +#define mmIC_PLL_LOCK_BYPASS 0x4A312C + +#define mmIC_PLL_DATA_CHNG 0x4A3130 + +#define mmIC_PLL_RST 0x4A3134 + +#define mmIC_PLL_SLIP_WD_CNTR 0x4A3150 + +#define mmIC_PLL_DIV_FACTOR_0 0x4A3200 + +#define mmIC_PLL_DIV_FACTOR_1 0x4A3204 + +#define mmIC_PLL_DIV_FACTOR_2 0x4A3208 + +#define mmIC_PLL_DIV_FACTOR_3 0x4A320C + +#define mmIC_PLL_DIV_FACTOR_CMD_0 0x4A3220 + +#define mmIC_PLL_DIV_FACTOR_CMD_1 0x4A3224 + +#define mmIC_PLL_DIV_FACTOR_CMD_2 0x4A3228 + +#define mmIC_PLL_DIV_FACTOR_CMD_3 0x4A322C + +#define mmIC_PLL_DIV_SEL_0 0x4A3280 + +#define mmIC_PLL_DIV_SEL_1 0x4A3284 + +#define mmIC_PLL_DIV_SEL_2 0x4A3288 + +#define mmIC_PLL_DIV_SEL_3 0x4A328C + +#define mmIC_PLL_DIV_EN_0 0x4A32A0 + +#define mmIC_PLL_DIV_EN_1 0x4A32A4 + +#define mmIC_PLL_DIV_EN_2 0x4A32A8 + +#define mmIC_PLL_DIV_EN_3 0x4A32AC + +#define mmIC_PLL_DIV_FACTOR_BUSY_0 0x4A32C0 + +#define mmIC_PLL_DIV_FACTOR_BUSY_1 0x4A32C4 + +#define mmIC_PLL_DIV_FACTOR_BUSY_2 0x4A32C8 + +#define mmIC_PLL_DIV_FACTOR_BUSY_3 0x4A32CC + +#define mmIC_PLL_CLK_GATER 0x4A3300 + +#define mmIC_PLL_CLK_RLX_0 0x4A3310 + +#define mmIC_PLL_CLK_RLX_1 0x4A3314 + +#define mmIC_PLL_CLK_RLX_2 0x4A3318 + +#define mmIC_PLL_CLK_RLX_3 0x4A331C + +#define mmIC_PLL_REF_CNTR_PERIOD 0x4A3400 + +#define 
mmIC_PLL_REF_LOW_THRESHOLD 0x4A3410 + +#define mmIC_PLL_REF_HIGH_THRESHOLD 0x4A3420 + +#define mmIC_PLL_PLL_NOT_STABLE 0x4A3430 + +#define mmIC_PLL_FREQ_CALC_EN 0x4A3440 + +#endif /* ASIC_REG_IC_PLL_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h new file mode 100644 index 000000000..6d35d8527 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MC_PLL_REGS_H_ +#define ASIC_REG_MC_PLL_REGS_H_ + +/* + ***************************************** + * MC_PLL (Prototype: PLL) + ***************************************** + */ + +#define mmMC_PLL_NR 0x4A1100 + +#define mmMC_PLL_NF 0x4A1104 + +#define mmMC_PLL_OD 0x4A1108 + +#define mmMC_PLL_NB 0x4A110C + +#define mmMC_PLL_CFG 0x4A1110 + +#define mmMC_PLL_LOSE_MASK 0x4A1120 + +#define mmMC_PLL_LOCK_INTR 0x4A1128 + +#define mmMC_PLL_LOCK_BYPASS 0x4A112C + +#define mmMC_PLL_DATA_CHNG 0x4A1130 + +#define mmMC_PLL_RST 0x4A1134 + +#define mmMC_PLL_SLIP_WD_CNTR 0x4A1150 + +#define mmMC_PLL_DIV_FACTOR_0 0x4A1200 + +#define mmMC_PLL_DIV_FACTOR_1 0x4A1204 + +#define mmMC_PLL_DIV_FACTOR_2 0x4A1208 + +#define mmMC_PLL_DIV_FACTOR_3 0x4A120C + +#define mmMC_PLL_DIV_FACTOR_CMD_0 0x4A1220 + +#define mmMC_PLL_DIV_FACTOR_CMD_1 0x4A1224 + +#define mmMC_PLL_DIV_FACTOR_CMD_2 0x4A1228 + +#define mmMC_PLL_DIV_FACTOR_CMD_3 0x4A122C + +#define mmMC_PLL_DIV_SEL_0 0x4A1280 + +#define mmMC_PLL_DIV_SEL_1 0x4A1284 + +#define mmMC_PLL_DIV_SEL_2 0x4A1288 + +#define mmMC_PLL_DIV_SEL_3 0x4A128C + +#define mmMC_PLL_DIV_EN_0 0x4A12A0 + +#define mmMC_PLL_DIV_EN_1 0x4A12A4 + +#define mmMC_PLL_DIV_EN_2 0x4A12A8 + +#define mmMC_PLL_DIV_EN_3 0x4A12AC + +#define mmMC_PLL_DIV_FACTOR_BUSY_0 0x4A12C0 + +#define mmMC_PLL_DIV_FACTOR_BUSY_1 0x4A12C4 + +#define mmMC_PLL_DIV_FACTOR_BUSY_2 0x4A12C8 + +#define mmMC_PLL_DIV_FACTOR_BUSY_3 0x4A12CC + +#define mmMC_PLL_CLK_GATER 0x4A1300 + +#define mmMC_PLL_CLK_RLX_0 0x4A1310 + +#define mmMC_PLL_CLK_RLX_1 0x4A1314 + +#define mmMC_PLL_CLK_RLX_2 0x4A1318 + +#define mmMC_PLL_CLK_RLX_3 0x4A131C + +#define mmMC_PLL_REF_CNTR_PERIOD 0x4A1400 + +#define mmMC_PLL_REF_LOW_THRESHOLD 0x4A1410 + +#define mmMC_PLL_REF_HIGH_THRESHOLD 0x4A1420 + +#define mmMC_PLL_PLL_NOT_STABLE 0x4A1430 + +#define mmMC_PLL_FREQ_CALC_EN 0x4A1440 + +#endif /* ASIC_REG_MC_PLL_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h new file mode 100644 index 000000000..6c23f8b96 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h @@ -0,0 +1,652 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME1_RTR_MASKS_H_ +#define ASIC_REG_MME1_RTR_MASKS_H_ + +/* + ***************************************** + * MME1_RTR (Prototype: MME_RTR) + ***************************************** + */ + +/* MME1_RTR_HBW_RD_RQ_E_ARB */ +#define MME1_RTR_HBW_RD_RQ_E_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RQ_E_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RQ_E_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_RD_RQ_E_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_RD_RQ_E_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_RD_RQ_E_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_RD_RQ_E_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RQ_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RQ_W_ARB */ +#define MME1_RTR_HBW_RD_RQ_W_ARB_E_SHIFT 0 +#define MME1_RTR_HBW_RD_RQ_W_ARB_E_MASK 0x7 +#define MME1_RTR_HBW_RD_RQ_W_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_RD_RQ_W_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_RD_RQ_W_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_RD_RQ_W_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_RD_RQ_W_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RQ_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RQ_N_ARB */ +#define MME1_RTR_HBW_RD_RQ_N_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RQ_N_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RQ_N_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_RD_RQ_N_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_RD_RQ_N_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_RD_RQ_N_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_RD_RQ_N_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RQ_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RQ_S_ARB */ +#define MME1_RTR_HBW_RD_RQ_S_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RQ_S_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RQ_S_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_RD_RQ_S_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_RD_RQ_S_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_RD_RQ_S_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_RD_RQ_S_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RQ_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RQ_L_ARB */ +#define MME1_RTR_HBW_RD_RQ_L_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RQ_L_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RQ_L_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_RD_RQ_L_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_RD_RQ_L_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_RD_RQ_L_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_RD_RQ_L_ARB_N_SHIFT 24 +#define MME1_RTR_HBW_RD_RQ_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_HBW_E_ARB_MAX */ +#define MME1_RTR_HBW_E_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_HBW_E_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_HBW_W_ARB_MAX */ +#define MME1_RTR_HBW_W_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_HBW_W_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_HBW_N_ARB_MAX */ +#define MME1_RTR_HBW_N_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_HBW_N_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_HBW_S_ARB_MAX */ +#define MME1_RTR_HBW_S_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_HBW_S_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_HBW_L_ARB_MAX */ +#define MME1_RTR_HBW_L_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_HBW_L_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_HBW_RD_RS_MAX_CREDIT */ +#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_A_SHIFT 0 +#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_A_MASK 0x3F +#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_B_SHIFT 8 +#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_B_MASK 0x3F00 + +/* MME1_RTR_HBW_WR_RQ_MAX_CREDIT */ +#define MME1_RTR_HBW_WR_RQ_MAX_CREDIT_VAL_SHIFT 0 +#define MME1_RTR_HBW_WR_RQ_MAX_CREDIT_VAL_MASK 0x3F + +/* MME1_RTR_HBW_RD_RQ_MAX_CREDIT */ +#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_A_SHIFT 0 +#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_A_MASK 0x3F +#define 
MME1_RTR_HBW_RD_RQ_MAX_CREDIT_B_SHIFT 8 +#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_B_MASK 0x3F00 +#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_IC_SHIFT 16 +#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_IC_MASK 0x3F0000 + +/* MME1_RTR_HBW_RD_RS_E_ARB */ +#define MME1_RTR_HBW_RD_RS_E_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RS_E_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RS_E_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_RD_RS_E_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_RD_RS_E_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_RD_RS_E_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_RD_RS_E_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RS_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RS_W_ARB */ +#define MME1_RTR_HBW_RD_RS_W_ARB_E_SHIFT 0 +#define MME1_RTR_HBW_RD_RS_W_ARB_E_MASK 0x7 +#define MME1_RTR_HBW_RD_RS_W_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_RD_RS_W_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_RD_RS_W_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_RD_RS_W_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_RD_RS_W_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RS_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RS_N_ARB */ +#define MME1_RTR_HBW_RD_RS_N_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RS_N_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RS_N_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_RD_RS_N_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_RD_RS_N_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_RD_RS_N_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_RD_RS_N_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RS_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RS_S_ARB */ +#define MME1_RTR_HBW_RD_RS_S_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RS_S_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RS_S_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_RD_RS_S_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_RD_RS_S_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_RD_RS_S_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_RD_RS_S_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RS_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RS_L_ARB */ +#define MME1_RTR_HBW_RD_RS_L_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RS_L_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RS_L_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_RD_RS_L_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_RD_RS_L_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_RD_RS_L_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_RD_RS_L_ARB_N_SHIFT 24 +#define MME1_RTR_HBW_RD_RS_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RQ_E_ARB */ +#define MME1_RTR_HBW_WR_RQ_E_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RQ_E_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_WR_RQ_E_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_WR_RQ_E_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_WR_RQ_E_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_WR_RQ_E_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_WR_RQ_E_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RQ_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RQ_W_ARB */ +#define MME1_RTR_HBW_WR_RQ_W_ARB_E_SHIFT 0 +#define MME1_RTR_HBW_WR_RQ_W_ARB_E_MASK 0x7 +#define MME1_RTR_HBW_WR_RQ_W_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_WR_RQ_W_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_WR_RQ_W_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_WR_RQ_W_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_WR_RQ_W_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RQ_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RQ_N_ARB */ +#define MME1_RTR_HBW_WR_RQ_N_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RQ_N_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_WR_RQ_N_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_WR_RQ_N_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_WR_RQ_N_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_WR_RQ_N_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_WR_RQ_N_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RQ_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RQ_S_ARB */ +#define MME1_RTR_HBW_WR_RQ_S_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RQ_S_ARB_W_MASK 0x7 
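
A minimal sketch of how such auto-generated SHIFT/MASK pairs are typically consumed; the field_get/field_set helpers below are illustrative, not part of the patch, and assume the definitions above are in scope (e.g. via mme1_rtr_masks.h).

#include <stdint.h>

/* Sketch only: generic read-modify-write of a bitfield described by a
 * MASK/SHIFT pair. Helper names are hypothetical, not driver API.
 */
static inline uint32_t field_get(uint32_t reg_val, uint32_t mask,
				 uint32_t shift)
{
	return (reg_val & mask) >> shift;
}

static inline uint32_t field_set(uint32_t reg_val, uint32_t mask,
				 uint32_t shift, uint32_t val)
{
	return (reg_val & ~mask) | ((val << shift) & mask);
}

/* Example: give the west input of the HBW WR RQ east arbiter weight 5
 * (a 3-bit field, per MME1_RTR_HBW_WR_RQ_E_ARB_W_MASK above).
 */
static uint32_t set_hbw_wr_rq_e_west_weight(uint32_t old)
{
	return field_set(old, MME1_RTR_HBW_WR_RQ_E_ARB_W_MASK,
			 MME1_RTR_HBW_WR_RQ_E_ARB_W_SHIFT, 5);
}
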
+#define MME1_RTR_HBW_WR_RQ_S_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_WR_RQ_S_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_WR_RQ_S_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_WR_RQ_S_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_WR_RQ_S_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RQ_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RQ_L_ARB */ +#define MME1_RTR_HBW_WR_RQ_L_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RQ_L_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_WR_RQ_L_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_WR_RQ_L_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_WR_RQ_L_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_WR_RQ_L_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_WR_RQ_L_ARB_N_SHIFT 24 +#define MME1_RTR_HBW_WR_RQ_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RS_E_ARB */ +#define MME1_RTR_HBW_WR_RS_E_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RS_E_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_WR_RS_E_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_WR_RS_E_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_WR_RS_E_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_WR_RS_E_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_WR_RS_E_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RS_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RS_W_ARB */ +#define MME1_RTR_HBW_WR_RS_W_ARB_E_SHIFT 0 +#define MME1_RTR_HBW_WR_RS_W_ARB_E_MASK 0x7 +#define MME1_RTR_HBW_WR_RS_W_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_WR_RS_W_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_WR_RS_W_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_WR_RS_W_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_WR_RS_W_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RS_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RS_N_ARB */ +#define MME1_RTR_HBW_WR_RS_N_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RS_N_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_WR_RS_N_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_WR_RS_N_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_WR_RS_N_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_WR_RS_N_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_WR_RS_N_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RS_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RS_S_ARB */ +#define MME1_RTR_HBW_WR_RS_S_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RS_S_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_WR_RS_S_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_WR_RS_S_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_WR_RS_S_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_WR_RS_S_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_WR_RS_S_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RS_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RS_L_ARB */ +#define MME1_RTR_HBW_WR_RS_L_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RS_L_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_WR_RS_L_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_WR_RS_L_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_WR_RS_L_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_WR_RS_L_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_WR_RS_L_ARB_N_SHIFT 24 +#define MME1_RTR_HBW_WR_RS_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RQ_E_ARB */ +#define MME1_RTR_LBW_RD_RQ_E_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RQ_E_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RQ_E_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_RD_RQ_E_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_RD_RQ_E_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_RD_RQ_E_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_RD_RQ_E_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RQ_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RQ_W_ARB */ +#define MME1_RTR_LBW_RD_RQ_W_ARB_E_SHIFT 0 +#define MME1_RTR_LBW_RD_RQ_W_ARB_E_MASK 0x7 +#define MME1_RTR_LBW_RD_RQ_W_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_RD_RQ_W_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_RD_RQ_W_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_RD_RQ_W_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_RD_RQ_W_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RQ_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RQ_N_ARB */ 
+#define MME1_RTR_LBW_RD_RQ_N_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RQ_N_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RQ_N_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_RD_RQ_N_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_RD_RQ_N_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_RD_RQ_N_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_RD_RQ_N_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RQ_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RQ_S_ARB */ +#define MME1_RTR_LBW_RD_RQ_S_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RQ_S_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RQ_S_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_RD_RQ_S_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_RD_RQ_S_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_RD_RQ_S_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_RD_RQ_S_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RQ_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RQ_L_ARB */ +#define MME1_RTR_LBW_RD_RQ_L_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RQ_L_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RQ_L_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_RD_RQ_L_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_RD_RQ_L_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_RD_RQ_L_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_RD_RQ_L_ARB_N_SHIFT 24 +#define MME1_RTR_LBW_RD_RQ_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_LBW_E_ARB_MAX */ +#define MME1_RTR_LBW_E_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_LBW_E_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_LBW_W_ARB_MAX */ +#define MME1_RTR_LBW_W_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_LBW_W_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_LBW_N_ARB_MAX */ +#define MME1_RTR_LBW_N_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_LBW_N_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_LBW_S_ARB_MAX */ +#define MME1_RTR_LBW_S_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_LBW_S_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_LBW_L_ARB_MAX */ +#define MME1_RTR_LBW_L_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_LBW_L_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_LBW_SRAM_MAX_CREDIT */ +#define MME1_RTR_LBW_SRAM_MAX_CREDIT_MSTR_SHIFT 0 +#define MME1_RTR_LBW_SRAM_MAX_CREDIT_MSTR_MASK 0x3F +#define MME1_RTR_LBW_SRAM_MAX_CREDIT_SLV_SHIFT 8 +#define MME1_RTR_LBW_SRAM_MAX_CREDIT_SLV_MASK 0x3F00 + +/* MME1_RTR_LBW_RD_RS_E_ARB */ +#define MME1_RTR_LBW_RD_RS_E_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RS_E_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RS_E_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_RD_RS_E_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_RD_RS_E_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_RD_RS_E_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_RD_RS_E_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RS_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RS_W_ARB */ +#define MME1_RTR_LBW_RD_RS_W_ARB_E_SHIFT 0 +#define MME1_RTR_LBW_RD_RS_W_ARB_E_MASK 0x7 +#define MME1_RTR_LBW_RD_RS_W_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_RD_RS_W_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_RD_RS_W_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_RD_RS_W_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_RD_RS_W_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RS_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RS_N_ARB */ +#define MME1_RTR_LBW_RD_RS_N_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RS_N_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RS_N_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_RD_RS_N_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_RD_RS_N_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_RD_RS_N_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_RD_RS_N_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RS_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RS_S_ARB */ +#define MME1_RTR_LBW_RD_RS_S_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RS_S_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RS_S_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_RD_RS_S_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_RD_RS_S_ARB_N_SHIFT 16 +#define 
MME1_RTR_LBW_RD_RS_S_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_RD_RS_S_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RS_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RS_L_ARB */ +#define MME1_RTR_LBW_RD_RS_L_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RS_L_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RS_L_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_RD_RS_L_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_RD_RS_L_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_RD_RS_L_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_RD_RS_L_ARB_N_SHIFT 24 +#define MME1_RTR_LBW_RD_RS_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RQ_E_ARB */ +#define MME1_RTR_LBW_WR_RQ_E_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RQ_E_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RQ_E_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_WR_RQ_E_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_WR_RQ_E_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_WR_RQ_E_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_WR_RQ_E_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RQ_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RQ_W_ARB */ +#define MME1_RTR_LBW_WR_RQ_W_ARB_E_SHIFT 0 +#define MME1_RTR_LBW_WR_RQ_W_ARB_E_MASK 0x7 +#define MME1_RTR_LBW_WR_RQ_W_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_WR_RQ_W_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_WR_RQ_W_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_WR_RQ_W_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_WR_RQ_W_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RQ_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RQ_N_ARB */ +#define MME1_RTR_LBW_WR_RQ_N_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RQ_N_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RQ_N_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_WR_RQ_N_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_WR_RQ_N_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_WR_RQ_N_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_WR_RQ_N_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RQ_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RQ_S_ARB */ +#define MME1_RTR_LBW_WR_RQ_S_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RQ_S_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RQ_S_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_WR_RQ_S_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_WR_RQ_S_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_WR_RQ_S_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_WR_RQ_S_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RQ_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RQ_L_ARB */ +#define MME1_RTR_LBW_WR_RQ_L_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RQ_L_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RQ_L_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_WR_RQ_L_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_WR_RQ_L_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_WR_RQ_L_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_WR_RQ_L_ARB_N_SHIFT 24 +#define MME1_RTR_LBW_WR_RQ_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RS_E_ARB */ +#define MME1_RTR_LBW_WR_RS_E_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RS_E_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RS_E_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_WR_RS_E_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_WR_RS_E_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_WR_RS_E_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_WR_RS_E_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RS_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RS_W_ARB */ +#define MME1_RTR_LBW_WR_RS_W_ARB_E_SHIFT 0 +#define MME1_RTR_LBW_WR_RS_W_ARB_E_MASK 0x7 +#define MME1_RTR_LBW_WR_RS_W_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_WR_RS_W_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_WR_RS_W_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_WR_RS_W_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_WR_RS_W_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RS_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RS_N_ARB */ +#define MME1_RTR_LBW_WR_RS_N_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RS_N_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RS_N_ARB_E_SHIFT 8 +#define 
MME1_RTR_LBW_WR_RS_N_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_WR_RS_N_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_WR_RS_N_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_WR_RS_N_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RS_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RS_S_ARB */ +#define MME1_RTR_LBW_WR_RS_S_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RS_S_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RS_S_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_WR_RS_S_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_WR_RS_S_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_WR_RS_S_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_WR_RS_S_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RS_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RS_L_ARB */ +#define MME1_RTR_LBW_WR_RS_L_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RS_L_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RS_L_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_WR_RS_L_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_WR_RS_L_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_WR_RS_L_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_WR_RS_L_ARB_N_SHIFT 24 +#define MME1_RTR_LBW_WR_RS_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_DBG_E_ARB */ +#define MME1_RTR_DBG_E_ARB_W_SHIFT 0 +#define MME1_RTR_DBG_E_ARB_W_MASK 0x7 +#define MME1_RTR_DBG_E_ARB_S_SHIFT 8 +#define MME1_RTR_DBG_E_ARB_S_MASK 0x700 +#define MME1_RTR_DBG_E_ARB_N_SHIFT 16 +#define MME1_RTR_DBG_E_ARB_N_MASK 0x70000 +#define MME1_RTR_DBG_E_ARB_L_SHIFT 24 +#define MME1_RTR_DBG_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_DBG_W_ARB */ +#define MME1_RTR_DBG_W_ARB_E_SHIFT 0 +#define MME1_RTR_DBG_W_ARB_E_MASK 0x7 +#define MME1_RTR_DBG_W_ARB_S_SHIFT 8 +#define MME1_RTR_DBG_W_ARB_S_MASK 0x700 +#define MME1_RTR_DBG_W_ARB_N_SHIFT 16 +#define MME1_RTR_DBG_W_ARB_N_MASK 0x70000 +#define MME1_RTR_DBG_W_ARB_L_SHIFT 24 +#define MME1_RTR_DBG_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_DBG_N_ARB */ +#define MME1_RTR_DBG_N_ARB_W_SHIFT 0 +#define MME1_RTR_DBG_N_ARB_W_MASK 0x7 +#define MME1_RTR_DBG_N_ARB_E_SHIFT 8 +#define MME1_RTR_DBG_N_ARB_E_MASK 0x700 +#define MME1_RTR_DBG_N_ARB_S_SHIFT 16 +#define MME1_RTR_DBG_N_ARB_S_MASK 0x70000 +#define MME1_RTR_DBG_N_ARB_L_SHIFT 24 +#define MME1_RTR_DBG_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_DBG_S_ARB */ +#define MME1_RTR_DBG_S_ARB_W_SHIFT 0 +#define MME1_RTR_DBG_S_ARB_W_MASK 0x7 +#define MME1_RTR_DBG_S_ARB_E_SHIFT 8 +#define MME1_RTR_DBG_S_ARB_E_MASK 0x700 +#define MME1_RTR_DBG_S_ARB_N_SHIFT 16 +#define MME1_RTR_DBG_S_ARB_N_MASK 0x70000 +#define MME1_RTR_DBG_S_ARB_L_SHIFT 24 +#define MME1_RTR_DBG_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_DBG_L_ARB */ +#define MME1_RTR_DBG_L_ARB_W_SHIFT 0 +#define MME1_RTR_DBG_L_ARB_W_MASK 0x7 +#define MME1_RTR_DBG_L_ARB_E_SHIFT 8 +#define MME1_RTR_DBG_L_ARB_E_MASK 0x700 +#define MME1_RTR_DBG_L_ARB_S_SHIFT 16 +#define MME1_RTR_DBG_L_ARB_S_MASK 0x70000 +#define MME1_RTR_DBG_L_ARB_N_SHIFT 24 +#define MME1_RTR_DBG_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_DBG_E_ARB_MAX */ +#define MME1_RTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_DBG_W_ARB_MAX */ +#define MME1_RTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_DBG_N_ARB_MAX */ +#define MME1_RTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_DBG_S_ARB_MAX */ +#define MME1_RTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_DBG_L_ARB_MAX */ +#define MME1_RTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_SPLIT_COEF */ +#define MME1_RTR_SPLIT_COEF_VAL_SHIFT 0 +#define MME1_RTR_SPLIT_COEF_VAL_MASK 0xFFFF + +/* 
MME1_RTR_SPLIT_CFG */ +#define MME1_RTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0 +#define MME1_RTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1 +#define MME1_RTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1 +#define MME1_RTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2 +#define MME1_RTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2 +#define MME1_RTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC +#define MME1_RTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 4 +#define MME1_RTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x10 +#define MME1_RTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 5 +#define MME1_RTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x20 +#define MME1_RTR_SPLIT_CFG_B2B_OPT_SHIFT 6 +#define MME1_RTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0 + +/* MME1_RTR_SPLIT_RD_SAT */ +#define MME1_RTR_SPLIT_RD_SAT_VAL_SHIFT 0 +#define MME1_RTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF + +/* MME1_RTR_SPLIT_RD_RST_TOKEN */ +#define MME1_RTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0 +#define MME1_RTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF + +/* MME1_RTR_SPLIT_RD_TIMEOUT */ +#define MME1_RTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0 +#define MME1_RTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* MME1_RTR_SPLIT_WR_SAT */ +#define MME1_RTR_SPLIT_WR_SAT_VAL_SHIFT 0 +#define MME1_RTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF + +/* MME1_RTR_WPLIT_WR_TST_TOLEN */ +#define MME1_RTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0 +#define MME1_RTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF + +/* MME1_RTR_SPLIT_WR_TIMEOUT */ +#define MME1_RTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0 +#define MME1_RTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* MME1_RTR_HBW_RANGE_HIT */ +#define MME1_RTR_HBW_RANGE_HIT_IND_SHIFT 0 +#define MME1_RTR_HBW_RANGE_HIT_IND_MASK 0xFF + +/* MME1_RTR_HBW_RANGE_MASK_L */ +#define MME1_RTR_HBW_RANGE_MASK_L_VAL_SHIFT 0 +#define MME1_RTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF + +/* MME1_RTR_HBW_RANGE_MASK_H */ +#define MME1_RTR_HBW_RANGE_MASK_H_VAL_SHIFT 0 +#define MME1_RTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF + +/* MME1_RTR_HBW_RANGE_BASE_L */ +#define MME1_RTR_HBW_RANGE_BASE_L_VAL_SHIFT 0 +#define MME1_RTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF + +/* MME1_RTR_HBW_RANGE_BASE_H */ +#define MME1_RTR_HBW_RANGE_BASE_H_VAL_SHIFT 0 +#define MME1_RTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF + +/* MME1_RTR_LBW_RANGE_HIT */ +#define MME1_RTR_LBW_RANGE_HIT_IND_SHIFT 0 +#define MME1_RTR_LBW_RANGE_HIT_IND_MASK 0xFFFF + +/* MME1_RTR_LBW_RANGE_MASK */ +#define MME1_RTR_LBW_RANGE_MASK_VAL_SHIFT 0 +#define MME1_RTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF + +/* MME1_RTR_LBW_RANGE_BASE */ +#define MME1_RTR_LBW_RANGE_BASE_VAL_SHIFT 0 +#define MME1_RTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF + +/* MME1_RTR_RGLTR */ +#define MME1_RTR_RGLTR_WR_EN_SHIFT 0 +#define MME1_RTR_RGLTR_WR_EN_MASK 0x1 +#define MME1_RTR_RGLTR_RD_EN_SHIFT 4 +#define MME1_RTR_RGLTR_RD_EN_MASK 0x10 + +/* MME1_RTR_RGLTR_WR_RESULT */ +#define MME1_RTR_RGLTR_WR_RESULT_VAL_SHIFT 0 +#define MME1_RTR_RGLTR_WR_RESULT_VAL_MASK 0xFF + +/* MME1_RTR_RGLTR_RD_RESULT */ +#define MME1_RTR_RGLTR_RD_RESULT_VAL_SHIFT 0 +#define MME1_RTR_RGLTR_RD_RESULT_VAL_MASK 0xFF + +/* MME1_RTR_SCRAMB_EN */ +#define MME1_RTR_SCRAMB_EN_VAL_SHIFT 0 +#define MME1_RTR_SCRAMB_EN_VAL_MASK 0x1 + +/* MME1_RTR_NON_LIN_SCRAMB */ +#define MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT 0 +#define MME1_RTR_NON_LIN_SCRAMB_EN_MASK 0x1 + +#endif /* ASIC_REG_MME1_RTR_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h new file mode 100644 index 000000000..122e9d529 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h @@ -0,0 +1,330 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, 
Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME1_RTR_REGS_H_ +#define ASIC_REG_MME1_RTR_REGS_H_ + +/* + ***************************************** + * MME1_RTR (Prototype: MME_RTR) + ***************************************** + */ + +#define mmMME1_RTR_HBW_RD_RQ_E_ARB 0x40100 + +#define mmMME1_RTR_HBW_RD_RQ_W_ARB 0x40104 + +#define mmMME1_RTR_HBW_RD_RQ_N_ARB 0x40108 + +#define mmMME1_RTR_HBW_RD_RQ_S_ARB 0x4010C + +#define mmMME1_RTR_HBW_RD_RQ_L_ARB 0x40110 + +#define mmMME1_RTR_HBW_E_ARB_MAX 0x40120 + +#define mmMME1_RTR_HBW_W_ARB_MAX 0x40124 + +#define mmMME1_RTR_HBW_N_ARB_MAX 0x40128 + +#define mmMME1_RTR_HBW_S_ARB_MAX 0x4012C + +#define mmMME1_RTR_HBW_L_ARB_MAX 0x40130 + +#define mmMME1_RTR_HBW_RD_RS_MAX_CREDIT 0x40140 + +#define mmMME1_RTR_HBW_WR_RQ_MAX_CREDIT 0x40144 + +#define mmMME1_RTR_HBW_RD_RQ_MAX_CREDIT 0x40148 + +#define mmMME1_RTR_HBW_RD_RS_E_ARB 0x40150 + +#define mmMME1_RTR_HBW_RD_RS_W_ARB 0x40154 + +#define mmMME1_RTR_HBW_RD_RS_N_ARB 0x40158 + +#define mmMME1_RTR_HBW_RD_RS_S_ARB 0x4015C + +#define mmMME1_RTR_HBW_RD_RS_L_ARB 0x40160 + +#define mmMME1_RTR_HBW_WR_RQ_E_ARB 0x40170 + +#define mmMME1_RTR_HBW_WR_RQ_W_ARB 0x40174 + +#define mmMME1_RTR_HBW_WR_RQ_N_ARB 0x40178 + +#define mmMME1_RTR_HBW_WR_RQ_S_ARB 0x4017C + +#define mmMME1_RTR_HBW_WR_RQ_L_ARB 0x40180 + +#define mmMME1_RTR_HBW_WR_RS_E_ARB 0x40190 + +#define mmMME1_RTR_HBW_WR_RS_W_ARB 0x40194 + +#define mmMME1_RTR_HBW_WR_RS_N_ARB 0x40198 + +#define mmMME1_RTR_HBW_WR_RS_S_ARB 0x4019C + +#define mmMME1_RTR_HBW_WR_RS_L_ARB 0x401A0 + +#define mmMME1_RTR_LBW_RD_RQ_E_ARB 0x40200 + +#define mmMME1_RTR_LBW_RD_RQ_W_ARB 0x40204 + +#define mmMME1_RTR_LBW_RD_RQ_N_ARB 0x40208 + +#define mmMME1_RTR_LBW_RD_RQ_S_ARB 0x4020C + +#define mmMME1_RTR_LBW_RD_RQ_L_ARB 0x40210 + +#define mmMME1_RTR_LBW_E_ARB_MAX 0x40220 + +#define mmMME1_RTR_LBW_W_ARB_MAX 0x40224 + +#define mmMME1_RTR_LBW_N_ARB_MAX 0x40228 + +#define mmMME1_RTR_LBW_S_ARB_MAX 0x4022C + +#define mmMME1_RTR_LBW_L_ARB_MAX 0x40230 + +#define mmMME1_RTR_LBW_SRAM_MAX_CREDIT 0x40240 + +#define mmMME1_RTR_LBW_RD_RS_E_ARB 0x40250 + +#define mmMME1_RTR_LBW_RD_RS_W_ARB 0x40254 + +#define mmMME1_RTR_LBW_RD_RS_N_ARB 0x40258 + +#define mmMME1_RTR_LBW_RD_RS_S_ARB 0x4025C + +#define mmMME1_RTR_LBW_RD_RS_L_ARB 0x40260 + +#define mmMME1_RTR_LBW_WR_RQ_E_ARB 0x40270 + +#define mmMME1_RTR_LBW_WR_RQ_W_ARB 0x40274 + +#define mmMME1_RTR_LBW_WR_RQ_N_ARB 0x40278 + +#define mmMME1_RTR_LBW_WR_RQ_S_ARB 0x4027C + +#define mmMME1_RTR_LBW_WR_RQ_L_ARB 0x40280 + +#define mmMME1_RTR_LBW_WR_RS_E_ARB 0x40290 + +#define mmMME1_RTR_LBW_WR_RS_W_ARB 0x40294 + +#define mmMME1_RTR_LBW_WR_RS_N_ARB 0x40298 + +#define mmMME1_RTR_LBW_WR_RS_S_ARB 0x4029C + +#define mmMME1_RTR_LBW_WR_RS_L_ARB 0x402A0 + +#define mmMME1_RTR_DBG_E_ARB 0x40300 + +#define mmMME1_RTR_DBG_W_ARB 0x40304 + +#define mmMME1_RTR_DBG_N_ARB 0x40308 + +#define mmMME1_RTR_DBG_S_ARB 0x4030C + +#define mmMME1_RTR_DBG_L_ARB 0x40310 + +#define mmMME1_RTR_DBG_E_ARB_MAX 0x40320 + +#define mmMME1_RTR_DBG_W_ARB_MAX 0x40324 + +#define mmMME1_RTR_DBG_N_ARB_MAX 0x40328 + +#define mmMME1_RTR_DBG_S_ARB_MAX 0x4032C + +#define mmMME1_RTR_DBG_L_ARB_MAX 0x40330 + +#define mmMME1_RTR_SPLIT_COEF_0 0x40400 + +#define mmMME1_RTR_SPLIT_COEF_1 0x40404 + +#define mmMME1_RTR_SPLIT_COEF_2 0x40408 + +#define mmMME1_RTR_SPLIT_COEF_3 0x4040C + +#define mmMME1_RTR_SPLIT_COEF_4 0x40410 + +#define mmMME1_RTR_SPLIT_COEF_5 0x40414 + 
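
The mm-prefixed constants in this file are byte offsets into the device's register space rather than bitfield descriptions. A minimal usage sketch under that assumption follows; the goya_cfg_base pointer and the rreg32/wreg32 helpers are illustrative stand-ins for the driver's real MMIO accessors.

#include <stdint.h>

/* Hypothetical ioremap'd base of the configuration space; the actual
 * driver routes accesses through its own MMIO helpers.
 */
extern volatile uint32_t *goya_cfg_base;

static inline uint32_t goya_rreg32(uint32_t offset)
{
	return goya_cfg_base[offset / sizeof(uint32_t)];
}

static inline void goya_wreg32(uint32_t offset, uint32_t val)
{
	goya_cfg_base[offset / sizeof(uint32_t)] = val;
}

/* Example: cap the east-port HBW arbiter of MME1's router at 32
 * credits; the field is 6 bits wide, per
 * MME1_RTR_HBW_E_ARB_MAX_CREDIT_MASK in the masks header above.
 */
static void mme1_rtr_set_hbw_e_max_credit(void)
{
	goya_wreg32(mmMME1_RTR_HBW_E_ARB_MAX,
		    32 & MME1_RTR_HBW_E_ARB_MAX_CREDIT_MASK);
}
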
+#define mmMME1_RTR_SPLIT_COEF_6 0x40418 + +#define mmMME1_RTR_SPLIT_COEF_7 0x4041C + +#define mmMME1_RTR_SPLIT_COEF_8 0x40420 + +#define mmMME1_RTR_SPLIT_COEF_9 0x40424 + +#define mmMME1_RTR_SPLIT_CFG 0x40440 + +#define mmMME1_RTR_SPLIT_RD_SAT 0x40444 + +#define mmMME1_RTR_SPLIT_RD_RST_TOKEN 0x40448 + +#define mmMME1_RTR_SPLIT_RD_TIMEOUT_0 0x4044C + +#define mmMME1_RTR_SPLIT_RD_TIMEOUT_1 0x40450 + +#define mmMME1_RTR_SPLIT_WR_SAT 0x40454 + +#define mmMME1_RTR_WPLIT_WR_TST_TOLEN 0x40458 + +#define mmMME1_RTR_SPLIT_WR_TIMEOUT_0 0x4045C + +#define mmMME1_RTR_SPLIT_WR_TIMEOUT_1 0x40460 + +#define mmMME1_RTR_HBW_RANGE_HIT 0x40470 + +#define mmMME1_RTR_HBW_RANGE_MASK_L_0 0x40480 + +#define mmMME1_RTR_HBW_RANGE_MASK_L_1 0x40484 + +#define mmMME1_RTR_HBW_RANGE_MASK_L_2 0x40488 + +#define mmMME1_RTR_HBW_RANGE_MASK_L_3 0x4048C + +#define mmMME1_RTR_HBW_RANGE_MASK_L_4 0x40490 + +#define mmMME1_RTR_HBW_RANGE_MASK_L_5 0x40494 + +#define mmMME1_RTR_HBW_RANGE_MASK_L_6 0x40498 + +#define mmMME1_RTR_HBW_RANGE_MASK_L_7 0x4049C + +#define mmMME1_RTR_HBW_RANGE_MASK_H_0 0x404A0 + +#define mmMME1_RTR_HBW_RANGE_MASK_H_1 0x404A4 + +#define mmMME1_RTR_HBW_RANGE_MASK_H_2 0x404A8 + +#define mmMME1_RTR_HBW_RANGE_MASK_H_3 0x404AC + +#define mmMME1_RTR_HBW_RANGE_MASK_H_4 0x404B0 + +#define mmMME1_RTR_HBW_RANGE_MASK_H_5 0x404B4 + +#define mmMME1_RTR_HBW_RANGE_MASK_H_6 0x404B8 + +#define mmMME1_RTR_HBW_RANGE_MASK_H_7 0x404BC + +#define mmMME1_RTR_HBW_RANGE_BASE_L_0 0x404C0 + +#define mmMME1_RTR_HBW_RANGE_BASE_L_1 0x404C4 + +#define mmMME1_RTR_HBW_RANGE_BASE_L_2 0x404C8 + +#define mmMME1_RTR_HBW_RANGE_BASE_L_3 0x404CC + +#define mmMME1_RTR_HBW_RANGE_BASE_L_4 0x404D0 + +#define mmMME1_RTR_HBW_RANGE_BASE_L_5 0x404D4 + +#define mmMME1_RTR_HBW_RANGE_BASE_L_6 0x404D8 + +#define mmMME1_RTR_HBW_RANGE_BASE_L_7 0x404DC + +#define mmMME1_RTR_HBW_RANGE_BASE_H_0 0x404E0 + +#define mmMME1_RTR_HBW_RANGE_BASE_H_1 0x404E4 + +#define mmMME1_RTR_HBW_RANGE_BASE_H_2 0x404E8 + +#define mmMME1_RTR_HBW_RANGE_BASE_H_3 0x404EC + +#define mmMME1_RTR_HBW_RANGE_BASE_H_4 0x404F0 + +#define mmMME1_RTR_HBW_RANGE_BASE_H_5 0x404F4 + +#define mmMME1_RTR_HBW_RANGE_BASE_H_6 0x404F8 + +#define mmMME1_RTR_HBW_RANGE_BASE_H_7 0x404FC + +#define mmMME1_RTR_LBW_RANGE_HIT 0x40500 + +#define mmMME1_RTR_LBW_RANGE_MASK_0 0x40510 + +#define mmMME1_RTR_LBW_RANGE_MASK_1 0x40514 + +#define mmMME1_RTR_LBW_RANGE_MASK_2 0x40518 + +#define mmMME1_RTR_LBW_RANGE_MASK_3 0x4051C + +#define mmMME1_RTR_LBW_RANGE_MASK_4 0x40520 + +#define mmMME1_RTR_LBW_RANGE_MASK_5 0x40524 + +#define mmMME1_RTR_LBW_RANGE_MASK_6 0x40528 + +#define mmMME1_RTR_LBW_RANGE_MASK_7 0x4052C + +#define mmMME1_RTR_LBW_RANGE_MASK_8 0x40530 + +#define mmMME1_RTR_LBW_RANGE_MASK_9 0x40534 + +#define mmMME1_RTR_LBW_RANGE_MASK_10 0x40538 + +#define mmMME1_RTR_LBW_RANGE_MASK_11 0x4053C + +#define mmMME1_RTR_LBW_RANGE_MASK_12 0x40540 + +#define mmMME1_RTR_LBW_RANGE_MASK_13 0x40544 + +#define mmMME1_RTR_LBW_RANGE_MASK_14 0x40548 + +#define mmMME1_RTR_LBW_RANGE_MASK_15 0x4054C + +#define mmMME1_RTR_LBW_RANGE_BASE_0 0x40550 + +#define mmMME1_RTR_LBW_RANGE_BASE_1 0x40554 + +#define mmMME1_RTR_LBW_RANGE_BASE_2 0x40558 + +#define mmMME1_RTR_LBW_RANGE_BASE_3 0x4055C + +#define mmMME1_RTR_LBW_RANGE_BASE_4 0x40560 + +#define mmMME1_RTR_LBW_RANGE_BASE_5 0x40564 + +#define mmMME1_RTR_LBW_RANGE_BASE_6 0x40568 + +#define mmMME1_RTR_LBW_RANGE_BASE_7 0x4056C + +#define mmMME1_RTR_LBW_RANGE_BASE_8 0x40570 + +#define mmMME1_RTR_LBW_RANGE_BASE_9 0x40574 + +#define mmMME1_RTR_LBW_RANGE_BASE_10 0x40578 + +#define 
mmMME1_RTR_LBW_RANGE_BASE_11 0x4057C + +#define mmMME1_RTR_LBW_RANGE_BASE_12 0x40580 + +#define mmMME1_RTR_LBW_RANGE_BASE_13 0x40584 + +#define mmMME1_RTR_LBW_RANGE_BASE_14 0x40588 + +#define mmMME1_RTR_LBW_RANGE_BASE_15 0x4058C + +#define mmMME1_RTR_RGLTR 0x40590 + +#define mmMME1_RTR_RGLTR_WR_RESULT 0x40594 + +#define mmMME1_RTR_RGLTR_RD_RESULT 0x40598 + +#define mmMME1_RTR_SCRAMB_EN 0x40600 + +#define mmMME1_RTR_NON_LIN_SCRAMB 0x40604 + +#endif /* ASIC_REG_MME1_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h new file mode 100644 index 000000000..00ce2252b --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h @@ -0,0 +1,330 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME2_RTR_REGS_H_ +#define ASIC_REG_MME2_RTR_REGS_H_ + +/* + ***************************************** + * MME2_RTR (Prototype: MME_RTR) + ***************************************** + */ + +#define mmMME2_RTR_HBW_RD_RQ_E_ARB 0x80100 + +#define mmMME2_RTR_HBW_RD_RQ_W_ARB 0x80104 + +#define mmMME2_RTR_HBW_RD_RQ_N_ARB 0x80108 + +#define mmMME2_RTR_HBW_RD_RQ_S_ARB 0x8010C + +#define mmMME2_RTR_HBW_RD_RQ_L_ARB 0x80110 + +#define mmMME2_RTR_HBW_E_ARB_MAX 0x80120 + +#define mmMME2_RTR_HBW_W_ARB_MAX 0x80124 + +#define mmMME2_RTR_HBW_N_ARB_MAX 0x80128 + +#define mmMME2_RTR_HBW_S_ARB_MAX 0x8012C + +#define mmMME2_RTR_HBW_L_ARB_MAX 0x80130 + +#define mmMME2_RTR_HBW_RD_RS_MAX_CREDIT 0x80140 + +#define mmMME2_RTR_HBW_WR_RQ_MAX_CREDIT 0x80144 + +#define mmMME2_RTR_HBW_RD_RQ_MAX_CREDIT 0x80148 + +#define mmMME2_RTR_HBW_RD_RS_E_ARB 0x80150 + +#define mmMME2_RTR_HBW_RD_RS_W_ARB 0x80154 + +#define mmMME2_RTR_HBW_RD_RS_N_ARB 0x80158 + +#define mmMME2_RTR_HBW_RD_RS_S_ARB 0x8015C + +#define mmMME2_RTR_HBW_RD_RS_L_ARB 0x80160 + +#define mmMME2_RTR_HBW_WR_RQ_E_ARB 0x80170 + +#define mmMME2_RTR_HBW_WR_RQ_W_ARB 0x80174 + +#define mmMME2_RTR_HBW_WR_RQ_N_ARB 0x80178 + +#define mmMME2_RTR_HBW_WR_RQ_S_ARB 0x8017C + +#define mmMME2_RTR_HBW_WR_RQ_L_ARB 0x80180 + +#define mmMME2_RTR_HBW_WR_RS_E_ARB 0x80190 + +#define mmMME2_RTR_HBW_WR_RS_W_ARB 0x80194 + +#define mmMME2_RTR_HBW_WR_RS_N_ARB 0x80198 + +#define mmMME2_RTR_HBW_WR_RS_S_ARB 0x8019C + +#define mmMME2_RTR_HBW_WR_RS_L_ARB 0x801A0 + +#define mmMME2_RTR_LBW_RD_RQ_E_ARB 0x80200 + +#define mmMME2_RTR_LBW_RD_RQ_W_ARB 0x80204 + +#define mmMME2_RTR_LBW_RD_RQ_N_ARB 0x80208 + +#define mmMME2_RTR_LBW_RD_RQ_S_ARB 0x8020C + +#define mmMME2_RTR_LBW_RD_RQ_L_ARB 0x80210 + +#define mmMME2_RTR_LBW_E_ARB_MAX 0x80220 + +#define mmMME2_RTR_LBW_W_ARB_MAX 0x80224 + +#define mmMME2_RTR_LBW_N_ARB_MAX 0x80228 + +#define mmMME2_RTR_LBW_S_ARB_MAX 0x8022C + +#define mmMME2_RTR_LBW_L_ARB_MAX 0x80230 + +#define mmMME2_RTR_LBW_SRAM_MAX_CREDIT 0x80240 + +#define mmMME2_RTR_LBW_RD_RS_E_ARB 0x80250 + +#define mmMME2_RTR_LBW_RD_RS_W_ARB 0x80254 + +#define mmMME2_RTR_LBW_RD_RS_N_ARB 0x80258 + +#define mmMME2_RTR_LBW_RD_RS_S_ARB 0x8025C + +#define mmMME2_RTR_LBW_RD_RS_L_ARB 0x80260 + +#define mmMME2_RTR_LBW_WR_RQ_E_ARB 0x80270 + +#define mmMME2_RTR_LBW_WR_RQ_W_ARB 0x80274 + +#define mmMME2_RTR_LBW_WR_RQ_N_ARB 0x80278 + +#define mmMME2_RTR_LBW_WR_RQ_S_ARB 0x8027C + +#define mmMME2_RTR_LBW_WR_RQ_L_ARB 0x80280 + +#define mmMME2_RTR_LBW_WR_RS_E_ARB 0x80290 + +#define 
mmMME2_RTR_LBW_WR_RS_W_ARB 0x80294 + +#define mmMME2_RTR_LBW_WR_RS_N_ARB 0x80298 + +#define mmMME2_RTR_LBW_WR_RS_S_ARB 0x8029C + +#define mmMME2_RTR_LBW_WR_RS_L_ARB 0x802A0 + +#define mmMME2_RTR_DBG_E_ARB 0x80300 + +#define mmMME2_RTR_DBG_W_ARB 0x80304 + +#define mmMME2_RTR_DBG_N_ARB 0x80308 + +#define mmMME2_RTR_DBG_S_ARB 0x8030C + +#define mmMME2_RTR_DBG_L_ARB 0x80310 + +#define mmMME2_RTR_DBG_E_ARB_MAX 0x80320 + +#define mmMME2_RTR_DBG_W_ARB_MAX 0x80324 + +#define mmMME2_RTR_DBG_N_ARB_MAX 0x80328 + +#define mmMME2_RTR_DBG_S_ARB_MAX 0x8032C + +#define mmMME2_RTR_DBG_L_ARB_MAX 0x80330 + +#define mmMME2_RTR_SPLIT_COEF_0 0x80400 + +#define mmMME2_RTR_SPLIT_COEF_1 0x80404 + +#define mmMME2_RTR_SPLIT_COEF_2 0x80408 + +#define mmMME2_RTR_SPLIT_COEF_3 0x8040C + +#define mmMME2_RTR_SPLIT_COEF_4 0x80410 + +#define mmMME2_RTR_SPLIT_COEF_5 0x80414 + +#define mmMME2_RTR_SPLIT_COEF_6 0x80418 + +#define mmMME2_RTR_SPLIT_COEF_7 0x8041C + +#define mmMME2_RTR_SPLIT_COEF_8 0x80420 + +#define mmMME2_RTR_SPLIT_COEF_9 0x80424 + +#define mmMME2_RTR_SPLIT_CFG 0x80440 + +#define mmMME2_RTR_SPLIT_RD_SAT 0x80444 + +#define mmMME2_RTR_SPLIT_RD_RST_TOKEN 0x80448 + +#define mmMME2_RTR_SPLIT_RD_TIMEOUT_0 0x8044C + +#define mmMME2_RTR_SPLIT_RD_TIMEOUT_1 0x80450 + +#define mmMME2_RTR_SPLIT_WR_SAT 0x80454 + +#define mmMME2_RTR_WPLIT_WR_TST_TOLEN 0x80458 + +#define mmMME2_RTR_SPLIT_WR_TIMEOUT_0 0x8045C + +#define mmMME2_RTR_SPLIT_WR_TIMEOUT_1 0x80460 + +#define mmMME2_RTR_HBW_RANGE_HIT 0x80470 + +#define mmMME2_RTR_HBW_RANGE_MASK_L_0 0x80480 + +#define mmMME2_RTR_HBW_RANGE_MASK_L_1 0x80484 + +#define mmMME2_RTR_HBW_RANGE_MASK_L_2 0x80488 + +#define mmMME2_RTR_HBW_RANGE_MASK_L_3 0x8048C + +#define mmMME2_RTR_HBW_RANGE_MASK_L_4 0x80490 + +#define mmMME2_RTR_HBW_RANGE_MASK_L_5 0x80494 + +#define mmMME2_RTR_HBW_RANGE_MASK_L_6 0x80498 + +#define mmMME2_RTR_HBW_RANGE_MASK_L_7 0x8049C + +#define mmMME2_RTR_HBW_RANGE_MASK_H_0 0x804A0 + +#define mmMME2_RTR_HBW_RANGE_MASK_H_1 0x804A4 + +#define mmMME2_RTR_HBW_RANGE_MASK_H_2 0x804A8 + +#define mmMME2_RTR_HBW_RANGE_MASK_H_3 0x804AC + +#define mmMME2_RTR_HBW_RANGE_MASK_H_4 0x804B0 + +#define mmMME2_RTR_HBW_RANGE_MASK_H_5 0x804B4 + +#define mmMME2_RTR_HBW_RANGE_MASK_H_6 0x804B8 + +#define mmMME2_RTR_HBW_RANGE_MASK_H_7 0x804BC + +#define mmMME2_RTR_HBW_RANGE_BASE_L_0 0x804C0 + +#define mmMME2_RTR_HBW_RANGE_BASE_L_1 0x804C4 + +#define mmMME2_RTR_HBW_RANGE_BASE_L_2 0x804C8 + +#define mmMME2_RTR_HBW_RANGE_BASE_L_3 0x804CC + +#define mmMME2_RTR_HBW_RANGE_BASE_L_4 0x804D0 + +#define mmMME2_RTR_HBW_RANGE_BASE_L_5 0x804D4 + +#define mmMME2_RTR_HBW_RANGE_BASE_L_6 0x804D8 + +#define mmMME2_RTR_HBW_RANGE_BASE_L_7 0x804DC + +#define mmMME2_RTR_HBW_RANGE_BASE_H_0 0x804E0 + +#define mmMME2_RTR_HBW_RANGE_BASE_H_1 0x804E4 + +#define mmMME2_RTR_HBW_RANGE_BASE_H_2 0x804E8 + +#define mmMME2_RTR_HBW_RANGE_BASE_H_3 0x804EC + +#define mmMME2_RTR_HBW_RANGE_BASE_H_4 0x804F0 + +#define mmMME2_RTR_HBW_RANGE_BASE_H_5 0x804F4 + +#define mmMME2_RTR_HBW_RANGE_BASE_H_6 0x804F8 + +#define mmMME2_RTR_HBW_RANGE_BASE_H_7 0x804FC + +#define mmMME2_RTR_LBW_RANGE_HIT 0x80500 + +#define mmMME2_RTR_LBW_RANGE_MASK_0 0x80510 + +#define mmMME2_RTR_LBW_RANGE_MASK_1 0x80514 + +#define mmMME2_RTR_LBW_RANGE_MASK_2 0x80518 + +#define mmMME2_RTR_LBW_RANGE_MASK_3 0x8051C + +#define mmMME2_RTR_LBW_RANGE_MASK_4 0x80520 + +#define mmMME2_RTR_LBW_RANGE_MASK_5 0x80524 + +#define mmMME2_RTR_LBW_RANGE_MASK_6 0x80528 + +#define mmMME2_RTR_LBW_RANGE_MASK_7 0x8052C + +#define mmMME2_RTR_LBW_RANGE_MASK_8 0x80530 + +#define 
mmMME2_RTR_LBW_RANGE_MASK_9 0x80534 + +#define mmMME2_RTR_LBW_RANGE_MASK_10 0x80538 + +#define mmMME2_RTR_LBW_RANGE_MASK_11 0x8053C + +#define mmMME2_RTR_LBW_RANGE_MASK_12 0x80540 + +#define mmMME2_RTR_LBW_RANGE_MASK_13 0x80544 + +#define mmMME2_RTR_LBW_RANGE_MASK_14 0x80548 + +#define mmMME2_RTR_LBW_RANGE_MASK_15 0x8054C + +#define mmMME2_RTR_LBW_RANGE_BASE_0 0x80550 + +#define mmMME2_RTR_LBW_RANGE_BASE_1 0x80554 + +#define mmMME2_RTR_LBW_RANGE_BASE_2 0x80558 + +#define mmMME2_RTR_LBW_RANGE_BASE_3 0x8055C + +#define mmMME2_RTR_LBW_RANGE_BASE_4 0x80560 + +#define mmMME2_RTR_LBW_RANGE_BASE_5 0x80564 + +#define mmMME2_RTR_LBW_RANGE_BASE_6 0x80568 + +#define mmMME2_RTR_LBW_RANGE_BASE_7 0x8056C + +#define mmMME2_RTR_LBW_RANGE_BASE_8 0x80570 + +#define mmMME2_RTR_LBW_RANGE_BASE_9 0x80574 + +#define mmMME2_RTR_LBW_RANGE_BASE_10 0x80578 + +#define mmMME2_RTR_LBW_RANGE_BASE_11 0x8057C + +#define mmMME2_RTR_LBW_RANGE_BASE_12 0x80580 + +#define mmMME2_RTR_LBW_RANGE_BASE_13 0x80584 + +#define mmMME2_RTR_LBW_RANGE_BASE_14 0x80588 + +#define mmMME2_RTR_LBW_RANGE_BASE_15 0x8058C + +#define mmMME2_RTR_RGLTR 0x80590 + +#define mmMME2_RTR_RGLTR_WR_RESULT 0x80594 + +#define mmMME2_RTR_RGLTR_RD_RESULT 0x80598 + +#define mmMME2_RTR_SCRAMB_EN 0x80600 + +#define mmMME2_RTR_NON_LIN_SCRAMB 0x80604 + +#endif /* ASIC_REG_MME2_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h new file mode 100644 index 000000000..8e3eb7fd2 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h @@ -0,0 +1,330 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME3_RTR_REGS_H_ +#define ASIC_REG_MME3_RTR_REGS_H_ + +/* + ***************************************** + * MME3_RTR (Prototype: MME_RTR) + ***************************************** + */ + +#define mmMME3_RTR_HBW_RD_RQ_E_ARB 0xC0100 + +#define mmMME3_RTR_HBW_RD_RQ_W_ARB 0xC0104 + +#define mmMME3_RTR_HBW_RD_RQ_N_ARB 0xC0108 + +#define mmMME3_RTR_HBW_RD_RQ_S_ARB 0xC010C + +#define mmMME3_RTR_HBW_RD_RQ_L_ARB 0xC0110 + +#define mmMME3_RTR_HBW_E_ARB_MAX 0xC0120 + +#define mmMME3_RTR_HBW_W_ARB_MAX 0xC0124 + +#define mmMME3_RTR_HBW_N_ARB_MAX 0xC0128 + +#define mmMME3_RTR_HBW_S_ARB_MAX 0xC012C + +#define mmMME3_RTR_HBW_L_ARB_MAX 0xC0130 + +#define mmMME3_RTR_HBW_RD_RS_MAX_CREDIT 0xC0140 + +#define mmMME3_RTR_HBW_WR_RQ_MAX_CREDIT 0xC0144 + +#define mmMME3_RTR_HBW_RD_RQ_MAX_CREDIT 0xC0148 + +#define mmMME3_RTR_HBW_RD_RS_E_ARB 0xC0150 + +#define mmMME3_RTR_HBW_RD_RS_W_ARB 0xC0154 + +#define mmMME3_RTR_HBW_RD_RS_N_ARB 0xC0158 + +#define mmMME3_RTR_HBW_RD_RS_S_ARB 0xC015C + +#define mmMME3_RTR_HBW_RD_RS_L_ARB 0xC0160 + +#define mmMME3_RTR_HBW_WR_RQ_E_ARB 0xC0170 + +#define mmMME3_RTR_HBW_WR_RQ_W_ARB 0xC0174 + +#define mmMME3_RTR_HBW_WR_RQ_N_ARB 0xC0178 + +#define mmMME3_RTR_HBW_WR_RQ_S_ARB 0xC017C + +#define mmMME3_RTR_HBW_WR_RQ_L_ARB 0xC0180 + +#define mmMME3_RTR_HBW_WR_RS_E_ARB 0xC0190 + +#define mmMME3_RTR_HBW_WR_RS_W_ARB 0xC0194 + +#define mmMME3_RTR_HBW_WR_RS_N_ARB 0xC0198 + +#define mmMME3_RTR_HBW_WR_RS_S_ARB 0xC019C + +#define mmMME3_RTR_HBW_WR_RS_L_ARB 0xC01A0 + +#define mmMME3_RTR_LBW_RD_RQ_E_ARB 0xC0200 + +#define mmMME3_RTR_LBW_RD_RQ_W_ARB 0xC0204 + +#define mmMME3_RTR_LBW_RD_RQ_N_ARB 0xC0208 + +#define 
mmMME3_RTR_LBW_RD_RQ_S_ARB 0xC020C + +#define mmMME3_RTR_LBW_RD_RQ_L_ARB 0xC0210 + +#define mmMME3_RTR_LBW_E_ARB_MAX 0xC0220 + +#define mmMME3_RTR_LBW_W_ARB_MAX 0xC0224 + +#define mmMME3_RTR_LBW_N_ARB_MAX 0xC0228 + +#define mmMME3_RTR_LBW_S_ARB_MAX 0xC022C + +#define mmMME3_RTR_LBW_L_ARB_MAX 0xC0230 + +#define mmMME3_RTR_LBW_SRAM_MAX_CREDIT 0xC0240 + +#define mmMME3_RTR_LBW_RD_RS_E_ARB 0xC0250 + +#define mmMME3_RTR_LBW_RD_RS_W_ARB 0xC0254 + +#define mmMME3_RTR_LBW_RD_RS_N_ARB 0xC0258 + +#define mmMME3_RTR_LBW_RD_RS_S_ARB 0xC025C + +#define mmMME3_RTR_LBW_RD_RS_L_ARB 0xC0260 + +#define mmMME3_RTR_LBW_WR_RQ_E_ARB 0xC0270 + +#define mmMME3_RTR_LBW_WR_RQ_W_ARB 0xC0274 + +#define mmMME3_RTR_LBW_WR_RQ_N_ARB 0xC0278 + +#define mmMME3_RTR_LBW_WR_RQ_S_ARB 0xC027C + +#define mmMME3_RTR_LBW_WR_RQ_L_ARB 0xC0280 + +#define mmMME3_RTR_LBW_WR_RS_E_ARB 0xC0290 + +#define mmMME3_RTR_LBW_WR_RS_W_ARB 0xC0294 + +#define mmMME3_RTR_LBW_WR_RS_N_ARB 0xC0298 + +#define mmMME3_RTR_LBW_WR_RS_S_ARB 0xC029C + +#define mmMME3_RTR_LBW_WR_RS_L_ARB 0xC02A0 + +#define mmMME3_RTR_DBG_E_ARB 0xC0300 + +#define mmMME3_RTR_DBG_W_ARB 0xC0304 + +#define mmMME3_RTR_DBG_N_ARB 0xC0308 + +#define mmMME3_RTR_DBG_S_ARB 0xC030C + +#define mmMME3_RTR_DBG_L_ARB 0xC0310 + +#define mmMME3_RTR_DBG_E_ARB_MAX 0xC0320 + +#define mmMME3_RTR_DBG_W_ARB_MAX 0xC0324 + +#define mmMME3_RTR_DBG_N_ARB_MAX 0xC0328 + +#define mmMME3_RTR_DBG_S_ARB_MAX 0xC032C + +#define mmMME3_RTR_DBG_L_ARB_MAX 0xC0330 + +#define mmMME3_RTR_SPLIT_COEF_0 0xC0400 + +#define mmMME3_RTR_SPLIT_COEF_1 0xC0404 + +#define mmMME3_RTR_SPLIT_COEF_2 0xC0408 + +#define mmMME3_RTR_SPLIT_COEF_3 0xC040C + +#define mmMME3_RTR_SPLIT_COEF_4 0xC0410 + +#define mmMME3_RTR_SPLIT_COEF_5 0xC0414 + +#define mmMME3_RTR_SPLIT_COEF_6 0xC0418 + +#define mmMME3_RTR_SPLIT_COEF_7 0xC041C + +#define mmMME3_RTR_SPLIT_COEF_8 0xC0420 + +#define mmMME3_RTR_SPLIT_COEF_9 0xC0424 + +#define mmMME3_RTR_SPLIT_CFG 0xC0440 + +#define mmMME3_RTR_SPLIT_RD_SAT 0xC0444 + +#define mmMME3_RTR_SPLIT_RD_RST_TOKEN 0xC0448 + +#define mmMME3_RTR_SPLIT_RD_TIMEOUT_0 0xC044C + +#define mmMME3_RTR_SPLIT_RD_TIMEOUT_1 0xC0450 + +#define mmMME3_RTR_SPLIT_WR_SAT 0xC0454 + +#define mmMME3_RTR_WPLIT_WR_TST_TOLEN 0xC0458 + +#define mmMME3_RTR_SPLIT_WR_TIMEOUT_0 0xC045C + +#define mmMME3_RTR_SPLIT_WR_TIMEOUT_1 0xC0460 + +#define mmMME3_RTR_HBW_RANGE_HIT 0xC0470 + +#define mmMME3_RTR_HBW_RANGE_MASK_L_0 0xC0480 + +#define mmMME3_RTR_HBW_RANGE_MASK_L_1 0xC0484 + +#define mmMME3_RTR_HBW_RANGE_MASK_L_2 0xC0488 + +#define mmMME3_RTR_HBW_RANGE_MASK_L_3 0xC048C + +#define mmMME3_RTR_HBW_RANGE_MASK_L_4 0xC0490 + +#define mmMME3_RTR_HBW_RANGE_MASK_L_5 0xC0494 + +#define mmMME3_RTR_HBW_RANGE_MASK_L_6 0xC0498 + +#define mmMME3_RTR_HBW_RANGE_MASK_L_7 0xC049C + +#define mmMME3_RTR_HBW_RANGE_MASK_H_0 0xC04A0 + +#define mmMME3_RTR_HBW_RANGE_MASK_H_1 0xC04A4 + +#define mmMME3_RTR_HBW_RANGE_MASK_H_2 0xC04A8 + +#define mmMME3_RTR_HBW_RANGE_MASK_H_3 0xC04AC + +#define mmMME3_RTR_HBW_RANGE_MASK_H_4 0xC04B0 + +#define mmMME3_RTR_HBW_RANGE_MASK_H_5 0xC04B4 + +#define mmMME3_RTR_HBW_RANGE_MASK_H_6 0xC04B8 + +#define mmMME3_RTR_HBW_RANGE_MASK_H_7 0xC04BC + +#define mmMME3_RTR_HBW_RANGE_BASE_L_0 0xC04C0 + +#define mmMME3_RTR_HBW_RANGE_BASE_L_1 0xC04C4 + +#define mmMME3_RTR_HBW_RANGE_BASE_L_2 0xC04C8 + +#define mmMME3_RTR_HBW_RANGE_BASE_L_3 0xC04CC + +#define mmMME3_RTR_HBW_RANGE_BASE_L_4 0xC04D0 + +#define mmMME3_RTR_HBW_RANGE_BASE_L_5 0xC04D4 + +#define mmMME3_RTR_HBW_RANGE_BASE_L_6 0xC04D8 + +#define mmMME3_RTR_HBW_RANGE_BASE_L_7 0xC04DC + +#define 
mmMME3_RTR_HBW_RANGE_BASE_H_0 0xC04E0 + +#define mmMME3_RTR_HBW_RANGE_BASE_H_1 0xC04E4 + +#define mmMME3_RTR_HBW_RANGE_BASE_H_2 0xC04E8 + +#define mmMME3_RTR_HBW_RANGE_BASE_H_3 0xC04EC + +#define mmMME3_RTR_HBW_RANGE_BASE_H_4 0xC04F0 + +#define mmMME3_RTR_HBW_RANGE_BASE_H_5 0xC04F4 + +#define mmMME3_RTR_HBW_RANGE_BASE_H_6 0xC04F8 + +#define mmMME3_RTR_HBW_RANGE_BASE_H_7 0xC04FC + +#define mmMME3_RTR_LBW_RANGE_HIT 0xC0500 + +#define mmMME3_RTR_LBW_RANGE_MASK_0 0xC0510 + +#define mmMME3_RTR_LBW_RANGE_MASK_1 0xC0514 + +#define mmMME3_RTR_LBW_RANGE_MASK_2 0xC0518 + +#define mmMME3_RTR_LBW_RANGE_MASK_3 0xC051C + +#define mmMME3_RTR_LBW_RANGE_MASK_4 0xC0520 + +#define mmMME3_RTR_LBW_RANGE_MASK_5 0xC0524 + +#define mmMME3_RTR_LBW_RANGE_MASK_6 0xC0528 + +#define mmMME3_RTR_LBW_RANGE_MASK_7 0xC052C + +#define mmMME3_RTR_LBW_RANGE_MASK_8 0xC0530 + +#define mmMME3_RTR_LBW_RANGE_MASK_9 0xC0534 + +#define mmMME3_RTR_LBW_RANGE_MASK_10 0xC0538 + +#define mmMME3_RTR_LBW_RANGE_MASK_11 0xC053C + +#define mmMME3_RTR_LBW_RANGE_MASK_12 0xC0540 + +#define mmMME3_RTR_LBW_RANGE_MASK_13 0xC0544 + +#define mmMME3_RTR_LBW_RANGE_MASK_14 0xC0548 + +#define mmMME3_RTR_LBW_RANGE_MASK_15 0xC054C + +#define mmMME3_RTR_LBW_RANGE_BASE_0 0xC0550 + +#define mmMME3_RTR_LBW_RANGE_BASE_1 0xC0554 + +#define mmMME3_RTR_LBW_RANGE_BASE_2 0xC0558 + +#define mmMME3_RTR_LBW_RANGE_BASE_3 0xC055C + +#define mmMME3_RTR_LBW_RANGE_BASE_4 0xC0560 + +#define mmMME3_RTR_LBW_RANGE_BASE_5 0xC0564 + +#define mmMME3_RTR_LBW_RANGE_BASE_6 0xC0568 + +#define mmMME3_RTR_LBW_RANGE_BASE_7 0xC056C + +#define mmMME3_RTR_LBW_RANGE_BASE_8 0xC0570 + +#define mmMME3_RTR_LBW_RANGE_BASE_9 0xC0574 + +#define mmMME3_RTR_LBW_RANGE_BASE_10 0xC0578 + +#define mmMME3_RTR_LBW_RANGE_BASE_11 0xC057C + +#define mmMME3_RTR_LBW_RANGE_BASE_12 0xC0580 + +#define mmMME3_RTR_LBW_RANGE_BASE_13 0xC0584 + +#define mmMME3_RTR_LBW_RANGE_BASE_14 0xC0588 + +#define mmMME3_RTR_LBW_RANGE_BASE_15 0xC058C + +#define mmMME3_RTR_RGLTR 0xC0590 + +#define mmMME3_RTR_RGLTR_WR_RESULT 0xC0594 + +#define mmMME3_RTR_RGLTR_RD_RESULT 0xC0598 + +#define mmMME3_RTR_SCRAMB_EN 0xC0600 + +#define mmMME3_RTR_NON_LIN_SCRAMB 0xC0604 + +#endif /* ASIC_REG_MME3_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h new file mode 100644 index 000000000..79b67bbc8 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h @@ -0,0 +1,330 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME4_RTR_REGS_H_ +#define ASIC_REG_MME4_RTR_REGS_H_ + +/* + ***************************************** + * MME4_RTR (Prototype: MME_RTR) + ***************************************** + */ + +#define mmMME4_RTR_HBW_RD_RQ_E_ARB 0x100100 + +#define mmMME4_RTR_HBW_RD_RQ_W_ARB 0x100104 + +#define mmMME4_RTR_HBW_RD_RQ_N_ARB 0x100108 + +#define mmMME4_RTR_HBW_RD_RQ_S_ARB 0x10010C + +#define mmMME4_RTR_HBW_RD_RQ_L_ARB 0x100110 + +#define mmMME4_RTR_HBW_E_ARB_MAX 0x100120 + +#define mmMME4_RTR_HBW_W_ARB_MAX 0x100124 + +#define mmMME4_RTR_HBW_N_ARB_MAX 0x100128 + +#define mmMME4_RTR_HBW_S_ARB_MAX 0x10012C + +#define mmMME4_RTR_HBW_L_ARB_MAX 0x100130 + +#define mmMME4_RTR_HBW_RD_RS_MAX_CREDIT 0x100140 + +#define mmMME4_RTR_HBW_WR_RQ_MAX_CREDIT 0x100144 + +#define mmMME4_RTR_HBW_RD_RQ_MAX_CREDIT 0x100148 + +#define mmMME4_RTR_HBW_RD_RS_E_ARB 0x100150 + +#define mmMME4_RTR_HBW_RD_RS_W_ARB 0x100154 + +#define mmMME4_RTR_HBW_RD_RS_N_ARB 0x100158 + +#define mmMME4_RTR_HBW_RD_RS_S_ARB 0x10015C + +#define mmMME4_RTR_HBW_RD_RS_L_ARB 0x100160 + +#define mmMME4_RTR_HBW_WR_RQ_E_ARB 0x100170 + +#define mmMME4_RTR_HBW_WR_RQ_W_ARB 0x100174 + +#define mmMME4_RTR_HBW_WR_RQ_N_ARB 0x100178 + +#define mmMME4_RTR_HBW_WR_RQ_S_ARB 0x10017C + +#define mmMME4_RTR_HBW_WR_RQ_L_ARB 0x100180 + +#define mmMME4_RTR_HBW_WR_RS_E_ARB 0x100190 + +#define mmMME4_RTR_HBW_WR_RS_W_ARB 0x100194 + +#define mmMME4_RTR_HBW_WR_RS_N_ARB 0x100198 + +#define mmMME4_RTR_HBW_WR_RS_S_ARB 0x10019C + +#define mmMME4_RTR_HBW_WR_RS_L_ARB 0x1001A0 + +#define mmMME4_RTR_LBW_RD_RQ_E_ARB 0x100200 + +#define mmMME4_RTR_LBW_RD_RQ_W_ARB 0x100204 + +#define mmMME4_RTR_LBW_RD_RQ_N_ARB 0x100208 + +#define mmMME4_RTR_LBW_RD_RQ_S_ARB 0x10020C + +#define mmMME4_RTR_LBW_RD_RQ_L_ARB 0x100210 + +#define mmMME4_RTR_LBW_E_ARB_MAX 0x100220 + +#define mmMME4_RTR_LBW_W_ARB_MAX 0x100224 + +#define mmMME4_RTR_LBW_N_ARB_MAX 0x100228 + +#define mmMME4_RTR_LBW_S_ARB_MAX 0x10022C + +#define mmMME4_RTR_LBW_L_ARB_MAX 0x100230 + +#define mmMME4_RTR_LBW_SRAM_MAX_CREDIT 0x100240 + +#define mmMME4_RTR_LBW_RD_RS_E_ARB 0x100250 + +#define mmMME4_RTR_LBW_RD_RS_W_ARB 0x100254 + +#define mmMME4_RTR_LBW_RD_RS_N_ARB 0x100258 + +#define mmMME4_RTR_LBW_RD_RS_S_ARB 0x10025C + +#define mmMME4_RTR_LBW_RD_RS_L_ARB 0x100260 + +#define mmMME4_RTR_LBW_WR_RQ_E_ARB 0x100270 + +#define mmMME4_RTR_LBW_WR_RQ_W_ARB 0x100274 + +#define mmMME4_RTR_LBW_WR_RQ_N_ARB 0x100278 + +#define mmMME4_RTR_LBW_WR_RQ_S_ARB 0x10027C + +#define mmMME4_RTR_LBW_WR_RQ_L_ARB 0x100280 + +#define mmMME4_RTR_LBW_WR_RS_E_ARB 0x100290 + +#define mmMME4_RTR_LBW_WR_RS_W_ARB 0x100294 + +#define mmMME4_RTR_LBW_WR_RS_N_ARB 0x100298 + +#define mmMME4_RTR_LBW_WR_RS_S_ARB 0x10029C + +#define mmMME4_RTR_LBW_WR_RS_L_ARB 0x1002A0 + +#define mmMME4_RTR_DBG_E_ARB 0x100300 + +#define mmMME4_RTR_DBG_W_ARB 0x100304 + +#define mmMME4_RTR_DBG_N_ARB 0x100308 + +#define mmMME4_RTR_DBG_S_ARB 0x10030C + +#define mmMME4_RTR_DBG_L_ARB 0x100310 + +#define mmMME4_RTR_DBG_E_ARB_MAX 0x100320 + +#define mmMME4_RTR_DBG_W_ARB_MAX 0x100324 + +#define mmMME4_RTR_DBG_N_ARB_MAX 0x100328 + +#define mmMME4_RTR_DBG_S_ARB_MAX 0x10032C + +#define mmMME4_RTR_DBG_L_ARB_MAX 0x100330 + +#define mmMME4_RTR_SPLIT_COEF_0 0x100400 + +#define mmMME4_RTR_SPLIT_COEF_1 0x100404 + +#define mmMME4_RTR_SPLIT_COEF_2 0x100408 + +#define mmMME4_RTR_SPLIT_COEF_3 0x10040C + +#define mmMME4_RTR_SPLIT_COEF_4 0x100410 + 
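
The four MME router blocks expose an identical register layout at a fixed stride (0x40100, 0x80100, 0xC0100, 0x100100 for HBW_RD_RQ_E_ARB in MME1..MME4), so a per-instance offset can also be derived from the MME1 constants instead of spelled out; a small sketch of that derivation, with an illustrative helper name:

#include <stdint.h>

/* MME1..MME4 RTR blocks are 0x40000 apart; rebase an MME1 offset onto
 * another instance. Helper and macro names here are hypothetical.
 */
#define MME_RTR_INSTANCE_STRIDE	0x40000

static inline uint32_t mme_rtr_reg(uint32_t mme1_offset,
				   unsigned int instance)
{
	/* instance is 1-based: 1 -> MME1 ... 4 -> MME4 */
	return mme1_offset + (instance - 1) * MME_RTR_INSTANCE_STRIDE;
}

/* e.g. mme_rtr_reg(mmMME1_RTR_SPLIT_CFG, 4) yields 0x100440, matching
 * mmMME4_RTR_SPLIT_CFG below.
 */
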
+#define mmMME4_RTR_SPLIT_COEF_5 0x100414 + +#define mmMME4_RTR_SPLIT_COEF_6 0x100418 + +#define mmMME4_RTR_SPLIT_COEF_7 0x10041C + +#define mmMME4_RTR_SPLIT_COEF_8 0x100420 + +#define mmMME4_RTR_SPLIT_COEF_9 0x100424 + +#define mmMME4_RTR_SPLIT_CFG 0x100440 + +#define mmMME4_RTR_SPLIT_RD_SAT 0x100444 + +#define mmMME4_RTR_SPLIT_RD_RST_TOKEN 0x100448 + +#define mmMME4_RTR_SPLIT_RD_TIMEOUT_0 0x10044C + +#define mmMME4_RTR_SPLIT_RD_TIMEOUT_1 0x100450 + +#define mmMME4_RTR_SPLIT_WR_SAT 0x100454 + +#define mmMME4_RTR_WPLIT_WR_TST_TOLEN 0x100458 + +#define mmMME4_RTR_SPLIT_WR_TIMEOUT_0 0x10045C + +#define mmMME4_RTR_SPLIT_WR_TIMEOUT_1 0x100460 + +#define mmMME4_RTR_HBW_RANGE_HIT 0x100470 + +#define mmMME4_RTR_HBW_RANGE_MASK_L_0 0x100480 + +#define mmMME4_RTR_HBW_RANGE_MASK_L_1 0x100484 + +#define mmMME4_RTR_HBW_RANGE_MASK_L_2 0x100488 + +#define mmMME4_RTR_HBW_RANGE_MASK_L_3 0x10048C + +#define mmMME4_RTR_HBW_RANGE_MASK_L_4 0x100490 + +#define mmMME4_RTR_HBW_RANGE_MASK_L_5 0x100494 + +#define mmMME4_RTR_HBW_RANGE_MASK_L_6 0x100498 + +#define mmMME4_RTR_HBW_RANGE_MASK_L_7 0x10049C + +#define mmMME4_RTR_HBW_RANGE_MASK_H_0 0x1004A0 + +#define mmMME4_RTR_HBW_RANGE_MASK_H_1 0x1004A4 + +#define mmMME4_RTR_HBW_RANGE_MASK_H_2 0x1004A8 + +#define mmMME4_RTR_HBW_RANGE_MASK_H_3 0x1004AC + +#define mmMME4_RTR_HBW_RANGE_MASK_H_4 0x1004B0 + +#define mmMME4_RTR_HBW_RANGE_MASK_H_5 0x1004B4 + +#define mmMME4_RTR_HBW_RANGE_MASK_H_6 0x1004B8 + +#define mmMME4_RTR_HBW_RANGE_MASK_H_7 0x1004BC + +#define mmMME4_RTR_HBW_RANGE_BASE_L_0 0x1004C0 + +#define mmMME4_RTR_HBW_RANGE_BASE_L_1 0x1004C4 + +#define mmMME4_RTR_HBW_RANGE_BASE_L_2 0x1004C8 + +#define mmMME4_RTR_HBW_RANGE_BASE_L_3 0x1004CC + +#define mmMME4_RTR_HBW_RANGE_BASE_L_4 0x1004D0 + +#define mmMME4_RTR_HBW_RANGE_BASE_L_5 0x1004D4 + +#define mmMME4_RTR_HBW_RANGE_BASE_L_6 0x1004D8 + +#define mmMME4_RTR_HBW_RANGE_BASE_L_7 0x1004DC + +#define mmMME4_RTR_HBW_RANGE_BASE_H_0 0x1004E0 + +#define mmMME4_RTR_HBW_RANGE_BASE_H_1 0x1004E4 + +#define mmMME4_RTR_HBW_RANGE_BASE_H_2 0x1004E8 + +#define mmMME4_RTR_HBW_RANGE_BASE_H_3 0x1004EC + +#define mmMME4_RTR_HBW_RANGE_BASE_H_4 0x1004F0 + +#define mmMME4_RTR_HBW_RANGE_BASE_H_5 0x1004F4 + +#define mmMME4_RTR_HBW_RANGE_BASE_H_6 0x1004F8 + +#define mmMME4_RTR_HBW_RANGE_BASE_H_7 0x1004FC + +#define mmMME4_RTR_LBW_RANGE_HIT 0x100500 + +#define mmMME4_RTR_LBW_RANGE_MASK_0 0x100510 + +#define mmMME4_RTR_LBW_RANGE_MASK_1 0x100514 + +#define mmMME4_RTR_LBW_RANGE_MASK_2 0x100518 + +#define mmMME4_RTR_LBW_RANGE_MASK_3 0x10051C + +#define mmMME4_RTR_LBW_RANGE_MASK_4 0x100520 + +#define mmMME4_RTR_LBW_RANGE_MASK_5 0x100524 + +#define mmMME4_RTR_LBW_RANGE_MASK_6 0x100528 + +#define mmMME4_RTR_LBW_RANGE_MASK_7 0x10052C + +#define mmMME4_RTR_LBW_RANGE_MASK_8 0x100530 + +#define mmMME4_RTR_LBW_RANGE_MASK_9 0x100534 + +#define mmMME4_RTR_LBW_RANGE_MASK_10 0x100538 + +#define mmMME4_RTR_LBW_RANGE_MASK_11 0x10053C + +#define mmMME4_RTR_LBW_RANGE_MASK_12 0x100540 + +#define mmMME4_RTR_LBW_RANGE_MASK_13 0x100544 + +#define mmMME4_RTR_LBW_RANGE_MASK_14 0x100548 + +#define mmMME4_RTR_LBW_RANGE_MASK_15 0x10054C + +#define mmMME4_RTR_LBW_RANGE_BASE_0 0x100550 + +#define mmMME4_RTR_LBW_RANGE_BASE_1 0x100554 + +#define mmMME4_RTR_LBW_RANGE_BASE_2 0x100558 + +#define mmMME4_RTR_LBW_RANGE_BASE_3 0x10055C + +#define mmMME4_RTR_LBW_RANGE_BASE_4 0x100560 + +#define mmMME4_RTR_LBW_RANGE_BASE_5 0x100564 + +#define mmMME4_RTR_LBW_RANGE_BASE_6 0x100568 + +#define mmMME4_RTR_LBW_RANGE_BASE_7 0x10056C + +#define mmMME4_RTR_LBW_RANGE_BASE_8 0x100570 + +#define 
mmMME4_RTR_LBW_RANGE_BASE_9 0x100574 + +#define mmMME4_RTR_LBW_RANGE_BASE_10 0x100578 + +#define mmMME4_RTR_LBW_RANGE_BASE_11 0x10057C + +#define mmMME4_RTR_LBW_RANGE_BASE_12 0x100580 + +#define mmMME4_RTR_LBW_RANGE_BASE_13 0x100584 + +#define mmMME4_RTR_LBW_RANGE_BASE_14 0x100588 + +#define mmMME4_RTR_LBW_RANGE_BASE_15 0x10058C + +#define mmMME4_RTR_RGLTR 0x100590 + +#define mmMME4_RTR_RGLTR_WR_RESULT 0x100594 + +#define mmMME4_RTR_RGLTR_RD_RESULT 0x100598 + +#define mmMME4_RTR_SCRAMB_EN 0x100600 + +#define mmMME4_RTR_NON_LIN_SCRAMB 0x100604 + +#endif /* ASIC_REG_MME4_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h new file mode 100644 index 000000000..0ac3c37ce --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h @@ -0,0 +1,330 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME5_RTR_REGS_H_ +#define ASIC_REG_MME5_RTR_REGS_H_ + +/* + ***************************************** + * MME5_RTR (Prototype: MME_RTR) + ***************************************** + */ + +#define mmMME5_RTR_HBW_RD_RQ_E_ARB 0x140100 + +#define mmMME5_RTR_HBW_RD_RQ_W_ARB 0x140104 + +#define mmMME5_RTR_HBW_RD_RQ_N_ARB 0x140108 + +#define mmMME5_RTR_HBW_RD_RQ_S_ARB 0x14010C + +#define mmMME5_RTR_HBW_RD_RQ_L_ARB 0x140110 + +#define mmMME5_RTR_HBW_E_ARB_MAX 0x140120 + +#define mmMME5_RTR_HBW_W_ARB_MAX 0x140124 + +#define mmMME5_RTR_HBW_N_ARB_MAX 0x140128 + +#define mmMME5_RTR_HBW_S_ARB_MAX 0x14012C + +#define mmMME5_RTR_HBW_L_ARB_MAX 0x140130 + +#define mmMME5_RTR_HBW_RD_RS_MAX_CREDIT 0x140140 + +#define mmMME5_RTR_HBW_WR_RQ_MAX_CREDIT 0x140144 + +#define mmMME5_RTR_HBW_RD_RQ_MAX_CREDIT 0x140148 + +#define mmMME5_RTR_HBW_RD_RS_E_ARB 0x140150 + +#define mmMME5_RTR_HBW_RD_RS_W_ARB 0x140154 + +#define mmMME5_RTR_HBW_RD_RS_N_ARB 0x140158 + +#define mmMME5_RTR_HBW_RD_RS_S_ARB 0x14015C + +#define mmMME5_RTR_HBW_RD_RS_L_ARB 0x140160 + +#define mmMME5_RTR_HBW_WR_RQ_E_ARB 0x140170 + +#define mmMME5_RTR_HBW_WR_RQ_W_ARB 0x140174 + +#define mmMME5_RTR_HBW_WR_RQ_N_ARB 0x140178 + +#define mmMME5_RTR_HBW_WR_RQ_S_ARB 0x14017C + +#define mmMME5_RTR_HBW_WR_RQ_L_ARB 0x140180 + +#define mmMME5_RTR_HBW_WR_RS_E_ARB 0x140190 + +#define mmMME5_RTR_HBW_WR_RS_W_ARB 0x140194 + +#define mmMME5_RTR_HBW_WR_RS_N_ARB 0x140198 + +#define mmMME5_RTR_HBW_WR_RS_S_ARB 0x14019C + +#define mmMME5_RTR_HBW_WR_RS_L_ARB 0x1401A0 + +#define mmMME5_RTR_LBW_RD_RQ_E_ARB 0x140200 + +#define mmMME5_RTR_LBW_RD_RQ_W_ARB 0x140204 + +#define mmMME5_RTR_LBW_RD_RQ_N_ARB 0x140208 + +#define mmMME5_RTR_LBW_RD_RQ_S_ARB 0x14020C + +#define mmMME5_RTR_LBW_RD_RQ_L_ARB 0x140210 + +#define mmMME5_RTR_LBW_E_ARB_MAX 0x140220 + +#define mmMME5_RTR_LBW_W_ARB_MAX 0x140224 + +#define mmMME5_RTR_LBW_N_ARB_MAX 0x140228 + +#define mmMME5_RTR_LBW_S_ARB_MAX 0x14022C + +#define mmMME5_RTR_LBW_L_ARB_MAX 0x140230 + +#define mmMME5_RTR_LBW_SRAM_MAX_CREDIT 0x140240 + +#define mmMME5_RTR_LBW_RD_RS_E_ARB 0x140250 + +#define mmMME5_RTR_LBW_RD_RS_W_ARB 0x140254 + +#define mmMME5_RTR_LBW_RD_RS_N_ARB 0x140258 + +#define mmMME5_RTR_LBW_RD_RS_S_ARB 0x14025C + +#define mmMME5_RTR_LBW_RD_RS_L_ARB 0x140260 + +#define mmMME5_RTR_LBW_WR_RQ_E_ARB 0x140270 + +#define mmMME5_RTR_LBW_WR_RQ_W_ARB 0x140274 + +#define mmMME5_RTR_LBW_WR_RQ_N_ARB 0x140278 + 
+#define mmMME5_RTR_LBW_WR_RQ_S_ARB 0x14027C + +#define mmMME5_RTR_LBW_WR_RQ_L_ARB 0x140280 + +#define mmMME5_RTR_LBW_WR_RS_E_ARB 0x140290 + +#define mmMME5_RTR_LBW_WR_RS_W_ARB 0x140294 + +#define mmMME5_RTR_LBW_WR_RS_N_ARB 0x140298 + +#define mmMME5_RTR_LBW_WR_RS_S_ARB 0x14029C + +#define mmMME5_RTR_LBW_WR_RS_L_ARB 0x1402A0 + +#define mmMME5_RTR_DBG_E_ARB 0x140300 + +#define mmMME5_RTR_DBG_W_ARB 0x140304 + +#define mmMME5_RTR_DBG_N_ARB 0x140308 + +#define mmMME5_RTR_DBG_S_ARB 0x14030C + +#define mmMME5_RTR_DBG_L_ARB 0x140310 + +#define mmMME5_RTR_DBG_E_ARB_MAX 0x140320 + +#define mmMME5_RTR_DBG_W_ARB_MAX 0x140324 + +#define mmMME5_RTR_DBG_N_ARB_MAX 0x140328 + +#define mmMME5_RTR_DBG_S_ARB_MAX 0x14032C + +#define mmMME5_RTR_DBG_L_ARB_MAX 0x140330 + +#define mmMME5_RTR_SPLIT_COEF_0 0x140400 + +#define mmMME5_RTR_SPLIT_COEF_1 0x140404 + +#define mmMME5_RTR_SPLIT_COEF_2 0x140408 + +#define mmMME5_RTR_SPLIT_COEF_3 0x14040C + +#define mmMME5_RTR_SPLIT_COEF_4 0x140410 + +#define mmMME5_RTR_SPLIT_COEF_5 0x140414 + +#define mmMME5_RTR_SPLIT_COEF_6 0x140418 + +#define mmMME5_RTR_SPLIT_COEF_7 0x14041C + +#define mmMME5_RTR_SPLIT_COEF_8 0x140420 + +#define mmMME5_RTR_SPLIT_COEF_9 0x140424 + +#define mmMME5_RTR_SPLIT_CFG 0x140440 + +#define mmMME5_RTR_SPLIT_RD_SAT 0x140444 + +#define mmMME5_RTR_SPLIT_RD_RST_TOKEN 0x140448 + +#define mmMME5_RTR_SPLIT_RD_TIMEOUT_0 0x14044C + +#define mmMME5_RTR_SPLIT_RD_TIMEOUT_1 0x140450 + +#define mmMME5_RTR_SPLIT_WR_SAT 0x140454 + +#define mmMME5_RTR_WPLIT_WR_TST_TOLEN 0x140458 + +#define mmMME5_RTR_SPLIT_WR_TIMEOUT_0 0x14045C + +#define mmMME5_RTR_SPLIT_WR_TIMEOUT_1 0x140460 + +#define mmMME5_RTR_HBW_RANGE_HIT 0x140470 + +#define mmMME5_RTR_HBW_RANGE_MASK_L_0 0x140480 + +#define mmMME5_RTR_HBW_RANGE_MASK_L_1 0x140484 + +#define mmMME5_RTR_HBW_RANGE_MASK_L_2 0x140488 + +#define mmMME5_RTR_HBW_RANGE_MASK_L_3 0x14048C + +#define mmMME5_RTR_HBW_RANGE_MASK_L_4 0x140490 + +#define mmMME5_RTR_HBW_RANGE_MASK_L_5 0x140494 + +#define mmMME5_RTR_HBW_RANGE_MASK_L_6 0x140498 + +#define mmMME5_RTR_HBW_RANGE_MASK_L_7 0x14049C + +#define mmMME5_RTR_HBW_RANGE_MASK_H_0 0x1404A0 + +#define mmMME5_RTR_HBW_RANGE_MASK_H_1 0x1404A4 + +#define mmMME5_RTR_HBW_RANGE_MASK_H_2 0x1404A8 + +#define mmMME5_RTR_HBW_RANGE_MASK_H_3 0x1404AC + +#define mmMME5_RTR_HBW_RANGE_MASK_H_4 0x1404B0 + +#define mmMME5_RTR_HBW_RANGE_MASK_H_5 0x1404B4 + +#define mmMME5_RTR_HBW_RANGE_MASK_H_6 0x1404B8 + +#define mmMME5_RTR_HBW_RANGE_MASK_H_7 0x1404BC + +#define mmMME5_RTR_HBW_RANGE_BASE_L_0 0x1404C0 + +#define mmMME5_RTR_HBW_RANGE_BASE_L_1 0x1404C4 + +#define mmMME5_RTR_HBW_RANGE_BASE_L_2 0x1404C8 + +#define mmMME5_RTR_HBW_RANGE_BASE_L_3 0x1404CC + +#define mmMME5_RTR_HBW_RANGE_BASE_L_4 0x1404D0 + +#define mmMME5_RTR_HBW_RANGE_BASE_L_5 0x1404D4 + +#define mmMME5_RTR_HBW_RANGE_BASE_L_6 0x1404D8 + +#define mmMME5_RTR_HBW_RANGE_BASE_L_7 0x1404DC + +#define mmMME5_RTR_HBW_RANGE_BASE_H_0 0x1404E0 + +#define mmMME5_RTR_HBW_RANGE_BASE_H_1 0x1404E4 + +#define mmMME5_RTR_HBW_RANGE_BASE_H_2 0x1404E8 + +#define mmMME5_RTR_HBW_RANGE_BASE_H_3 0x1404EC + +#define mmMME5_RTR_HBW_RANGE_BASE_H_4 0x1404F0 + +#define mmMME5_RTR_HBW_RANGE_BASE_H_5 0x1404F4 + +#define mmMME5_RTR_HBW_RANGE_BASE_H_6 0x1404F8 + +#define mmMME5_RTR_HBW_RANGE_BASE_H_7 0x1404FC + +#define mmMME5_RTR_LBW_RANGE_HIT 0x140500 + +#define mmMME5_RTR_LBW_RANGE_MASK_0 0x140510 + +#define mmMME5_RTR_LBW_RANGE_MASK_1 0x140514 + +#define mmMME5_RTR_LBW_RANGE_MASK_2 0x140518 + +#define mmMME5_RTR_LBW_RANGE_MASK_3 0x14051C + +#define mmMME5_RTR_LBW_RANGE_MASK_4 
0x140520 + +#define mmMME5_RTR_LBW_RANGE_MASK_5 0x140524 + +#define mmMME5_RTR_LBW_RANGE_MASK_6 0x140528 + +#define mmMME5_RTR_LBW_RANGE_MASK_7 0x14052C + +#define mmMME5_RTR_LBW_RANGE_MASK_8 0x140530 + +#define mmMME5_RTR_LBW_RANGE_MASK_9 0x140534 + +#define mmMME5_RTR_LBW_RANGE_MASK_10 0x140538 + +#define mmMME5_RTR_LBW_RANGE_MASK_11 0x14053C + +#define mmMME5_RTR_LBW_RANGE_MASK_12 0x140540 + +#define mmMME5_RTR_LBW_RANGE_MASK_13 0x140544 + +#define mmMME5_RTR_LBW_RANGE_MASK_14 0x140548 + +#define mmMME5_RTR_LBW_RANGE_MASK_15 0x14054C + +#define mmMME5_RTR_LBW_RANGE_BASE_0 0x140550 + +#define mmMME5_RTR_LBW_RANGE_BASE_1 0x140554 + +#define mmMME5_RTR_LBW_RANGE_BASE_2 0x140558 + +#define mmMME5_RTR_LBW_RANGE_BASE_3 0x14055C + +#define mmMME5_RTR_LBW_RANGE_BASE_4 0x140560 + +#define mmMME5_RTR_LBW_RANGE_BASE_5 0x140564 + +#define mmMME5_RTR_LBW_RANGE_BASE_6 0x140568 + +#define mmMME5_RTR_LBW_RANGE_BASE_7 0x14056C + +#define mmMME5_RTR_LBW_RANGE_BASE_8 0x140570 + +#define mmMME5_RTR_LBW_RANGE_BASE_9 0x140574 + +#define mmMME5_RTR_LBW_RANGE_BASE_10 0x140578 + +#define mmMME5_RTR_LBW_RANGE_BASE_11 0x14057C + +#define mmMME5_RTR_LBW_RANGE_BASE_12 0x140580 + +#define mmMME5_RTR_LBW_RANGE_BASE_13 0x140584 + +#define mmMME5_RTR_LBW_RANGE_BASE_14 0x140588 + +#define mmMME5_RTR_LBW_RANGE_BASE_15 0x14058C + +#define mmMME5_RTR_RGLTR 0x140590 + +#define mmMME5_RTR_RGLTR_WR_RESULT 0x140594 + +#define mmMME5_RTR_RGLTR_RD_RESULT 0x140598 + +#define mmMME5_RTR_SCRAMB_EN 0x140600 + +#define mmMME5_RTR_NON_LIN_SCRAMB 0x140604 + +#endif /* ASIC_REG_MME5_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h new file mode 100644 index 000000000..50c49cce7 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h @@ -0,0 +1,330 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME6_RTR_REGS_H_ +#define ASIC_REG_MME6_RTR_REGS_H_ + +/* + ***************************************** + * MME6_RTR (Prototype: MME_RTR) + ***************************************** + */ + +#define mmMME6_RTR_HBW_RD_RQ_E_ARB 0x180100 + +#define mmMME6_RTR_HBW_RD_RQ_W_ARB 0x180104 + +#define mmMME6_RTR_HBW_RD_RQ_N_ARB 0x180108 + +#define mmMME6_RTR_HBW_RD_RQ_S_ARB 0x18010C + +#define mmMME6_RTR_HBW_RD_RQ_L_ARB 0x180110 + +#define mmMME6_RTR_HBW_E_ARB_MAX 0x180120 + +#define mmMME6_RTR_HBW_W_ARB_MAX 0x180124 + +#define mmMME6_RTR_HBW_N_ARB_MAX 0x180128 + +#define mmMME6_RTR_HBW_S_ARB_MAX 0x18012C + +#define mmMME6_RTR_HBW_L_ARB_MAX 0x180130 + +#define mmMME6_RTR_HBW_RD_RS_MAX_CREDIT 0x180140 + +#define mmMME6_RTR_HBW_WR_RQ_MAX_CREDIT 0x180144 + +#define mmMME6_RTR_HBW_RD_RQ_MAX_CREDIT 0x180148 + +#define mmMME6_RTR_HBW_RD_RS_E_ARB 0x180150 + +#define mmMME6_RTR_HBW_RD_RS_W_ARB 0x180154 + +#define mmMME6_RTR_HBW_RD_RS_N_ARB 0x180158 + +#define mmMME6_RTR_HBW_RD_RS_S_ARB 0x18015C + +#define mmMME6_RTR_HBW_RD_RS_L_ARB 0x180160 + +#define mmMME6_RTR_HBW_WR_RQ_E_ARB 0x180170 + +#define mmMME6_RTR_HBW_WR_RQ_W_ARB 0x180174 + +#define mmMME6_RTR_HBW_WR_RQ_N_ARB 0x180178 + +#define mmMME6_RTR_HBW_WR_RQ_S_ARB 0x18017C + +#define mmMME6_RTR_HBW_WR_RQ_L_ARB 0x180180 + +#define mmMME6_RTR_HBW_WR_RS_E_ARB 0x180190 + +#define mmMME6_RTR_HBW_WR_RS_W_ARB 0x180194 + +#define mmMME6_RTR_HBW_WR_RS_N_ARB 0x180198 + +#define mmMME6_RTR_HBW_WR_RS_S_ARB 0x18019C + +#define mmMME6_RTR_HBW_WR_RS_L_ARB 0x1801A0 + +#define mmMME6_RTR_LBW_RD_RQ_E_ARB 0x180200 + +#define mmMME6_RTR_LBW_RD_RQ_W_ARB 0x180204 + +#define mmMME6_RTR_LBW_RD_RQ_N_ARB 0x180208 + +#define mmMME6_RTR_LBW_RD_RQ_S_ARB 0x18020C + +#define mmMME6_RTR_LBW_RD_RQ_L_ARB 0x180210 + +#define mmMME6_RTR_LBW_E_ARB_MAX 0x180220 + +#define mmMME6_RTR_LBW_W_ARB_MAX 0x180224 + +#define mmMME6_RTR_LBW_N_ARB_MAX 0x180228 + +#define mmMME6_RTR_LBW_S_ARB_MAX 0x18022C + +#define mmMME6_RTR_LBW_L_ARB_MAX 0x180230 + +#define mmMME6_RTR_LBW_SRAM_MAX_CREDIT 0x180240 + +#define mmMME6_RTR_LBW_RD_RS_E_ARB 0x180250 + +#define mmMME6_RTR_LBW_RD_RS_W_ARB 0x180254 + +#define mmMME6_RTR_LBW_RD_RS_N_ARB 0x180258 + +#define mmMME6_RTR_LBW_RD_RS_S_ARB 0x18025C + +#define mmMME6_RTR_LBW_RD_RS_L_ARB 0x180260 + +#define mmMME6_RTR_LBW_WR_RQ_E_ARB 0x180270 + +#define mmMME6_RTR_LBW_WR_RQ_W_ARB 0x180274 + +#define mmMME6_RTR_LBW_WR_RQ_N_ARB 0x180278 + +#define mmMME6_RTR_LBW_WR_RQ_S_ARB 0x18027C + +#define mmMME6_RTR_LBW_WR_RQ_L_ARB 0x180280 + +#define mmMME6_RTR_LBW_WR_RS_E_ARB 0x180290 + +#define mmMME6_RTR_LBW_WR_RS_W_ARB 0x180294 + +#define mmMME6_RTR_LBW_WR_RS_N_ARB 0x180298 + +#define mmMME6_RTR_LBW_WR_RS_S_ARB 0x18029C + +#define mmMME6_RTR_LBW_WR_RS_L_ARB 0x1802A0 + +#define mmMME6_RTR_DBG_E_ARB 0x180300 + +#define mmMME6_RTR_DBG_W_ARB 0x180304 + +#define mmMME6_RTR_DBG_N_ARB 0x180308 + +#define mmMME6_RTR_DBG_S_ARB 0x18030C + +#define mmMME6_RTR_DBG_L_ARB 0x180310 + +#define mmMME6_RTR_DBG_E_ARB_MAX 0x180320 + +#define mmMME6_RTR_DBG_W_ARB_MAX 0x180324 + +#define mmMME6_RTR_DBG_N_ARB_MAX 0x180328 + +#define mmMME6_RTR_DBG_S_ARB_MAX 0x18032C + +#define mmMME6_RTR_DBG_L_ARB_MAX 0x180330 + +#define mmMME6_RTR_SPLIT_COEF_0 0x180400 + +#define mmMME6_RTR_SPLIT_COEF_1 0x180404 + +#define mmMME6_RTR_SPLIT_COEF_2 0x180408 + +#define mmMME6_RTR_SPLIT_COEF_3 0x18040C + +#define mmMME6_RTR_SPLIT_COEF_4 0x180410 + 
+#define mmMME6_RTR_SPLIT_COEF_5 0x180414 + +#define mmMME6_RTR_SPLIT_COEF_6 0x180418 + +#define mmMME6_RTR_SPLIT_COEF_7 0x18041C + +#define mmMME6_RTR_SPLIT_COEF_8 0x180420 + +#define mmMME6_RTR_SPLIT_COEF_9 0x180424 + +#define mmMME6_RTR_SPLIT_CFG 0x180440 + +#define mmMME6_RTR_SPLIT_RD_SAT 0x180444 + +#define mmMME6_RTR_SPLIT_RD_RST_TOKEN 0x180448 + +#define mmMME6_RTR_SPLIT_RD_TIMEOUT_0 0x18044C + +#define mmMME6_RTR_SPLIT_RD_TIMEOUT_1 0x180450 + +#define mmMME6_RTR_SPLIT_WR_SAT 0x180454 + +#define mmMME6_RTR_WPLIT_WR_TST_TOLEN 0x180458 + +#define mmMME6_RTR_SPLIT_WR_TIMEOUT_0 0x18045C + +#define mmMME6_RTR_SPLIT_WR_TIMEOUT_1 0x180460 + +#define mmMME6_RTR_HBW_RANGE_HIT 0x180470 + +#define mmMME6_RTR_HBW_RANGE_MASK_L_0 0x180480 + +#define mmMME6_RTR_HBW_RANGE_MASK_L_1 0x180484 + +#define mmMME6_RTR_HBW_RANGE_MASK_L_2 0x180488 + +#define mmMME6_RTR_HBW_RANGE_MASK_L_3 0x18048C + +#define mmMME6_RTR_HBW_RANGE_MASK_L_4 0x180490 + +#define mmMME6_RTR_HBW_RANGE_MASK_L_5 0x180494 + +#define mmMME6_RTR_HBW_RANGE_MASK_L_6 0x180498 + +#define mmMME6_RTR_HBW_RANGE_MASK_L_7 0x18049C + +#define mmMME6_RTR_HBW_RANGE_MASK_H_0 0x1804A0 + +#define mmMME6_RTR_HBW_RANGE_MASK_H_1 0x1804A4 + +#define mmMME6_RTR_HBW_RANGE_MASK_H_2 0x1804A8 + +#define mmMME6_RTR_HBW_RANGE_MASK_H_3 0x1804AC + +#define mmMME6_RTR_HBW_RANGE_MASK_H_4 0x1804B0 + +#define mmMME6_RTR_HBW_RANGE_MASK_H_5 0x1804B4 + +#define mmMME6_RTR_HBW_RANGE_MASK_H_6 0x1804B8 + +#define mmMME6_RTR_HBW_RANGE_MASK_H_7 0x1804BC + +#define mmMME6_RTR_HBW_RANGE_BASE_L_0 0x1804C0 + +#define mmMME6_RTR_HBW_RANGE_BASE_L_1 0x1804C4 + +#define mmMME6_RTR_HBW_RANGE_BASE_L_2 0x1804C8 + +#define mmMME6_RTR_HBW_RANGE_BASE_L_3 0x1804CC + +#define mmMME6_RTR_HBW_RANGE_BASE_L_4 0x1804D0 + +#define mmMME6_RTR_HBW_RANGE_BASE_L_5 0x1804D4 + +#define mmMME6_RTR_HBW_RANGE_BASE_L_6 0x1804D8 + +#define mmMME6_RTR_HBW_RANGE_BASE_L_7 0x1804DC + +#define mmMME6_RTR_HBW_RANGE_BASE_H_0 0x1804E0 + +#define mmMME6_RTR_HBW_RANGE_BASE_H_1 0x1804E4 + +#define mmMME6_RTR_HBW_RANGE_BASE_H_2 0x1804E8 + +#define mmMME6_RTR_HBW_RANGE_BASE_H_3 0x1804EC + +#define mmMME6_RTR_HBW_RANGE_BASE_H_4 0x1804F0 + +#define mmMME6_RTR_HBW_RANGE_BASE_H_5 0x1804F4 + +#define mmMME6_RTR_HBW_RANGE_BASE_H_6 0x1804F8 + +#define mmMME6_RTR_HBW_RANGE_BASE_H_7 0x1804FC + +#define mmMME6_RTR_LBW_RANGE_HIT 0x180500 + +#define mmMME6_RTR_LBW_RANGE_MASK_0 0x180510 + +#define mmMME6_RTR_LBW_RANGE_MASK_1 0x180514 + +#define mmMME6_RTR_LBW_RANGE_MASK_2 0x180518 + +#define mmMME6_RTR_LBW_RANGE_MASK_3 0x18051C + +#define mmMME6_RTR_LBW_RANGE_MASK_4 0x180520 + +#define mmMME6_RTR_LBW_RANGE_MASK_5 0x180524 + +#define mmMME6_RTR_LBW_RANGE_MASK_6 0x180528 + +#define mmMME6_RTR_LBW_RANGE_MASK_7 0x18052C + +#define mmMME6_RTR_LBW_RANGE_MASK_8 0x180530 + +#define mmMME6_RTR_LBW_RANGE_MASK_9 0x180534 + +#define mmMME6_RTR_LBW_RANGE_MASK_10 0x180538 + +#define mmMME6_RTR_LBW_RANGE_MASK_11 0x18053C + +#define mmMME6_RTR_LBW_RANGE_MASK_12 0x180540 + +#define mmMME6_RTR_LBW_RANGE_MASK_13 0x180544 + +#define mmMME6_RTR_LBW_RANGE_MASK_14 0x180548 + +#define mmMME6_RTR_LBW_RANGE_MASK_15 0x18054C + +#define mmMME6_RTR_LBW_RANGE_BASE_0 0x180550 + +#define mmMME6_RTR_LBW_RANGE_BASE_1 0x180554 + +#define mmMME6_RTR_LBW_RANGE_BASE_2 0x180558 + +#define mmMME6_RTR_LBW_RANGE_BASE_3 0x18055C + +#define mmMME6_RTR_LBW_RANGE_BASE_4 0x180560 + +#define mmMME6_RTR_LBW_RANGE_BASE_5 0x180564 + +#define mmMME6_RTR_LBW_RANGE_BASE_6 0x180568 + +#define mmMME6_RTR_LBW_RANGE_BASE_7 0x18056C + +#define mmMME6_RTR_LBW_RANGE_BASE_8 0x180570 + +#define 
mmMME6_RTR_LBW_RANGE_BASE_9 0x180574 + +#define mmMME6_RTR_LBW_RANGE_BASE_10 0x180578 + +#define mmMME6_RTR_LBW_RANGE_BASE_11 0x18057C + +#define mmMME6_RTR_LBW_RANGE_BASE_12 0x180580 + +#define mmMME6_RTR_LBW_RANGE_BASE_13 0x180584 + +#define mmMME6_RTR_LBW_RANGE_BASE_14 0x180588 + +#define mmMME6_RTR_LBW_RANGE_BASE_15 0x18058C + +#define mmMME6_RTR_RGLTR 0x180590 + +#define mmMME6_RTR_RGLTR_WR_RESULT 0x180594 + +#define mmMME6_RTR_RGLTR_RD_RESULT 0x180598 + +#define mmMME6_RTR_SCRAMB_EN 0x180600 + +#define mmMME6_RTR_NON_LIN_SCRAMB 0x180604 + +#endif /* ASIC_REG_MME6_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h new file mode 100644 index 000000000..fe7d95bdc --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h @@ -0,0 +1,372 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME_CMDQ_MASKS_H_ +#define ASIC_REG_MME_CMDQ_MASKS_H_ + +/* + ***************************************** + * MME_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +/* MME_CMDQ_GLBL_CFG0 */ +#define MME_CMDQ_GLBL_CFG0_PQF_EN_SHIFT 0 +#define MME_CMDQ_GLBL_CFG0_PQF_EN_MASK 0x1 +#define MME_CMDQ_GLBL_CFG0_CQF_EN_SHIFT 1 +#define MME_CMDQ_GLBL_CFG0_CQF_EN_MASK 0x2 +#define MME_CMDQ_GLBL_CFG0_CP_EN_SHIFT 2 +#define MME_CMDQ_GLBL_CFG0_CP_EN_MASK 0x4 +#define MME_CMDQ_GLBL_CFG0_DMA_EN_SHIFT 3 +#define MME_CMDQ_GLBL_CFG0_DMA_EN_MASK 0x8 + +/* MME_CMDQ_GLBL_CFG1 */ +#define MME_CMDQ_GLBL_CFG1_PQF_STOP_SHIFT 0 +#define MME_CMDQ_GLBL_CFG1_PQF_STOP_MASK 0x1 +#define MME_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT 1 +#define MME_CMDQ_GLBL_CFG1_CQF_STOP_MASK 0x2 +#define MME_CMDQ_GLBL_CFG1_CP_STOP_SHIFT 2 +#define MME_CMDQ_GLBL_CFG1_CP_STOP_MASK 0x4 +#define MME_CMDQ_GLBL_CFG1_DMA_STOP_SHIFT 3 +#define MME_CMDQ_GLBL_CFG1_DMA_STOP_MASK 0x8 +#define MME_CMDQ_GLBL_CFG1_PQF_FLUSH_SHIFT 8 +#define MME_CMDQ_GLBL_CFG1_PQF_FLUSH_MASK 0x100 +#define MME_CMDQ_GLBL_CFG1_CQF_FLUSH_SHIFT 9 +#define MME_CMDQ_GLBL_CFG1_CQF_FLUSH_MASK 0x200 +#define MME_CMDQ_GLBL_CFG1_CP_FLUSH_SHIFT 10 +#define MME_CMDQ_GLBL_CFG1_CP_FLUSH_MASK 0x400 +#define MME_CMDQ_GLBL_CFG1_DMA_FLUSH_SHIFT 11 +#define MME_CMDQ_GLBL_CFG1_DMA_FLUSH_MASK 0x800 + +/* MME_CMDQ_GLBL_PROT */ +#define MME_CMDQ_GLBL_PROT_PQF_PROT_SHIFT 0 +#define MME_CMDQ_GLBL_PROT_PQF_PROT_MASK 0x1 +#define MME_CMDQ_GLBL_PROT_CQF_PROT_SHIFT 1 +#define MME_CMDQ_GLBL_PROT_CQF_PROT_MASK 0x2 +#define MME_CMDQ_GLBL_PROT_CP_PROT_SHIFT 2 +#define MME_CMDQ_GLBL_PROT_CP_PROT_MASK 0x4 +#define MME_CMDQ_GLBL_PROT_DMA_PROT_SHIFT 3 +#define MME_CMDQ_GLBL_PROT_DMA_PROT_MASK 0x8 +#define MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT 4 +#define MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_MASK 0x10 +#define MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT 5 +#define MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_MASK 0x20 +#define MME_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT 6 +#define MME_CMDQ_GLBL_PROT_CP_ERR_PROT_MASK 0x40 +#define MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT 7 +#define MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_MASK 0x80 + +/* MME_CMDQ_GLBL_ERR_CFG */ +#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0 +#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1 +#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1 +#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2 +#define 
MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2 +#define MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4 +#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3 +#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8 +#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4 +#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10 +#define MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5 +#define MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20 +#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6 +#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40 +#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7 +#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80 +#define MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8 +#define MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100 +#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9 +#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200 +#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10 +#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400 +#define MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11 +#define MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800 + +/* MME_CMDQ_GLBL_ERR_ADDR_LO */ +#define MME_CMDQ_GLBL_ERR_ADDR_LO_VAL_SHIFT 0 +#define MME_CMDQ_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_GLBL_ERR_ADDR_HI */ +#define MME_CMDQ_GLBL_ERR_ADDR_HI_VAL_SHIFT 0 +#define MME_CMDQ_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_GLBL_ERR_WDATA */ +#define MME_CMDQ_GLBL_ERR_WDATA_VAL_SHIFT 0 +#define MME_CMDQ_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_GLBL_SECURE_PROPS */ +#define MME_CMDQ_GLBL_SECURE_PROPS_ASID_SHIFT 0 +#define MME_CMDQ_GLBL_SECURE_PROPS_ASID_MASK 0x3FF +#define MME_CMDQ_GLBL_SECURE_PROPS_MMBP_SHIFT 10 +#define MME_CMDQ_GLBL_SECURE_PROPS_MMBP_MASK 0x400 + +/* MME_CMDQ_GLBL_NON_SECURE_PROPS */ +#define MME_CMDQ_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0 +#define MME_CMDQ_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF +#define MME_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10 +#define MME_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400 + +/* MME_CMDQ_GLBL_STS0 */ +#define MME_CMDQ_GLBL_STS0_PQF_IDLE_SHIFT 0 +#define MME_CMDQ_GLBL_STS0_PQF_IDLE_MASK 0x1 +#define MME_CMDQ_GLBL_STS0_CQF_IDLE_SHIFT 1 +#define MME_CMDQ_GLBL_STS0_CQF_IDLE_MASK 0x2 +#define MME_CMDQ_GLBL_STS0_CP_IDLE_SHIFT 2 +#define MME_CMDQ_GLBL_STS0_CP_IDLE_MASK 0x4 +#define MME_CMDQ_GLBL_STS0_DMA_IDLE_SHIFT 3 +#define MME_CMDQ_GLBL_STS0_DMA_IDLE_MASK 0x8 +#define MME_CMDQ_GLBL_STS0_PQF_IS_STOP_SHIFT 4 +#define MME_CMDQ_GLBL_STS0_PQF_IS_STOP_MASK 0x10 +#define MME_CMDQ_GLBL_STS0_CQF_IS_STOP_SHIFT 5 +#define MME_CMDQ_GLBL_STS0_CQF_IS_STOP_MASK 0x20 +#define MME_CMDQ_GLBL_STS0_CP_IS_STOP_SHIFT 6 +#define MME_CMDQ_GLBL_STS0_CP_IS_STOP_MASK 0x40 +#define MME_CMDQ_GLBL_STS0_DMA_IS_STOP_SHIFT 7 +#define MME_CMDQ_GLBL_STS0_DMA_IS_STOP_MASK 0x80 + +/* MME_CMDQ_GLBL_STS1 */ +#define MME_CMDQ_GLBL_STS1_PQF_RD_ERR_SHIFT 0 +#define MME_CMDQ_GLBL_STS1_PQF_RD_ERR_MASK 0x1 +#define MME_CMDQ_GLBL_STS1_CQF_RD_ERR_SHIFT 1 +#define MME_CMDQ_GLBL_STS1_CQF_RD_ERR_MASK 0x2 +#define MME_CMDQ_GLBL_STS1_CP_RD_ERR_SHIFT 2 +#define MME_CMDQ_GLBL_STS1_CP_RD_ERR_MASK 0x4 +#define MME_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3 +#define MME_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8 +#define MME_CMDQ_GLBL_STS1_CP_STOP_OP_SHIFT 4 +#define MME_CMDQ_GLBL_STS1_CP_STOP_OP_MASK 0x10 +#define MME_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5 +#define MME_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20 +#define MME_CMDQ_GLBL_STS1_DMA_RD_ERR_SHIFT 8 +#define MME_CMDQ_GLBL_STS1_DMA_RD_ERR_MASK 0x100 +#define MME_CMDQ_GLBL_STS1_DMA_WR_ERR_SHIFT 9 +#define 
MME_CMDQ_GLBL_STS1_DMA_WR_ERR_MASK 0x200 +#define MME_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10 +#define MME_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400 +#define MME_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11 +#define MME_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800 + +/* MME_CMDQ_CQ_CFG0 */ +#define MME_CMDQ_CQ_CFG0_RESERVED_SHIFT 0 +#define MME_CMDQ_CQ_CFG0_RESERVED_MASK 0x1 + +/* MME_CMDQ_CQ_CFG1 */ +#define MME_CMDQ_CQ_CFG1_CREDIT_LIM_SHIFT 0 +#define MME_CMDQ_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define MME_CMDQ_CQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define MME_CMDQ_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* MME_CMDQ_CQ_ARUSER */ +#define MME_CMDQ_CQ_ARUSER_NOSNOOP_SHIFT 0 +#define MME_CMDQ_CQ_ARUSER_NOSNOOP_MASK 0x1 +#define MME_CMDQ_CQ_ARUSER_WORD_SHIFT 1 +#define MME_CMDQ_CQ_ARUSER_WORD_MASK 0x2 + +/* MME_CMDQ_CQ_PTR_LO */ +#define MME_CMDQ_CQ_PTR_LO_VAL_SHIFT 0 +#define MME_CMDQ_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CQ_PTR_HI */ +#define MME_CMDQ_CQ_PTR_HI_VAL_SHIFT 0 +#define MME_CMDQ_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CQ_TSIZE */ +#define MME_CMDQ_CQ_TSIZE_VAL_SHIFT 0 +#define MME_CMDQ_CQ_TSIZE_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CQ_CTL */ +#define MME_CMDQ_CQ_CTL_RPT_SHIFT 0 +#define MME_CMDQ_CQ_CTL_RPT_MASK 0xFFFF +#define MME_CMDQ_CQ_CTL_CTL_SHIFT 16 +#define MME_CMDQ_CQ_CTL_CTL_MASK 0xFFFF0000 + +/* MME_CMDQ_CQ_PTR_LO_STS */ +#define MME_CMDQ_CQ_PTR_LO_STS_VAL_SHIFT 0 +#define MME_CMDQ_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CQ_PTR_HI_STS */ +#define MME_CMDQ_CQ_PTR_HI_STS_VAL_SHIFT 0 +#define MME_CMDQ_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CQ_TSIZE_STS */ +#define MME_CMDQ_CQ_TSIZE_STS_VAL_SHIFT 0 +#define MME_CMDQ_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CQ_CTL_STS */ +#define MME_CMDQ_CQ_CTL_STS_RPT_SHIFT 0 +#define MME_CMDQ_CQ_CTL_STS_RPT_MASK 0xFFFF +#define MME_CMDQ_CQ_CTL_STS_CTL_SHIFT 16 +#define MME_CMDQ_CQ_CTL_STS_CTL_MASK 0xFFFF0000 + +/* MME_CMDQ_CQ_STS0 */ +#define MME_CMDQ_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0 +#define MME_CMDQ_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF +#define MME_CMDQ_CQ_STS0_CQ_FREE_CNT_SHIFT 16 +#define MME_CMDQ_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000 + +/* MME_CMDQ_CQ_STS1 */ +#define MME_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0 +#define MME_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF +#define MME_CMDQ_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30 +#define MME_CMDQ_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000 +#define MME_CMDQ_CQ_STS1_CQ_BUSY_SHIFT 31 +#define MME_CMDQ_CQ_STS1_CQ_BUSY_MASK 0x80000000 + +/* MME_CMDQ_CQ_RD_RATE_LIM_EN */ +#define MME_CMDQ_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define MME_CMDQ_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN */ +#define MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* MME_CMDQ_CQ_RD_RATE_LIM_SAT */ +#define MME_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define MME_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* MME_CMDQ_CQ_RD_RATE_LIM_TOUT */ +#define MME_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define MME_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* MME_CMDQ_CQ_IFIFO_CNT */ +#define MME_CMDQ_CQ_IFIFO_CNT_VAL_SHIFT 0 +#define MME_CMDQ_CQ_IFIFO_CNT_VAL_MASK 0x3 + +/* MME_CMDQ_CP_MSG_BASE0_ADDR_LO */ +#define MME_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0 +#define MME_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_MSG_BASE0_ADDR_HI */ +#define MME_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0 +#define MME_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_MSG_BASE1_ADDR_LO */ +#define MME_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0 +#define 
MME_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_MSG_BASE1_ADDR_HI */ +#define MME_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0 +#define MME_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_MSG_BASE2_ADDR_LO */ +#define MME_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0 +#define MME_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_MSG_BASE2_ADDR_HI */ +#define MME_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0 +#define MME_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_MSG_BASE3_ADDR_LO */ +#define MME_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0 +#define MME_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_MSG_BASE3_ADDR_HI */ +#define MME_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0 +#define MME_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_LDMA_TSIZE_OFFSET */ +#define MME_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0 +#define MME_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET */ +#define MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0 +#define MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET */ +#define MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0 +#define MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET */ +#define MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0 +#define MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET */ +#define MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0 +#define MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_LDMA_COMMIT_OFFSET */ +#define MME_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0 +#define MME_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_FENCE0_RDATA */ +#define MME_CMDQ_CP_FENCE0_RDATA_INC_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE0_RDATA_INC_VAL_MASK 0xF + +/* MME_CMDQ_CP_FENCE1_RDATA */ +#define MME_CMDQ_CP_FENCE1_RDATA_INC_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE1_RDATA_INC_VAL_MASK 0xF + +/* MME_CMDQ_CP_FENCE2_RDATA */ +#define MME_CMDQ_CP_FENCE2_RDATA_INC_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE2_RDATA_INC_VAL_MASK 0xF + +/* MME_CMDQ_CP_FENCE3_RDATA */ +#define MME_CMDQ_CP_FENCE3_RDATA_INC_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE3_RDATA_INC_VAL_MASK 0xF + +/* MME_CMDQ_CP_FENCE0_CNT */ +#define MME_CMDQ_CP_FENCE0_CNT_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE0_CNT_VAL_MASK 0xFF + +/* MME_CMDQ_CP_FENCE1_CNT */ +#define MME_CMDQ_CP_FENCE1_CNT_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE1_CNT_VAL_MASK 0xFF + +/* MME_CMDQ_CP_FENCE2_CNT */ +#define MME_CMDQ_CP_FENCE2_CNT_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE2_CNT_VAL_MASK 0xFF + +/* MME_CMDQ_CP_FENCE3_CNT */ +#define MME_CMDQ_CP_FENCE3_CNT_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE3_CNT_VAL_MASK 0xFF + +/* MME_CMDQ_CP_STS */ +#define MME_CMDQ_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0 +#define MME_CMDQ_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF +#define MME_CMDQ_CP_STS_ERDY_SHIFT 16 +#define MME_CMDQ_CP_STS_ERDY_MASK 0x10000 +#define MME_CMDQ_CP_STS_RRDY_SHIFT 17 +#define MME_CMDQ_CP_STS_RRDY_MASK 0x20000 +#define MME_CMDQ_CP_STS_MRDY_SHIFT 18 +#define MME_CMDQ_CP_STS_MRDY_MASK 0x40000 +#define MME_CMDQ_CP_STS_SW_STOP_SHIFT 19 +#define MME_CMDQ_CP_STS_SW_STOP_MASK 0x80000 +#define MME_CMDQ_CP_STS_FENCE_ID_SHIFT 20 +#define MME_CMDQ_CP_STS_FENCE_ID_MASK 0x300000 +#define MME_CMDQ_CP_STS_FENCE_IN_PROGRESS_SHIFT 22 +#define MME_CMDQ_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000 + +/* MME_CMDQ_CP_CURRENT_INST_LO */ +#define MME_CMDQ_CP_CURRENT_INST_LO_VAL_SHIFT 0 +#define 
MME_CMDQ_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_CURRENT_INST_HI */ +#define MME_CMDQ_CP_CURRENT_INST_HI_VAL_SHIFT 0 +#define MME_CMDQ_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_BARRIER_CFG */ +#define MME_CMDQ_CP_BARRIER_CFG_EBGUARD_SHIFT 0 +#define MME_CMDQ_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF + +/* MME_CMDQ_CP_DBG_0 */ +#define MME_CMDQ_CP_DBG_0_VAL_SHIFT 0 +#define MME_CMDQ_CP_DBG_0_VAL_MASK 0xFF + +/* MME_CMDQ_CQ_BUF_ADDR */ +#define MME_CMDQ_CQ_BUF_ADDR_VAL_SHIFT 0 +#define MME_CMDQ_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CQ_BUF_RDATA */ +#define MME_CMDQ_CQ_BUF_RDATA_VAL_SHIFT 0 +#define MME_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +#endif /* ASIC_REG_MME_CMDQ_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h new file mode 100644 index 000000000..5f8b85d2b --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME_CMDQ_REGS_H_ +#define ASIC_REG_MME_CMDQ_REGS_H_ + +/* + ***************************************** + * MME_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmMME_CMDQ_GLBL_CFG0 0xD9000 + +#define mmMME_CMDQ_GLBL_CFG1 0xD9004 + +#define mmMME_CMDQ_GLBL_PROT 0xD9008 + +#define mmMME_CMDQ_GLBL_ERR_CFG 0xD900C + +#define mmMME_CMDQ_GLBL_ERR_ADDR_LO 0xD9010 + +#define mmMME_CMDQ_GLBL_ERR_ADDR_HI 0xD9014 + +#define mmMME_CMDQ_GLBL_ERR_WDATA 0xD9018 + +#define mmMME_CMDQ_GLBL_SECURE_PROPS 0xD901C + +#define mmMME_CMDQ_GLBL_NON_SECURE_PROPS 0xD9020 + +#define mmMME_CMDQ_GLBL_STS0 0xD9024 + +#define mmMME_CMDQ_GLBL_STS1 0xD9028 + +#define mmMME_CMDQ_CQ_CFG0 0xD90B0 + +#define mmMME_CMDQ_CQ_CFG1 0xD90B4 + +#define mmMME_CMDQ_CQ_ARUSER 0xD90B8 + +#define mmMME_CMDQ_CQ_PTR_LO 0xD90C0 + +#define mmMME_CMDQ_CQ_PTR_HI 0xD90C4 + +#define mmMME_CMDQ_CQ_TSIZE 0xD90C8 + +#define mmMME_CMDQ_CQ_CTL 0xD90CC + +#define mmMME_CMDQ_CQ_PTR_LO_STS 0xD90D4 + +#define mmMME_CMDQ_CQ_PTR_HI_STS 0xD90D8 + +#define mmMME_CMDQ_CQ_TSIZE_STS 0xD90DC + +#define mmMME_CMDQ_CQ_CTL_STS 0xD90E0 + +#define mmMME_CMDQ_CQ_STS0 0xD90E4 + +#define mmMME_CMDQ_CQ_STS1 0xD90E8 + +#define mmMME_CMDQ_CQ_RD_RATE_LIM_EN 0xD90F0 + +#define mmMME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xD90F4 + +#define mmMME_CMDQ_CQ_RD_RATE_LIM_SAT 0xD90F8 + +#define mmMME_CMDQ_CQ_RD_RATE_LIM_TOUT 0xD90FC + +#define mmMME_CMDQ_CQ_IFIFO_CNT 0xD9108 + +#define mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO 0xD9120 + +#define mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI 0xD9124 + +#define mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO 0xD9128 + +#define mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI 0xD912C + +#define mmMME_CMDQ_CP_MSG_BASE2_ADDR_LO 0xD9130 + +#define mmMME_CMDQ_CP_MSG_BASE2_ADDR_HI 0xD9134 + +#define mmMME_CMDQ_CP_MSG_BASE3_ADDR_LO 0xD9138 + +#define mmMME_CMDQ_CP_MSG_BASE3_ADDR_HI 0xD913C + +#define mmMME_CMDQ_CP_LDMA_TSIZE_OFFSET 0xD9140 + +#define mmMME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xD9144 + +#define mmMME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xD9148 + +#define mmMME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xD914C + +#define mmMME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xD9150 + +#define mmMME_CMDQ_CP_LDMA_COMMIT_OFFSET 0xD9154 + +#define mmMME_CMDQ_CP_FENCE0_RDATA 0xD9158 + +#define mmMME_CMDQ_CP_FENCE1_RDATA 0xD915C + 
+#define mmMME_CMDQ_CP_FENCE2_RDATA 0xD9160 + +#define mmMME_CMDQ_CP_FENCE3_RDATA 0xD9164 + +#define mmMME_CMDQ_CP_FENCE0_CNT 0xD9168 + +#define mmMME_CMDQ_CP_FENCE1_CNT 0xD916C + +#define mmMME_CMDQ_CP_FENCE2_CNT 0xD9170 + +#define mmMME_CMDQ_CP_FENCE3_CNT 0xD9174 + +#define mmMME_CMDQ_CP_STS 0xD9178 + +#define mmMME_CMDQ_CP_CURRENT_INST_LO 0xD917C + +#define mmMME_CMDQ_CP_CURRENT_INST_HI 0xD9180 + +#define mmMME_CMDQ_CP_BARRIER_CFG 0xD9184 + +#define mmMME_CMDQ_CP_DBG_0 0xD9188 + +#define mmMME_CMDQ_CQ_BUF_ADDR 0xD9308 + +#define mmMME_CMDQ_CQ_BUF_RDATA 0xD930C + +#endif /* ASIC_REG_MME_CMDQ_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h new file mode 100644 index 000000000..1882c413c --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h @@ -0,0 +1,1536 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME_MASKS_H_ +#define ASIC_REG_MME_MASKS_H_ + +/* + ***************************************** + * MME (Prototype: MME) + ***************************************** + */ + +/* MME_ARCH_STATUS */ +#define MME_ARCH_STATUS_A_SHIFT 0 +#define MME_ARCH_STATUS_A_MASK 0x1 +#define MME_ARCH_STATUS_B_SHIFT 1 +#define MME_ARCH_STATUS_B_MASK 0x2 +#define MME_ARCH_STATUS_CIN_SHIFT 2 +#define MME_ARCH_STATUS_CIN_MASK 0x4 +#define MME_ARCH_STATUS_COUT_SHIFT 3 +#define MME_ARCH_STATUS_COUT_MASK 0x8 +#define MME_ARCH_STATUS_TE_SHIFT 4 +#define MME_ARCH_STATUS_TE_MASK 0x10 +#define MME_ARCH_STATUS_LD_SHIFT 5 +#define MME_ARCH_STATUS_LD_MASK 0x20 +#define MME_ARCH_STATUS_ST_SHIFT 6 +#define MME_ARCH_STATUS_ST_MASK 0x40 +#define MME_ARCH_STATUS_SB_A_EMPTY_SHIFT 7 +#define MME_ARCH_STATUS_SB_A_EMPTY_MASK 0x80 +#define MME_ARCH_STATUS_SB_B_EMPTY_SHIFT 8 +#define MME_ARCH_STATUS_SB_B_EMPTY_MASK 0x100 +#define MME_ARCH_STATUS_SB_CIN_EMPTY_SHIFT 9 +#define MME_ARCH_STATUS_SB_CIN_EMPTY_MASK 0x200 +#define MME_ARCH_STATUS_SB_COUT_EMPTY_SHIFT 10 +#define MME_ARCH_STATUS_SB_COUT_EMPTY_MASK 0x400 +#define MME_ARCH_STATUS_SM_IDLE_SHIFT 11 +#define MME_ARCH_STATUS_SM_IDLE_MASK 0x800 +#define MME_ARCH_STATUS_WBC_AXI_IDLE_SHIFT 12 +#define MME_ARCH_STATUS_WBC_AXI_IDLE_MASK 0xF000 +#define MME_ARCH_STATUS_SBC_AXI_IDLE_SHIFT 16 +#define MME_ARCH_STATUS_SBC_AXI_IDLE_MASK 0x30000 +#define MME_ARCH_STATUS_SBB_AXI_IDLE_SHIFT 18 +#define MME_ARCH_STATUS_SBB_AXI_IDLE_MASK 0xC0000 +#define MME_ARCH_STATUS_SBA_AXI_IDLE_SHIFT 20 +#define MME_ARCH_STATUS_SBA_AXI_IDLE_MASK 0x300000 +#define MME_ARCH_STATUS_FREE_ACCUMS_SHIFT 22 +#define MME_ARCH_STATUS_FREE_ACCUMS_MASK 0x1C00000 + +/* MME_ARCH_A_BASE_ADDR_HIGH */ +#define MME_ARCH_A_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_ARCH_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_BASE_ADDR_HIGH */ +#define MME_ARCH_B_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_ARCH_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_ARCH_CIN_BASE_ADDR_HIGH */ +#define MME_ARCH_CIN_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_ARCH_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_ARCH_COUT_BASE_ADDR_HIGH */ +#define MME_ARCH_COUT_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_ARCH_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_ARCH_BIAS_BASE_ADDR_HIGH */ +#define MME_ARCH_BIAS_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_ARCH_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_ARCH_A_BASE_ADDR_LOW */ 
+#define MME_ARCH_A_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_ARCH_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_BASE_ADDR_LOW */ +#define MME_ARCH_B_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_ARCH_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_ARCH_CIN_BASE_ADDR_LOW */ +#define MME_ARCH_CIN_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_ARCH_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_ARCH_COUT_BASE_ADDR_LOW */ +#define MME_ARCH_COUT_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_ARCH_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_ARCH_BIAS_BASE_ADDR_LOW */ +#define MME_ARCH_BIAS_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_ARCH_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_ARCH_HEADER */ +#define MME_ARCH_HEADER_SIGNAL_MASK_SHIFT 0 +#define MME_ARCH_HEADER_SIGNAL_MASK_MASK 0x1F +#define MME_ARCH_HEADER_SIGNAL_EN_SHIFT 5 +#define MME_ARCH_HEADER_SIGNAL_EN_MASK 0x20 +#define MME_ARCH_HEADER_TRANS_A_SHIFT 6 +#define MME_ARCH_HEADER_TRANS_A_MASK 0x40 +#define MME_ARCH_HEADER_LOWER_A_SHIFT 7 +#define MME_ARCH_HEADER_LOWER_A_MASK 0x80 +#define MME_ARCH_HEADER_ACCUM_MASK_SHIFT 8 +#define MME_ARCH_HEADER_ACCUM_MASK_MASK 0xF00 +#define MME_ARCH_HEADER_LOAD_BIAS_SHIFT 12 +#define MME_ARCH_HEADER_LOAD_BIAS_MASK 0x1000 +#define MME_ARCH_HEADER_LOAD_CIN_SHIFT 13 +#define MME_ARCH_HEADER_LOAD_CIN_MASK 0x2000 +#define MME_ARCH_HEADER_STORE_OUT_SHIFT 15 +#define MME_ARCH_HEADER_STORE_OUT_MASK 0x8000 +#define MME_ARCH_HEADER_ACC_LD_INC_DISABLE_SHIFT 16 +#define MME_ARCH_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000 +#define MME_ARCH_HEADER_ADVANCE_A_SHIFT 17 +#define MME_ARCH_HEADER_ADVANCE_A_MASK 0x20000 +#define MME_ARCH_HEADER_ADVANCE_B_SHIFT 18 +#define MME_ARCH_HEADER_ADVANCE_B_MASK 0x40000 +#define MME_ARCH_HEADER_ADVANCE_CIN_SHIFT 19 +#define MME_ARCH_HEADER_ADVANCE_CIN_MASK 0x80000 +#define MME_ARCH_HEADER_ADVANCE_COUT_SHIFT 20 +#define MME_ARCH_HEADER_ADVANCE_COUT_MASK 0x100000 +#define MME_ARCH_HEADER_COMPRESSED_B_SHIFT 21 +#define MME_ARCH_HEADER_COMPRESSED_B_MASK 0x200000 +#define MME_ARCH_HEADER_MASK_CONV_END_SHIFT 22 +#define MME_ARCH_HEADER_MASK_CONV_END_MASK 0x400000 +#define MME_ARCH_HEADER_ACC_ST_INC_DISABLE_SHIFT 23 +#define MME_ARCH_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000 +#define MME_ARCH_HEADER_AB_DATA_TYPE_SHIFT 24 +#define MME_ARCH_HEADER_AB_DATA_TYPE_MASK 0x3000000 +#define MME_ARCH_HEADER_CIN_DATA_TYPE_SHIFT 26 +#define MME_ARCH_HEADER_CIN_DATA_TYPE_MASK 0x1C000000 +#define MME_ARCH_HEADER_COUT_DATA_TYPE_SHIFT 29 +#define MME_ARCH_HEADER_COUT_DATA_TYPE_MASK 0xE0000000 + +/* MME_ARCH_KERNEL_SIZE_MINUS_1 */ +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0 +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8 +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00 +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16 +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000 +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24 +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000 + +/* MME_ARCH_ASSOCIATED_DIMS */ +#define MME_ARCH_ASSOCIATED_DIMS_A_0_SHIFT 0 +#define MME_ARCH_ASSOCIATED_DIMS_A_0_MASK 0x7 +#define MME_ARCH_ASSOCIATED_DIMS_B_0_SHIFT 3 +#define MME_ARCH_ASSOCIATED_DIMS_B_0_MASK 0x38 +#define MME_ARCH_ASSOCIATED_DIMS_CIN_0_SHIFT 6 +#define MME_ARCH_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0 +#define MME_ARCH_ASSOCIATED_DIMS_COUT_0_SHIFT 9 +#define MME_ARCH_ASSOCIATED_DIMS_COUT_0_MASK 0xE00 +#define MME_ARCH_ASSOCIATED_DIMS_A_1_SHIFT 16 +#define MME_ARCH_ASSOCIATED_DIMS_A_1_MASK 0x70000 +#define MME_ARCH_ASSOCIATED_DIMS_B_1_SHIFT 19 +#define 
MME_ARCH_ASSOCIATED_DIMS_B_1_MASK 0x380000 +#define MME_ARCH_ASSOCIATED_DIMS_CIN_1_SHIFT 22 +#define MME_ARCH_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000 +#define MME_ARCH_ASSOCIATED_DIMS_COUT_1_SHIFT 25 +#define MME_ARCH_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000 + +/* MME_ARCH_COUT_SCALE */ +#define MME_ARCH_COUT_SCALE_V_SHIFT 0 +#define MME_ARCH_COUT_SCALE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_CIN_SCALE */ +#define MME_ARCH_CIN_SCALE_V_SHIFT 0 +#define MME_ARCH_CIN_SCALE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_GEMMLOWP_ZP */ +#define MME_ARCH_GEMMLOWP_ZP_ZP_CIN_SHIFT 0 +#define MME_ARCH_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF +#define MME_ARCH_GEMMLOWP_ZP_ZP_COUT_SHIFT 9 +#define MME_ARCH_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00 +#define MME_ARCH_GEMMLOWP_ZP_ZP_B_SHIFT 18 +#define MME_ARCH_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000 +#define MME_ARCH_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27 +#define MME_ARCH_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000 +#define MME_ARCH_GEMMLOWP_ZP_ACCUM_SHIFT 28 +#define MME_ARCH_GEMMLOWP_ZP_ACCUM_MASK 0x10000000 +#define MME_ARCH_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29 +#define MME_ARCH_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000 +#define MME_ARCH_GEMMLOWP_ZP_RELU_EN_SHIFT 30 +#define MME_ARCH_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000 + +/* MME_ARCH_GEMMLOWP_EXPONENT */ +#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0 +#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F +#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8 +#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00 +#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16 +#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000 +#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17 +#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000 + +/* MME_ARCH_A_ROI_BASE_OFFSET */ +#define MME_ARCH_A_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_ARCH_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_ARCH_A_VALID_ELEMENTS */ +#define MME_ARCH_A_VALID_ELEMENTS_V_SHIFT 0 +#define MME_ARCH_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_ARCH_A_LOOP_STRIDE */ +#define MME_ARCH_A_LOOP_STRIDE_V_SHIFT 0 +#define MME_ARCH_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_A_ROI_SIZE */ +#define MME_ARCH_A_ROI_SIZE_V_SHIFT 0 +#define MME_ARCH_A_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_A_SPATIAL_START_OFFSET */ +#define MME_ARCH_A_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_ARCH_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_ARCH_A_SPATIAL_STRIDE */ +#define MME_ARCH_A_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_ARCH_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_A_SPATIAL_SIZE_MINUS_1 */ +#define MME_ARCH_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_ARCH_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_ROI_BASE_OFFSET */ +#define MME_ARCH_B_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_ARCH_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_VALID_ELEMENTS */ +#define MME_ARCH_B_VALID_ELEMENTS_V_SHIFT 0 +#define MME_ARCH_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_LOOP_STRIDE */ +#define MME_ARCH_B_LOOP_STRIDE_V_SHIFT 0 +#define MME_ARCH_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_ROI_SIZE */ +#define MME_ARCH_B_ROI_SIZE_V_SHIFT 0 +#define MME_ARCH_B_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_SPATIAL_START_OFFSET */ +#define MME_ARCH_B_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_ARCH_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_SPATIAL_STRIDE */ +#define MME_ARCH_B_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_ARCH_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_SPATIAL_SIZE_MINUS_1 */ +#define MME_ARCH_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define 
MME_ARCH_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_ARCH_C_ROI_BASE_OFFSET */ +#define MME_ARCH_C_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_ARCH_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_ARCH_C_VALID_ELEMENTS */ +#define MME_ARCH_C_VALID_ELEMENTS_V_SHIFT 0 +#define MME_ARCH_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_ARCH_C_LOOP_STRIDE */ +#define MME_ARCH_C_LOOP_STRIDE_V_SHIFT 0 +#define MME_ARCH_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_C_ROI_SIZE */ +#define MME_ARCH_C_ROI_SIZE_V_SHIFT 0 +#define MME_ARCH_C_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_C_SPATIAL_START_OFFSET */ +#define MME_ARCH_C_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_ARCH_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_ARCH_C_SPATIAL_STRIDE */ +#define MME_ARCH_C_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_ARCH_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_C_SPATIAL_SIZE_MINUS_1 */ +#define MME_ARCH_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_ARCH_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_ARCH_SYNC_OBJECT_MESSAGE */ +#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16 +#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000 +#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31 +#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000 + +/* MME_ARCH_E_PADDING_VALUE_A */ +#define MME_ARCH_E_PADDING_VALUE_A_V_SHIFT 0 +#define MME_ARCH_E_PADDING_VALUE_A_V_MASK 0xFFFF + +/* MME_ARCH_E_NUM_ITERATION_MINUS_1 */ +#define MME_ARCH_E_NUM_ITERATION_MINUS_1_V_SHIFT 0 +#define MME_ARCH_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_ARCH_E_BUBBLES_PER_SPLIT */ +#define MME_ARCH_E_BUBBLES_PER_SPLIT_A_SHIFT 0 +#define MME_ARCH_E_BUBBLES_PER_SPLIT_A_MASK 0xFF +#define MME_ARCH_E_BUBBLES_PER_SPLIT_B_SHIFT 8 +#define MME_ARCH_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00 +#define MME_ARCH_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16 +#define MME_ARCH_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000 +#define MME_ARCH_E_BUBBLES_PER_SPLIT_ID_SHIFT 24 +#define MME_ARCH_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000 + +/* MME_CMD */ +#define MME_CMD_EXECUTE_SHIFT 0 +#define MME_CMD_EXECUTE_MASK 0x1 + +/* MME_DUMMY */ +#define MME_DUMMY_V_SHIFT 0 +#define MME_DUMMY_V_MASK 0xFFFFFFFF + +/* MME_RESET */ +#define MME_RESET_V_SHIFT 0 +#define MME_RESET_V_MASK 0x1 + +/* MME_STALL */ +#define MME_STALL_V_SHIFT 0 +#define MME_STALL_V_MASK 0xFFFFFFFF + +/* MME_SM_BASE_ADDRESS_LOW */ +#define MME_SM_BASE_ADDRESS_LOW_V_SHIFT 0 +#define MME_SM_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF + +/* MME_SM_BASE_ADDRESS_HIGH */ +#define MME_SM_BASE_ADDRESS_HIGH_V_SHIFT 0 +#define MME_SM_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF + +/* MME_DBGMEM_ADD */ +#define MME_DBGMEM_ADD_V_SHIFT 0 +#define MME_DBGMEM_ADD_V_MASK 0xFFFFFFFF + +/* MME_DBGMEM_DATA_WR */ +#define MME_DBGMEM_DATA_WR_V_SHIFT 0 +#define MME_DBGMEM_DATA_WR_V_MASK 0xFFFFFFFF + +/* MME_DBGMEM_DATA_RD */ +#define MME_DBGMEM_DATA_RD_V_SHIFT 0 +#define MME_DBGMEM_DATA_RD_V_MASK 0xFFFFFFFF + +/* MME_DBGMEM_CTRL */ +#define MME_DBGMEM_CTRL_WR_NRD_SHIFT 0 +#define MME_DBGMEM_CTRL_WR_NRD_MASK 0x1 + +/* MME_DBGMEM_RC */ +#define MME_DBGMEM_RC_VALID_SHIFT 0 +#define MME_DBGMEM_RC_VALID_MASK 0x1 +#define MME_DBGMEM_RC_FULL_SHIFT 1 +#define MME_DBGMEM_RC_FULL_MASK 0x2 + +/* MME_LOG_SHADOW */ +#define MME_LOG_SHADOW_MASK_0_SHIFT 0 +#define MME_LOG_SHADOW_MASK_0_MASK 0x7F +#define MME_LOG_SHADOW_MASK_1_SHIFT 8 +#define MME_LOG_SHADOW_MASK_1_MASK 0x7F00 +#define 
MME_LOG_SHADOW_MASK_2_SHIFT 16 +#define MME_LOG_SHADOW_MASK_2_MASK 0x7F0000 +#define MME_LOG_SHADOW_MASK_3_SHIFT 24 +#define MME_LOG_SHADOW_MASK_3_MASK 0x7F000000 + +/* MME_STORE_MAX_CREDIT */ +#define MME_STORE_MAX_CREDIT_V_SHIFT 0 +#define MME_STORE_MAX_CREDIT_V_MASK 0x3F + +/* MME_AGU */ +#define MME_AGU_SBA_MAX_CREDIT_SHIFT 0 +#define MME_AGU_SBA_MAX_CREDIT_MASK 0x1F +#define MME_AGU_SBB_MAX_CREDIT_SHIFT 8 +#define MME_AGU_SBB_MAX_CREDIT_MASK 0x1F00 +#define MME_AGU_SBC_MAX_CREDIT_SHIFT 16 +#define MME_AGU_SBC_MAX_CREDIT_MASK 0x1F0000 +#define MME_AGU_WBC_MAX_CREDIT_SHIFT 24 +#define MME_AGU_WBC_MAX_CREDIT_MASK 0x3F000000 + +/* MME_SBA */ +#define MME_SBA_MAX_SIZE_SHIFT 0 +#define MME_SBA_MAX_SIZE_MASK 0x3FF +#define MME_SBA_EU_MAX_CREDIT_SHIFT 16 +#define MME_SBA_EU_MAX_CREDIT_MASK 0x1F0000 + +/* MME_SBB */ +#define MME_SBB_MAX_SIZE_SHIFT 0 +#define MME_SBB_MAX_SIZE_MASK 0x3FF +#define MME_SBB_EU_MAX_CREDIT_SHIFT 16 +#define MME_SBB_EU_MAX_CREDIT_MASK 0x1F0000 + +/* MME_SBC */ +#define MME_SBC_MAX_SIZE_SHIFT 0 +#define MME_SBC_MAX_SIZE_MASK 0x3FF +#define MME_SBC_EU_MAX_CREDIT_SHIFT 16 +#define MME_SBC_EU_MAX_CREDIT_MASK 0x1F0000 + +/* MME_WBC */ +#define MME_WBC_MAX_OUTSTANDING_SHIFT 0 +#define MME_WBC_MAX_OUTSTANDING_MASK 0xFFF +#define MME_WBC_DISABLE_FAST_END_PE_SHIFT 12 +#define MME_WBC_DISABLE_FAST_END_PE_MASK 0x1000 +#define MME_WBC_LD_INSERT_BUBBLE_DIS_SHIFT 13 +#define MME_WBC_LD_INSERT_BUBBLE_DIS_MASK 0x2000 + +/* MME_SBA_CONTROL_DATA */ +#define MME_SBA_CONTROL_DATA_ASID_SHIFT 0 +#define MME_SBA_CONTROL_DATA_ASID_MASK 0x3FF +#define MME_SBA_CONTROL_DATA_MMBP_SHIFT 10 +#define MME_SBA_CONTROL_DATA_MMBP_MASK 0x400 + +/* MME_SBB_CONTROL_DATA */ +#define MME_SBB_CONTROL_DATA_ASID_SHIFT 0 +#define MME_SBB_CONTROL_DATA_ASID_MASK 0x3FF +#define MME_SBB_CONTROL_DATA_MMBP_SHIFT 10 +#define MME_SBB_CONTROL_DATA_MMBP_MASK 0x400 + +/* MME_SBC_CONTROL_DATA */ +#define MME_SBC_CONTROL_DATA_ASID_SHIFT 0 +#define MME_SBC_CONTROL_DATA_ASID_MASK 0x3FF +#define MME_SBC_CONTROL_DATA_MMBP_SHIFT 10 +#define MME_SBC_CONTROL_DATA_MMBP_MASK 0x400 + +/* MME_WBC_CONTROL_DATA */ +#define MME_WBC_CONTROL_DATA_ASID_SHIFT 0 +#define MME_WBC_CONTROL_DATA_ASID_MASK 0x3FF +#define MME_WBC_CONTROL_DATA_MMBP_SHIFT 10 +#define MME_WBC_CONTROL_DATA_MMBP_MASK 0x400 + +/* MME_TE */ +#define MME_TE_MAX_CREDIT_SHIFT 0 +#define MME_TE_MAX_CREDIT_MASK 0x1F +#define MME_TE_DESC_MAX_CREDIT_SHIFT 8 +#define MME_TE_DESC_MAX_CREDIT_MASK 0x1F00 + +/* MME_TE2DEC */ +#define MME_TE2DEC_MAX_CREDIT_SHIFT 0 +#define MME_TE2DEC_MAX_CREDIT_MASK 0x1F + +/* MME_REI_STATUS */ +#define MME_REI_STATUS_V_SHIFT 0 +#define MME_REI_STATUS_V_MASK 0xFFFFFFFF + +/* MME_REI_MASK */ +#define MME_REI_MASK_V_SHIFT 0 +#define MME_REI_MASK_V_MASK 0xFFFFFFFF + +/* MME_SEI_STATUS */ +#define MME_SEI_STATUS_V_SHIFT 0 +#define MME_SEI_STATUS_V_MASK 0xFFFFFFFF + +/* MME_SEI_MASK */ +#define MME_SEI_MASK_V_SHIFT 0 +#define MME_SEI_MASK_V_MASK 0xFFFFFFFF + +/* MME_SPI_STATUS */ +#define MME_SPI_STATUS_V_SHIFT 0 +#define MME_SPI_STATUS_V_MASK 0xFFFFFFFF + +/* MME_SPI_MASK */ +#define MME_SPI_MASK_V_SHIFT 0 +#define MME_SPI_MASK_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_STATUS */ +#define MME_SHADOW_0_STATUS_A_SHIFT 0 +#define MME_SHADOW_0_STATUS_A_MASK 0x1 +#define MME_SHADOW_0_STATUS_B_SHIFT 1 +#define MME_SHADOW_0_STATUS_B_MASK 0x2 +#define MME_SHADOW_0_STATUS_CIN_SHIFT 2 +#define MME_SHADOW_0_STATUS_CIN_MASK 0x4 +#define MME_SHADOW_0_STATUS_COUT_SHIFT 3 +#define MME_SHADOW_0_STATUS_COUT_MASK 0x8 +#define MME_SHADOW_0_STATUS_TE_SHIFT 4 +#define 
MME_SHADOW_0_STATUS_TE_MASK 0x10 +#define MME_SHADOW_0_STATUS_LD_SHIFT 5 +#define MME_SHADOW_0_STATUS_LD_MASK 0x20 +#define MME_SHADOW_0_STATUS_ST_SHIFT 6 +#define MME_SHADOW_0_STATUS_ST_MASK 0x40 + +/* MME_SHADOW_0_A_BASE_ADDR_HIGH */ +#define MME_SHADOW_0_A_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_0_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_BASE_ADDR_HIGH */ +#define MME_SHADOW_0_B_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_0_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_CIN_BASE_ADDR_HIGH */ +#define MME_SHADOW_0_CIN_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_0_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_COUT_BASE_ADDR_HIGH */ +#define MME_SHADOW_0_COUT_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_0_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_BIAS_BASE_ADDR_HIGH */ +#define MME_SHADOW_0_BIAS_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_0_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_A_BASE_ADDR_LOW */ +#define MME_SHADOW_0_A_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_0_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_BASE_ADDR_LOW */ +#define MME_SHADOW_0_B_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_0_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_CIN_BASE_ADDR_LOW */ +#define MME_SHADOW_0_CIN_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_0_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_COUT_BASE_ADDR_LOW */ +#define MME_SHADOW_0_COUT_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_0_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_BIAS_BASE_ADDR_LOW */ +#define MME_SHADOW_0_BIAS_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_0_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_HEADER */ +#define MME_SHADOW_0_HEADER_SIGNAL_MASK_SHIFT 0 +#define MME_SHADOW_0_HEADER_SIGNAL_MASK_MASK 0x1F +#define MME_SHADOW_0_HEADER_SIGNAL_EN_SHIFT 5 +#define MME_SHADOW_0_HEADER_SIGNAL_EN_MASK 0x20 +#define MME_SHADOW_0_HEADER_TRANS_A_SHIFT 6 +#define MME_SHADOW_0_HEADER_TRANS_A_MASK 0x40 +#define MME_SHADOW_0_HEADER_LOWER_A_SHIFT 7 +#define MME_SHADOW_0_HEADER_LOWER_A_MASK 0x80 +#define MME_SHADOW_0_HEADER_ACCUM_MASK_SHIFT 8 +#define MME_SHADOW_0_HEADER_ACCUM_MASK_MASK 0xF00 +#define MME_SHADOW_0_HEADER_LOAD_BIAS_SHIFT 12 +#define MME_SHADOW_0_HEADER_LOAD_BIAS_MASK 0x1000 +#define MME_SHADOW_0_HEADER_LOAD_CIN_SHIFT 13 +#define MME_SHADOW_0_HEADER_LOAD_CIN_MASK 0x2000 +#define MME_SHADOW_0_HEADER_STORE_OUT_SHIFT 15 +#define MME_SHADOW_0_HEADER_STORE_OUT_MASK 0x8000 +#define MME_SHADOW_0_HEADER_ACC_LD_INC_DISABLE_SHIFT 16 +#define MME_SHADOW_0_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000 +#define MME_SHADOW_0_HEADER_ADVANCE_A_SHIFT 17 +#define MME_SHADOW_0_HEADER_ADVANCE_A_MASK 0x20000 +#define MME_SHADOW_0_HEADER_ADVANCE_B_SHIFT 18 +#define MME_SHADOW_0_HEADER_ADVANCE_B_MASK 0x40000 +#define MME_SHADOW_0_HEADER_ADVANCE_CIN_SHIFT 19 +#define MME_SHADOW_0_HEADER_ADVANCE_CIN_MASK 0x80000 +#define MME_SHADOW_0_HEADER_ADVANCE_COUT_SHIFT 20 +#define MME_SHADOW_0_HEADER_ADVANCE_COUT_MASK 0x100000 +#define MME_SHADOW_0_HEADER_COMPRESSED_B_SHIFT 21 +#define MME_SHADOW_0_HEADER_COMPRESSED_B_MASK 0x200000 +#define MME_SHADOW_0_HEADER_MASK_CONV_END_SHIFT 22 +#define MME_SHADOW_0_HEADER_MASK_CONV_END_MASK 0x400000 +#define MME_SHADOW_0_HEADER_ACC_ST_INC_DISABLE_SHIFT 23 +#define MME_SHADOW_0_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000 +#define MME_SHADOW_0_HEADER_AB_DATA_TYPE_SHIFT 24 +#define MME_SHADOW_0_HEADER_AB_DATA_TYPE_MASK 0x3000000 +#define MME_SHADOW_0_HEADER_CIN_DATA_TYPE_SHIFT 26 +#define MME_SHADOW_0_HEADER_CIN_DATA_TYPE_MASK 
0x1C000000 +#define MME_SHADOW_0_HEADER_COUT_DATA_TYPE_SHIFT 29 +#define MME_SHADOW_0_HEADER_COUT_DATA_TYPE_MASK 0xE0000000 + +/* MME_SHADOW_0_KERNEL_SIZE_MINUS_1 */ +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0 +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8 +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00 +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16 +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000 +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24 +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000 + +/* MME_SHADOW_0_ASSOCIATED_DIMS */ +#define MME_SHADOW_0_ASSOCIATED_DIMS_A_0_SHIFT 0 +#define MME_SHADOW_0_ASSOCIATED_DIMS_A_0_MASK 0x7 +#define MME_SHADOW_0_ASSOCIATED_DIMS_B_0_SHIFT 3 +#define MME_SHADOW_0_ASSOCIATED_DIMS_B_0_MASK 0x38 +#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_0_SHIFT 6 +#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0 +#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_0_SHIFT 9 +#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_0_MASK 0xE00 +#define MME_SHADOW_0_ASSOCIATED_DIMS_A_1_SHIFT 16 +#define MME_SHADOW_0_ASSOCIATED_DIMS_A_1_MASK 0x70000 +#define MME_SHADOW_0_ASSOCIATED_DIMS_B_1_SHIFT 19 +#define MME_SHADOW_0_ASSOCIATED_DIMS_B_1_MASK 0x380000 +#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_1_SHIFT 22 +#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000 +#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_1_SHIFT 25 +#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000 + +/* MME_SHADOW_0_COUT_SCALE */ +#define MME_SHADOW_0_COUT_SCALE_V_SHIFT 0 +#define MME_SHADOW_0_COUT_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_CIN_SCALE */ +#define MME_SHADOW_0_CIN_SCALE_V_SHIFT 0 +#define MME_SHADOW_0_CIN_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_GEMMLOWP_ZP */ +#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_CIN_SHIFT 0 +#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF +#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_COUT_SHIFT 9 +#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00 +#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_B_SHIFT 18 +#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000 +#define MME_SHADOW_0_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27 +#define MME_SHADOW_0_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000 +#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_SHIFT 28 +#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_MASK 0x10000000 +#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29 +#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000 +#define MME_SHADOW_0_GEMMLOWP_ZP_RELU_EN_SHIFT 30 +#define MME_SHADOW_0_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000 + +/* MME_SHADOW_0_GEMMLOWP_EXPONENT */ +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0 +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8 +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00 +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16 +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000 +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17 +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000 + +/* MME_SHADOW_0_A_ROI_BASE_OFFSET */ +#define MME_SHADOW_0_A_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_0_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_A_VALID_ELEMENTS */ +#define MME_SHADOW_0_A_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_0_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_A_LOOP_STRIDE */ +#define MME_SHADOW_0_A_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_0_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* 
MME_SHADOW_0_A_ROI_SIZE */ +#define MME_SHADOW_0_A_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_0_A_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_A_SPATIAL_START_OFFSET */ +#define MME_SHADOW_0_A_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_0_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_A_SPATIAL_STRIDE */ +#define MME_SHADOW_0_A_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_0_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_ROI_BASE_OFFSET */ +#define MME_SHADOW_0_B_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_0_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_VALID_ELEMENTS */ +#define MME_SHADOW_0_B_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_0_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_LOOP_STRIDE */ +#define MME_SHADOW_0_B_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_0_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_ROI_SIZE */ +#define MME_SHADOW_0_B_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_0_B_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_SPATIAL_START_OFFSET */ +#define MME_SHADOW_0_B_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_0_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_SPATIAL_STRIDE */ +#define MME_SHADOW_0_B_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_0_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_C_ROI_BASE_OFFSET */ +#define MME_SHADOW_0_C_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_0_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_C_VALID_ELEMENTS */ +#define MME_SHADOW_0_C_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_0_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_C_LOOP_STRIDE */ +#define MME_SHADOW_0_C_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_0_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_C_ROI_SIZE */ +#define MME_SHADOW_0_C_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_0_C_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_C_SPATIAL_START_OFFSET */ +#define MME_SHADOW_0_C_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_0_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_C_SPATIAL_STRIDE */ +#define MME_SHADOW_0_C_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_0_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_SYNC_OBJECT_MESSAGE */ +#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16 +#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000 +#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31 +#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000 + +/* MME_SHADOW_0_E_PADDING_VALUE_A */ +#define MME_SHADOW_0_E_PADDING_VALUE_A_V_SHIFT 0 +#define MME_SHADOW_0_E_PADDING_VALUE_A_V_MASK 0xFFFF + +/* MME_SHADOW_0_E_NUM_ITERATION_MINUS_1 */ +#define MME_SHADOW_0_E_NUM_ITERATION_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_0_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_E_BUBBLES_PER_SPLIT */ +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_A_SHIFT 0 +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_A_MASK 
0xFF +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_B_SHIFT 8 +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00 +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16 +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000 +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_ID_SHIFT 24 +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000 + +/* MME_SHADOW_1_STATUS */ +#define MME_SHADOW_1_STATUS_A_SHIFT 0 +#define MME_SHADOW_1_STATUS_A_MASK 0x1 +#define MME_SHADOW_1_STATUS_B_SHIFT 1 +#define MME_SHADOW_1_STATUS_B_MASK 0x2 +#define MME_SHADOW_1_STATUS_CIN_SHIFT 2 +#define MME_SHADOW_1_STATUS_CIN_MASK 0x4 +#define MME_SHADOW_1_STATUS_COUT_SHIFT 3 +#define MME_SHADOW_1_STATUS_COUT_MASK 0x8 +#define MME_SHADOW_1_STATUS_TE_SHIFT 4 +#define MME_SHADOW_1_STATUS_TE_MASK 0x10 +#define MME_SHADOW_1_STATUS_LD_SHIFT 5 +#define MME_SHADOW_1_STATUS_LD_MASK 0x20 +#define MME_SHADOW_1_STATUS_ST_SHIFT 6 +#define MME_SHADOW_1_STATUS_ST_MASK 0x40 + +/* MME_SHADOW_1_A_BASE_ADDR_HIGH */ +#define MME_SHADOW_1_A_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_1_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_BASE_ADDR_HIGH */ +#define MME_SHADOW_1_B_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_1_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_CIN_BASE_ADDR_HIGH */ +#define MME_SHADOW_1_CIN_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_1_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_COUT_BASE_ADDR_HIGH */ +#define MME_SHADOW_1_COUT_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_1_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_BIAS_BASE_ADDR_HIGH */ +#define MME_SHADOW_1_BIAS_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_1_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_A_BASE_ADDR_LOW */ +#define MME_SHADOW_1_A_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_1_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_BASE_ADDR_LOW */ +#define MME_SHADOW_1_B_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_1_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_CIN_BASE_ADDR_LOW */ +#define MME_SHADOW_1_CIN_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_1_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_COUT_BASE_ADDR_LOW */ +#define MME_SHADOW_1_COUT_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_1_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_BIAS_BASE_ADDR_LOW */ +#define MME_SHADOW_1_BIAS_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_1_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_HEADER */ +#define MME_SHADOW_1_HEADER_SIGNAL_MASK_SHIFT 0 +#define MME_SHADOW_1_HEADER_SIGNAL_MASK_MASK 0x1F +#define MME_SHADOW_1_HEADER_SIGNAL_EN_SHIFT 5 +#define MME_SHADOW_1_HEADER_SIGNAL_EN_MASK 0x20 +#define MME_SHADOW_1_HEADER_TRANS_A_SHIFT 6 +#define MME_SHADOW_1_HEADER_TRANS_A_MASK 0x40 +#define MME_SHADOW_1_HEADER_LOWER_A_SHIFT 7 +#define MME_SHADOW_1_HEADER_LOWER_A_MASK 0x80 +#define MME_SHADOW_1_HEADER_ACCUM_MASK_SHIFT 8 +#define MME_SHADOW_1_HEADER_ACCUM_MASK_MASK 0xF00 +#define MME_SHADOW_1_HEADER_LOAD_BIAS_SHIFT 12 +#define MME_SHADOW_1_HEADER_LOAD_BIAS_MASK 0x1000 +#define MME_SHADOW_1_HEADER_LOAD_CIN_SHIFT 13 +#define MME_SHADOW_1_HEADER_LOAD_CIN_MASK 0x2000 +#define MME_SHADOW_1_HEADER_STORE_OUT_SHIFT 15 +#define MME_SHADOW_1_HEADER_STORE_OUT_MASK 0x8000 +#define MME_SHADOW_1_HEADER_ACC_LD_INC_DISABLE_SHIFT 16 +#define MME_SHADOW_1_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000 +#define MME_SHADOW_1_HEADER_ADVANCE_A_SHIFT 17 +#define MME_SHADOW_1_HEADER_ADVANCE_A_MASK 0x20000 +#define MME_SHADOW_1_HEADER_ADVANCE_B_SHIFT 18 +#define MME_SHADOW_1_HEADER_ADVANCE_B_MASK 0x40000 
+#define MME_SHADOW_1_HEADER_ADVANCE_CIN_SHIFT 19 +#define MME_SHADOW_1_HEADER_ADVANCE_CIN_MASK 0x80000 +#define MME_SHADOW_1_HEADER_ADVANCE_COUT_SHIFT 20 +#define MME_SHADOW_1_HEADER_ADVANCE_COUT_MASK 0x100000 +#define MME_SHADOW_1_HEADER_COMPRESSED_B_SHIFT 21 +#define MME_SHADOW_1_HEADER_COMPRESSED_B_MASK 0x200000 +#define MME_SHADOW_1_HEADER_MASK_CONV_END_SHIFT 22 +#define MME_SHADOW_1_HEADER_MASK_CONV_END_MASK 0x400000 +#define MME_SHADOW_1_HEADER_ACC_ST_INC_DISABLE_SHIFT 23 +#define MME_SHADOW_1_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000 +#define MME_SHADOW_1_HEADER_AB_DATA_TYPE_SHIFT 24 +#define MME_SHADOW_1_HEADER_AB_DATA_TYPE_MASK 0x3000000 +#define MME_SHADOW_1_HEADER_CIN_DATA_TYPE_SHIFT 26 +#define MME_SHADOW_1_HEADER_CIN_DATA_TYPE_MASK 0x1C000000 +#define MME_SHADOW_1_HEADER_COUT_DATA_TYPE_SHIFT 29 +#define MME_SHADOW_1_HEADER_COUT_DATA_TYPE_MASK 0xE0000000 + +/* MME_SHADOW_1_KERNEL_SIZE_MINUS_1 */ +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0 +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8 +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00 +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16 +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000 +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24 +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000 + +/* MME_SHADOW_1_ASSOCIATED_DIMS */ +#define MME_SHADOW_1_ASSOCIATED_DIMS_A_0_SHIFT 0 +#define MME_SHADOW_1_ASSOCIATED_DIMS_A_0_MASK 0x7 +#define MME_SHADOW_1_ASSOCIATED_DIMS_B_0_SHIFT 3 +#define MME_SHADOW_1_ASSOCIATED_DIMS_B_0_MASK 0x38 +#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_0_SHIFT 6 +#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0 +#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_0_SHIFT 9 +#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_0_MASK 0xE00 +#define MME_SHADOW_1_ASSOCIATED_DIMS_A_1_SHIFT 16 +#define MME_SHADOW_1_ASSOCIATED_DIMS_A_1_MASK 0x70000 +#define MME_SHADOW_1_ASSOCIATED_DIMS_B_1_SHIFT 19 +#define MME_SHADOW_1_ASSOCIATED_DIMS_B_1_MASK 0x380000 +#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_1_SHIFT 22 +#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000 +#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_1_SHIFT 25 +#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000 + +/* MME_SHADOW_1_COUT_SCALE */ +#define MME_SHADOW_1_COUT_SCALE_V_SHIFT 0 +#define MME_SHADOW_1_COUT_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_CIN_SCALE */ +#define MME_SHADOW_1_CIN_SCALE_V_SHIFT 0 +#define MME_SHADOW_1_CIN_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_GEMMLOWP_ZP */ +#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_CIN_SHIFT 0 +#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF +#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_COUT_SHIFT 9 +#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00 +#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_B_SHIFT 18 +#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000 +#define MME_SHADOW_1_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27 +#define MME_SHADOW_1_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000 +#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_SHIFT 28 +#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_MASK 0x10000000 +#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29 +#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000 +#define MME_SHADOW_1_GEMMLOWP_ZP_RELU_EN_SHIFT 30 +#define MME_SHADOW_1_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000 + +/* MME_SHADOW_1_GEMMLOWP_EXPONENT */ +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0 +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 
8 +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00 +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16 +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000 +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17 +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000 + +/* MME_SHADOW_1_A_ROI_BASE_OFFSET */ +#define MME_SHADOW_1_A_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_1_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_A_VALID_ELEMENTS */ +#define MME_SHADOW_1_A_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_1_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_A_LOOP_STRIDE */ +#define MME_SHADOW_1_A_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_1_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_A_ROI_SIZE */ +#define MME_SHADOW_1_A_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_1_A_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_A_SPATIAL_START_OFFSET */ +#define MME_SHADOW_1_A_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_1_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_A_SPATIAL_STRIDE */ +#define MME_SHADOW_1_A_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_1_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_ROI_BASE_OFFSET */ +#define MME_SHADOW_1_B_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_1_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_VALID_ELEMENTS */ +#define MME_SHADOW_1_B_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_1_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_LOOP_STRIDE */ +#define MME_SHADOW_1_B_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_1_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_ROI_SIZE */ +#define MME_SHADOW_1_B_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_1_B_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_SPATIAL_START_OFFSET */ +#define MME_SHADOW_1_B_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_1_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_SPATIAL_STRIDE */ +#define MME_SHADOW_1_B_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_1_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_C_ROI_BASE_OFFSET */ +#define MME_SHADOW_1_C_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_1_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_C_VALID_ELEMENTS */ +#define MME_SHADOW_1_C_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_1_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_C_LOOP_STRIDE */ +#define MME_SHADOW_1_C_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_1_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_C_ROI_SIZE */ +#define MME_SHADOW_1_C_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_1_C_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_C_SPATIAL_START_OFFSET */ +#define MME_SHADOW_1_C_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_1_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_C_SPATIAL_STRIDE */ +#define MME_SHADOW_1_C_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_1_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_SYNC_OBJECT_MESSAGE */ +#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define 
MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16 +#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000 +#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31 +#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000 + +/* MME_SHADOW_1_E_PADDING_VALUE_A */ +#define MME_SHADOW_1_E_PADDING_VALUE_A_V_SHIFT 0 +#define MME_SHADOW_1_E_PADDING_VALUE_A_V_MASK 0xFFFF + +/* MME_SHADOW_1_E_NUM_ITERATION_MINUS_1 */ +#define MME_SHADOW_1_E_NUM_ITERATION_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_1_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_E_BUBBLES_PER_SPLIT */ +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_A_SHIFT 0 +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_A_MASK 0xFF +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_B_SHIFT 8 +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00 +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16 +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000 +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_ID_SHIFT 24 +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000 + +/* MME_SHADOW_2_STATUS */ +#define MME_SHADOW_2_STATUS_A_SHIFT 0 +#define MME_SHADOW_2_STATUS_A_MASK 0x1 +#define MME_SHADOW_2_STATUS_B_SHIFT 1 +#define MME_SHADOW_2_STATUS_B_MASK 0x2 +#define MME_SHADOW_2_STATUS_CIN_SHIFT 2 +#define MME_SHADOW_2_STATUS_CIN_MASK 0x4 +#define MME_SHADOW_2_STATUS_COUT_SHIFT 3 +#define MME_SHADOW_2_STATUS_COUT_MASK 0x8 +#define MME_SHADOW_2_STATUS_TE_SHIFT 4 +#define MME_SHADOW_2_STATUS_TE_MASK 0x10 +#define MME_SHADOW_2_STATUS_LD_SHIFT 5 +#define MME_SHADOW_2_STATUS_LD_MASK 0x20 +#define MME_SHADOW_2_STATUS_ST_SHIFT 6 +#define MME_SHADOW_2_STATUS_ST_MASK 0x40 + +/* MME_SHADOW_2_A_BASE_ADDR_HIGH */ +#define MME_SHADOW_2_A_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_2_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_BASE_ADDR_HIGH */ +#define MME_SHADOW_2_B_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_2_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_CIN_BASE_ADDR_HIGH */ +#define MME_SHADOW_2_CIN_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_2_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_COUT_BASE_ADDR_HIGH */ +#define MME_SHADOW_2_COUT_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_2_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_BIAS_BASE_ADDR_HIGH */ +#define MME_SHADOW_2_BIAS_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_2_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_A_BASE_ADDR_LOW */ +#define MME_SHADOW_2_A_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_2_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_BASE_ADDR_LOW */ +#define MME_SHADOW_2_B_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_2_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_CIN_BASE_ADDR_LOW */ +#define MME_SHADOW_2_CIN_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_2_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_COUT_BASE_ADDR_LOW */ +#define MME_SHADOW_2_COUT_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_2_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_BIAS_BASE_ADDR_LOW */ +#define MME_SHADOW_2_BIAS_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_2_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_HEADER */ +#define MME_SHADOW_2_HEADER_SIGNAL_MASK_SHIFT 0 +#define MME_SHADOW_2_HEADER_SIGNAL_MASK_MASK 0x1F +#define MME_SHADOW_2_HEADER_SIGNAL_EN_SHIFT 5 +#define MME_SHADOW_2_HEADER_SIGNAL_EN_MASK 0x20 +#define MME_SHADOW_2_HEADER_TRANS_A_SHIFT 6 +#define MME_SHADOW_2_HEADER_TRANS_A_MASK 0x40 +#define 
MME_SHADOW_2_HEADER_LOWER_A_SHIFT 7 +#define MME_SHADOW_2_HEADER_LOWER_A_MASK 0x80 +#define MME_SHADOW_2_HEADER_ACCUM_MASK_SHIFT 8 +#define MME_SHADOW_2_HEADER_ACCUM_MASK_MASK 0xF00 +#define MME_SHADOW_2_HEADER_LOAD_BIAS_SHIFT 12 +#define MME_SHADOW_2_HEADER_LOAD_BIAS_MASK 0x1000 +#define MME_SHADOW_2_HEADER_LOAD_CIN_SHIFT 13 +#define MME_SHADOW_2_HEADER_LOAD_CIN_MASK 0x2000 +#define MME_SHADOW_2_HEADER_STORE_OUT_SHIFT 15 +#define MME_SHADOW_2_HEADER_STORE_OUT_MASK 0x8000 +#define MME_SHADOW_2_HEADER_ACC_LD_INC_DISABLE_SHIFT 16 +#define MME_SHADOW_2_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000 +#define MME_SHADOW_2_HEADER_ADVANCE_A_SHIFT 17 +#define MME_SHADOW_2_HEADER_ADVANCE_A_MASK 0x20000 +#define MME_SHADOW_2_HEADER_ADVANCE_B_SHIFT 18 +#define MME_SHADOW_2_HEADER_ADVANCE_B_MASK 0x40000 +#define MME_SHADOW_2_HEADER_ADVANCE_CIN_SHIFT 19 +#define MME_SHADOW_2_HEADER_ADVANCE_CIN_MASK 0x80000 +#define MME_SHADOW_2_HEADER_ADVANCE_COUT_SHIFT 20 +#define MME_SHADOW_2_HEADER_ADVANCE_COUT_MASK 0x100000 +#define MME_SHADOW_2_HEADER_COMPRESSED_B_SHIFT 21 +#define MME_SHADOW_2_HEADER_COMPRESSED_B_MASK 0x200000 +#define MME_SHADOW_2_HEADER_MASK_CONV_END_SHIFT 22 +#define MME_SHADOW_2_HEADER_MASK_CONV_END_MASK 0x400000 +#define MME_SHADOW_2_HEADER_ACC_ST_INC_DISABLE_SHIFT 23 +#define MME_SHADOW_2_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000 +#define MME_SHADOW_2_HEADER_AB_DATA_TYPE_SHIFT 24 +#define MME_SHADOW_2_HEADER_AB_DATA_TYPE_MASK 0x3000000 +#define MME_SHADOW_2_HEADER_CIN_DATA_TYPE_SHIFT 26 +#define MME_SHADOW_2_HEADER_CIN_DATA_TYPE_MASK 0x1C000000 +#define MME_SHADOW_2_HEADER_COUT_DATA_TYPE_SHIFT 29 +#define MME_SHADOW_2_HEADER_COUT_DATA_TYPE_MASK 0xE0000000 + +/* MME_SHADOW_2_KERNEL_SIZE_MINUS_1 */ +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0 +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8 +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00 +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16 +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000 +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24 +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000 + +/* MME_SHADOW_2_ASSOCIATED_DIMS */ +#define MME_SHADOW_2_ASSOCIATED_DIMS_A_0_SHIFT 0 +#define MME_SHADOW_2_ASSOCIATED_DIMS_A_0_MASK 0x7 +#define MME_SHADOW_2_ASSOCIATED_DIMS_B_0_SHIFT 3 +#define MME_SHADOW_2_ASSOCIATED_DIMS_B_0_MASK 0x38 +#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_0_SHIFT 6 +#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0 +#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_0_SHIFT 9 +#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_0_MASK 0xE00 +#define MME_SHADOW_2_ASSOCIATED_DIMS_A_1_SHIFT 16 +#define MME_SHADOW_2_ASSOCIATED_DIMS_A_1_MASK 0x70000 +#define MME_SHADOW_2_ASSOCIATED_DIMS_B_1_SHIFT 19 +#define MME_SHADOW_2_ASSOCIATED_DIMS_B_1_MASK 0x380000 +#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_1_SHIFT 22 +#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000 +#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_1_SHIFT 25 +#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000 + +/* MME_SHADOW_2_COUT_SCALE */ +#define MME_SHADOW_2_COUT_SCALE_V_SHIFT 0 +#define MME_SHADOW_2_COUT_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_CIN_SCALE */ +#define MME_SHADOW_2_CIN_SCALE_V_SHIFT 0 +#define MME_SHADOW_2_CIN_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_GEMMLOWP_ZP */ +#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_CIN_SHIFT 0 +#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF +#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_COUT_SHIFT 9 +#define 
MME_SHADOW_2_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00 +#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_B_SHIFT 18 +#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000 +#define MME_SHADOW_2_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27 +#define MME_SHADOW_2_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000 +#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_SHIFT 28 +#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_MASK 0x10000000 +#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29 +#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000 +#define MME_SHADOW_2_GEMMLOWP_ZP_RELU_EN_SHIFT 30 +#define MME_SHADOW_2_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000 + +/* MME_SHADOW_2_GEMMLOWP_EXPONENT */ +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0 +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8 +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00 +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16 +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000 +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17 +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000 + +/* MME_SHADOW_2_A_ROI_BASE_OFFSET */ +#define MME_SHADOW_2_A_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_2_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_A_VALID_ELEMENTS */ +#define MME_SHADOW_2_A_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_2_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_A_LOOP_STRIDE */ +#define MME_SHADOW_2_A_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_2_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_A_ROI_SIZE */ +#define MME_SHADOW_2_A_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_2_A_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_A_SPATIAL_START_OFFSET */ +#define MME_SHADOW_2_A_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_2_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_A_SPATIAL_STRIDE */ +#define MME_SHADOW_2_A_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_2_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_ROI_BASE_OFFSET */ +#define MME_SHADOW_2_B_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_2_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_VALID_ELEMENTS */ +#define MME_SHADOW_2_B_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_2_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_LOOP_STRIDE */ +#define MME_SHADOW_2_B_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_2_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_ROI_SIZE */ +#define MME_SHADOW_2_B_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_2_B_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_SPATIAL_START_OFFSET */ +#define MME_SHADOW_2_B_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_2_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_SPATIAL_STRIDE */ +#define MME_SHADOW_2_B_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_2_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_C_ROI_BASE_OFFSET */ +#define MME_SHADOW_2_C_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_2_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_C_VALID_ELEMENTS */ +#define MME_SHADOW_2_C_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_2_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_C_LOOP_STRIDE */ +#define 
MME_SHADOW_2_C_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_2_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_C_ROI_SIZE */ +#define MME_SHADOW_2_C_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_2_C_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_C_SPATIAL_START_OFFSET */ +#define MME_SHADOW_2_C_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_2_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_C_SPATIAL_STRIDE */ +#define MME_SHADOW_2_C_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_2_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_SYNC_OBJECT_MESSAGE */ +#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16 +#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000 +#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31 +#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000 + +/* MME_SHADOW_2_E_PADDING_VALUE_A */ +#define MME_SHADOW_2_E_PADDING_VALUE_A_V_SHIFT 0 +#define MME_SHADOW_2_E_PADDING_VALUE_A_V_MASK 0xFFFF + +/* MME_SHADOW_2_E_NUM_ITERATION_MINUS_1 */ +#define MME_SHADOW_2_E_NUM_ITERATION_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_2_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_E_BUBBLES_PER_SPLIT */ +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_A_SHIFT 0 +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_A_MASK 0xFF +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_B_SHIFT 8 +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00 +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16 +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000 +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_ID_SHIFT 24 +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000 + +/* MME_SHADOW_3_STATUS */ +#define MME_SHADOW_3_STATUS_A_SHIFT 0 +#define MME_SHADOW_3_STATUS_A_MASK 0x1 +#define MME_SHADOW_3_STATUS_B_SHIFT 1 +#define MME_SHADOW_3_STATUS_B_MASK 0x2 +#define MME_SHADOW_3_STATUS_CIN_SHIFT 2 +#define MME_SHADOW_3_STATUS_CIN_MASK 0x4 +#define MME_SHADOW_3_STATUS_COUT_SHIFT 3 +#define MME_SHADOW_3_STATUS_COUT_MASK 0x8 +#define MME_SHADOW_3_STATUS_TE_SHIFT 4 +#define MME_SHADOW_3_STATUS_TE_MASK 0x10 +#define MME_SHADOW_3_STATUS_LD_SHIFT 5 +#define MME_SHADOW_3_STATUS_LD_MASK 0x20 +#define MME_SHADOW_3_STATUS_ST_SHIFT 6 +#define MME_SHADOW_3_STATUS_ST_MASK 0x40 + +/* MME_SHADOW_3_A_BASE_ADDR_HIGH */ +#define MME_SHADOW_3_A_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_3_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_BASE_ADDR_HIGH */ +#define MME_SHADOW_3_B_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_3_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_CIN_BASE_ADDR_HIGH */ +#define MME_SHADOW_3_CIN_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_3_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_COUT_BASE_ADDR_HIGH */ +#define MME_SHADOW_3_COUT_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_3_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_BIAS_BASE_ADDR_HIGH */ +#define MME_SHADOW_3_BIAS_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_3_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_A_BASE_ADDR_LOW */ +#define MME_SHADOW_3_A_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_3_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_BASE_ADDR_LOW */ +#define MME_SHADOW_3_B_BASE_ADDR_LOW_V_SHIFT 0 +#define 
MME_SHADOW_3_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_CIN_BASE_ADDR_LOW */ +#define MME_SHADOW_3_CIN_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_3_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_COUT_BASE_ADDR_LOW */ +#define MME_SHADOW_3_COUT_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_3_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_BIAS_BASE_ADDR_LOW */ +#define MME_SHADOW_3_BIAS_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_3_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_HEADER */ +#define MME_SHADOW_3_HEADER_SIGNAL_MASK_SHIFT 0 +#define MME_SHADOW_3_HEADER_SIGNAL_MASK_MASK 0x1F +#define MME_SHADOW_3_HEADER_SIGNAL_EN_SHIFT 5 +#define MME_SHADOW_3_HEADER_SIGNAL_EN_MASK 0x20 +#define MME_SHADOW_3_HEADER_TRANS_A_SHIFT 6 +#define MME_SHADOW_3_HEADER_TRANS_A_MASK 0x40 +#define MME_SHADOW_3_HEADER_LOWER_A_SHIFT 7 +#define MME_SHADOW_3_HEADER_LOWER_A_MASK 0x80 +#define MME_SHADOW_3_HEADER_ACCUM_MASK_SHIFT 8 +#define MME_SHADOW_3_HEADER_ACCUM_MASK_MASK 0xF00 +#define MME_SHADOW_3_HEADER_LOAD_BIAS_SHIFT 12 +#define MME_SHADOW_3_HEADER_LOAD_BIAS_MASK 0x1000 +#define MME_SHADOW_3_HEADER_LOAD_CIN_SHIFT 13 +#define MME_SHADOW_3_HEADER_LOAD_CIN_MASK 0x2000 +#define MME_SHADOW_3_HEADER_STORE_OUT_SHIFT 15 +#define MME_SHADOW_3_HEADER_STORE_OUT_MASK 0x8000 +#define MME_SHADOW_3_HEADER_ACC_LD_INC_DISABLE_SHIFT 16 +#define MME_SHADOW_3_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000 +#define MME_SHADOW_3_HEADER_ADVANCE_A_SHIFT 17 +#define MME_SHADOW_3_HEADER_ADVANCE_A_MASK 0x20000 +#define MME_SHADOW_3_HEADER_ADVANCE_B_SHIFT 18 +#define MME_SHADOW_3_HEADER_ADVANCE_B_MASK 0x40000 +#define MME_SHADOW_3_HEADER_ADVANCE_CIN_SHIFT 19 +#define MME_SHADOW_3_HEADER_ADVANCE_CIN_MASK 0x80000 +#define MME_SHADOW_3_HEADER_ADVANCE_COUT_SHIFT 20 +#define MME_SHADOW_3_HEADER_ADVANCE_COUT_MASK 0x100000 +#define MME_SHADOW_3_HEADER_COMPRESSED_B_SHIFT 21 +#define MME_SHADOW_3_HEADER_COMPRESSED_B_MASK 0x200000 +#define MME_SHADOW_3_HEADER_MASK_CONV_END_SHIFT 22 +#define MME_SHADOW_3_HEADER_MASK_CONV_END_MASK 0x400000 +#define MME_SHADOW_3_HEADER_ACC_ST_INC_DISABLE_SHIFT 23 +#define MME_SHADOW_3_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000 +#define MME_SHADOW_3_HEADER_AB_DATA_TYPE_SHIFT 24 +#define MME_SHADOW_3_HEADER_AB_DATA_TYPE_MASK 0x3000000 +#define MME_SHADOW_3_HEADER_CIN_DATA_TYPE_SHIFT 26 +#define MME_SHADOW_3_HEADER_CIN_DATA_TYPE_MASK 0x1C000000 +#define MME_SHADOW_3_HEADER_COUT_DATA_TYPE_SHIFT 29 +#define MME_SHADOW_3_HEADER_COUT_DATA_TYPE_MASK 0xE0000000 + +/* MME_SHADOW_3_KERNEL_SIZE_MINUS_1 */ +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0 +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8 +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00 +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16 +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000 +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24 +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000 + +/* MME_SHADOW_3_ASSOCIATED_DIMS */ +#define MME_SHADOW_3_ASSOCIATED_DIMS_A_0_SHIFT 0 +#define MME_SHADOW_3_ASSOCIATED_DIMS_A_0_MASK 0x7 +#define MME_SHADOW_3_ASSOCIATED_DIMS_B_0_SHIFT 3 +#define MME_SHADOW_3_ASSOCIATED_DIMS_B_0_MASK 0x38 +#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_0_SHIFT 6 +#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0 +#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_0_SHIFT 9 +#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_0_MASK 0xE00 +#define MME_SHADOW_3_ASSOCIATED_DIMS_A_1_SHIFT 16 +#define 
MME_SHADOW_3_ASSOCIATED_DIMS_A_1_MASK 0x70000 +#define MME_SHADOW_3_ASSOCIATED_DIMS_B_1_SHIFT 19 +#define MME_SHADOW_3_ASSOCIATED_DIMS_B_1_MASK 0x380000 +#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_1_SHIFT 22 +#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000 +#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_1_SHIFT 25 +#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000 + +/* MME_SHADOW_3_COUT_SCALE */ +#define MME_SHADOW_3_COUT_SCALE_V_SHIFT 0 +#define MME_SHADOW_3_COUT_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_CIN_SCALE */ +#define MME_SHADOW_3_CIN_SCALE_V_SHIFT 0 +#define MME_SHADOW_3_CIN_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_GEMMLOWP_ZP */ +#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_CIN_SHIFT 0 +#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF +#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_COUT_SHIFT 9 +#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00 +#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_B_SHIFT 18 +#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000 +#define MME_SHADOW_3_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27 +#define MME_SHADOW_3_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000 +#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_SHIFT 28 +#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_MASK 0x10000000 +#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29 +#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000 +#define MME_SHADOW_3_GEMMLOWP_ZP_RELU_EN_SHIFT 30 +#define MME_SHADOW_3_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000 + +/* MME_SHADOW_3_GEMMLOWP_EXPONENT */ +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0 +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8 +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00 +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16 +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000 +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17 +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000 + +/* MME_SHADOW_3_A_ROI_BASE_OFFSET */ +#define MME_SHADOW_3_A_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_3_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_A_VALID_ELEMENTS */ +#define MME_SHADOW_3_A_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_3_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_A_LOOP_STRIDE */ +#define MME_SHADOW_3_A_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_3_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_A_ROI_SIZE */ +#define MME_SHADOW_3_A_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_3_A_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_A_SPATIAL_START_OFFSET */ +#define MME_SHADOW_3_A_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_3_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_A_SPATIAL_STRIDE */ +#define MME_SHADOW_3_A_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_3_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_ROI_BASE_OFFSET */ +#define MME_SHADOW_3_B_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_3_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_VALID_ELEMENTS */ +#define MME_SHADOW_3_B_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_3_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_LOOP_STRIDE */ +#define MME_SHADOW_3_B_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_3_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_ROI_SIZE */ +#define MME_SHADOW_3_B_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_3_B_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* 
MME_SHADOW_3_B_SPATIAL_START_OFFSET */ +#define MME_SHADOW_3_B_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_3_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_SPATIAL_STRIDE */ +#define MME_SHADOW_3_B_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_3_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_C_ROI_BASE_OFFSET */ +#define MME_SHADOW_3_C_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_3_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_C_VALID_ELEMENTS */ +#define MME_SHADOW_3_C_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_3_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_C_LOOP_STRIDE */ +#define MME_SHADOW_3_C_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_3_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_C_ROI_SIZE */ +#define MME_SHADOW_3_C_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_3_C_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_C_SPATIAL_START_OFFSET */ +#define MME_SHADOW_3_C_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_3_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_C_SPATIAL_STRIDE */ +#define MME_SHADOW_3_C_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_3_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_SYNC_OBJECT_MESSAGE */ +#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16 +#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000 +#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31 +#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000 + +/* MME_SHADOW_3_E_PADDING_VALUE_A */ +#define MME_SHADOW_3_E_PADDING_VALUE_A_V_SHIFT 0 +#define MME_SHADOW_3_E_PADDING_VALUE_A_V_MASK 0xFFFF + +/* MME_SHADOW_3_E_NUM_ITERATION_MINUS_1 */ +#define MME_SHADOW_3_E_NUM_ITERATION_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_3_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_E_BUBBLES_PER_SPLIT */ +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_A_SHIFT 0 +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_A_MASK 0xFF +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_B_SHIFT 8 +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00 +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16 +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000 +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_SHIFT 24 +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000 + +#endif /* ASIC_REG_MME_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h new file mode 100644 index 000000000..e464e3815 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h @@ -0,0 +1,464 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME_QM_MASKS_H_ +#define ASIC_REG_MME_QM_MASKS_H_ + +/* + ***************************************** + * MME_QM (Prototype: QMAN) + ***************************************** + */ + +/* MME_QM_GLBL_CFG0 */ +#define MME_QM_GLBL_CFG0_PQF_EN_SHIFT 0 +#define MME_QM_GLBL_CFG0_PQF_EN_MASK 0x1 +#define MME_QM_GLBL_CFG0_CQF_EN_SHIFT 1 +#define MME_QM_GLBL_CFG0_CQF_EN_MASK 0x2 +#define MME_QM_GLBL_CFG0_CP_EN_SHIFT 2 +#define MME_QM_GLBL_CFG0_CP_EN_MASK 0x4 +#define MME_QM_GLBL_CFG0_DMA_EN_SHIFT 3 +#define MME_QM_GLBL_CFG0_DMA_EN_MASK 0x8 + +/* MME_QM_GLBL_CFG1 */ +#define MME_QM_GLBL_CFG1_PQF_STOP_SHIFT 0 +#define MME_QM_GLBL_CFG1_PQF_STOP_MASK 0x1 +#define MME_QM_GLBL_CFG1_CQF_STOP_SHIFT 1 +#define MME_QM_GLBL_CFG1_CQF_STOP_MASK 0x2 +#define MME_QM_GLBL_CFG1_CP_STOP_SHIFT 2 +#define MME_QM_GLBL_CFG1_CP_STOP_MASK 0x4 +#define MME_QM_GLBL_CFG1_DMA_STOP_SHIFT 3 +#define MME_QM_GLBL_CFG1_DMA_STOP_MASK 0x8 +#define MME_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 8 +#define MME_QM_GLBL_CFG1_PQF_FLUSH_MASK 0x100 +#define MME_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 9 +#define MME_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x200 +#define MME_QM_GLBL_CFG1_CP_FLUSH_SHIFT 10 +#define MME_QM_GLBL_CFG1_CP_FLUSH_MASK 0x400 +#define MME_QM_GLBL_CFG1_DMA_FLUSH_SHIFT 11 +#define MME_QM_GLBL_CFG1_DMA_FLUSH_MASK 0x800 + +/* MME_QM_GLBL_PROT */ +#define MME_QM_GLBL_PROT_PQF_PROT_SHIFT 0 +#define MME_QM_GLBL_PROT_PQF_PROT_MASK 0x1 +#define MME_QM_GLBL_PROT_CQF_PROT_SHIFT 1 +#define MME_QM_GLBL_PROT_CQF_PROT_MASK 0x2 +#define MME_QM_GLBL_PROT_CP_PROT_SHIFT 2 +#define MME_QM_GLBL_PROT_CP_PROT_MASK 0x4 +#define MME_QM_GLBL_PROT_DMA_PROT_SHIFT 3 +#define MME_QM_GLBL_PROT_DMA_PROT_MASK 0x8 +#define MME_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT 4 +#define MME_QM_GLBL_PROT_PQF_ERR_PROT_MASK 0x10 +#define MME_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT 5 +#define MME_QM_GLBL_PROT_CQF_ERR_PROT_MASK 0x20 +#define MME_QM_GLBL_PROT_CP_ERR_PROT_SHIFT 6 +#define MME_QM_GLBL_PROT_CP_ERR_PROT_MASK 0x40 +#define MME_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT 7 +#define MME_QM_GLBL_PROT_DMA_ERR_PROT_MASK 0x80 + +/* MME_QM_GLBL_ERR_CFG */ +#define MME_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0 +#define MME_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1 +#define MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1 +#define MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2 +#define MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2 +#define MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4 +#define MME_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3 +#define MME_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8 +#define MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4 +#define MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10 +#define MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5 +#define MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20 +#define MME_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6 +#define MME_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40 +#define MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7 +#define MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80 +#define MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8 +#define MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100 +#define MME_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9 +#define MME_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200 +#define MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10 +#define MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400 +#define MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11 +#define MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800 + +/* MME_QM_GLBL_ERR_ADDR_LO */ +#define 
MME_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0 +#define MME_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_GLBL_ERR_ADDR_HI */ +#define MME_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0 +#define MME_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_GLBL_ERR_WDATA */ +#define MME_QM_GLBL_ERR_WDATA_VAL_SHIFT 0 +#define MME_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF + +/* MME_QM_GLBL_SECURE_PROPS */ +#define MME_QM_GLBL_SECURE_PROPS_ASID_SHIFT 0 +#define MME_QM_GLBL_SECURE_PROPS_ASID_MASK 0x3FF +#define MME_QM_GLBL_SECURE_PROPS_MMBP_SHIFT 10 +#define MME_QM_GLBL_SECURE_PROPS_MMBP_MASK 0x400 + +/* MME_QM_GLBL_NON_SECURE_PROPS */ +#define MME_QM_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0 +#define MME_QM_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF +#define MME_QM_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10 +#define MME_QM_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400 + +/* MME_QM_GLBL_STS0 */ +#define MME_QM_GLBL_STS0_PQF_IDLE_SHIFT 0 +#define MME_QM_GLBL_STS0_PQF_IDLE_MASK 0x1 +#define MME_QM_GLBL_STS0_CQF_IDLE_SHIFT 1 +#define MME_QM_GLBL_STS0_CQF_IDLE_MASK 0x2 +#define MME_QM_GLBL_STS0_CP_IDLE_SHIFT 2 +#define MME_QM_GLBL_STS0_CP_IDLE_MASK 0x4 +#define MME_QM_GLBL_STS0_DMA_IDLE_SHIFT 3 +#define MME_QM_GLBL_STS0_DMA_IDLE_MASK 0x8 +#define MME_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 4 +#define MME_QM_GLBL_STS0_PQF_IS_STOP_MASK 0x10 +#define MME_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 5 +#define MME_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x20 +#define MME_QM_GLBL_STS0_CP_IS_STOP_SHIFT 6 +#define MME_QM_GLBL_STS0_CP_IS_STOP_MASK 0x40 +#define MME_QM_GLBL_STS0_DMA_IS_STOP_SHIFT 7 +#define MME_QM_GLBL_STS0_DMA_IS_STOP_MASK 0x80 + +/* MME_QM_GLBL_STS1 */ +#define MME_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0 +#define MME_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1 +#define MME_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1 +#define MME_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2 +#define MME_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2 +#define MME_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4 +#define MME_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3 +#define MME_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8 +#define MME_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4 +#define MME_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10 +#define MME_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5 +#define MME_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20 +#define MME_QM_GLBL_STS1_DMA_RD_ERR_SHIFT 8 +#define MME_QM_GLBL_STS1_DMA_RD_ERR_MASK 0x100 +#define MME_QM_GLBL_STS1_DMA_WR_ERR_SHIFT 9 +#define MME_QM_GLBL_STS1_DMA_WR_ERR_MASK 0x200 +#define MME_QM_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10 +#define MME_QM_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400 +#define MME_QM_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11 +#define MME_QM_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800 + +/* MME_QM_PQ_BASE_LO */ +#define MME_QM_PQ_BASE_LO_VAL_SHIFT 0 +#define MME_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_PQ_BASE_HI */ +#define MME_QM_PQ_BASE_HI_VAL_SHIFT 0 +#define MME_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_PQ_SIZE */ +#define MME_QM_PQ_SIZE_VAL_SHIFT 0 +#define MME_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF + +/* MME_QM_PQ_PI */ +#define MME_QM_PQ_PI_VAL_SHIFT 0 +#define MME_QM_PQ_PI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_PQ_CI */ +#define MME_QM_PQ_CI_VAL_SHIFT 0 +#define MME_QM_PQ_CI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_PQ_CFG0 */ +#define MME_QM_PQ_CFG0_RESERVED_SHIFT 0 +#define MME_QM_PQ_CFG0_RESERVED_MASK 0x1 + +/* MME_QM_PQ_CFG1 */ +#define MME_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0 +#define MME_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define MME_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define MME_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* MME_QM_PQ_ARUSER */ +#define MME_QM_PQ_ARUSER_NOSNOOP_SHIFT 0 +#define MME_QM_PQ_ARUSER_NOSNOOP_MASK 0x1 +#define MME_QM_PQ_ARUSER_WORD_SHIFT 1 +#define 
MME_QM_PQ_ARUSER_WORD_MASK 0x2 + +/* MME_QM_PQ_PUSH0 */ +#define MME_QM_PQ_PUSH0_PTR_LO_SHIFT 0 +#define MME_QM_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF + +/* MME_QM_PQ_PUSH1 */ +#define MME_QM_PQ_PUSH1_PTR_HI_SHIFT 0 +#define MME_QM_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF + +/* MME_QM_PQ_PUSH2 */ +#define MME_QM_PQ_PUSH2_TSIZE_SHIFT 0 +#define MME_QM_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF + +/* MME_QM_PQ_PUSH3 */ +#define MME_QM_PQ_PUSH3_RPT_SHIFT 0 +#define MME_QM_PQ_PUSH3_RPT_MASK 0xFFFF +#define MME_QM_PQ_PUSH3_CTL_SHIFT 16 +#define MME_QM_PQ_PUSH3_CTL_MASK 0xFFFF0000 + +/* MME_QM_PQ_STS0 */ +#define MME_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0 +#define MME_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF +#define MME_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16 +#define MME_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000 + +/* MME_QM_PQ_STS1 */ +#define MME_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0 +#define MME_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF +#define MME_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30 +#define MME_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000 +#define MME_QM_PQ_STS1_PQ_BUSY_SHIFT 31 +#define MME_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000 + +/* MME_QM_PQ_RD_RATE_LIM_EN */ +#define MME_QM_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define MME_QM_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* MME_QM_PQ_RD_RATE_LIM_RST_TOKEN */ +#define MME_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define MME_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* MME_QM_PQ_RD_RATE_LIM_SAT */ +#define MME_QM_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define MME_QM_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* MME_QM_PQ_RD_RATE_LIM_TOUT */ +#define MME_QM_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define MME_QM_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* MME_QM_CQ_CFG0 */ +#define MME_QM_CQ_CFG0_RESERVED_SHIFT 0 +#define MME_QM_CQ_CFG0_RESERVED_MASK 0x1 + +/* MME_QM_CQ_CFG1 */ +#define MME_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0 +#define MME_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define MME_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define MME_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* MME_QM_CQ_ARUSER */ +#define MME_QM_CQ_ARUSER_NOSNOOP_SHIFT 0 +#define MME_QM_CQ_ARUSER_NOSNOOP_MASK 0x1 +#define MME_QM_CQ_ARUSER_WORD_SHIFT 1 +#define MME_QM_CQ_ARUSER_WORD_MASK 0x2 + +/* MME_QM_CQ_PTR_LO */ +#define MME_QM_CQ_PTR_LO_VAL_SHIFT 0 +#define MME_QM_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_PTR_HI */ +#define MME_QM_CQ_PTR_HI_VAL_SHIFT 0 +#define MME_QM_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_TSIZE */ +#define MME_QM_CQ_TSIZE_VAL_SHIFT 0 +#define MME_QM_CQ_TSIZE_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_CTL */ +#define MME_QM_CQ_CTL_RPT_SHIFT 0 +#define MME_QM_CQ_CTL_RPT_MASK 0xFFFF +#define MME_QM_CQ_CTL_CTL_SHIFT 16 +#define MME_QM_CQ_CTL_CTL_MASK 0xFFFF0000 + +/* MME_QM_CQ_PTR_LO_STS */ +#define MME_QM_CQ_PTR_LO_STS_VAL_SHIFT 0 +#define MME_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_PTR_HI_STS */ +#define MME_QM_CQ_PTR_HI_STS_VAL_SHIFT 0 +#define MME_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_TSIZE_STS */ +#define MME_QM_CQ_TSIZE_STS_VAL_SHIFT 0 +#define MME_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_CTL_STS */ +#define MME_QM_CQ_CTL_STS_RPT_SHIFT 0 +#define MME_QM_CQ_CTL_STS_RPT_MASK 0xFFFF +#define MME_QM_CQ_CTL_STS_CTL_SHIFT 16 +#define MME_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000 + +/* MME_QM_CQ_STS0 */ +#define MME_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0 +#define MME_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF +#define MME_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16 +#define MME_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000 + +/* MME_QM_CQ_STS1 */ +#define MME_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0 +#define MME_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF 
+#define MME_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30 +#define MME_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000 +#define MME_QM_CQ_STS1_CQ_BUSY_SHIFT 31 +#define MME_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000 + +/* MME_QM_CQ_RD_RATE_LIM_EN */ +#define MME_QM_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define MME_QM_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* MME_QM_CQ_RD_RATE_LIM_RST_TOKEN */ +#define MME_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define MME_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* MME_QM_CQ_RD_RATE_LIM_SAT */ +#define MME_QM_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define MME_QM_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* MME_QM_CQ_RD_RATE_LIM_TOUT */ +#define MME_QM_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define MME_QM_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* MME_QM_CQ_IFIFO_CNT */ +#define MME_QM_CQ_IFIFO_CNT_VAL_SHIFT 0 +#define MME_QM_CQ_IFIFO_CNT_VAL_MASK 0x3 + +/* MME_QM_CP_MSG_BASE0_ADDR_LO */ +#define MME_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_MSG_BASE0_ADDR_HI */ +#define MME_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_MSG_BASE1_ADDR_LO */ +#define MME_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_MSG_BASE1_ADDR_HI */ +#define MME_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_MSG_BASE2_ADDR_LO */ +#define MME_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_MSG_BASE2_ADDR_HI */ +#define MME_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_MSG_BASE3_ADDR_LO */ +#define MME_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_MSG_BASE3_ADDR_HI */ +#define MME_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_LDMA_TSIZE_OFFSET */ +#define MME_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0 +#define MME_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET */ +#define MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0 +#define MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET */ +#define MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0 +#define MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_LDMA_DST_BASE_LO_OFFSET */ +#define MME_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0 +#define MME_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_LDMA_DST_BASE_HI_OFFSET */ +#define MME_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0 +#define MME_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_LDMA_COMMIT_OFFSET */ +#define MME_QM_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0 +#define MME_QM_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_FENCE0_RDATA */ +#define MME_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0 +#define MME_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF + +/* MME_QM_CP_FENCE1_RDATA */ +#define MME_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0 +#define MME_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF + +/* MME_QM_CP_FENCE2_RDATA */ +#define MME_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0 +#define MME_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF + +/* MME_QM_CP_FENCE3_RDATA */ +#define MME_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0 +#define MME_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF + +/* MME_QM_CP_FENCE0_CNT */ +#define MME_QM_CP_FENCE0_CNT_VAL_SHIFT 0 +#define MME_QM_CP_FENCE0_CNT_VAL_MASK 
0xFF + +/* MME_QM_CP_FENCE1_CNT */ +#define MME_QM_CP_FENCE1_CNT_VAL_SHIFT 0 +#define MME_QM_CP_FENCE1_CNT_VAL_MASK 0xFF + +/* MME_QM_CP_FENCE2_CNT */ +#define MME_QM_CP_FENCE2_CNT_VAL_SHIFT 0 +#define MME_QM_CP_FENCE2_CNT_VAL_MASK 0xFF + +/* MME_QM_CP_FENCE3_CNT */ +#define MME_QM_CP_FENCE3_CNT_VAL_SHIFT 0 +#define MME_QM_CP_FENCE3_CNT_VAL_MASK 0xFF + +/* MME_QM_CP_STS */ +#define MME_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0 +#define MME_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF +#define MME_QM_CP_STS_ERDY_SHIFT 16 +#define MME_QM_CP_STS_ERDY_MASK 0x10000 +#define MME_QM_CP_STS_RRDY_SHIFT 17 +#define MME_QM_CP_STS_RRDY_MASK 0x20000 +#define MME_QM_CP_STS_MRDY_SHIFT 18 +#define MME_QM_CP_STS_MRDY_MASK 0x40000 +#define MME_QM_CP_STS_SW_STOP_SHIFT 19 +#define MME_QM_CP_STS_SW_STOP_MASK 0x80000 +#define MME_QM_CP_STS_FENCE_ID_SHIFT 20 +#define MME_QM_CP_STS_FENCE_ID_MASK 0x300000 +#define MME_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22 +#define MME_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000 + +/* MME_QM_CP_CURRENT_INST_LO */ +#define MME_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0 +#define MME_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_CURRENT_INST_HI */ +#define MME_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0 +#define MME_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_BARRIER_CFG */ +#define MME_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0 +#define MME_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF + +/* MME_QM_CP_DBG_0 */ +#define MME_QM_CP_DBG_0_VAL_SHIFT 0 +#define MME_QM_CP_DBG_0_VAL_MASK 0xFF + +/* MME_QM_PQ_BUF_ADDR */ +#define MME_QM_PQ_BUF_ADDR_VAL_SHIFT 0 +#define MME_QM_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* MME_QM_PQ_BUF_RDATA */ +#define MME_QM_PQ_BUF_RDATA_VAL_SHIFT 0 +#define MME_QM_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_BUF_ADDR */ +#define MME_QM_CQ_BUF_ADDR_VAL_SHIFT 0 +#define MME_QM_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_BUF_RDATA */ +#define MME_QM_CQ_BUF_RDATA_VAL_SHIFT 0 +#define MME_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +#endif /* ASIC_REG_MME_QM_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h new file mode 100644 index 000000000..538708bef --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME_QM_REGS_H_ +#define ASIC_REG_MME_QM_REGS_H_ + +/* + ***************************************** + * MME_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmMME_QM_GLBL_CFG0 0xD8000 + +#define mmMME_QM_GLBL_CFG1 0xD8004 + +#define mmMME_QM_GLBL_PROT 0xD8008 + +#define mmMME_QM_GLBL_ERR_CFG 0xD800C + +#define mmMME_QM_GLBL_ERR_ADDR_LO 0xD8010 + +#define mmMME_QM_GLBL_ERR_ADDR_HI 0xD8014 + +#define mmMME_QM_GLBL_ERR_WDATA 0xD8018 + +#define mmMME_QM_GLBL_SECURE_PROPS 0xD801C + +#define mmMME_QM_GLBL_NON_SECURE_PROPS 0xD8020 + +#define mmMME_QM_GLBL_STS0 0xD8024 + +#define mmMME_QM_GLBL_STS1 0xD8028 + +#define mmMME_QM_PQ_BASE_LO 0xD8060 + +#define mmMME_QM_PQ_BASE_HI 0xD8064 + +#define mmMME_QM_PQ_SIZE 0xD8068 + +#define mmMME_QM_PQ_PI 0xD806C + +#define mmMME_QM_PQ_CI 0xD8070 + +#define mmMME_QM_PQ_CFG0 0xD8074 + +#define mmMME_QM_PQ_CFG1 0xD8078 + +#define mmMME_QM_PQ_ARUSER 0xD807C + +#define mmMME_QM_PQ_PUSH0 0xD8080 + +#define mmMME_QM_PQ_PUSH1 0xD8084 + +#define mmMME_QM_PQ_PUSH2 0xD8088 + +#define mmMME_QM_PQ_PUSH3 0xD808C + +#define mmMME_QM_PQ_STS0 0xD8090 + +#define mmMME_QM_PQ_STS1 0xD8094 + +#define mmMME_QM_PQ_RD_RATE_LIM_EN 0xD80A0 + +#define mmMME_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xD80A4 + +#define mmMME_QM_PQ_RD_RATE_LIM_SAT 0xD80A8 + +#define mmMME_QM_PQ_RD_RATE_LIM_TOUT 0xD80AC + +#define mmMME_QM_CQ_CFG0 0xD80B0 + +#define mmMME_QM_CQ_CFG1 0xD80B4 + +#define mmMME_QM_CQ_ARUSER 0xD80B8 + +#define mmMME_QM_CQ_PTR_LO 0xD80C0 + +#define mmMME_QM_CQ_PTR_HI 0xD80C4 + +#define mmMME_QM_CQ_TSIZE 0xD80C8 + +#define mmMME_QM_CQ_CTL 0xD80CC + +#define mmMME_QM_CQ_PTR_LO_STS 0xD80D4 + +#define mmMME_QM_CQ_PTR_HI_STS 0xD80D8 + +#define mmMME_QM_CQ_TSIZE_STS 0xD80DC + +#define mmMME_QM_CQ_CTL_STS 0xD80E0 + +#define mmMME_QM_CQ_STS0 0xD80E4 + +#define mmMME_QM_CQ_STS1 0xD80E8 + +#define mmMME_QM_CQ_RD_RATE_LIM_EN 0xD80F0 + +#define mmMME_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xD80F4 + +#define mmMME_QM_CQ_RD_RATE_LIM_SAT 0xD80F8 + +#define mmMME_QM_CQ_RD_RATE_LIM_TOUT 0xD80FC + +#define mmMME_QM_CQ_IFIFO_CNT 0xD8108 + +#define mmMME_QM_CP_MSG_BASE0_ADDR_LO 0xD8120 + +#define mmMME_QM_CP_MSG_BASE0_ADDR_HI 0xD8124 + +#define mmMME_QM_CP_MSG_BASE1_ADDR_LO 0xD8128 + +#define mmMME_QM_CP_MSG_BASE1_ADDR_HI 0xD812C + +#define mmMME_QM_CP_MSG_BASE2_ADDR_LO 0xD8130 + +#define mmMME_QM_CP_MSG_BASE2_ADDR_HI 0xD8134 + +#define mmMME_QM_CP_MSG_BASE3_ADDR_LO 0xD8138 + +#define mmMME_QM_CP_MSG_BASE3_ADDR_HI 0xD813C + +#define mmMME_QM_CP_LDMA_TSIZE_OFFSET 0xD8140 + +#define mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xD8144 + +#define mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xD8148 + +#define mmMME_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xD814C + +#define mmMME_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xD8150 + +#define mmMME_QM_CP_LDMA_COMMIT_OFFSET 0xD8154 + +#define mmMME_QM_CP_FENCE0_RDATA 0xD8158 + +#define mmMME_QM_CP_FENCE1_RDATA 0xD815C + +#define mmMME_QM_CP_FENCE2_RDATA 0xD8160 + +#define mmMME_QM_CP_FENCE3_RDATA 0xD8164 + +#define mmMME_QM_CP_FENCE0_CNT 0xD8168 + +#define mmMME_QM_CP_FENCE1_CNT 0xD816C + +#define mmMME_QM_CP_FENCE2_CNT 0xD8170 + +#define mmMME_QM_CP_FENCE3_CNT 0xD8174 + +#define mmMME_QM_CP_STS 0xD8178 + +#define mmMME_QM_CP_CURRENT_INST_LO 0xD817C + +#define mmMME_QM_CP_CURRENT_INST_HI 0xD8180 + +#define mmMME_QM_CP_BARRIER_CFG 0xD8184 + +#define mmMME_QM_CP_DBG_0 0xD8188 + +#define mmMME_QM_PQ_BUF_ADDR 0xD8300 + 
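[Editorial aside, not part of the patch: the *_SHIFT/*_MASK pairs in the companion mme_qm_masks.h file above are consumed together with these mm* offsets. A minimal sketch of the idiom, assuming the habanalabs driver's RREG32() MMIO read accessor; the variable names are illustrative only:

	/* Poll the MME queue-manager CQ busy bit; register offset and
	 * field mask/shift come from the headers in this patch. RREG32()
	 * is assumed to read a 32-bit register relative to the device's
	 * configuration BAR. */
	u32 sts = RREG32(mmMME_QM_CQ_STS1);
	u32 cq_busy = (sts & MME_QM_CQ_STS1_CQ_BUSY_MASK) >>
			MME_QM_CQ_STS1_CQ_BUSY_SHIFT;
]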
+#define mmMME_QM_PQ_BUF_RDATA 0xD8304 + +#define mmMME_QM_CQ_BUF_ADDR 0xD8308 + +#define mmMME_QM_CQ_BUF_RDATA 0xD830C + +#endif /* ASIC_REG_MME_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h new file mode 100644 index 000000000..0396cbfd5 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h @@ -0,0 +1,1152 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME_REGS_H_ +#define ASIC_REG_MME_REGS_H_ + +/* + ***************************************** + * MME (Prototype: MME) + ***************************************** + */ + +#define mmMME_ARCH_STATUS 0xD0000 + +#define mmMME_ARCH_A_BASE_ADDR_HIGH 0xD0008 + +#define mmMME_ARCH_B_BASE_ADDR_HIGH 0xD000C + +#define mmMME_ARCH_CIN_BASE_ADDR_HIGH 0xD0010 + +#define mmMME_ARCH_COUT_BASE_ADDR_HIGH 0xD0014 + +#define mmMME_ARCH_BIAS_BASE_ADDR_HIGH 0xD0018 + +#define mmMME_ARCH_A_BASE_ADDR_LOW 0xD001C + +#define mmMME_ARCH_B_BASE_ADDR_LOW 0xD0020 + +#define mmMME_ARCH_CIN_BASE_ADDR_LOW 0xD0024 + +#define mmMME_ARCH_COUT_BASE_ADDR_LOW 0xD0028 + +#define mmMME_ARCH_BIAS_BASE_ADDR_LOW 0xD002C + +#define mmMME_ARCH_HEADER 0xD0030 + +#define mmMME_ARCH_KERNEL_SIZE_MINUS_1 0xD0034 + +#define mmMME_ARCH_ASSOCIATED_DIMS_0 0xD0038 + +#define mmMME_ARCH_ASSOCIATED_DIMS_1 0xD003C + +#define mmMME_ARCH_COUT_SCALE 0xD0040 + +#define mmMME_ARCH_CIN_SCALE 0xD0044 + +#define mmMME_ARCH_GEMMLOWP_ZP 0xD0048 + +#define mmMME_ARCH_GEMMLOWP_EXPONENT 0xD004C + +#define mmMME_ARCH_A_ROI_BASE_OFFSET_0 0xD0050 + +#define mmMME_ARCH_A_ROI_BASE_OFFSET_1 0xD0054 + +#define mmMME_ARCH_A_ROI_BASE_OFFSET_2 0xD0058 + +#define mmMME_ARCH_A_ROI_BASE_OFFSET_3 0xD005C + +#define mmMME_ARCH_A_ROI_BASE_OFFSET_4 0xD0060 + +#define mmMME_ARCH_A_VALID_ELEMENTS_0 0xD0064 + +#define mmMME_ARCH_A_VALID_ELEMENTS_1 0xD0068 + +#define mmMME_ARCH_A_VALID_ELEMENTS_2 0xD006C + +#define mmMME_ARCH_A_VALID_ELEMENTS_3 0xD0070 + +#define mmMME_ARCH_A_VALID_ELEMENTS_4 0xD0074 + +#define mmMME_ARCH_A_LOOP_STRIDE_0 0xD0078 + +#define mmMME_ARCH_A_LOOP_STRIDE_1 0xD007C + +#define mmMME_ARCH_A_LOOP_STRIDE_2 0xD0080 + +#define mmMME_ARCH_A_LOOP_STRIDE_3 0xD0084 + +#define mmMME_ARCH_A_LOOP_STRIDE_4 0xD0088 + +#define mmMME_ARCH_A_ROI_SIZE_0 0xD008C + +#define mmMME_ARCH_A_ROI_SIZE_1 0xD0090 + +#define mmMME_ARCH_A_ROI_SIZE_2 0xD0094 + +#define mmMME_ARCH_A_ROI_SIZE_3 0xD0098 + +#define mmMME_ARCH_A_SPATIAL_START_OFFSET_0 0xD009C + +#define mmMME_ARCH_A_SPATIAL_START_OFFSET_1 0xD00A0 + +#define mmMME_ARCH_A_SPATIAL_START_OFFSET_2 0xD00A4 + +#define mmMME_ARCH_A_SPATIAL_START_OFFSET_3 0xD00A8 + +#define mmMME_ARCH_A_SPATIAL_STRIDE_0 0xD00AC + +#define mmMME_ARCH_A_SPATIAL_STRIDE_1 0xD00B0 + +#define mmMME_ARCH_A_SPATIAL_STRIDE_2 0xD00B4 + +#define mmMME_ARCH_A_SPATIAL_STRIDE_3 0xD00B8 + +#define mmMME_ARCH_A_SPATIAL_SIZE_MINUS_1 0xD00BC + +#define mmMME_ARCH_B_ROI_BASE_OFFSET_0 0xD00C0 + +#define mmMME_ARCH_B_ROI_BASE_OFFSET_1 0xD00C4 + +#define mmMME_ARCH_B_ROI_BASE_OFFSET_2 0xD00C8 + +#define mmMME_ARCH_B_ROI_BASE_OFFSET_3 0xD00CC + +#define mmMME_ARCH_B_ROI_BASE_OFFSET_4 0xD00D0 + +#define mmMME_ARCH_B_VALID_ELEMENTS_0 0xD00D4 + +#define mmMME_ARCH_B_VALID_ELEMENTS_1 0xD00D8 + +#define mmMME_ARCH_B_VALID_ELEMENTS_2 0xD00DC + +#define 
mmMME_ARCH_B_VALID_ELEMENTS_3 0xD00E0 + +#define mmMME_ARCH_B_VALID_ELEMENTS_4 0xD00E4 + +#define mmMME_ARCH_B_LOOP_STRIDE_0 0xD00E8 + +#define mmMME_ARCH_B_LOOP_STRIDE_1 0xD00EC + +#define mmMME_ARCH_B_LOOP_STRIDE_2 0xD00F0 + +#define mmMME_ARCH_B_LOOP_STRIDE_3 0xD00F4 + +#define mmMME_ARCH_B_LOOP_STRIDE_4 0xD00F8 + +#define mmMME_ARCH_B_ROI_SIZE_0 0xD00FC + +#define mmMME_ARCH_B_ROI_SIZE_1 0xD0100 + +#define mmMME_ARCH_B_ROI_SIZE_2 0xD0104 + +#define mmMME_ARCH_B_ROI_SIZE_3 0xD0108 + +#define mmMME_ARCH_B_SPATIAL_START_OFFSET_0 0xD010C + +#define mmMME_ARCH_B_SPATIAL_START_OFFSET_1 0xD0110 + +#define mmMME_ARCH_B_SPATIAL_START_OFFSET_2 0xD0114 + +#define mmMME_ARCH_B_SPATIAL_START_OFFSET_3 0xD0118 + +#define mmMME_ARCH_B_SPATIAL_STRIDE_0 0xD011C + +#define mmMME_ARCH_B_SPATIAL_STRIDE_1 0xD0120 + +#define mmMME_ARCH_B_SPATIAL_STRIDE_2 0xD0124 + +#define mmMME_ARCH_B_SPATIAL_STRIDE_3 0xD0128 + +#define mmMME_ARCH_B_SPATIAL_SIZE_MINUS_1 0xD012C + +#define mmMME_ARCH_C_ROI_BASE_OFFSET_0 0xD0130 + +#define mmMME_ARCH_C_ROI_BASE_OFFSET_1 0xD0134 + +#define mmMME_ARCH_C_ROI_BASE_OFFSET_2 0xD0138 + +#define mmMME_ARCH_C_ROI_BASE_OFFSET_3 0xD013C + +#define mmMME_ARCH_C_ROI_BASE_OFFSET_4 0xD0140 + +#define mmMME_ARCH_C_VALID_ELEMENTS_0 0xD0144 + +#define mmMME_ARCH_C_VALID_ELEMENTS_1 0xD0148 + +#define mmMME_ARCH_C_VALID_ELEMENTS_2 0xD014C + +#define mmMME_ARCH_C_VALID_ELEMENTS_3 0xD0150 + +#define mmMME_ARCH_C_VALID_ELEMENTS_4 0xD0154 + +#define mmMME_ARCH_C_LOOP_STRIDE_0 0xD0158 + +#define mmMME_ARCH_C_LOOP_STRIDE_1 0xD015C + +#define mmMME_ARCH_C_LOOP_STRIDE_2 0xD0160 + +#define mmMME_ARCH_C_LOOP_STRIDE_3 0xD0164 + +#define mmMME_ARCH_C_LOOP_STRIDE_4 0xD0168 + +#define mmMME_ARCH_C_ROI_SIZE_0 0xD016C + +#define mmMME_ARCH_C_ROI_SIZE_1 0xD0170 + +#define mmMME_ARCH_C_ROI_SIZE_2 0xD0174 + +#define mmMME_ARCH_C_ROI_SIZE_3 0xD0178 + +#define mmMME_ARCH_C_SPATIAL_START_OFFSET_0 0xD017C + +#define mmMME_ARCH_C_SPATIAL_START_OFFSET_1 0xD0180 + +#define mmMME_ARCH_C_SPATIAL_START_OFFSET_2 0xD0184 + +#define mmMME_ARCH_C_SPATIAL_START_OFFSET_3 0xD0188 + +#define mmMME_ARCH_C_SPATIAL_STRIDE_0 0xD018C + +#define mmMME_ARCH_C_SPATIAL_STRIDE_1 0xD0190 + +#define mmMME_ARCH_C_SPATIAL_STRIDE_2 0xD0194 + +#define mmMME_ARCH_C_SPATIAL_STRIDE_3 0xD0198 + +#define mmMME_ARCH_C_SPATIAL_SIZE_MINUS_1 0xD019C + +#define mmMME_ARCH_SYNC_OBJECT_MESSAGE 0xD01A0 + +#define mmMME_ARCH_E_PADDING_VALUE_A 0xD01A4 + +#define mmMME_ARCH_E_NUM_ITERATION_MINUS_1 0xD01A8 + +#define mmMME_ARCH_E_BUBBLES_PER_SPLIT 0xD01AC + +#define mmMME_CMD 0xD0200 + +#define mmMME_DUMMY 0xD0204 + +#define mmMME_RESET 0xD0208 + +#define mmMME_STALL 0xD020C + +#define mmMME_SM_BASE_ADDRESS_LOW 0xD0210 + +#define mmMME_SM_BASE_ADDRESS_HIGH 0xD0214 + +#define mmMME_DBGMEM_ADD 0xD0218 + +#define mmMME_DBGMEM_DATA_WR 0xD021C + +#define mmMME_DBGMEM_DATA_RD 0xD0220 + +#define mmMME_DBGMEM_CTRL 0xD0224 + +#define mmMME_DBGMEM_RC 0xD0228 + +#define mmMME_LOG_SHADOW 0xD022C + +#define mmMME_STORE_MAX_CREDIT 0xD0300 + +#define mmMME_AGU 0xD0304 + +#define mmMME_SBA 0xD0308 + +#define mmMME_SBB 0xD030C + +#define mmMME_SBC 0xD0310 + +#define mmMME_WBC 0xD0314 + +#define mmMME_SBA_CONTROL_DATA 0xD0318 + +#define mmMME_SBB_CONTROL_DATA 0xD031C + +#define mmMME_SBC_CONTROL_DATA 0xD0320 + +#define mmMME_WBC_CONTROL_DATA 0xD0324 + +#define mmMME_TE 0xD0328 + +#define mmMME_TE2DEC 0xD032C + +#define mmMME_REI_STATUS 0xD0330 + +#define mmMME_REI_MASK 0xD0334 + +#define mmMME_SEI_STATUS 0xD0338 + +#define mmMME_SEI_MASK 0xD033C + +#define mmMME_SPI_STATUS 0xD0340 
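[Editorial aside, not part of the patch: every field in these auto-generated headers follows the same _SHIFT/_MASK convention, so generic pack/unpack helpers cover any of them. A self-contained sketch; the helper names field_get/field_set are hypothetical, not from this patch:

	#include <linux/types.h>

	/* Extract a right-aligned field value from a register word. */
	static inline u32 field_get(u32 reg, u32 mask, u32 shift)
	{
		return (reg & mask) >> shift;
	}

	/* Clear a field in a register word, then merge in a new value. */
	static inline u32 field_set(u32 reg, u32 mask, u32 shift, u32 val)
	{
		return (reg & ~mask) | ((val << shift) & mask);
	}
]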
+ +#define mmMME_SPI_MASK 0xD0344 + +#define mmMME_SHADOW_0_STATUS 0xD0400 + +#define mmMME_SHADOW_0_A_BASE_ADDR_HIGH 0xD0408 + +#define mmMME_SHADOW_0_B_BASE_ADDR_HIGH 0xD040C + +#define mmMME_SHADOW_0_CIN_BASE_ADDR_HIGH 0xD0410 + +#define mmMME_SHADOW_0_COUT_BASE_ADDR_HIGH 0xD0414 + +#define mmMME_SHADOW_0_BIAS_BASE_ADDR_HIGH 0xD0418 + +#define mmMME_SHADOW_0_A_BASE_ADDR_LOW 0xD041C + +#define mmMME_SHADOW_0_B_BASE_ADDR_LOW 0xD0420 + +#define mmMME_SHADOW_0_CIN_BASE_ADDR_LOW 0xD0424 + +#define mmMME_SHADOW_0_COUT_BASE_ADDR_LOW 0xD0428 + +#define mmMME_SHADOW_0_BIAS_BASE_ADDR_LOW 0xD042C + +#define mmMME_SHADOW_0_HEADER 0xD0430 + +#define mmMME_SHADOW_0_KERNEL_SIZE_MINUS_1 0xD0434 + +#define mmMME_SHADOW_0_ASSOCIATED_DIMS_0 0xD0438 + +#define mmMME_SHADOW_0_ASSOCIATED_DIMS_1 0xD043C + +#define mmMME_SHADOW_0_COUT_SCALE 0xD0440 + +#define mmMME_SHADOW_0_CIN_SCALE 0xD0444 + +#define mmMME_SHADOW_0_GEMMLOWP_ZP 0xD0448 + +#define mmMME_SHADOW_0_GEMMLOWP_EXPONENT 0xD044C + +#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_0 0xD0450 + +#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_1 0xD0454 + +#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_2 0xD0458 + +#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_3 0xD045C + +#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_4 0xD0460 + +#define mmMME_SHADOW_0_A_VALID_ELEMENTS_0 0xD0464 + +#define mmMME_SHADOW_0_A_VALID_ELEMENTS_1 0xD0468 + +#define mmMME_SHADOW_0_A_VALID_ELEMENTS_2 0xD046C + +#define mmMME_SHADOW_0_A_VALID_ELEMENTS_3 0xD0470 + +#define mmMME_SHADOW_0_A_VALID_ELEMENTS_4 0xD0474 + +#define mmMME_SHADOW_0_A_LOOP_STRIDE_0 0xD0478 + +#define mmMME_SHADOW_0_A_LOOP_STRIDE_1 0xD047C + +#define mmMME_SHADOW_0_A_LOOP_STRIDE_2 0xD0480 + +#define mmMME_SHADOW_0_A_LOOP_STRIDE_3 0xD0484 + +#define mmMME_SHADOW_0_A_LOOP_STRIDE_4 0xD0488 + +#define mmMME_SHADOW_0_A_ROI_SIZE_0 0xD048C + +#define mmMME_SHADOW_0_A_ROI_SIZE_1 0xD0490 + +#define mmMME_SHADOW_0_A_ROI_SIZE_2 0xD0494 + +#define mmMME_SHADOW_0_A_ROI_SIZE_3 0xD0498 + +#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_0 0xD049C + +#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_1 0xD04A0 + +#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_2 0xD04A4 + +#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_3 0xD04A8 + +#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_0 0xD04AC + +#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_1 0xD04B0 + +#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_2 0xD04B4 + +#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_3 0xD04B8 + +#define mmMME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1 0xD04BC + +#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_0 0xD04C0 + +#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_1 0xD04C4 + +#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_2 0xD04C8 + +#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_3 0xD04CC + +#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_4 0xD04D0 + +#define mmMME_SHADOW_0_B_VALID_ELEMENTS_0 0xD04D4 + +#define mmMME_SHADOW_0_B_VALID_ELEMENTS_1 0xD04D8 + +#define mmMME_SHADOW_0_B_VALID_ELEMENTS_2 0xD04DC + +#define mmMME_SHADOW_0_B_VALID_ELEMENTS_3 0xD04E0 + +#define mmMME_SHADOW_0_B_VALID_ELEMENTS_4 0xD04E4 + +#define mmMME_SHADOW_0_B_LOOP_STRIDE_0 0xD04E8 + +#define mmMME_SHADOW_0_B_LOOP_STRIDE_1 0xD04EC + +#define mmMME_SHADOW_0_B_LOOP_STRIDE_2 0xD04F0 + +#define mmMME_SHADOW_0_B_LOOP_STRIDE_3 0xD04F4 + +#define mmMME_SHADOW_0_B_LOOP_STRIDE_4 0xD04F8 + +#define mmMME_SHADOW_0_B_ROI_SIZE_0 0xD04FC + +#define mmMME_SHADOW_0_B_ROI_SIZE_1 0xD0500 + +#define mmMME_SHADOW_0_B_ROI_SIZE_2 0xD0504 + +#define mmMME_SHADOW_0_B_ROI_SIZE_3 0xD0508 + +#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_0 0xD050C + +#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_1 0xD0510 + 
+#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_2 0xD0514 + +#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_3 0xD0518 + +#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_0 0xD051C + +#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_1 0xD0520 + +#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_2 0xD0524 + +#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_3 0xD0528 + +#define mmMME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1 0xD052C + +#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_0 0xD0530 + +#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_1 0xD0534 + +#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_2 0xD0538 + +#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_3 0xD053C + +#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_4 0xD0540 + +#define mmMME_SHADOW_0_C_VALID_ELEMENTS_0 0xD0544 + +#define mmMME_SHADOW_0_C_VALID_ELEMENTS_1 0xD0548 + +#define mmMME_SHADOW_0_C_VALID_ELEMENTS_2 0xD054C + +#define mmMME_SHADOW_0_C_VALID_ELEMENTS_3 0xD0550 + +#define mmMME_SHADOW_0_C_VALID_ELEMENTS_4 0xD0554 + +#define mmMME_SHADOW_0_C_LOOP_STRIDE_0 0xD0558 + +#define mmMME_SHADOW_0_C_LOOP_STRIDE_1 0xD055C + +#define mmMME_SHADOW_0_C_LOOP_STRIDE_2 0xD0560 + +#define mmMME_SHADOW_0_C_LOOP_STRIDE_3 0xD0564 + +#define mmMME_SHADOW_0_C_LOOP_STRIDE_4 0xD0568 + +#define mmMME_SHADOW_0_C_ROI_SIZE_0 0xD056C + +#define mmMME_SHADOW_0_C_ROI_SIZE_1 0xD0570 + +#define mmMME_SHADOW_0_C_ROI_SIZE_2 0xD0574 + +#define mmMME_SHADOW_0_C_ROI_SIZE_3 0xD0578 + +#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_0 0xD057C + +#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_1 0xD0580 + +#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_2 0xD0584 + +#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_3 0xD0588 + +#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_0 0xD058C + +#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_1 0xD0590 + +#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_2 0xD0594 + +#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_3 0xD0598 + +#define mmMME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1 0xD059C + +#define mmMME_SHADOW_0_SYNC_OBJECT_MESSAGE 0xD05A0 + +#define mmMME_SHADOW_0_E_PADDING_VALUE_A 0xD05A4 + +#define mmMME_SHADOW_0_E_NUM_ITERATION_MINUS_1 0xD05A8 + +#define mmMME_SHADOW_0_E_BUBBLES_PER_SPLIT 0xD05AC + +#define mmMME_SHADOW_1_STATUS 0xD0600 + +#define mmMME_SHADOW_1_A_BASE_ADDR_HIGH 0xD0608 + +#define mmMME_SHADOW_1_B_BASE_ADDR_HIGH 0xD060C + +#define mmMME_SHADOW_1_CIN_BASE_ADDR_HIGH 0xD0610 + +#define mmMME_SHADOW_1_COUT_BASE_ADDR_HIGH 0xD0614 + +#define mmMME_SHADOW_1_BIAS_BASE_ADDR_HIGH 0xD0618 + +#define mmMME_SHADOW_1_A_BASE_ADDR_LOW 0xD061C + +#define mmMME_SHADOW_1_B_BASE_ADDR_LOW 0xD0620 + +#define mmMME_SHADOW_1_CIN_BASE_ADDR_LOW 0xD0624 + +#define mmMME_SHADOW_1_COUT_BASE_ADDR_LOW 0xD0628 + +#define mmMME_SHADOW_1_BIAS_BASE_ADDR_LOW 0xD062C + +#define mmMME_SHADOW_1_HEADER 0xD0630 + +#define mmMME_SHADOW_1_KERNEL_SIZE_MINUS_1 0xD0634 + +#define mmMME_SHADOW_1_ASSOCIATED_DIMS_0 0xD0638 + +#define mmMME_SHADOW_1_ASSOCIATED_DIMS_1 0xD063C + +#define mmMME_SHADOW_1_COUT_SCALE 0xD0640 + +#define mmMME_SHADOW_1_CIN_SCALE 0xD0644 + +#define mmMME_SHADOW_1_GEMMLOWP_ZP 0xD0648 + +#define mmMME_SHADOW_1_GEMMLOWP_EXPONENT 0xD064C + +#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_0 0xD0650 + +#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_1 0xD0654 + +#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_2 0xD0658 + +#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_3 0xD065C + +#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_4 0xD0660 + +#define mmMME_SHADOW_1_A_VALID_ELEMENTS_0 0xD0664 + +#define mmMME_SHADOW_1_A_VALID_ELEMENTS_1 0xD0668 + +#define mmMME_SHADOW_1_A_VALID_ELEMENTS_2 0xD066C + +#define mmMME_SHADOW_1_A_VALID_ELEMENTS_3 0xD0670 + +#define 
mmMME_SHADOW_1_A_VALID_ELEMENTS_4 0xD0674 + +#define mmMME_SHADOW_1_A_LOOP_STRIDE_0 0xD0678 + +#define mmMME_SHADOW_1_A_LOOP_STRIDE_1 0xD067C + +#define mmMME_SHADOW_1_A_LOOP_STRIDE_2 0xD0680 + +#define mmMME_SHADOW_1_A_LOOP_STRIDE_3 0xD0684 + +#define mmMME_SHADOW_1_A_LOOP_STRIDE_4 0xD0688 + +#define mmMME_SHADOW_1_A_ROI_SIZE_0 0xD068C + +#define mmMME_SHADOW_1_A_ROI_SIZE_1 0xD0690 + +#define mmMME_SHADOW_1_A_ROI_SIZE_2 0xD0694 + +#define mmMME_SHADOW_1_A_ROI_SIZE_3 0xD0698 + +#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_0 0xD069C + +#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_1 0xD06A0 + +#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_2 0xD06A4 + +#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_3 0xD06A8 + +#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_0 0xD06AC + +#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_1 0xD06B0 + +#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_2 0xD06B4 + +#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_3 0xD06B8 + +#define mmMME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1 0xD06BC + +#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_0 0xD06C0 + +#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_1 0xD06C4 + +#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_2 0xD06C8 + +#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_3 0xD06CC + +#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_4 0xD06D0 + +#define mmMME_SHADOW_1_B_VALID_ELEMENTS_0 0xD06D4 + +#define mmMME_SHADOW_1_B_VALID_ELEMENTS_1 0xD06D8 + +#define mmMME_SHADOW_1_B_VALID_ELEMENTS_2 0xD06DC + +#define mmMME_SHADOW_1_B_VALID_ELEMENTS_3 0xD06E0 + +#define mmMME_SHADOW_1_B_VALID_ELEMENTS_4 0xD06E4 + +#define mmMME_SHADOW_1_B_LOOP_STRIDE_0 0xD06E8 + +#define mmMME_SHADOW_1_B_LOOP_STRIDE_1 0xD06EC + +#define mmMME_SHADOW_1_B_LOOP_STRIDE_2 0xD06F0 + +#define mmMME_SHADOW_1_B_LOOP_STRIDE_3 0xD06F4 + +#define mmMME_SHADOW_1_B_LOOP_STRIDE_4 0xD06F8 + +#define mmMME_SHADOW_1_B_ROI_SIZE_0 0xD06FC + +#define mmMME_SHADOW_1_B_ROI_SIZE_1 0xD0700 + +#define mmMME_SHADOW_1_B_ROI_SIZE_2 0xD0704 + +#define mmMME_SHADOW_1_B_ROI_SIZE_3 0xD0708 + +#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_0 0xD070C + +#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_1 0xD0710 + +#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_2 0xD0714 + +#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_3 0xD0718 + +#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_0 0xD071C + +#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_1 0xD0720 + +#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_2 0xD0724 + +#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_3 0xD0728 + +#define mmMME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1 0xD072C + +#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_0 0xD0730 + +#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_1 0xD0734 + +#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_2 0xD0738 + +#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_3 0xD073C + +#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_4 0xD0740 + +#define mmMME_SHADOW_1_C_VALID_ELEMENTS_0 0xD0744 + +#define mmMME_SHADOW_1_C_VALID_ELEMENTS_1 0xD0748 + +#define mmMME_SHADOW_1_C_VALID_ELEMENTS_2 0xD074C + +#define mmMME_SHADOW_1_C_VALID_ELEMENTS_3 0xD0750 + +#define mmMME_SHADOW_1_C_VALID_ELEMENTS_4 0xD0754 + +#define mmMME_SHADOW_1_C_LOOP_STRIDE_0 0xD0758 + +#define mmMME_SHADOW_1_C_LOOP_STRIDE_1 0xD075C + +#define mmMME_SHADOW_1_C_LOOP_STRIDE_2 0xD0760 + +#define mmMME_SHADOW_1_C_LOOP_STRIDE_3 0xD0764 + +#define mmMME_SHADOW_1_C_LOOP_STRIDE_4 0xD0768 + +#define mmMME_SHADOW_1_C_ROI_SIZE_0 0xD076C + +#define mmMME_SHADOW_1_C_ROI_SIZE_1 0xD0770 + +#define mmMME_SHADOW_1_C_ROI_SIZE_2 0xD0774 + +#define mmMME_SHADOW_1_C_ROI_SIZE_3 0xD0778 + +#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_0 0xD077C + +#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_1 0xD0780 
+ +#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_2 0xD0784 + +#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_3 0xD0788 + +#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_0 0xD078C + +#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_1 0xD0790 + +#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_2 0xD0794 + +#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_3 0xD0798 + +#define mmMME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1 0xD079C + +#define mmMME_SHADOW_1_SYNC_OBJECT_MESSAGE 0xD07A0 + +#define mmMME_SHADOW_1_E_PADDING_VALUE_A 0xD07A4 + +#define mmMME_SHADOW_1_E_NUM_ITERATION_MINUS_1 0xD07A8 + +#define mmMME_SHADOW_1_E_BUBBLES_PER_SPLIT 0xD07AC + +#define mmMME_SHADOW_2_STATUS 0xD0800 + +#define mmMME_SHADOW_2_A_BASE_ADDR_HIGH 0xD0808 + +#define mmMME_SHADOW_2_B_BASE_ADDR_HIGH 0xD080C + +#define mmMME_SHADOW_2_CIN_BASE_ADDR_HIGH 0xD0810 + +#define mmMME_SHADOW_2_COUT_BASE_ADDR_HIGH 0xD0814 + +#define mmMME_SHADOW_2_BIAS_BASE_ADDR_HIGH 0xD0818 + +#define mmMME_SHADOW_2_A_BASE_ADDR_LOW 0xD081C + +#define mmMME_SHADOW_2_B_BASE_ADDR_LOW 0xD0820 + +#define mmMME_SHADOW_2_CIN_BASE_ADDR_LOW 0xD0824 + +#define mmMME_SHADOW_2_COUT_BASE_ADDR_LOW 0xD0828 + +#define mmMME_SHADOW_2_BIAS_BASE_ADDR_LOW 0xD082C + +#define mmMME_SHADOW_2_HEADER 0xD0830 + +#define mmMME_SHADOW_2_KERNEL_SIZE_MINUS_1 0xD0834 + +#define mmMME_SHADOW_2_ASSOCIATED_DIMS_0 0xD0838 + +#define mmMME_SHADOW_2_ASSOCIATED_DIMS_1 0xD083C + +#define mmMME_SHADOW_2_COUT_SCALE 0xD0840 + +#define mmMME_SHADOW_2_CIN_SCALE 0xD0844 + +#define mmMME_SHADOW_2_GEMMLOWP_ZP 0xD0848 + +#define mmMME_SHADOW_2_GEMMLOWP_EXPONENT 0xD084C + +#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_0 0xD0850 + +#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_1 0xD0854 + +#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_2 0xD0858 + +#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_3 0xD085C + +#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_4 0xD0860 + +#define mmMME_SHADOW_2_A_VALID_ELEMENTS_0 0xD0864 + +#define mmMME_SHADOW_2_A_VALID_ELEMENTS_1 0xD0868 + +#define mmMME_SHADOW_2_A_VALID_ELEMENTS_2 0xD086C + +#define mmMME_SHADOW_2_A_VALID_ELEMENTS_3 0xD0870 + +#define mmMME_SHADOW_2_A_VALID_ELEMENTS_4 0xD0874 + +#define mmMME_SHADOW_2_A_LOOP_STRIDE_0 0xD0878 + +#define mmMME_SHADOW_2_A_LOOP_STRIDE_1 0xD087C + +#define mmMME_SHADOW_2_A_LOOP_STRIDE_2 0xD0880 + +#define mmMME_SHADOW_2_A_LOOP_STRIDE_3 0xD0884 + +#define mmMME_SHADOW_2_A_LOOP_STRIDE_4 0xD0888 + +#define mmMME_SHADOW_2_A_ROI_SIZE_0 0xD088C + +#define mmMME_SHADOW_2_A_ROI_SIZE_1 0xD0890 + +#define mmMME_SHADOW_2_A_ROI_SIZE_2 0xD0894 + +#define mmMME_SHADOW_2_A_ROI_SIZE_3 0xD0898 + +#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_0 0xD089C + +#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_1 0xD08A0 + +#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_2 0xD08A4 + +#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_3 0xD08A8 + +#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_0 0xD08AC + +#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_1 0xD08B0 + +#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_2 0xD08B4 + +#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_3 0xD08B8 + +#define mmMME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1 0xD08BC + +#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_0 0xD08C0 + +#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_1 0xD08C4 + +#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_2 0xD08C8 + +#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_3 0xD08CC + +#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_4 0xD08D0 + +#define mmMME_SHADOW_2_B_VALID_ELEMENTS_0 0xD08D4 + +#define mmMME_SHADOW_2_B_VALID_ELEMENTS_1 0xD08D8 + +#define mmMME_SHADOW_2_B_VALID_ELEMENTS_2 0xD08DC + +#define mmMME_SHADOW_2_B_VALID_ELEMENTS_3 0xD08E0 + +#define 
mmMME_SHADOW_2_B_VALID_ELEMENTS_4 0xD08E4 + +#define mmMME_SHADOW_2_B_LOOP_STRIDE_0 0xD08E8 + +#define mmMME_SHADOW_2_B_LOOP_STRIDE_1 0xD08EC + +#define mmMME_SHADOW_2_B_LOOP_STRIDE_2 0xD08F0 + +#define mmMME_SHADOW_2_B_LOOP_STRIDE_3 0xD08F4 + +#define mmMME_SHADOW_2_B_LOOP_STRIDE_4 0xD08F8 + +#define mmMME_SHADOW_2_B_ROI_SIZE_0 0xD08FC + +#define mmMME_SHADOW_2_B_ROI_SIZE_1 0xD0900 + +#define mmMME_SHADOW_2_B_ROI_SIZE_2 0xD0904 + +#define mmMME_SHADOW_2_B_ROI_SIZE_3 0xD0908 + +#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_0 0xD090C + +#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_1 0xD0910 + +#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_2 0xD0914 + +#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_3 0xD0918 + +#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_0 0xD091C + +#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_1 0xD0920 + +#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_2 0xD0924 + +#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_3 0xD0928 + +#define mmMME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1 0xD092C + +#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_0 0xD0930 + +#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_1 0xD0934 + +#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_2 0xD0938 + +#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_3 0xD093C + +#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_4 0xD0940 + +#define mmMME_SHADOW_2_C_VALID_ELEMENTS_0 0xD0944 + +#define mmMME_SHADOW_2_C_VALID_ELEMENTS_1 0xD0948 + +#define mmMME_SHADOW_2_C_VALID_ELEMENTS_2 0xD094C + +#define mmMME_SHADOW_2_C_VALID_ELEMENTS_3 0xD0950 + +#define mmMME_SHADOW_2_C_VALID_ELEMENTS_4 0xD0954 + +#define mmMME_SHADOW_2_C_LOOP_STRIDE_0 0xD0958 + +#define mmMME_SHADOW_2_C_LOOP_STRIDE_1 0xD095C + +#define mmMME_SHADOW_2_C_LOOP_STRIDE_2 0xD0960 + +#define mmMME_SHADOW_2_C_LOOP_STRIDE_3 0xD0964 + +#define mmMME_SHADOW_2_C_LOOP_STRIDE_4 0xD0968 + +#define mmMME_SHADOW_2_C_ROI_SIZE_0 0xD096C + +#define mmMME_SHADOW_2_C_ROI_SIZE_1 0xD0970 + +#define mmMME_SHADOW_2_C_ROI_SIZE_2 0xD0974 + +#define mmMME_SHADOW_2_C_ROI_SIZE_3 0xD0978 + +#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_0 0xD097C + +#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_1 0xD0980 + +#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_2 0xD0984 + +#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_3 0xD0988 + +#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_0 0xD098C + +#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_1 0xD0990 + +#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_2 0xD0994 + +#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_3 0xD0998 + +#define mmMME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1 0xD099C + +#define mmMME_SHADOW_2_SYNC_OBJECT_MESSAGE 0xD09A0 + +#define mmMME_SHADOW_2_E_PADDING_VALUE_A 0xD09A4 + +#define mmMME_SHADOW_2_E_NUM_ITERATION_MINUS_1 0xD09A8 + +#define mmMME_SHADOW_2_E_BUBBLES_PER_SPLIT 0xD09AC + +#define mmMME_SHADOW_3_STATUS 0xD0A00 + +#define mmMME_SHADOW_3_A_BASE_ADDR_HIGH 0xD0A08 + +#define mmMME_SHADOW_3_B_BASE_ADDR_HIGH 0xD0A0C + +#define mmMME_SHADOW_3_CIN_BASE_ADDR_HIGH 0xD0A10 + +#define mmMME_SHADOW_3_COUT_BASE_ADDR_HIGH 0xD0A14 + +#define mmMME_SHADOW_3_BIAS_BASE_ADDR_HIGH 0xD0A18 + +#define mmMME_SHADOW_3_A_BASE_ADDR_LOW 0xD0A1C + +#define mmMME_SHADOW_3_B_BASE_ADDR_LOW 0xD0A20 + +#define mmMME_SHADOW_3_CIN_BASE_ADDR_LOW 0xD0A24 + +#define mmMME_SHADOW_3_COUT_BASE_ADDR_LOW 0xD0A28 + +#define mmMME_SHADOW_3_BIAS_BASE_ADDR_LOW 0xD0A2C + +#define mmMME_SHADOW_3_HEADER 0xD0A30 + +#define mmMME_SHADOW_3_KERNEL_SIZE_MINUS_1 0xD0A34 + +#define mmMME_SHADOW_3_ASSOCIATED_DIMS_0 0xD0A38 + +#define mmMME_SHADOW_3_ASSOCIATED_DIMS_1 0xD0A3C + +#define mmMME_SHADOW_3_COUT_SCALE 0xD0A40 + +#define mmMME_SHADOW_3_CIN_SCALE 0xD0A44 + +#define 
mmMME_SHADOW_3_GEMMLOWP_ZP 0xD0A48 + +#define mmMME_SHADOW_3_GEMMLOWP_EXPONENT 0xD0A4C + +#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_0 0xD0A50 + +#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_1 0xD0A54 + +#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_2 0xD0A58 + +#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_3 0xD0A5C + +#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_4 0xD0A60 + +#define mmMME_SHADOW_3_A_VALID_ELEMENTS_0 0xD0A64 + +#define mmMME_SHADOW_3_A_VALID_ELEMENTS_1 0xD0A68 + +#define mmMME_SHADOW_3_A_VALID_ELEMENTS_2 0xD0A6C + +#define mmMME_SHADOW_3_A_VALID_ELEMENTS_3 0xD0A70 + +#define mmMME_SHADOW_3_A_VALID_ELEMENTS_4 0xD0A74 + +#define mmMME_SHADOW_3_A_LOOP_STRIDE_0 0xD0A78 + +#define mmMME_SHADOW_3_A_LOOP_STRIDE_1 0xD0A7C + +#define mmMME_SHADOW_3_A_LOOP_STRIDE_2 0xD0A80 + +#define mmMME_SHADOW_3_A_LOOP_STRIDE_3 0xD0A84 + +#define mmMME_SHADOW_3_A_LOOP_STRIDE_4 0xD0A88 + +#define mmMME_SHADOW_3_A_ROI_SIZE_0 0xD0A8C + +#define mmMME_SHADOW_3_A_ROI_SIZE_1 0xD0A90 + +#define mmMME_SHADOW_3_A_ROI_SIZE_2 0xD0A94 + +#define mmMME_SHADOW_3_A_ROI_SIZE_3 0xD0A98 + +#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_0 0xD0A9C + +#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_1 0xD0AA0 + +#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_2 0xD0AA4 + +#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_3 0xD0AA8 + +#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_0 0xD0AAC + +#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_1 0xD0AB0 + +#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_2 0xD0AB4 + +#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_3 0xD0AB8 + +#define mmMME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1 0xD0ABC + +#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_0 0xD0AC0 + +#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_1 0xD0AC4 + +#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_2 0xD0AC8 + +#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_3 0xD0ACC + +#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_4 0xD0AD0 + +#define mmMME_SHADOW_3_B_VALID_ELEMENTS_0 0xD0AD4 + +#define mmMME_SHADOW_3_B_VALID_ELEMENTS_1 0xD0AD8 + +#define mmMME_SHADOW_3_B_VALID_ELEMENTS_2 0xD0ADC + +#define mmMME_SHADOW_3_B_VALID_ELEMENTS_3 0xD0AE0 + +#define mmMME_SHADOW_3_B_VALID_ELEMENTS_4 0xD0AE4 + +#define mmMME_SHADOW_3_B_LOOP_STRIDE_0 0xD0AE8 + +#define mmMME_SHADOW_3_B_LOOP_STRIDE_1 0xD0AEC + +#define mmMME_SHADOW_3_B_LOOP_STRIDE_2 0xD0AF0 + +#define mmMME_SHADOW_3_B_LOOP_STRIDE_3 0xD0AF4 + +#define mmMME_SHADOW_3_B_LOOP_STRIDE_4 0xD0AF8 + +#define mmMME_SHADOW_3_B_ROI_SIZE_0 0xD0AFC + +#define mmMME_SHADOW_3_B_ROI_SIZE_1 0xD0B00 + +#define mmMME_SHADOW_3_B_ROI_SIZE_2 0xD0B04 + +#define mmMME_SHADOW_3_B_ROI_SIZE_3 0xD0B08 + +#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_0 0xD0B0C + +#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_1 0xD0B10 + +#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_2 0xD0B14 + +#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_3 0xD0B18 + +#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_0 0xD0B1C + +#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_1 0xD0B20 + +#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_2 0xD0B24 + +#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_3 0xD0B28 + +#define mmMME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1 0xD0B2C + +#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_0 0xD0B30 + +#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_1 0xD0B34 + +#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_2 0xD0B38 + +#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_3 0xD0B3C + +#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_4 0xD0B40 + +#define mmMME_SHADOW_3_C_VALID_ELEMENTS_0 0xD0B44 + +#define mmMME_SHADOW_3_C_VALID_ELEMENTS_1 0xD0B48 + +#define mmMME_SHADOW_3_C_VALID_ELEMENTS_2 0xD0B4C + +#define mmMME_SHADOW_3_C_VALID_ELEMENTS_3 0xD0B50 + +#define 
mmMME_SHADOW_3_C_VALID_ELEMENTS_4 0xD0B54 + +#define mmMME_SHADOW_3_C_LOOP_STRIDE_0 0xD0B58 + +#define mmMME_SHADOW_3_C_LOOP_STRIDE_1 0xD0B5C + +#define mmMME_SHADOW_3_C_LOOP_STRIDE_2 0xD0B60 + +#define mmMME_SHADOW_3_C_LOOP_STRIDE_3 0xD0B64 + +#define mmMME_SHADOW_3_C_LOOP_STRIDE_4 0xD0B68 + +#define mmMME_SHADOW_3_C_ROI_SIZE_0 0xD0B6C + +#define mmMME_SHADOW_3_C_ROI_SIZE_1 0xD0B70 + +#define mmMME_SHADOW_3_C_ROI_SIZE_2 0xD0B74 + +#define mmMME_SHADOW_3_C_ROI_SIZE_3 0xD0B78 + +#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_0 0xD0B7C + +#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_1 0xD0B80 + +#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_2 0xD0B84 + +#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_3 0xD0B88 + +#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_0 0xD0B8C + +#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_1 0xD0B90 + +#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_2 0xD0B94 + +#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_3 0xD0B98 + +#define mmMME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1 0xD0B9C + +#define mmMME_SHADOW_3_SYNC_OBJECT_MESSAGE 0xD0BA0 + +#define mmMME_SHADOW_3_E_PADDING_VALUE_A 0xD0BA4 + +#define mmMME_SHADOW_3_E_NUM_ITERATION_MINUS_1 0xD0BA8 + +#define mmMME_SHADOW_3_E_BUBBLES_PER_SPLIT 0xD0BAC + +#endif /* ASIC_REG_MME_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h new file mode 100644 index 000000000..c3e69062b --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MMU_MASKS_H_ +#define ASIC_REG_MMU_MASKS_H_ + +/* + ***************************************** + * MMU (Prototype: MMU) + ***************************************** + */ + +/* MMU_INPUT_FIFO_THRESHOLD */ +#define MMU_INPUT_FIFO_THRESHOLD_PCI_SHIFT 0 +#define MMU_INPUT_FIFO_THRESHOLD_PCI_MASK 0x7 +#define MMU_INPUT_FIFO_THRESHOLD_PSOC_SHIFT 4 +#define MMU_INPUT_FIFO_THRESHOLD_PSOC_MASK 0x70 +#define MMU_INPUT_FIFO_THRESHOLD_DMA_SHIFT 8 +#define MMU_INPUT_FIFO_THRESHOLD_DMA_MASK 0x700 +#define MMU_INPUT_FIFO_THRESHOLD_CPU_SHIFT 12 +#define MMU_INPUT_FIFO_THRESHOLD_CPU_MASK 0x7000 +#define MMU_INPUT_FIFO_THRESHOLD_MME_SHIFT 16 +#define MMU_INPUT_FIFO_THRESHOLD_MME_MASK 0x70000 +#define MMU_INPUT_FIFO_THRESHOLD_TPC_SHIFT 20 +#define MMU_INPUT_FIFO_THRESHOLD_TPC_MASK 0x700000 +#define MMU_INPUT_FIFO_THRESHOLD_OTHER_SHIFT 24 +#define MMU_INPUT_FIFO_THRESHOLD_OTHER_MASK 0x7000000 + +/* MMU_MMU_ENABLE */ +#define MMU_MMU_ENABLE_R_SHIFT 0 +#define MMU_MMU_ENABLE_R_MASK 0x1 + +/* MMU_FORCE_ORDERING */ +#define MMU_FORCE_ORDERING_DMA_WEAK_ORDERING_SHIFT 0 +#define MMU_FORCE_ORDERING_DMA_WEAK_ORDERING_MASK 0x1 +#define MMU_FORCE_ORDERING_PSOC_WEAK_ORDERING_SHIFT 1 +#define MMU_FORCE_ORDERING_PSOC_WEAK_ORDERING_MASK 0x2 +#define MMU_FORCE_ORDERING_PCI_WEAK_ORDERING_SHIFT 2 +#define MMU_FORCE_ORDERING_PCI_WEAK_ORDERING_MASK 0x4 +#define MMU_FORCE_ORDERING_CPU_WEAK_ORDERING_SHIFT 3 +#define MMU_FORCE_ORDERING_CPU_WEAK_ORDERING_MASK 0x8 +#define MMU_FORCE_ORDERING_MME_WEAK_ORDERING_SHIFT 4 +#define MMU_FORCE_ORDERING_MME_WEAK_ORDERING_MASK 0x10 +#define MMU_FORCE_ORDERING_TPC_WEAK_ORDERING_SHIFT 5 +#define MMU_FORCE_ORDERING_TPC_WEAK_ORDERING_MASK 0x20 +#define MMU_FORCE_ORDERING_DEFAULT_WEAK_ORDERING_SHIFT 6 +#define 
MMU_FORCE_ORDERING_DEFAULT_WEAK_ORDERING_MASK 0x40 +#define MMU_FORCE_ORDERING_DMA_STRONG_ORDERING_SHIFT 8 +#define MMU_FORCE_ORDERING_DMA_STRONG_ORDERING_MASK 0x100 +#define MMU_FORCE_ORDERING_PSOC_STRONG_ORDERING_SHIFT 9 +#define MMU_FORCE_ORDERING_PSOC_STRONG_ORDERING_MASK 0x200 +#define MMU_FORCE_ORDERING_PCI_STRONG_ORDERING_SHIFT 10 +#define MMU_FORCE_ORDERING_PCI_STRONG_ORDERING_MASK 0x400 +#define MMU_FORCE_ORDERING_CPU_STRONG_ORDERING_SHIFT 11 +#define MMU_FORCE_ORDERING_CPU_STRONG_ORDERING_MASK 0x800 +#define MMU_FORCE_ORDERING_MME_STRONG_ORDERING_SHIFT 12 +#define MMU_FORCE_ORDERING_MME_STRONG_ORDERING_MASK 0x1000 +#define MMU_FORCE_ORDERING_TPC_STRONG_ORDERING_SHIFT 13 +#define MMU_FORCE_ORDERING_TPC_STRONG_ORDERING_MASK 0x2000 +#define MMU_FORCE_ORDERING_DEFAULT_STRONG_ORDERING_SHIFT 14 +#define MMU_FORCE_ORDERING_DEFAULT_STRONG_ORDERING_MASK 0x4000 + +/* MMU_FEATURE_ENABLE */ +#define MMU_FEATURE_ENABLE_VA_ORDERING_EN_SHIFT 0 +#define MMU_FEATURE_ENABLE_VA_ORDERING_EN_MASK 0x1 +#define MMU_FEATURE_ENABLE_CLEAN_LINK_LIST_SHIFT 1 +#define MMU_FEATURE_ENABLE_CLEAN_LINK_LIST_MASK 0x2 +#define MMU_FEATURE_ENABLE_HOP_OFFSET_EN_SHIFT 2 +#define MMU_FEATURE_ENABLE_HOP_OFFSET_EN_MASK 0x4 +#define MMU_FEATURE_ENABLE_OBI_ORDERING_EN_SHIFT 3 +#define MMU_FEATURE_ENABLE_OBI_ORDERING_EN_MASK 0x8 +#define MMU_FEATURE_ENABLE_STRONG_ORDERING_READ_EN_SHIFT 4 +#define MMU_FEATURE_ENABLE_STRONG_ORDERING_READ_EN_MASK 0x10 +#define MMU_FEATURE_ENABLE_TRACE_ENABLE_SHIFT 5 +#define MMU_FEATURE_ENABLE_TRACE_ENABLE_MASK 0x20 + +/* MMU_VA_ORDERING_MASK_31_7 */ +#define MMU_VA_ORDERING_MASK_31_7_R_SHIFT 0 +#define MMU_VA_ORDERING_MASK_31_7_R_MASK 0x1FFFFFF + +/* MMU_VA_ORDERING_MASK_49_32 */ +#define MMU_VA_ORDERING_MASK_49_32_R_SHIFT 0 +#define MMU_VA_ORDERING_MASK_49_32_R_MASK 0x3FFFF + +/* MMU_LOG2_DDR_SIZE */ +#define MMU_LOG2_DDR_SIZE_R_SHIFT 0 +#define MMU_LOG2_DDR_SIZE_R_MASK 0xFF + +/* MMU_SCRAMBLER */ +#define MMU_SCRAMBLER_ADDR_BIT_SHIFT 0 +#define MMU_SCRAMBLER_ADDR_BIT_MASK 0x3F +#define MMU_SCRAMBLER_SINGLE_DDR_EN_SHIFT 6 +#define MMU_SCRAMBLER_SINGLE_DDR_EN_MASK 0x40 +#define MMU_SCRAMBLER_SINGLE_DDR_ID_SHIFT 7 +#define MMU_SCRAMBLER_SINGLE_DDR_ID_MASK 0x80 + +/* MMU_MEM_INIT_BUSY */ +#define MMU_MEM_INIT_BUSY_DATA_SHIFT 0 +#define MMU_MEM_INIT_BUSY_DATA_MASK 0x3 +#define MMU_MEM_INIT_BUSY_OBI0_SHIFT 2 +#define MMU_MEM_INIT_BUSY_OBI0_MASK 0x4 +#define MMU_MEM_INIT_BUSY_OBI1_SHIFT 3 +#define MMU_MEM_INIT_BUSY_OBI1_MASK 0x8 + +/* MMU_SPI_MASK */ +#define MMU_SPI_MASK_R_SHIFT 0 +#define MMU_SPI_MASK_R_MASK 0xFF + +/* MMU_SPI_CAUSE */ +#define MMU_SPI_CAUSE_R_SHIFT 0 +#define MMU_SPI_CAUSE_R_MASK 0xFF + +/* MMU_PAGE_ERROR_CAPTURE */ +#define MMU_PAGE_ERROR_CAPTURE_VA_49_32_SHIFT 0 +#define MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF +#define MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_SHIFT 18 +#define MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000 + +/* MMU_PAGE_ERROR_CAPTURE_VA */ +#define MMU_PAGE_ERROR_CAPTURE_VA_VA_31_0_SHIFT 0 +#define MMU_PAGE_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF + +/* MMU_ACCESS_ERROR_CAPTURE */ +#define MMU_ACCESS_ERROR_CAPTURE_VA_49_32_SHIFT 0 +#define MMU_ACCESS_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF +#define MMU_ACCESS_ERROR_CAPTURE_ENTRY_VALID_SHIFT 18 +#define MMU_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000 + +/* MMU_ACCESS_ERROR_CAPTURE_VA */ +#define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_SHIFT 0 +#define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF + +#endif /* ASIC_REG_MMU_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h 
b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h new file mode 100644 index 000000000..7ec81f120 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MMU_REGS_H_ +#define ASIC_REG_MMU_REGS_H_ + +/* + ***************************************** + * MMU (Prototype: MMU) + ***************************************** + */ + +#define mmMMU_INPUT_FIFO_THRESHOLD 0x480000 + +#define mmMMU_MMU_ENABLE 0x48000C + +#define mmMMU_FORCE_ORDERING 0x480010 + +#define mmMMU_FEATURE_ENABLE 0x480014 + +#define mmMMU_VA_ORDERING_MASK_31_7 0x480018 + +#define mmMMU_VA_ORDERING_MASK_49_32 0x48001C + +#define mmMMU_LOG2_DDR_SIZE 0x480020 + +#define mmMMU_SCRAMBLER 0x480024 + +#define mmMMU_MEM_INIT_BUSY 0x480028 + +#define mmMMU_SPI_MASK 0x48002C + +#define mmMMU_SPI_CAUSE 0x480030 + +#define mmMMU_PAGE_ERROR_CAPTURE 0x480034 + +#define mmMMU_PAGE_ERROR_CAPTURE_VA 0x480038 + +#define mmMMU_ACCESS_ERROR_CAPTURE 0x48003C + +#define mmMMU_ACCESS_ERROR_CAPTURE_VA 0x480040 + +#endif /* ASIC_REG_MMU_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h new file mode 100644 index 000000000..ceb59f2e2 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PCI_NRTR_MASKS_H_ +#define ASIC_REG_PCI_NRTR_MASKS_H_ + +/* + ***************************************** + * PCI_NRTR (Prototype: IF_NRTR) + ***************************************** + */ + +/* PCI_NRTR_HBW_MAX_CRED */ +#define PCI_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0 +#define PCI_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F +#define PCI_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8 +#define PCI_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00 +#define PCI_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16 +#define PCI_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000 +#define PCI_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24 +#define PCI_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000 + +/* PCI_NRTR_LBW_MAX_CRED */ +#define PCI_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0 +#define PCI_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F +#define PCI_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8 +#define PCI_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00 +#define PCI_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16 +#define PCI_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000 +#define PCI_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24 +#define PCI_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000 + +/* PCI_NRTR_DBG_E_ARB */ +#define PCI_NRTR_DBG_E_ARB_W_SHIFT 0 +#define PCI_NRTR_DBG_E_ARB_W_MASK 0x7 +#define PCI_NRTR_DBG_E_ARB_S_SHIFT 8 +#define PCI_NRTR_DBG_E_ARB_S_MASK 0x700 +#define PCI_NRTR_DBG_E_ARB_N_SHIFT 16 +#define PCI_NRTR_DBG_E_ARB_N_MASK 0x70000 +#define PCI_NRTR_DBG_E_ARB_L_SHIFT 24 +#define PCI_NRTR_DBG_E_ARB_L_MASK 0x7000000 + +/* PCI_NRTR_DBG_W_ARB */ +#define PCI_NRTR_DBG_W_ARB_E_SHIFT 0 +#define PCI_NRTR_DBG_W_ARB_E_MASK 0x7 +#define PCI_NRTR_DBG_W_ARB_S_SHIFT 8 +#define PCI_NRTR_DBG_W_ARB_S_MASK 0x700 +#define PCI_NRTR_DBG_W_ARB_N_SHIFT 16 +#define PCI_NRTR_DBG_W_ARB_N_MASK 0x70000 +#define 
PCI_NRTR_DBG_W_ARB_L_SHIFT 24 +#define PCI_NRTR_DBG_W_ARB_L_MASK 0x7000000 + +/* PCI_NRTR_DBG_N_ARB */ +#define PCI_NRTR_DBG_N_ARB_W_SHIFT 0 +#define PCI_NRTR_DBG_N_ARB_W_MASK 0x7 +#define PCI_NRTR_DBG_N_ARB_E_SHIFT 8 +#define PCI_NRTR_DBG_N_ARB_E_MASK 0x700 +#define PCI_NRTR_DBG_N_ARB_S_SHIFT 16 +#define PCI_NRTR_DBG_N_ARB_S_MASK 0x70000 +#define PCI_NRTR_DBG_N_ARB_L_SHIFT 24 +#define PCI_NRTR_DBG_N_ARB_L_MASK 0x7000000 + +/* PCI_NRTR_DBG_S_ARB */ +#define PCI_NRTR_DBG_S_ARB_W_SHIFT 0 +#define PCI_NRTR_DBG_S_ARB_W_MASK 0x7 +#define PCI_NRTR_DBG_S_ARB_E_SHIFT 8 +#define PCI_NRTR_DBG_S_ARB_E_MASK 0x700 +#define PCI_NRTR_DBG_S_ARB_N_SHIFT 16 +#define PCI_NRTR_DBG_S_ARB_N_MASK 0x70000 +#define PCI_NRTR_DBG_S_ARB_L_SHIFT 24 +#define PCI_NRTR_DBG_S_ARB_L_MASK 0x7000000 + +/* PCI_NRTR_DBG_L_ARB */ +#define PCI_NRTR_DBG_L_ARB_W_SHIFT 0 +#define PCI_NRTR_DBG_L_ARB_W_MASK 0x7 +#define PCI_NRTR_DBG_L_ARB_E_SHIFT 8 +#define PCI_NRTR_DBG_L_ARB_E_MASK 0x700 +#define PCI_NRTR_DBG_L_ARB_S_SHIFT 16 +#define PCI_NRTR_DBG_L_ARB_S_MASK 0x70000 +#define PCI_NRTR_DBG_L_ARB_N_SHIFT 24 +#define PCI_NRTR_DBG_L_ARB_N_MASK 0x7000000 + +/* PCI_NRTR_DBG_E_ARB_MAX */ +#define PCI_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0 +#define PCI_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F + +/* PCI_NRTR_DBG_W_ARB_MAX */ +#define PCI_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0 +#define PCI_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F + +/* PCI_NRTR_DBG_N_ARB_MAX */ +#define PCI_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0 +#define PCI_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F + +/* PCI_NRTR_DBG_S_ARB_MAX */ +#define PCI_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0 +#define PCI_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F + +/* PCI_NRTR_DBG_L_ARB_MAX */ +#define PCI_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0 +#define PCI_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F + +/* PCI_NRTR_SPLIT_COEF */ +#define PCI_NRTR_SPLIT_COEF_VAL_SHIFT 0 +#define PCI_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF + +/* PCI_NRTR_SPLIT_CFG */ +#define PCI_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0 +#define PCI_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1 +#define PCI_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1 +#define PCI_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2 +#define PCI_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2 +#define PCI_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC +#define PCI_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4 +#define PCI_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10 +#define PCI_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5 +#define PCI_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20 +#define PCI_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6 +#define PCI_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0 + +/* PCI_NRTR_SPLIT_RD_SAT */ +#define PCI_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0 +#define PCI_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF + +/* PCI_NRTR_SPLIT_RD_RST_TOKEN */ +#define PCI_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0 +#define PCI_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF + +/* PCI_NRTR_SPLIT_RD_TIMEOUT */ +#define PCI_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0 +#define PCI_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* PCI_NRTR_SPLIT_WR_SAT */ +#define PCI_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0 +#define PCI_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF + +/* PCI_NRTR_WPLIT_WR_TST_TOLEN */ +#define PCI_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0 +#define PCI_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF + +/* PCI_NRTR_SPLIT_WR_TIMEOUT */ +#define PCI_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0 +#define PCI_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* PCI_NRTR_HBW_RANGE_HIT */ +#define PCI_NRTR_HBW_RANGE_HIT_IND_SHIFT 0 +#define PCI_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF + +/* PCI_NRTR_HBW_RANGE_MASK_L */ +#define PCI_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0 +#define 
PCI_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF + +/* PCI_NRTR_HBW_RANGE_MASK_H */ +#define PCI_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0 +#define PCI_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF + +/* PCI_NRTR_HBW_RANGE_BASE_L */ +#define PCI_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0 +#define PCI_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF + +/* PCI_NRTR_HBW_RANGE_BASE_H */ +#define PCI_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0 +#define PCI_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF + +/* PCI_NRTR_LBW_RANGE_HIT */ +#define PCI_NRTR_LBW_RANGE_HIT_IND_SHIFT 0 +#define PCI_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF + +/* PCI_NRTR_LBW_RANGE_MASK */ +#define PCI_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0 +#define PCI_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF + +/* PCI_NRTR_LBW_RANGE_BASE */ +#define PCI_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0 +#define PCI_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF + +/* PCI_NRTR_RGLTR */ +#define PCI_NRTR_RGLTR_WR_EN_SHIFT 0 +#define PCI_NRTR_RGLTR_WR_EN_MASK 0x1 +#define PCI_NRTR_RGLTR_RD_EN_SHIFT 4 +#define PCI_NRTR_RGLTR_RD_EN_MASK 0x10 + +/* PCI_NRTR_RGLTR_WR_RESULT */ +#define PCI_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0 +#define PCI_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF + +/* PCI_NRTR_RGLTR_RD_RESULT */ +#define PCI_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0 +#define PCI_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF + +/* PCI_NRTR_SCRAMB_EN */ +#define PCI_NRTR_SCRAMB_EN_VAL_SHIFT 0 +#define PCI_NRTR_SCRAMB_EN_VAL_MASK 0x1 + +/* PCI_NRTR_NON_LIN_SCRAMB */ +#define PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0 +#define PCI_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1 + +#endif /* ASIC_REG_PCI_NRTR_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h new file mode 100644 index 000000000..dd067f301 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PCI_NRTR_REGS_H_ +#define ASIC_REG_PCI_NRTR_REGS_H_ + +/* + ***************************************** + * PCI_NRTR (Prototype: IF_NRTR) + ***************************************** + */ + +#define mmPCI_NRTR_HBW_MAX_CRED 0x100 + +#define mmPCI_NRTR_LBW_MAX_CRED 0x120 + +#define mmPCI_NRTR_DBG_E_ARB 0x300 + +#define mmPCI_NRTR_DBG_W_ARB 0x304 + +#define mmPCI_NRTR_DBG_N_ARB 0x308 + +#define mmPCI_NRTR_DBG_S_ARB 0x30C + +#define mmPCI_NRTR_DBG_L_ARB 0x310 + +#define mmPCI_NRTR_DBG_E_ARB_MAX 0x320 + +#define mmPCI_NRTR_DBG_W_ARB_MAX 0x324 + +#define mmPCI_NRTR_DBG_N_ARB_MAX 0x328 + +#define mmPCI_NRTR_DBG_S_ARB_MAX 0x32C + +#define mmPCI_NRTR_DBG_L_ARB_MAX 0x330 + +#define mmPCI_NRTR_SPLIT_COEF_0 0x400 + +#define mmPCI_NRTR_SPLIT_COEF_1 0x404 + +#define mmPCI_NRTR_SPLIT_COEF_2 0x408 + +#define mmPCI_NRTR_SPLIT_COEF_3 0x40C + +#define mmPCI_NRTR_SPLIT_COEF_4 0x410 + +#define mmPCI_NRTR_SPLIT_COEF_5 0x414 + +#define mmPCI_NRTR_SPLIT_COEF_6 0x418 + +#define mmPCI_NRTR_SPLIT_COEF_7 0x41C + +#define mmPCI_NRTR_SPLIT_COEF_8 0x420 + +#define mmPCI_NRTR_SPLIT_COEF_9 0x424 + +#define mmPCI_NRTR_SPLIT_CFG 0x440 + +#define mmPCI_NRTR_SPLIT_RD_SAT 0x444 + +#define mmPCI_NRTR_SPLIT_RD_RST_TOKEN 0x448 + +#define mmPCI_NRTR_SPLIT_RD_TIMEOUT_0 0x44C + +#define mmPCI_NRTR_SPLIT_RD_TIMEOUT_1 0x450 + +#define mmPCI_NRTR_SPLIT_WR_SAT 0x454 + +#define mmPCI_NRTR_WPLIT_WR_TST_TOLEN 0x458 + +#define mmPCI_NRTR_SPLIT_WR_TIMEOUT_0 0x45C + +#define mmPCI_NRTR_SPLIT_WR_TIMEOUT_1 0x460 + +#define mmPCI_NRTR_HBW_RANGE_HIT 0x470 + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_0 0x480 + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_1 0x484 + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_2 0x488 + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_3 0x48C + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_4 0x490 + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_5 0x494 + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_6 0x498 + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_7 0x49C + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_0 0x4A0 + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_1 0x4A4 + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_2 0x4A8 + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_3 0x4AC + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_4 0x4B0 + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_5 0x4B4 + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_6 0x4B8 + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_7 0x4BC + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_0 0x4C0 + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_1 0x4C4 + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_2 0x4C8 + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_3 0x4CC + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_4 0x4D0 + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_5 0x4D4 + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_6 0x4D8 + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_7 0x4DC + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_0 0x4E0 + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_1 0x4E4 + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_2 0x4E8 + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_3 0x4EC + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_4 0x4F0 + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_5 0x4F4 + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_6 0x4F8 + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_7 0x4FC + +#define mmPCI_NRTR_LBW_RANGE_HIT 0x500 + +#define mmPCI_NRTR_LBW_RANGE_MASK_0 0x510 + +#define mmPCI_NRTR_LBW_RANGE_MASK_1 0x514 + +#define mmPCI_NRTR_LBW_RANGE_MASK_2 0x518 + +#define mmPCI_NRTR_LBW_RANGE_MASK_3 0x51C + +#define mmPCI_NRTR_LBW_RANGE_MASK_4 0x520 + +#define mmPCI_NRTR_LBW_RANGE_MASK_5 0x524 + +#define mmPCI_NRTR_LBW_RANGE_MASK_6 
0x528 + +#define mmPCI_NRTR_LBW_RANGE_MASK_7 0x52C + +#define mmPCI_NRTR_LBW_RANGE_MASK_8 0x530 + +#define mmPCI_NRTR_LBW_RANGE_MASK_9 0x534 + +#define mmPCI_NRTR_LBW_RANGE_MASK_10 0x538 + +#define mmPCI_NRTR_LBW_RANGE_MASK_11 0x53C + +#define mmPCI_NRTR_LBW_RANGE_MASK_12 0x540 + +#define mmPCI_NRTR_LBW_RANGE_MASK_13 0x544 + +#define mmPCI_NRTR_LBW_RANGE_MASK_14 0x548 + +#define mmPCI_NRTR_LBW_RANGE_MASK_15 0x54C + +#define mmPCI_NRTR_LBW_RANGE_BASE_0 0x550 + +#define mmPCI_NRTR_LBW_RANGE_BASE_1 0x554 + +#define mmPCI_NRTR_LBW_RANGE_BASE_2 0x558 + +#define mmPCI_NRTR_LBW_RANGE_BASE_3 0x55C + +#define mmPCI_NRTR_LBW_RANGE_BASE_4 0x560 + +#define mmPCI_NRTR_LBW_RANGE_BASE_5 0x564 + +#define mmPCI_NRTR_LBW_RANGE_BASE_6 0x568 + +#define mmPCI_NRTR_LBW_RANGE_BASE_7 0x56C + +#define mmPCI_NRTR_LBW_RANGE_BASE_8 0x570 + +#define mmPCI_NRTR_LBW_RANGE_BASE_9 0x574 + +#define mmPCI_NRTR_LBW_RANGE_BASE_10 0x578 + +#define mmPCI_NRTR_LBW_RANGE_BASE_11 0x57C + +#define mmPCI_NRTR_LBW_RANGE_BASE_12 0x580 + +#define mmPCI_NRTR_LBW_RANGE_BASE_13 0x584 + +#define mmPCI_NRTR_LBW_RANGE_BASE_14 0x588 + +#define mmPCI_NRTR_LBW_RANGE_BASE_15 0x58C + +#define mmPCI_NRTR_RGLTR 0x590 + +#define mmPCI_NRTR_RGLTR_WR_RESULT 0x594 + +#define mmPCI_NRTR_RGLTR_RD_RESULT 0x598 + +#define mmPCI_NRTR_SCRAMB_EN 0x600 + +#define mmPCI_NRTR_NON_LIN_SCRAMB 0x604 + +#endif /* ASIC_REG_PCI_NRTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h new file mode 100644 index 000000000..35b1d8ac6 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PCIE_AUX_REGS_H_ +#define ASIC_REG_PCIE_AUX_REGS_H_ + +/* + ***************************************** + * PCIE_AUX (Prototype: PCIE_AUX) + ***************************************** + */ + +#define mmPCIE_AUX_APB_TIMEOUT 0xC07004 + +#define mmPCIE_AUX_PHY_INIT 0xC07100 + +#define mmPCIE_AUX_LTR_MAX_LATENCY 0xC07138 + +#define mmPCIE_AUX_BAR0_START_L 0xC07160 + +#define mmPCIE_AUX_BAR0_START_H 0xC07164 + +#define mmPCIE_AUX_BAR1_START 0xC07168 + +#define mmPCIE_AUX_BAR2_START_L 0xC0716C + +#define mmPCIE_AUX_BAR2_START_H 0xC07170 + +#define mmPCIE_AUX_BAR3_START 0xC07174 + +#define mmPCIE_AUX_BAR4_START_L 0xC07178 + +#define mmPCIE_AUX_BAR4_START_H 0xC0717C + +#define mmPCIE_AUX_BAR5_START 0xC07180 + +#define mmPCIE_AUX_BAR0_LIMIT_L 0xC07184 + +#define mmPCIE_AUX_BAR0_LIMIT_H 0xC07188 + +#define mmPCIE_AUX_BAR1_LIMIT 0xC0718C + +#define mmPCIE_AUX_BAR2_LIMIT_L 0xC07190 + +#define mmPCIE_AUX_BAR2_LIMIT_H 0xC07194 + +#define mmPCIE_AUX_BAR3_LIMIT 0xC07198 + +#define mmPCIE_AUX_BAR4_LIMIT_L 0xC0719C + +#define mmPCIE_AUX_BAR4_LIMIT_H 0xC07200 + +#define mmPCIE_AUX_BAR5_LIMIT 0xC07204 + +#define mmPCIE_AUX_BUS_MASTER_EN 0xC07208 + +#define mmPCIE_AUX_MEM_SPACE_EN 0xC0720C + +#define mmPCIE_AUX_MAX_RD_REQ_SIZE 0xC07210 + +#define mmPCIE_AUX_MAX_PAYLOAD_SIZE 0xC07214 + +#define mmPCIE_AUX_EXT_TAG_EN 0xC07218 + +#define mmPCIE_AUX_RCB 0xC0721C + +#define mmPCIE_AUX_PM_NO_SOFT_RST 0xC07220 + +#define mmPCIE_AUX_PBUS_NUM 0xC07224 + +#define mmPCIE_AUX_PBUS_DEV_NUM 0xC07228 + +#define mmPCIE_AUX_NO_SNOOP_EN 0xC0722C + +#define mmPCIE_AUX_RELAX_ORDER_EN 0xC07230 + +#define mmPCIE_AUX_HP_SLOT_CTRL_ACCESS 0xC07234 + +#define mmPCIE_AUX_DLL_STATE_CHGED_EN 0xC07238 + +#define mmPCIE_AUX_CMP_CPLED_INT_EN 0xC0723C + +#define mmPCIE_AUX_HP_INT_EN 0xC07340 + +#define mmPCIE_AUX_PRE_DET_CHGEN_EN 0xC07344 + +#define mmPCIE_AUX_MRL_SENSOR_CHGED_EN 0xC07348 + +#define mmPCIE_AUX_PWR_FAULT_DET_EN 0xC0734C + +#define mmPCIE_AUX_ATTEN_BUTTON_PRESSED_EN 0xC07350 + +#define mmPCIE_AUX_PF_FLR_ACTIVE 0xC07360 + +#define mmPCIE_AUX_PF_FLR_DONE 0xC07364 + +#define mmPCIE_AUX_FLR_INT 0xC07390 + +#define mmPCIE_AUX_LTR_M_EN 0xC073B0 + +#define mmPCIE_AUX_LTSSM_EN 0xC07428 + +#define mmPCIE_AUX_SYS_INTR 0xC07440 + +#define mmPCIE_AUX_INT_DISABLE 0xC07444 + +#define mmPCIE_AUX_SMLH_LINK_UP 0xC07448 + +#define mmPCIE_AUX_PM_CURR_STATE 0xC07450 + +#define mmPCIE_AUX_RDLH_LINK_UP 0xC07458 + +#define mmPCIE_AUX_BRDG_SLV_XFER_PENDING 0xC0745C + +#define mmPCIE_AUX_BRDG_DBI_XFER_PENDING 0xC07460 + +#define mmPCIE_AUX_AUTO_SP_DIS 0xC07478 + +#define mmPCIE_AUX_DBI 0xC07490 + +#define mmPCIE_AUX_DBI_32 0xC07494 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_0 0xC074A4 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_1 0xC074A8 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_2 0xC074AC + +#define mmPCIE_AUX_DIAG_STATUS_BUS_3 0xC074B0 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_4 0xC074B4 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_5 0xC074B8 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_6 0xC074BC + +#define mmPCIE_AUX_DIAG_STATUS_BUS_7 0xC074C0 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_8 0xC074C4 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_9 0xC074C8 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_10 0xC074CC + +#define mmPCIE_AUX_DIAG_STATUS_BUS_11 0xC074D0 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_12 0xC074D4 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_13 0xC074D8 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_14 0xC074DC + +#define 
mmPCIE_AUX_DIAG_STATUS_BUS_15 0xC074E0 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_16 0xC074E4 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_17 0xC074E8 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_18 0xC074EC + +#define mmPCIE_AUX_DIAG_STATUS_BUS_19 0xC074F0 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_20 0xC074F4 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_21 0xC074F8 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_22 0xC074FC + +#define mmPCIE_AUX_DIAG_STATUS_BUS_23 0xC07500 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_24 0xC07504 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_25 0xC07508 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_26 0xC0750C + +#define mmPCIE_AUX_DIAG_STATUS_BUS_27 0xC07510 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_28 0xC07514 + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_0 0xC07640 + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_1 0xC07644 + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_2 0xC07648 + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_3 0xC0764C + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_4 0xC07650 + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_5 0xC07654 + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_6 0xC07658 + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_7 0xC0765C + +#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_0 0xC07744 + +#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_1 0xC07748 + +#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_2 0xC0774C + +#define mmPCIE_AUX_APP_RAS_DES_TBA_CTRL 0xC07774 + +#define mmPCIE_AUX_PM_DSTATE 0xC07840 + +#define mmPCIE_AUX_PM_PME_EN 0xC07844 + +#define mmPCIE_AUX_PM_LINKST_IN_L0S 0xC07848 + +#define mmPCIE_AUX_PM_LINKST_IN_L1 0xC0784C + +#define mmPCIE_AUX_PM_LINKST_IN_L2 0xC07850 + +#define mmPCIE_AUX_PM_LINKST_L2_EXIT 0xC07854 + +#define mmPCIE_AUX_PM_STATUS 0xC07858 + +#define mmPCIE_AUX_APP_READY_ENTER_L23 0xC0785C + +#define mmPCIE_AUX_APP_XFER_PENDING 0xC07860 + +#define mmPCIE_AUX_APP_REQ_L1 0xC07930 + +#define mmPCIE_AUX_AUX_PM_EN 0xC07934 + +#define mmPCIE_AUX_APPS_PM_XMT_PME 0xC07938 + +#define mmPCIE_AUX_OUTBAND_PWRUP_CMD 0xC07940 + +#define mmPCIE_AUX_PERST 0xC079B8 + +#endif /* ASIC_REG_PCIE_AUX_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h new file mode 100644 index 000000000..d1e55aace --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h @@ -0,0 +1,306 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
new file mode 100644
index 000000000..d1e55aace
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_WRAP_REGS_H_
+#define ASIC_REG_PCIE_WRAP_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_WRAP (Prototype: PCIE_WRAP)
+ *****************************************
+ */
+
+#define mmPCIE_WRAP_PHY_RST_N 0xC01300
+
+#define mmPCIE_WRAP_OUTSTAND_TRANS 0xC01400
+
+#define mmPCIE_WRAP_MASK_REQ 0xC01404
+
+#define mmPCIE_WRAP_IND_AWADDR_L 0xC01500
+
+#define mmPCIE_WRAP_IND_AWADDR_H 0xC01504
+
+#define mmPCIE_WRAP_IND_AWLEN 0xC01508
+
+#define mmPCIE_WRAP_IND_AWSIZE 0xC0150C
+
+#define mmPCIE_WRAP_IND_AWBURST 0xC01510
+
+#define mmPCIE_WRAP_IND_AWLOCK 0xC01514
+
+#define mmPCIE_WRAP_IND_AWCACHE 0xC01518
+
+#define mmPCIE_WRAP_IND_AWPROT 0xC0151C
+
+#define mmPCIE_WRAP_IND_AWVALID 0xC01520
+
+#define mmPCIE_WRAP_IND_WDATA_0 0xC01524
+
+#define mmPCIE_WRAP_IND_WDATA_1 0xC01528
+
+#define mmPCIE_WRAP_IND_WDATA_2 0xC0152C
+
+#define mmPCIE_WRAP_IND_WDATA_3 0xC01530
+
+#define mmPCIE_WRAP_IND_WSTRB 0xC01544
+
+#define mmPCIE_WRAP_IND_WLAST 0xC01548
+
+#define mmPCIE_WRAP_IND_WVALID 0xC0154C
+
+#define mmPCIE_WRAP_IND_BRESP 0xC01550
+
+#define mmPCIE_WRAP_IND_BVALID 0xC01554
+
+#define mmPCIE_WRAP_IND_ARADDR_0 0xC01558
+
+#define mmPCIE_WRAP_IND_ARADDR_1 0xC0155C
+
+#define mmPCIE_WRAP_IND_ARLEN 0xC01560
+
+#define mmPCIE_WRAP_IND_ARSIZE 0xC01564
+
+#define mmPCIE_WRAP_IND_ARBURST 0xC01568
+
+#define mmPCIE_WRAP_IND_ARLOCK 0xC0156C
+
+#define mmPCIE_WRAP_IND_ARCACHE 0xC01570
+
+#define mmPCIE_WRAP_IND_ARPROT 0xC01574
+
+#define mmPCIE_WRAP_IND_ARVALID 0xC01578
+
+#define mmPCIE_WRAP_IND_RDATA_0 0xC0157C
+
+#define mmPCIE_WRAP_IND_RDATA_1 0xC01580
+
+#define mmPCIE_WRAP_IND_RDATA_2 0xC01584
+
+#define mmPCIE_WRAP_IND_RDATA_3 0xC01588
+
+#define mmPCIE_WRAP_IND_RLAST 0xC0159C
+
+#define mmPCIE_WRAP_IND_RRESP 0xC015A0
+
+#define mmPCIE_WRAP_IND_RVALID 0xC015A4
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO 0xC015A8
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_HDR_34DW_0 0xC015AC
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_HDR_34DW_1 0xC015B0
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_P_TAG 0xC015B4
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_ATU_BYPAS 0xC015B8
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_FUNC_NUM 0xC015BC
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_VFUNC_ACT 0xC015C0
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_VFUNC_NUM 0xC015C4
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_TLPPRFX 0xC015C8
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO 0xC015CC
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_TLPPRFX 0xC015D0
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_ATU_BYP 0xC015D4
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_FUNC_NUM 0xC015D8
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_VFUNC_ACT 0xC015DC
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_VFUNC_NUM 0xC015E0
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO 0xC01800
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_HDR_34DW_0 0xC01804
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_HDR_34DW_1 0xC01808
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_P_TAG 0xC0180C
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_ATU_BYPAS 0xC01810
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_FUNC_NUM 0xC01814
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_VFUNC_ACT 0xC01818
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_VFUNC_NUM 0xC0181C
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_TLPPRFX 0xC01820
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO 0xC01824
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_TLPPRFX 0xC01828
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_ATU_BYP 0xC0182C
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_FUNC_NUM 0xC01830
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_VFUNC_ACT 0xC01834
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_VFUNC_NUM 0xC01838
+
+#define mmPCIE_WRAP_MAX_QID 0xC01900
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_0 0xC01910
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_1 0xC01914
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_2 0xC01918
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_3 0xC0191C
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_0 0xC01920
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_1 0xC01924
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_2 0xC01928
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_3 0xC0192C
+
+#define mmPCIE_WRAP_DB_MASK 0xC01940
+
+#define mmPCIE_WRAP_SQ_BASE_ADDR_H 0xC01A00
+
+#define mmPCIE_WRAP_SQ_BASE_ADDR_L 0xC01A04
+
+#define mmPCIE_WRAP_SQ_STRIDE_ACCRESS 0xC01A08
+
+#define mmPCIE_WRAP_SQ_POP_CMD 0xC01A10
+
+#define mmPCIE_WRAP_SQ_POP_DATA 0xC01A14
+
+#define mmPCIE_WRAP_DB_INTR_0 0xC01A20
+
+#define mmPCIE_WRAP_DB_INTR_1 0xC01A24
+
+#define mmPCIE_WRAP_DB_INTR_2 0xC01A28
+
+#define mmPCIE_WRAP_DB_INTR_3 0xC01A2C
+
+#define mmPCIE_WRAP_DB_INTR_4 0xC01A30
+
+#define mmPCIE_WRAP_DB_INTR_5 0xC01A34
+
+#define mmPCIE_WRAP_DB_INTR_6 0xC01A38
+
+#define mmPCIE_WRAP_DB_INTR_7 0xC01A3C
+
+#define mmPCIE_WRAP_MMU_BYPASS_DMA 0xC01A80
+
+#define mmPCIE_WRAP_MMU_BYPASS_NON_DMA 0xC01A84
+
+#define mmPCIE_WRAP_ASID_NON_DMA 0xC01A90
+
+#define mmPCIE_WRAP_ASID_DMA_0 0xC01AA0
+
+#define mmPCIE_WRAP_ASID_DMA_1 0xC01AA4
+
+#define mmPCIE_WRAP_ASID_DMA_2 0xC01AA8
+
+#define mmPCIE_WRAP_ASID_DMA_3 0xC01AAC
+
+#define mmPCIE_WRAP_ASID_DMA_4 0xC01AB0
+
+#define mmPCIE_WRAP_ASID_DMA_5 0xC01AB4
+
+#define mmPCIE_WRAP_ASID_DMA_6 0xC01AB8
+
+#define mmPCIE_WRAP_ASID_DMA_7 0xC01ABC
+
+#define mmPCIE_WRAP_CPU_HOT_RST 0xC01AE0
+
+#define mmPCIE_WRAP_AXI_PROT_OVR 0xC01AE4
+
+#define mmPCIE_WRAP_CACHE_OVR 0xC01B00
+
+#define mmPCIE_WRAP_LOCK_OVR 0xC01B04
+
+#define mmPCIE_WRAP_PROT_OVR 0xC01B08
+
+#define mmPCIE_WRAP_ARUSER_OVR 0xC01B0C
+
+#define mmPCIE_WRAP_AWUSER_OVR 0xC01B10
+
+#define mmPCIE_WRAP_ARUSER_OVR_EN 0xC01B14
+
+#define mmPCIE_WRAP_AWUSER_OVR_EN 0xC01B18
+
+#define mmPCIE_WRAP_MAX_OUTSTAND 0xC01B20
+
+#define mmPCIE_WRAP_MST_IN 0xC01B24
+
+#define mmPCIE_WRAP_RSP_OK 0xC01B28
+
+#define mmPCIE_WRAP_LBW_CACHE_OVR 0xC01B40
+
+#define mmPCIE_WRAP_LBW_LOCK_OVR 0xC01B44
+
+#define mmPCIE_WRAP_LBW_PROT_OVR 0xC01B48
+
+#define mmPCIE_WRAP_LBW_ARUSER_OVR 0xC01B4C
+
+#define mmPCIE_WRAP_LBW_AWUSER_OVR 0xC01B50
+
+#define mmPCIE_WRAP_LBW_ARUSER_OVR_EN 0xC01B58
+
+#define mmPCIE_WRAP_LBW_AWUSER_OVR_EN 0xC01B5C
+
+#define mmPCIE_WRAP_LBW_MAX_OUTSTAND 0xC01B60
+
+#define mmPCIE_WRAP_LBW_MST_IN 0xC01B64
+
+#define mmPCIE_WRAP_LBW_RSP_OK 0xC01B68
+
+#define mmPCIE_WRAP_QUEUE_INIT 0xC01C00
+
+#define mmPCIE_WRAP_AXI_SPLIT_INTR_0 0xC01C10
+
+#define mmPCIE_WRAP_AXI_SPLIT_INTR_1 0xC01C14
+
+#define mmPCIE_WRAP_DB_AWUSER 0xC01D00
+
+#define mmPCIE_WRAP_DB_ARUSER 0xC01D04
+
+#define mmPCIE_WRAP_PCIE_AWUSER 0xC01D08
+
+#define mmPCIE_WRAP_PCIE_ARUSER 0xC01D0C
+
+#define mmPCIE_WRAP_PSOC_AWUSER 0xC01D10
+
+#define mmPCIE_WRAP_PSOC_ARUSER 0xC01D14
+
+#define mmPCIE_WRAP_SCH_Q_AWUSER 0xC01D18
+
+#define mmPCIE_WRAP_SCH_Q_ARUSER 0xC01D1C
+
+#define mmPCIE_WRAP_PSOC2PCI_AWUSER 0xC01D40
+
+#define mmPCIE_WRAP_PSOC2PCI_ARUSER 0xC01D44
+
+#define mmPCIE_WRAP_DRAIN_TIMEOUT 0xC01D50
+
+#define mmPCIE_WRAP_DRAIN_CFG 0xC01D54
+
+#define mmPCIE_WRAP_DB_AXI_ERR 0xC01DE0
+
+#define mmPCIE_WRAP_SPMU_INTR 0xC01DE4
+
+#define mmPCIE_WRAP_AXI_INTR 0xC01DE8
+
+#define mmPCIE_WRAP_E2E_CTRL 0xC01DF0
+
+#endif /* ASIC_REG_PCIE_WRAP_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
new file mode 100644
index 000000000..9271ea95e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_EMMC_PLL_REGS_H_
+#define ASIC_REG_PSOC_EMMC_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_EMMC_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_EMMC_PLL_NR 0xC70100
+
+#define mmPSOC_EMMC_PLL_NF 0xC70104
+
+#define mmPSOC_EMMC_PLL_OD 0xC70108
+
+#define mmPSOC_EMMC_PLL_NB 0xC7010C
+
+#define mmPSOC_EMMC_PLL_CFG 0xC70110
+
+#define mmPSOC_EMMC_PLL_LOSE_MASK 0xC70120
+
+#define mmPSOC_EMMC_PLL_LOCK_INTR 0xC70128
+
+#define mmPSOC_EMMC_PLL_LOCK_BYPASS 0xC7012C
+
+#define mmPSOC_EMMC_PLL_DATA_CHNG 0xC70130
+
+#define mmPSOC_EMMC_PLL_RST 0xC70134
+
+#define mmPSOC_EMMC_PLL_SLIP_WD_CNTR 0xC70150
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_0 0xC70200
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_1 0xC70204
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_2 0xC70208
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_3 0xC7020C
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_0 0xC70220
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_1 0xC70224
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_2 0xC70228
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_3 0xC7022C
+
+#define mmPSOC_EMMC_PLL_DIV_SEL_0 0xC70280
+
+#define mmPSOC_EMMC_PLL_DIV_SEL_1 0xC70284
+
+#define mmPSOC_EMMC_PLL_DIV_SEL_2 0xC70288
+
+#define mmPSOC_EMMC_PLL_DIV_SEL_3 0xC7028C
+
+#define mmPSOC_EMMC_PLL_DIV_EN_0 0xC702A0
+
+#define mmPSOC_EMMC_PLL_DIV_EN_1 0xC702A4
+
+#define mmPSOC_EMMC_PLL_DIV_EN_2 0xC702A8
+
+#define mmPSOC_EMMC_PLL_DIV_EN_3 0xC702AC
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_0 0xC702C0
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_1 0xC702C4
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_2 0xC702C8
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_3 0xC702CC
+
+#define mmPSOC_EMMC_PLL_CLK_GATER 0xC70300
+
+#define mmPSOC_EMMC_PLL_CLK_RLX_0 0xC70310
+
+#define mmPSOC_EMMC_PLL_CLK_RLX_1 0xC70314
+
+#define mmPSOC_EMMC_PLL_CLK_RLX_2 0xC70318
+
+#define mmPSOC_EMMC_PLL_CLK_RLX_3 0xC7031C
+
+#define mmPSOC_EMMC_PLL_REF_CNTR_PERIOD 0xC70400
+
+#define mmPSOC_EMMC_PLL_REF_LOW_THRESHOLD 0xC70410
+
+#define mmPSOC_EMMC_PLL_REF_HIGH_THRESHOLD 0xC70420
+
+#define mmPSOC_EMMC_PLL_PLL_NOT_STABLE 0xC70430
+
+#define mmPSOC_EMMC_PLL_FREQ_CALC_EN 0xC70440
+
+#endif /* ASIC_REG_PSOC_EMMC_PLL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
new file mode 100644
index 000000000..b7c33e025
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_ETR_REGS_H_
+#define ASIC_REG_PSOC_ETR_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_ETR (Prototype: ETR)
+ *****************************************
+ */
+
+#define mmPSOC_ETR_RSZ 0x2C43004
+
+#define mmPSOC_ETR_STS 0x2C4300C
+
+#define mmPSOC_ETR_RRD 0x2C43010
+
+#define mmPSOC_ETR_RRP 0x2C43014
+
+#define mmPSOC_ETR_RWP 0x2C43018
+
+#define mmPSOC_ETR_TRG 0x2C4301C
+
+#define mmPSOC_ETR_CTL 0x2C43020
+
+#define mmPSOC_ETR_RWD 0x2C43024
+
+#define mmPSOC_ETR_MODE 0x2C43028
+
+#define mmPSOC_ETR_LBUFLEVEL 0x2C4302C
+
+#define mmPSOC_ETR_CBUFLEVEL 0x2C43030
+
+#define mmPSOC_ETR_BUFWM 0x2C43034
+
+#define mmPSOC_ETR_RRPHI 0x2C43038
+
+#define mmPSOC_ETR_RWPHI 0x2C4303C
+
+#define mmPSOC_ETR_AXICTL 0x2C43110
+
+#define mmPSOC_ETR_DBALO 0x2C43118
+
+#define mmPSOC_ETR_DBAHI 0x2C4311C
+
+#define mmPSOC_ETR_FFSR 0x2C43300
+
+#define mmPSOC_ETR_FFCR 0x2C43304
+
+#define mmPSOC_ETR_PSCR 0x2C43308
+
+#define mmPSOC_ETR_ITMISCOP0 0x2C43EE0
+
+#define mmPSOC_ETR_ITTRFLIN 0x2C43EE8
+
+#define mmPSOC_ETR_ITATBDATA0 0x2C43EEC
+
+#define mmPSOC_ETR_ITATBCTR2 0x2C43EF0
+
+#define mmPSOC_ETR_ITATBCTR1 0x2C43EF4
+
+#define mmPSOC_ETR_ITATBCTR0 0x2C43EF8
+
+#define mmPSOC_ETR_ITCTRL 0x2C43F00
+
+#define mmPSOC_ETR_CLAIMSET 0x2C43FA0
+
+#define mmPSOC_ETR_CLAIMCLR 0x2C43FA4
+
+#define mmPSOC_ETR_LAR 0x2C43FB0
+
+#define mmPSOC_ETR_LSR 0x2C43FB4
+
+#define mmPSOC_ETR_AUTHSTATUS 0x2C43FB8
+
+#define mmPSOC_ETR_DEVID 0x2C43FC8
+
+#define mmPSOC_ETR_DEVTYPE 0x2C43FCC
+
+#define mmPSOC_ETR_PERIPHID4 0x2C43FD0
+
+#define mmPSOC_ETR_PERIPHID5 0x2C43FD4
+
+#define mmPSOC_ETR_PERIPHID6 0x2C43FD8
+
+#define mmPSOC_ETR_PERIPHID7 0x2C43FDC
+
+#define mmPSOC_ETR_PERIPHID0 0x2C43FE0
+
+#define mmPSOC_ETR_PERIPHID1 0x2C43FE4
+
+#define mmPSOC_ETR_PERIPHID2 0x2C43FE8
+
+#define mmPSOC_ETR_PERIPHID3 0x2C43FEC
+
+#define mmPSOC_ETR_COMPID0 0x2C43FF0
+
+#define mmPSOC_ETR_COMPID1 0x2C43FF4
+
+#define mmPSOC_ETR_COMPID2 0x2C43FF8
+
+#define mmPSOC_ETR_COMPID3 0x2C43FFC
+
+#endif /* ASIC_REG_PSOC_ETR_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
new file mode 100644
index 000000000..324266653
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
@@ -0,0 +1,446 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
+#define ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
+
+/*
+ *****************************************
+ * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF)
+ *****************************************
+ */
+
+/* PSOC_GLOBAL_CONF_NON_RST_FLOPS */
+#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_PCI_FW_FSM */
+#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BTM_FSM */
+#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_SW_BTM_FSM */
+#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM */
+#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPI_MEM_EN */
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PRSTN */
+#define PSOC_GLOBAL_CONF_PRSTN_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_EN */
+#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SPI_IMG_STS */
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_MASK 0x1
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_SHIFT 1
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_MASK 0x2
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_SHIFT 2
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_MASK 0x4
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_SHIFT 3
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_MASK 0x8
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_FSM */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_SHIFT 1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_MASK 0x2
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_SHIFT 2
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_MASK 0x4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_SHIFT 3
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_MASK 0x8
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_MASK 0x20
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_SHIFT 6
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_MASK 0x40
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_SHIFT 7
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_MASK 0x80
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_SHIFT 8
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_MASK 0x100
+
+/* PSOC_GLOBAL_CONF_SCRATCHPAD */
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_SHIFT 0
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SEMAPHORE */
+#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_SHIFT 0
+#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_WARM_REBOOT */
+#define PSOC_GLOBAL_CONF_WARM_REBOOT_CNTR_SHIFT 0
+#define PSOC_GLOBAL_CONF_WARM_REBOOT_CNTR_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_UBOOT_MAGIC */
+#define PSOC_GLOBAL_CONF_UBOOT_MAGIC_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_UBOOT_MAGIC_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPL_SOURCE */
+#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_MASK 0x7
+
+/* PSOC_GLOBAL_CONF_I2C_MSTR1_DBG */
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_MASK 0x1
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_SHIFT 1
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_MASK 0x2
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_SHIFT 2
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_MASK 0x4
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_SHIFT 3
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_MASK 0x8
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_SHIFT 4
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_MASK 0x10
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_SHIFT 5
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_MASK 0x20
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_SHIFT 6
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_MASK 0x40
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_SHIFT 7
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_MASK 0x80
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_SHIFT 8
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_MASK 0x100
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_SHIFT 9
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_MASK 0x200
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_SHIFT 10
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_MASK 0x7C00
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_SHIFT 15
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_MASK 0x78000
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_SHIFT 19
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_MASK 0x80000
+
+/* PSOC_GLOBAL_CONF_I2C_SLV */
+#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK */
+#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_APP_STATUS */
+#define PSOC_GLOBAL_CONF_APP_STATUS_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_APP_STATUS_IND_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_BTL_STS */
+#define PSOC_GLOBAL_CONF_BTL_STS_DONE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTL_STS_DONE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_SHIFT 4
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_MASK 0x10
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_SHIFT 8
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_MASK 0xF00
+
+/* PSOC_GLOBAL_CONF_TIMEOUT_INTR */
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_SHIFT 0
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_MASK 0x1
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_SHIFT 1
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_MASK 0x2
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_SHIFT 2
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_MASK 0x4
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_SHIFT 3
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_MASK 0x8
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_SHIFT 4
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_MASK 0x10
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_SHIFT 5
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_MASK 0x20
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_SHIFT 6
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_MASK 0x40
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_SHIFT 7
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_MASK 0x80
+
+/* PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR */
+#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PERIPH_INTR */
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_SHIFT 0
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_MASK 0x1
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_SHIFT 1
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_MASK 0x2
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_SHIFT 2
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_MASK 0x4
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_SHIFT 3
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_MASK 0x8
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_SHIFT 4
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_MASK 0x10
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_SHIFT 5
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_MASK 0x20
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_SHIFT 6
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_MASK 0x40
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_SHIFT 7
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_MASK 0x80
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_SHIFT 12
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_MASK 0x1000
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_SHIFT 13
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_MASK 0x2000
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_SHIFT 16
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_MASK 0x10000
+
+/* PSOC_GLOBAL_CONF_COMB_PERIPH_INTR */
+#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_AXI_ERR_INTR */
+#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_TARGETID */
+#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_SHIFT 1
+#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_MASK 0xFFE
+#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_SHIFT 12
+#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_MASK 0xFFFF000
+#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_SHIFT 28
+#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_MASK 0xF0000000
+
+/* PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE */
+#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_MII_ADDR */
+#define PSOC_GLOBAL_CONF_MII_ADDR_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MII_ADDR_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_MII_SPEED */
+#define PSOC_GLOBAL_CONF_MII_SPEED_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MII_SPEED_VAL_MASK 0x3
+
+/* PSOC_GLOBAL_CONF_BOOT_STRAP_PINS */
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_SHIFT 1
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_MASK 0x2
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_SHIFT 2
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_MASK 0x4
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_SHIFT 3
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_MASK 0x8
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_MASK 0xFE0
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BOOT_STG2_SRC_SHIFT 12
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BOOT_STG2_SRC_MASK 0x3000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_BPS_SHIFT 14
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_BPS_MASK 0x1FC000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_SHIFT 21
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK 0x200000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_CFG_SHIFT 22
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_CFG_MASK 0x1C00000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_MEM_REPAIR_BPS_SHIFT 25
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_MEM_REPAIR_BPS_MASK 0x2000000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SPARE_SHIFT 26
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SPARE_MASK 0x1C000000
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_MASK 0x1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_SHIFT 1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_STS */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_OUTSTANT_TRANS */
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_SHIFT 0
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_MASK 0x1
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_SHIFT 1
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_MASK_REQ */
+#define PSOC_GLOBAL_CONF_MASK_REQ_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_MASK_REQ_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PRSTN_RST_CFG */
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_SW_ALL_RST_CFG */
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_WD_RST_CFG */
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_MNL_RST_CFG */
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_UNIT_RST_N */
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_PRSTN_MASK */
+#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_WD_MASK */
+#define PSOC_GLOBAL_CONF_WD_MASK_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_WD_MASK_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_RST_SRC */
+#define PSOC_GLOBAL_CONF_RST_SRC_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_RST_SRC_VAL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_PAD_1V8_CFG */
+#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_MASK 0x7F
+
+/* PSOC_GLOBAL_CONF_PAD_3V3_CFG */
+#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_MASK 0x7F
+
+/* PSOC_GLOBAL_CONF_PAD_1V8_INPUT */
+#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_MASK 0x7
+
+/* PSOC_GLOBAL_CONF_BNK3V3_MS */
+#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_MASK 0x3
+
+/* PSOC_GLOBAL_CONF_PAD_DEFAULT */
+#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_PAD_SEL */
+#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_MASK 0x3
+
+#endif /* ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ */
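Each field above is published as a SHIFT/MASK pair over a 32-bit register: AND with the mask isolates the field, and shifting right by the shift normalizes it. A minimal sketch, using the boot stage-2 source strap field (bits 13:12) as the example; the helper name is illustrative, not from the driver:

/* Sketch: decode the boot stage-2 source field from a raw value read
 * back from mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS.
 */
static u32 boot_stg2_src(u32 strap)
{
	return (strap & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BOOT_STG2_SRC_MASK) >>
	       PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BOOT_STG2_SRC_SHIFT;
}

The inverse (read-modify-write) is the same pattern: clear the masked bits, then OR in (value << SHIFT) & MASK.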
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
new file mode 100644
index 000000000..8141f422e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
@@ -0,0 +1,744 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_
+#define ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF)
+ *****************************************
+ */
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0 0xC4B000
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_1 0xC4B004
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_2 0xC4B008
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_3 0xC4B00C
+
+#define mmPSOC_GLOBAL_CONF_PCI_FW_FSM 0xC4B020
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START 0xC4B024
+
+#define mmPSOC_GLOBAL_CONF_BTM_FSM 0xC4B028
+
+#define mmPSOC_GLOBAL_CONF_SW_BTM_FSM 0xC4B030
+
+#define mmPSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM 0xC4B034
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT 0xC4B038
+
+#define mmPSOC_GLOBAL_CONF_SPI_MEM_EN 0xC4B040
+
+#define mmPSOC_GLOBAL_CONF_PRSTN 0xC4B044
+
+#define mmPSOC_GLOBAL_CONF_PCIE_EN 0xC4B048
+
+#define mmPSOC_GLOBAL_CONF_SPI_IMG_STS 0xC4B050
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_FSM 0xC4B054
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_0 0xC4B100
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_1 0xC4B104
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_2 0xC4B108
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_3 0xC4B10C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_4 0xC4B110
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_5 0xC4B114
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_6 0xC4B118
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_7 0xC4B11C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_8 0xC4B120
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_9 0xC4B124
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_10 0xC4B128
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_11 0xC4B12C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_12 0xC4B130
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_13 0xC4B134
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_14 0xC4B138
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_15 0xC4B13C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_16 0xC4B140
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_17 0xC4B144
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_18 0xC4B148
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_19 0xC4B14C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_20 0xC4B150
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_21 0xC4B154
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 0xC4B158
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 0xC4B15C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 0xC4B160
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_25 0xC4B164
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_26 0xC4B168
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_27 0xC4B16C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_28 0xC4B170
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_29 0xC4B174
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_30 0xC4B178
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_31 0xC4B17C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_0 0xC4B200
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_1 0xC4B204
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_2 0xC4B208
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_3 0xC4B20C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_4 0xC4B210
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_5 0xC4B214
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_6 0xC4B218
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_7 0xC4B21C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_8 0xC4B220
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_9 0xC4B224
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_10 0xC4B228
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_11 0xC4B22C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_12 0xC4B230
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_13 0xC4B234
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_14 0xC4B238
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_15 0xC4B23C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_16 0xC4B240
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_17 0xC4B244
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_18 0xC4B248
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_19 0xC4B24C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_20 0xC4B250
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_21 0xC4B254
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_22 0xC4B258
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_23 0xC4B25C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_24 0xC4B260
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_25 0xC4B264
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_26 0xC4B268
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_27 0xC4B26C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_28 0xC4B270
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_29 0xC4B274
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_30 0xC4B278
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_31 0xC4B27C
+
+#define mmPSOC_GLOBAL_CONF_WARM_REBOOT 0xC4B300
+
+#define mmPSOC_GLOBAL_CONF_UBOOT_MAGIC 0xC4B304
+
+#define mmPSOC_GLOBAL_CONF_SPL_SOURCE 0xC4B308
+
+#define mmPSOC_GLOBAL_CONF_I2C_MSTR1_DBG 0xC4B30C
+
+#define mmPSOC_GLOBAL_CONF_I2C_SLV 0xC4B310
+
+#define mmPSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK 0xC4B314
+
+#define mmPSOC_GLOBAL_CONF_APP_STATUS 0xC4B320
+
+#define mmPSOC_GLOBAL_CONF_BTL_STS 0xC4B340
+
+#define mmPSOC_GLOBAL_CONF_TIMEOUT_INTR 0xC4B350
+
+#define mmPSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR 0xC4B354
+
+#define mmPSOC_GLOBAL_CONF_PERIPH_INTR 0xC4B358
+
+#define mmPSOC_GLOBAL_CONF_COMB_PERIPH_INTR 0xC4B35C
+
+#define mmPSOC_GLOBAL_CONF_AXI_ERR_INTR 0xC4B360
+
+#define mmPSOC_GLOBAL_CONF_TARGETID 0xC4B400
+
+#define mmPSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE 0xC4B420
+
+#define mmPSOC_GLOBAL_CONF_MII_ADDR 0xC4B424
+
+#define mmPSOC_GLOBAL_CONF_MII_SPEED 0xC4B428
+
+#define mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS 0xC4B430
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_CTRL 0xC4B450
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_STS 0xC4B454
+
+#define mmPSOC_GLOBAL_CONF_OUTSTANT_TRANS 0xC4B458
+
+#define mmPSOC_GLOBAL_CONF_MASK_REQ 0xC4B45C
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_RST_CFG 0xC4B470
+
+#define mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG 0xC4B474
+
+#define mmPSOC_GLOBAL_CONF_WD_RST_CFG 0xC4B478
+
+#define mmPSOC_GLOBAL_CONF_MNL_RST_CFG 0xC4B47C
+
+#define mmPSOC_GLOBAL_CONF_UNIT_RST_N 0xC4B480
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_MASK 0xC4B484
+
+#define mmPSOC_GLOBAL_CONF_WD_MASK 0xC4B488
+
+#define mmPSOC_GLOBAL_CONF_RST_SRC 0xC4B490
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_0 0xC4B500
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_1 0xC4B504
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_2 0xC4B508
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_3 0xC4B50C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_4 0xC4B510
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_5 0xC4B514
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_6 0xC4B518
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_7 0xC4B51C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_8 0xC4B520
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_9 0xC4B524
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_10 0xC4B528
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_11 0xC4B52C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_12 0xC4B530
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_13 0xC4B534
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_14 0xC4B538
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_15 0xC4B53C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_16 0xC4B540
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_17 0xC4B544
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_18 0xC4B548
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_19 0xC4B54C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_20 0xC4B550
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_21 0xC4B554
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_22 0xC4B558
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_23 0xC4B55C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_24 0xC4B560
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_25 0xC4B564
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_26 0xC4B568
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_27 0xC4B56C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_28 0xC4B570
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_29 0xC4B574
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_30 0xC4B578
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_31 0xC4B57C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_32 0xC4B580
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_33 0xC4B584
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_34 0xC4B588
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_35 0xC4B58C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_36 0xC4B590
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_37 0xC4B594
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_38 0xC4B598
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_39 0xC4B59C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_40 0xC4B5A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_41 0xC4B5A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_42 0xC4B5A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_43 0xC4B5AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_44 0xC4B5B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_45 0xC4B5B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_46 0xC4B5B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_47 0xC4B5BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_48 0xC4B5C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_49 0xC4B5C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_50 0xC4B5C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_51 0xC4B5CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_52 0xC4B5D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_53 0xC4B5D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_54 0xC4B5D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_55 0xC4B5DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_56 0xC4B5E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_57 0xC4B5E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_58 0xC4B5E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_59 0xC4B5EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_60 0xC4B5F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_61 0xC4B5F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_62 0xC4B5F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_63 0xC4B5FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_64 0xC4B600
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_65 0xC4B604
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_66 0xC4B608
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_67 0xC4B60C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_68 0xC4B610
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_0 0xC4B640
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_1 0xC4B644
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_2 0xC4B648
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_3 0xC4B64C
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_4 0xC4B650
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_5 0xC4B654
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_6 0xC4B658
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_7 0xC4B65C
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_8 0xC4B660
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_9 0xC4B664
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_10 0xC4B668
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_11 0xC4B66C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_0 0xC4B680
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_1 0xC4B684
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_2 0xC4B688
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_3 0xC4B68C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_4 0xC4B690
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_5 0xC4B694
+
+#define mmPSOC_GLOBAL_CONF_BNK3V3_MS 0xC4B6E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_0 0xC4B700
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_1 0xC4B704
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_2 0xC4B708
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_3 0xC4B70C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_4 0xC4B710
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_5 0xC4B714
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_6 0xC4B718
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_7 0xC4B71C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_8 0xC4B720
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_9 0xC4B724
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_10 0xC4B728
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_11 0xC4B72C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_12 0xC4B730
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_13 0xC4B734
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_14 0xC4B738
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_15 0xC4B73C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_16 0xC4B740
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_17 0xC4B744
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_18 0xC4B748
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_19 0xC4B74C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_20 0xC4B750
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_21 0xC4B754
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_22 0xC4B758
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_23 0xC4B75C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_24 0xC4B760
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_25 0xC4B764
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_26 0xC4B768
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_27 0xC4B76C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_28 0xC4B770
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_29 0xC4B774
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_30 0xC4B778
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_31 0xC4B77C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_32 0xC4B780
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_33 0xC4B784
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_34 0xC4B788
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_35 0xC4B78C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_36 0xC4B790
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_37 0xC4B794
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_38 0xC4B798
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_39 0xC4B79C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_40 0xC4B7A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_41 0xC4B7A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_42 0xC4B7A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_43 0xC4B7AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_44 0xC4B7B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_45 0xC4B7B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_46 0xC4B7B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_47 0xC4B7BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_48 0xC4B7C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_49 0xC4B7C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_50 0xC4B7C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_51 0xC4B7CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_52 0xC4B7D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_53 0xC4B7D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_54 0xC4B7D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_55 0xC4B7DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_56 0xC4B7E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_57 0xC4B7E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_58 0xC4B7E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_59 0xC4B7EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_60 0xC4B7F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_61 0xC4B7F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_62 0xC4B7F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_63 0xC4B7FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_64 0xC4B800
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_65 0xC4B804
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_66 0xC4B808
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_67 0xC4B80C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_68 0xC4B810
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_69 0xC4B814
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_70 0xC4B818
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_71 0xC4B81C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_72 0xC4B820
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_73 0xC4B824
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_74 0xC4B828
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_75 0xC4B82C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_76 0xC4B830
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_77 0xC4B834
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_78 0xC4B838
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_79 0xC4B83C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_80 0xC4B840
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_81 0xC4B844
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_0 0xC4B900
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_1 0xC4B904
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_2 0xC4B908
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_3 0xC4B90C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_4 0xC4B910
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_5 0xC4B914
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_6 0xC4B918
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_7 0xC4B91C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_8 0xC4B920
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_9 0xC4B924
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_10 0xC4B928
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_11 0xC4B92C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_12 0xC4B930
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_13 0xC4B934
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_14 0xC4B938
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_15 0xC4B93C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_16 0xC4B940
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_17 0xC4B944
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_18 0xC4B948
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_19 0xC4B94C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_20 0xC4B950
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_21 0xC4B954
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_22 0xC4B958
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_23 0xC4B95C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_24 0xC4B960
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_25 0xC4B964
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_26 0xC4B968
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_27 0xC4B96C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_28 0xC4B970
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_29 0xC4B974
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_30 0xC4B978
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_31 0xC4B97C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_32 0xC4B980
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_33 0xC4B984
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_34 0xC4B988
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_35 0xC4B98C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_36 0xC4B990
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_37 0xC4B994
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_38 0xC4B998
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_39 0xC4B99C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_40 0xC4B9A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_41 0xC4B9A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_42 0xC4B9A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_43 0xC4B9AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_44 0xC4B9B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_45 0xC4B9B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_46 0xC4B9B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_47 0xC4B9BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_48 0xC4B9C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_49 0xC4B9C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_50 0xC4B9C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_51 0xC4B9CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_52 0xC4B9D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_53 0xC4B9D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_54 0xC4B9D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_55 0xC4B9DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_56 0xC4B9E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_57 0xC4B9E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_58 0xC4B9E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_59 0xC4B9EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_60 0xC4B9F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_61 0xC4B9F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_62 0xC4B9F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_63 0xC4B9FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_64 0xC4BA00
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_65 0xC4BA04
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_66 0xC4BA08
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_67 0xC4BA0C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_68 0xC4BA10
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_69 0xC4BA14
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_70 0xC4BA18
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_71 0xC4BA1C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_72 0xC4BA20
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_73 0xC4BA24
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_74 0xC4BA28
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_75 0xC4BA2C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_76 0xC4BA30
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_77 0xC4BA34
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_78 0xC4BA38
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_79 0xC4BA3C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_80 0xC4BA40
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_81 0xC4BA44
+
+#endif /* ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
new file mode 100644
index 000000000..4789ebb9c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_MME_PLL_REGS_H_
+#define ASIC_REG_PSOC_MME_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_MME_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_MME_PLL_NR 0xC71100
+
+#define mmPSOC_MME_PLL_NF 0xC71104
+
+#define mmPSOC_MME_PLL_OD 0xC71108
+
+#define mmPSOC_MME_PLL_NB 0xC7110C
+
+#define mmPSOC_MME_PLL_CFG 0xC71110
+
+#define mmPSOC_MME_PLL_LOSE_MASK 0xC71120
+
+#define mmPSOC_MME_PLL_LOCK_INTR 0xC71128
+
+#define mmPSOC_MME_PLL_LOCK_BYPASS 0xC7112C
+
+#define mmPSOC_MME_PLL_DATA_CHNG 0xC71130
+
+#define mmPSOC_MME_PLL_RST 0xC71134
+
+#define mmPSOC_MME_PLL_SLIP_WD_CNTR 0xC71150
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_0 0xC71200
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_1 0xC71204
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_2 0xC71208
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_3 0xC7120C
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_0 0xC71220
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_1 0xC71224
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_2 0xC71228
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_3 0xC7122C
+
+#define mmPSOC_MME_PLL_DIV_SEL_0 0xC71280
+
+#define mmPSOC_MME_PLL_DIV_SEL_1 0xC71284
+
+#define mmPSOC_MME_PLL_DIV_SEL_2 0xC71288
+
+#define mmPSOC_MME_PLL_DIV_SEL_3 0xC7128C
+
+#define mmPSOC_MME_PLL_DIV_EN_0 0xC712A0
+
+#define mmPSOC_MME_PLL_DIV_EN_1 0xC712A4
+
+#define mmPSOC_MME_PLL_DIV_EN_2 0xC712A8
+
+#define mmPSOC_MME_PLL_DIV_EN_3 0xC712AC
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_0 0xC712C0
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_1 0xC712C4
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_2 0xC712C8
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_3 0xC712CC
+
+#define mmPSOC_MME_PLL_CLK_GATER 0xC71300
+
+#define mmPSOC_MME_PLL_CLK_RLX_0 0xC71310
+
+#define mmPSOC_MME_PLL_CLK_RLX_1 0xC71314
+
+#define mmPSOC_MME_PLL_CLK_RLX_2 0xC71318
+
+#define mmPSOC_MME_PLL_CLK_RLX_3 0xC7131C
+
+#define mmPSOC_MME_PLL_REF_CNTR_PERIOD 0xC71400
+
+#define mmPSOC_MME_PLL_REF_LOW_THRESHOLD 0xC71410
+
+#define mmPSOC_MME_PLL_REF_HIGH_THRESHOLD 0xC71420
+
+#define mmPSOC_MME_PLL_PLL_NOT_STABLE 0xC71430
+
+#define mmPSOC_MME_PLL_FREQ_CALC_EN 0xC71440
+
+#endif /* ASIC_REG_PSOC_MME_PLL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
new file mode 100644
index 000000000..27a296ea6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_PCI_PLL_REGS_H_
+#define ASIC_REG_PSOC_PCI_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_PCI_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_PCI_PLL_NR 0xC72100
+
+#define mmPSOC_PCI_PLL_NF 0xC72104
+
+#define mmPSOC_PCI_PLL_OD 0xC72108
+
+#define mmPSOC_PCI_PLL_NB 0xC7210C
+
+#define mmPSOC_PCI_PLL_CFG 0xC72110
+
+#define mmPSOC_PCI_PLL_LOSE_MASK 0xC72120
+
+#define mmPSOC_PCI_PLL_LOCK_INTR 0xC72128
+
+#define mmPSOC_PCI_PLL_LOCK_BYPASS 0xC7212C
+
+#define mmPSOC_PCI_PLL_DATA_CHNG 0xC72130
+
+#define mmPSOC_PCI_PLL_RST 0xC72134
+
+#define mmPSOC_PCI_PLL_SLIP_WD_CNTR 0xC72150
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_0 0xC72200
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_1 0xC72204
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_2 0xC72208
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_3 0xC7220C
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_0 0xC72220
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_1 0xC72224
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_2 0xC72228
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_3 0xC7222C
+
+#define mmPSOC_PCI_PLL_DIV_SEL_0 0xC72280
+
+#define mmPSOC_PCI_PLL_DIV_SEL_1 0xC72284
+
+#define mmPSOC_PCI_PLL_DIV_SEL_2 0xC72288
+
+#define mmPSOC_PCI_PLL_DIV_SEL_3 0xC7228C
+
+#define mmPSOC_PCI_PLL_DIV_EN_0 0xC722A0
+
+#define mmPSOC_PCI_PLL_DIV_EN_1 0xC722A4
+
+#define mmPSOC_PCI_PLL_DIV_EN_2 0xC722A8
+
+#define mmPSOC_PCI_PLL_DIV_EN_3 0xC722AC
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_0 0xC722C0
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_1 0xC722C4
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_2 0xC722C8
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_3 0xC722CC
+
+#define mmPSOC_PCI_PLL_CLK_GATER 0xC72300
+
+#define mmPSOC_PCI_PLL_CLK_RLX_0 0xC72310
+
+#define mmPSOC_PCI_PLL_CLK_RLX_1 0xC72314
+
+#define mmPSOC_PCI_PLL_CLK_RLX_2 0xC72318
+
+#define mmPSOC_PCI_PLL_CLK_RLX_3 0xC7231C
+
+#define mmPSOC_PCI_PLL_REF_CNTR_PERIOD 0xC72400
+
+#define mmPSOC_PCI_PLL_REF_LOW_THRESHOLD 0xC72410
+
+#define mmPSOC_PCI_PLL_REF_HIGH_THRESHOLD 0xC72420
+
+#define mmPSOC_PCI_PLL_PLL_NOT_STABLE 0xC72430
+
+#define mmPSOC_PCI_PLL_FREQ_CALC_EN 0xC72440
+
+#endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */
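The EMMC, MME and PCI PLL blocks share one register layout and differ only in base address (0xC70100, 0xC71100, 0xC72100). If NR, NF and OD carry their conventional PLL meanings (reference divider, feedback multiplier, output divider) — an assumption, since these headers define only offsets — the output frequency works out as f_out = f_ref * NF / (NR * OD):

/* Hedged sketch (needs <linux/math64.h>): conventional NR/NF/OD
 * interpretation assumed, not confirmed by these headers.
 */
static u64 pll_out_hz(u64 f_ref, u32 nr, u32 nf, u32 od)
{
	return div64_u64(f_ref * nf, (u64)nr * od);
}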
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PSOC_SPI_REGS_H_ +#define ASIC_REG_PSOC_SPI_REGS_H_ + +/* + ***************************************** + * PSOC_SPI (Prototype: SPI) + ***************************************** + */ + +#define mmPSOC_SPI_CTRLR0 0xC43000 + +#define mmPSOC_SPI_CTRLR1 0xC43004 + +#define mmPSOC_SPI_SSIENR 0xC43008 + +#define mmPSOC_SPI_MWCR 0xC4300C + +#define mmPSOC_SPI_SER 0xC43010 + +#define mmPSOC_SPI_BAUDR 0xC43014 + +#define mmPSOC_SPI_TXFTLR 0xC43018 + +#define mmPSOC_SPI_RXFTLR 0xC4301C + +#define mmPSOC_SPI_TXFLR 0xC43020 + +#define mmPSOC_SPI_RXFLR 0xC43024 + +#define mmPSOC_SPI_SR 0xC43028 + +#define mmPSOC_SPI_IMR 0xC4302C + +#define mmPSOC_SPI_ISR 0xC43030 + +#define mmPSOC_SPI_RISR 0xC43034 + +#define mmPSOC_SPI_TXOICR 0xC43038 + +#define mmPSOC_SPI_RXOICR 0xC4303C + +#define mmPSOC_SPI_RXUICR 0xC43040 + +#define mmPSOC_SPI_MSTICR 0xC43044 + +#define mmPSOC_SPI_ICR 0xC43048 + +#define mmPSOC_SPI_IDR 0xC43058 + +#define mmPSOC_SPI_SSI_VERSION_ID 0xC4305C + +#define mmPSOC_SPI_DR0 0xC43060 + +#define mmPSOC_SPI_DR1 0xC43064 + +#define mmPSOC_SPI_DR2 0xC43068 + +#define mmPSOC_SPI_DR3 0xC4306C + +#define mmPSOC_SPI_DR4 0xC43070 + +#define mmPSOC_SPI_DR5 0xC43074 + +#define mmPSOC_SPI_DR6 0xC43078 + +#define mmPSOC_SPI_DR7 0xC4307C + +#define mmPSOC_SPI_DR8 0xC43080 + +#define mmPSOC_SPI_DR9 0xC43084 + +#define mmPSOC_SPI_DR10 0xC43088 + +#define mmPSOC_SPI_DR11 0xC4308C + +#define mmPSOC_SPI_DR12 0xC43090 + +#define mmPSOC_SPI_DR13 0xC43094 + +#define mmPSOC_SPI_DR14 0xC43098 + +#define mmPSOC_SPI_DR15 0xC4309C + +#define mmPSOC_SPI_DR16 0xC430A0 + +#define mmPSOC_SPI_DR17 0xC430A4 + +#define mmPSOC_SPI_DR18 0xC430A8 + +#define mmPSOC_SPI_DR19 0xC430AC + +#define mmPSOC_SPI_DR20 0xC430B0 + +#define mmPSOC_SPI_DR21 0xC430B4 + +#define mmPSOC_SPI_DR22 0xC430B8 + +#define mmPSOC_SPI_DR23 0xC430BC + +#define mmPSOC_SPI_DR24 0xC430C0 + +#define mmPSOC_SPI_DR25 0xC430C4 + +#define mmPSOC_SPI_DR26 0xC430C8 + +#define mmPSOC_SPI_DR27 0xC430CC + +#define mmPSOC_SPI_DR28 0xC430D0 + +#define mmPSOC_SPI_DR29 0xC430D4 + +#define mmPSOC_SPI_DR30 0xC430D8 + +#define mmPSOC_SPI_DR31 0xC430DC + +#define mmPSOC_SPI_DR32 0xC430E0 + +#define mmPSOC_SPI_DR33 0xC430E4 + +#define mmPSOC_SPI_DR34 0xC430E8 + +#define mmPSOC_SPI_DR35 0xC430EC + +#define mmPSOC_SPI_RX_SAMPLE_DLY 0xC430F0 + +#define mmPSOC_SPI_RSVD_1 0xC430F8 + +#define mmPSOC_SPI_RSVD_2 0xC430FC + +#endif /* ASIC_REG_PSOC_SPI_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h new file mode 100644 index 000000000..9ce24597d --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PSOC_TIMESTAMP_REGS_H_ +#define ASIC_REG_PSOC_TIMESTAMP_REGS_H_ + +/* + ***************************************** + * PSOC_TIMESTAMP (Prototype: TIMESTAMP) + ***************************************** + */ + +#define mmPSOC_TIMESTAMP_CNTCR 0xC49000 + +#define mmPSOC_TIMESTAMP_CNTSR 0xC49004 + +#define mmPSOC_TIMESTAMP_CNTCVL 0xC49008 + +#define mmPSOC_TIMESTAMP_CNTCVU 0xC4900C + +#define mmPSOC_TIMESTAMP_CNTFID0 0xC49020 + +#define mmPSOC_TIMESTAMP_PIDR4 0xC49FD0 + +#define mmPSOC_TIMESTAMP_PIDR5 0xC49FD4 + +#define mmPSOC_TIMESTAMP_PIDR6 0xC49FD8 + +#define mmPSOC_TIMESTAMP_PIDR7 0xC49FDC + +#define mmPSOC_TIMESTAMP_PIDR0 0xC49FE0 + +#define mmPSOC_TIMESTAMP_PIDR1 0xC49FE4 + +#define mmPSOC_TIMESTAMP_PIDR2 0xC49FE8 + +#define mmPSOC_TIMESTAMP_PIDR3 0xC49FEC + +#define mmPSOC_TIMESTAMP_CIDR0 0xC49FF0 + +#define mmPSOC_TIMESTAMP_CIDR1 0xC49FF4 + +#define mmPSOC_TIMESTAMP_CIDR2 0xC49FF8 + +#define mmPSOC_TIMESTAMP_CIDR3 0xC49FFC + +#endif /* ASIC_REG_PSOC_TIMESTAMP_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h new file mode 100644 index 000000000..2ea1770b0 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_ +#define ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_ + +/* + ***************************************** + * SRAM_Y0_X0_RTR (Prototype: IC_RTR) + ***************************************** + */ + +#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_E_ARB 0x201100 + +#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_W_ARB 0x201104 + +#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB 0x201110 + +#define mmSRAM_Y0_X0_RTR_HBW_E_ARB_MAX 0x201120 + +#define mmSRAM_Y0_X0_RTR_HBW_W_ARB_MAX 0x201124 + +#define mmSRAM_Y0_X0_RTR_HBW_L_ARB_MAX 0x201130 + +#define mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB 0x201140 + +#define mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB 0x201144 + +#define mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB 0x201148 + +#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB 0x201160 + +#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB 0x201164 + +#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_L_ARB 0x201168 + +#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_E_ARB 0x201200 + +#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_W_ARB 0x201204 + +#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_L_ARB 0x201210 + +#define mmSRAM_Y0_X0_RTR_LBW_E_ARB_MAX 0x201220 + +#define mmSRAM_Y0_X0_RTR_LBW_W_ARB_MAX 0x201224 + +#define mmSRAM_Y0_X0_RTR_LBW_L_ARB_MAX 0x201230 + +#define mmSRAM_Y0_X0_RTR_LBW_DATA_E_ARB 0x201240 + +#define mmSRAM_Y0_X0_RTR_LBW_DATA_W_ARB 0x201244 + +#define mmSRAM_Y0_X0_RTR_LBW_DATA_L_ARB 0x201248 + +#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_E_ARB 0x201260 + +#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_W_ARB 0x201264 + +#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_L_ARB 0x201268 + +#define mmSRAM_Y0_X0_RTR_DBG_E_ARB 0x201300 + +#define mmSRAM_Y0_X0_RTR_DBG_W_ARB 0x201304 + +#define mmSRAM_Y0_X0_RTR_DBG_L_ARB 0x201310 + +#define mmSRAM_Y0_X0_RTR_DBG_E_ARB_MAX 0x201320 + +#define mmSRAM_Y0_X0_RTR_DBG_W_ARB_MAX 0x201324 + +#define mmSRAM_Y0_X0_RTR_DBG_L_ARB_MAX 0x201330 + +#endif /* ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_ */ diff 
--git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h new file mode 100644 index 000000000..37e0713ef --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_ +#define ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_ + +/* + ***************************************** + * SRAM_Y0_X1_RTR (Prototype: IC_RTR) + ***************************************** + */ + +#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_E_ARB 0x205100 + +#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_W_ARB 0x205104 + +#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB 0x205110 + +#define mmSRAM_Y0_X1_RTR_HBW_E_ARB_MAX 0x205120 + +#define mmSRAM_Y0_X1_RTR_HBW_W_ARB_MAX 0x205124 + +#define mmSRAM_Y0_X1_RTR_HBW_L_ARB_MAX 0x205130 + +#define mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB 0x205140 + +#define mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB 0x205144 + +#define mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB 0x205148 + +#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB 0x205160 + +#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB 0x205164 + +#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_L_ARB 0x205168 + +#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_E_ARB 0x205200 + +#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_W_ARB 0x205204 + +#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_L_ARB 0x205210 + +#define mmSRAM_Y0_X1_RTR_LBW_E_ARB_MAX 0x205220 + +#define mmSRAM_Y0_X1_RTR_LBW_W_ARB_MAX 0x205224 + +#define mmSRAM_Y0_X1_RTR_LBW_L_ARB_MAX 0x205230 + +#define mmSRAM_Y0_X1_RTR_LBW_DATA_E_ARB 0x205240 + +#define mmSRAM_Y0_X1_RTR_LBW_DATA_W_ARB 0x205244 + +#define mmSRAM_Y0_X1_RTR_LBW_DATA_L_ARB 0x205248 + +#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_E_ARB 0x205260 + +#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_W_ARB 0x205264 + +#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_L_ARB 0x205268 + +#define mmSRAM_Y0_X1_RTR_DBG_E_ARB 0x205300 + +#define mmSRAM_Y0_X1_RTR_DBG_W_ARB 0x205304 + +#define mmSRAM_Y0_X1_RTR_DBG_L_ARB 0x205310 + +#define mmSRAM_Y0_X1_RTR_DBG_E_ARB_MAX 0x205320 + +#define mmSRAM_Y0_X1_RTR_DBG_W_ARB_MAX 0x205324 + +#define mmSRAM_Y0_X1_RTR_DBG_L_ARB_MAX 0x205330 + +#endif /* ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h new file mode 100644 index 000000000..d2572279a --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_ +#define ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_ + +/* + ***************************************** + * SRAM_Y0_X2_RTR (Prototype: IC_RTR) + ***************************************** + */ + +#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_E_ARB 0x209100 + +#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_W_ARB 0x209104 + +#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB 0x209110 + +#define mmSRAM_Y0_X2_RTR_HBW_E_ARB_MAX 0x209120 + +#define mmSRAM_Y0_X2_RTR_HBW_W_ARB_MAX 0x209124 + +#define mmSRAM_Y0_X2_RTR_HBW_L_ARB_MAX 0x209130 + +#define mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB 0x209140 + +#define mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB 0x209144 + +#define mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB 0x209148 + +#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB 0x209160 + +#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB 0x209164 + +#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_L_ARB 0x209168 + +#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_E_ARB 0x209200 + +#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_W_ARB 0x209204 + +#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_L_ARB 0x209210 + +#define mmSRAM_Y0_X2_RTR_LBW_E_ARB_MAX 0x209220 + +#define mmSRAM_Y0_X2_RTR_LBW_W_ARB_MAX 0x209224 + +#define mmSRAM_Y0_X2_RTR_LBW_L_ARB_MAX 0x209230 + +#define mmSRAM_Y0_X2_RTR_LBW_DATA_E_ARB 0x209240 + +#define mmSRAM_Y0_X2_RTR_LBW_DATA_W_ARB 0x209244 + +#define mmSRAM_Y0_X2_RTR_LBW_DATA_L_ARB 0x209248 + +#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_E_ARB 0x209260 + +#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_W_ARB 0x209264 + +#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_L_ARB 0x209268 + +#define mmSRAM_Y0_X2_RTR_DBG_E_ARB 0x209300 + +#define mmSRAM_Y0_X2_RTR_DBG_W_ARB 0x209304 + +#define mmSRAM_Y0_X2_RTR_DBG_L_ARB 0x209310 + +#define mmSRAM_Y0_X2_RTR_DBG_E_ARB_MAX 0x209320 + +#define mmSRAM_Y0_X2_RTR_DBG_W_ARB_MAX 0x209324 + +#define mmSRAM_Y0_X2_RTR_DBG_L_ARB_MAX 0x209330 + +#endif /* ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h new file mode 100644 index 000000000..68c5b402c --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_ +#define ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_ + +/* + ***************************************** + * SRAM_Y0_X3_RTR (Prototype: IC_RTR) + ***************************************** + */ + +#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_E_ARB 0x20D100 + +#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_W_ARB 0x20D104 + +#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB 0x20D110 + +#define mmSRAM_Y0_X3_RTR_HBW_E_ARB_MAX 0x20D120 + +#define mmSRAM_Y0_X3_RTR_HBW_W_ARB_MAX 0x20D124 + +#define mmSRAM_Y0_X3_RTR_HBW_L_ARB_MAX 0x20D130 + +#define mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB 0x20D140 + +#define mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB 0x20D144 + +#define mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB 0x20D148 + +#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB 0x20D160 + +#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB 0x20D164 + +#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_L_ARB 0x20D168 + +#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_E_ARB 0x20D200 + +#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_W_ARB 0x20D204 + +#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_L_ARB 0x20D210 + +#define mmSRAM_Y0_X3_RTR_LBW_E_ARB_MAX 0x20D220 + +#define mmSRAM_Y0_X3_RTR_LBW_W_ARB_MAX 0x20D224 + +#define mmSRAM_Y0_X3_RTR_LBW_L_ARB_MAX 0x20D230 + +#define mmSRAM_Y0_X3_RTR_LBW_DATA_E_ARB 0x20D240 + +#define mmSRAM_Y0_X3_RTR_LBW_DATA_W_ARB 0x20D244 + +#define mmSRAM_Y0_X3_RTR_LBW_DATA_L_ARB 0x20D248 + +#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_E_ARB 0x20D260 + +#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_W_ARB 0x20D264 + +#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_L_ARB 0x20D268 + +#define mmSRAM_Y0_X3_RTR_DBG_E_ARB 0x20D300 + +#define mmSRAM_Y0_X3_RTR_DBG_W_ARB 0x20D304 + +#define mmSRAM_Y0_X3_RTR_DBG_L_ARB 0x20D310 + +#define mmSRAM_Y0_X3_RTR_DBG_E_ARB_MAX 0x20D320 + +#define mmSRAM_Y0_X3_RTR_DBG_W_ARB_MAX 0x20D324 + +#define mmSRAM_Y0_X3_RTR_DBG_L_ARB_MAX 0x20D330 + +#endif /* ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h new file mode 100644 index 000000000..a42f1ba06 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_ +#define ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_ + +/* + ***************************************** + * SRAM_Y0_X4_RTR (Prototype: IC_RTR) + ***************************************** + */ + +#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_E_ARB 0x211100 + +#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_W_ARB 0x211104 + +#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB 0x211110 + +#define mmSRAM_Y0_X4_RTR_HBW_E_ARB_MAX 0x211120 + +#define mmSRAM_Y0_X4_RTR_HBW_W_ARB_MAX 0x211124 + +#define mmSRAM_Y0_X4_RTR_HBW_L_ARB_MAX 0x211130 + +#define mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB 0x211140 + +#define mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB 0x211144 + +#define mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB 0x211148 + +#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB 0x211160 + +#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB 0x211164 + +#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_L_ARB 0x211168 + +#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_E_ARB 0x211200 + +#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_W_ARB 0x211204 + +#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_L_ARB 0x211210 + +#define mmSRAM_Y0_X4_RTR_LBW_E_ARB_MAX 0x211220 + +#define mmSRAM_Y0_X4_RTR_LBW_W_ARB_MAX 0x211224 + +#define mmSRAM_Y0_X4_RTR_LBW_L_ARB_MAX 0x211230 + +#define mmSRAM_Y0_X4_RTR_LBW_DATA_E_ARB 0x211240 + +#define mmSRAM_Y0_X4_RTR_LBW_DATA_W_ARB 0x211244 + +#define mmSRAM_Y0_X4_RTR_LBW_DATA_L_ARB 0x211248 + +#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_E_ARB 0x211260 + +#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_W_ARB 0x211264 + +#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_L_ARB 0x211268 + +#define mmSRAM_Y0_X4_RTR_DBG_E_ARB 0x211300 + +#define mmSRAM_Y0_X4_RTR_DBG_W_ARB 0x211304 + +#define mmSRAM_Y0_X4_RTR_DBG_L_ARB 0x211310 + +#define mmSRAM_Y0_X4_RTR_DBG_E_ARB_MAX 0x211320 + +#define mmSRAM_Y0_X4_RTR_DBG_W_ARB_MAX 0x211324 + +#define mmSRAM_Y0_X4_RTR_DBG_L_ARB_MAX 0x211330 + +#endif /* ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h new file mode 100644 index 000000000..94f2ed4a3 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_STLB_MASKS_H_ +#define ASIC_REG_STLB_MASKS_H_ + +/* + ***************************************** + * STLB (Prototype: STLB) + ***************************************** + */ + +/* STLB_CACHE_INV */ +#define STLB_CACHE_INV_PRODUCER_INDEX_SHIFT 0 +#define STLB_CACHE_INV_PRODUCER_INDEX_MASK 0xFF +#define STLB_CACHE_INV_INDEX_MASK_SHIFT 8 +#define STLB_CACHE_INV_INDEX_MASK_MASK 0xFF00 + +/* STLB_CACHE_INV_BASE_39_8 */ +#define STLB_CACHE_INV_BASE_39_8_PA_SHIFT 0 +#define STLB_CACHE_INV_BASE_39_8_PA_MASK 0xFFFFFFFF + +/* STLB_CACHE_INV_BASE_49_40 */ +#define STLB_CACHE_INV_BASE_49_40_PA_SHIFT 0 +#define STLB_CACHE_INV_BASE_49_40_PA_MASK 0x3FF + +/* STLB_STLB_FEATURE_EN */ +#define STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_SHIFT 0 +#define STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_MASK 0x1 +#define STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_SHIFT 1 +#define STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_MASK 0x2 +#define STLB_STLB_FEATURE_EN_LOOKUP_EN_SHIFT 2 +#define STLB_STLB_FEATURE_EN_LOOKUP_EN_MASK 0x4 +#define STLB_STLB_FEATURE_EN_BYPASS_SHIFT 3 +#define STLB_STLB_FEATURE_EN_BYPASS_MASK 0x8 +#define STLB_STLB_FEATURE_EN_BANK_STOP_SHIFT 4 +#define STLB_STLB_FEATURE_EN_BANK_STOP_MASK 0x10 +#define STLB_STLB_FEATURE_EN_TRACE_EN_SHIFT 5 +#define STLB_STLB_FEATURE_EN_TRACE_EN_MASK 0x20 +#define STLB_STLB_FEATURE_EN_FOLLOWER_EN_SHIFT 6 +#define STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK 0x40 +#define STLB_STLB_FEATURE_EN_CACHING_EN_SHIFT 7 +#define STLB_STLB_FEATURE_EN_CACHING_EN_MASK 0xF80 + +/* STLB_STLB_AXI_CACHE */ +#define STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_SHIFT 0 +#define STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_MASK 0xF +#define STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_SHIFT 4 +#define STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_MASK 0xF0 +#define STLB_STLB_AXI_CACHE_INV_ARCACHE_SHIFT 8 +#define STLB_STLB_AXI_CACHE_INV_ARCACHE_MASK 0xF00 + +/* STLB_HOP_CONFIGURATION */ +#define STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT 0 +#define STLB_HOP_CONFIGURATION_FIRST_HOP_MASK 0x7 +#define STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SHIFT 4 +#define STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_MASK 0x70 +#define STLB_HOP_CONFIGURATION_LAST_HOP_SHIFT 8 +#define STLB_HOP_CONFIGURATION_LAST_HOP_MASK 0x700 + +/* STLB_LINK_LIST_LOOKUP_MASK_49_32 */ +#define STLB_LINK_LIST_LOOKUP_MASK_49_32_R_SHIFT 0 +#define STLB_LINK_LIST_LOOKUP_MASK_49_32_R_MASK 0x3FFFF + +/* STLB_LINK_LIST_LOOKUP_MASK_31_0 */ +#define STLB_LINK_LIST_LOOKUP_MASK_31_0_R_SHIFT 0 +#define STLB_LINK_LIST_LOOKUP_MASK_31_0_R_MASK 0xFFFFFFFF + +/* STLB_LINK_LIST */ +#define STLB_LINK_LIST_CLEAR_SHIFT 0 +#define STLB_LINK_LIST_CLEAR_MASK 0x1 +#define STLB_LINK_LIST_EN_SHIFT 1 +#define STLB_LINK_LIST_EN_MASK 0x2 + +/* STLB_INV_ALL_START */ +#define STLB_INV_ALL_START_R_SHIFT 0 +#define STLB_INV_ALL_START_R_MASK 0x1 + +/* STLB_INV_ALL_SET */ +#define STLB_INV_ALL_SET_R_SHIFT 0 +#define STLB_INV_ALL_SET_R_MASK 0xFF + +/* STLB_INV_PS */ +#define STLB_INV_PS_R_SHIFT 0 +#define STLB_INV_PS_R_MASK 0x3 + +/* STLB_INV_CONSUMER_INDEX */ +#define STLB_INV_CONSUMER_INDEX_R_SHIFT 0 +#define STLB_INV_CONSUMER_INDEX_R_MASK 0xFF + +/* STLB_INV_HIT_COUNT */ +#define STLB_INV_HIT_COUNT_R_SHIFT 0 +#define STLB_INV_HIT_COUNT_R_MASK 0x7FF + +/* STLB_INV_SET */ +#define STLB_INV_SET_R_SHIFT 0 +#define STLB_INV_SET_R_MASK 0xFF + +/* STLB_SRAM_INIT */ +#define STLB_SRAM_INIT_BUSY_TAG_SHIFT 0 +#define 
STLB_SRAM_INIT_BUSY_TAG_MASK 0x3 +#define STLB_SRAM_INIT_BUSY_SLICE_SHIFT 2 +#define STLB_SRAM_INIT_BUSY_SLICE_MASK 0xC +#define STLB_SRAM_INIT_BUSY_DATA_SHIFT 4 +#define STLB_SRAM_INIT_BUSY_DATA_MASK 0x10 + +#endif /* ASIC_REG_STLB_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h new file mode 100644 index 000000000..35013f65a --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_STLB_REGS_H_ +#define ASIC_REG_STLB_REGS_H_ + +/* + ***************************************** + * STLB (Prototype: STLB) + ***************************************** + */ + +#define mmSTLB_CACHE_INV 0x490010 + +#define mmSTLB_CACHE_INV_BASE_39_8 0x490014 + +#define mmSTLB_CACHE_INV_BASE_49_40 0x490018 + +#define mmSTLB_STLB_FEATURE_EN 0x49001C + +#define mmSTLB_STLB_AXI_CACHE 0x490020 + +#define mmSTLB_HOP_CONFIGURATION 0x490024 + +#define mmSTLB_LINK_LIST_LOOKUP_MASK_49_32 0x490028 + +#define mmSTLB_LINK_LIST_LOOKUP_MASK_31_0 0x49002C + +#define mmSTLB_LINK_LIST 0x490030 + +#define mmSTLB_INV_ALL_START 0x490034 + +#define mmSTLB_INV_ALL_SET 0x490038 + +#define mmSTLB_INV_PS 0x49003C + +#define mmSTLB_INV_CONSUMER_INDEX 0x490040 + +#define mmSTLB_INV_HIT_COUNT 0x490044 + +#define mmSTLB_INV_SET 0x490048 + +#define mmSTLB_SRAM_INIT 0x49004C + +#endif /* ASIC_REG_STLB_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h new file mode 100644 index 000000000..89c9507a5 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h @@ -0,0 +1,1606 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_CFG_MASKS_H_ +#define ASIC_REG_TPC0_CFG_MASKS_H_ + +/* + ***************************************** + * TPC0_CFG (Prototype: TPC) + ***************************************** + */ + +/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_SHIFT 0 +#define 
TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_SHIFT 0 
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE */ +#define 
TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE */ 
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* 
TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET_V_MASK 
0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define 
TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET_V_SHIFT 0 
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW */ +#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH */ +#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_0 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_0 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_1 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_1 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_2 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_2 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_3 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_3 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_4 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_4 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_SRF */ +#define TPC0_CFG_KERNEL_SRF_V_SHIFT 0 +#define TPC0_CFG_KERNEL_SRF_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_KERNEL_CONFIG */ +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_SHIFT 0 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_MASK 0x1 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 8 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0x3F00 + +/* TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE */ +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16 +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000 +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31 +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000 + +/* TPC0_CFG_RESERVED_DESC_END */ +#define TPC0_CFG_RESERVED_DESC_END_V_SHIFT 0 +#define TPC0_CFG_RESERVED_DESC_END_V_MASK 0xFFFFFFFF + 
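The _SHIFT/_MASK pairs throughout these generated headers follow the standard register-field convention: the mask isolates a field's bits within the 32-bit register and the shift gives the field's bit offset. A minimal C sketch of the usual extract and read-modify-write pattern, using the TPC0_CFG_KERNEL_KERNEL_CONFIG fields defined above (field_get, field_set and kernel_config_enable_small_vlm are illustrative local helpers, not part of the habanalabs driver):

#include <stdint.h>

/* Illustrative helpers (not from the habanalabs driver): extract a
 * field from, or insert a field into, a 32-bit register value using
 * the _SHIFT/_MASK pairs defined in this header. */
static inline uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static inline uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: given a value read from mmTPC0_CFG_KERNEL_KERNEL_CONFIG,
 * report NUM_VALID_SRFS and enable SMALL_VLM without disturbing the
 * other bits. */
static uint32_t kernel_config_enable_small_vlm(uint32_t cfg, uint32_t *srfs)
{
	*srfs = field_get(cfg,
			  TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_MASK,
			  TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT);
	return field_set(cfg,
			 TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_MASK,
			 TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_SHIFT, 1);
}

The same pattern applies to every _SHIFT/_MASK pair in these files; the mm* macros alongside them are byte offsets into the device's register address map, intended for use with the driver's MMIO accessors.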
+/* TPC0_CFG_ROUND_CSR */ +#define TPC0_CFG_ROUND_CSR_MODE_SHIFT 0 +#define TPC0_CFG_ROUND_CSR_MODE_MASK 0x7 + +/* TPC0_CFG_TBUF_BASE_ADDR_LOW */ +#define TPC0_CFG_TBUF_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_TBUF_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_TBUF_BASE_ADDR_HIGH */ +#define TPC0_CFG_TBUF_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_TBUF_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_SEMAPHORE */ +#define TPC0_CFG_SEMAPHORE_V_SHIFT 0 +#define TPC0_CFG_SEMAPHORE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_VFLAGS */ +#define TPC0_CFG_VFLAGS_V_SHIFT 0 +#define TPC0_CFG_VFLAGS_V_MASK 0xF + +/* TPC0_CFG_SFLAGS */ +#define TPC0_CFG_SFLAGS_V_SHIFT 0 +#define TPC0_CFG_SFLAGS_V_MASK 0xF + +/* TPC0_CFG_LFSR_POLYNOM */ +#define TPC0_CFG_LFSR_POLYNOM_V_SHIFT 0 +#define TPC0_CFG_LFSR_POLYNOM_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_STATUS */ +#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_SHIFT 1 +#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK 0x2 +#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_SHIFT 2 +#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK 0x4 +#define TPC0_CFG_STATUS_IQ_EMPTY_SHIFT 3 +#define TPC0_CFG_STATUS_IQ_EMPTY_MASK 0x8 +#define TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_SHIFT 4 +#define TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_MASK 0x10 + +/* TPC0_CFG_CFG_BASE_ADDRESS_HIGH */ +#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_CFG_SUBTRACT_VALUE */ +#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_SHIFT 0 +#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_SM_BASE_ADDRESS_LOW */ +#define TPC0_CFG_SM_BASE_ADDRESS_LOW_V_SHIFT 0 +#define TPC0_CFG_SM_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_SM_BASE_ADDRESS_HIGH */ +#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_TPC_CMD */ +#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT 0 +#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_MASK 0x1 +#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_SHIFT 1 +#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_MASK 0x2 +#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_SHIFT 2 +#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_MASK 0x4 +#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_SHIFT 3 +#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_MASK 0x8 +#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT 4 +#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_MASK 0x10 +#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_SHIFT 5 +#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_MASK 0x20 +#define TPC0_CFG_TPC_CMD_QMAN_STOP_SHIFT 6 +#define TPC0_CFG_TPC_CMD_QMAN_STOP_MASK 0x40 + +/* TPC0_CFG_TPC_EXECUTE */ +#define TPC0_CFG_TPC_EXECUTE_V_SHIFT 0 +#define TPC0_CFG_TPC_EXECUTE_V_MASK 0x1 + +/* TPC0_CFG_TPC_STALL */ +#define TPC0_CFG_TPC_STALL_V_SHIFT 0 +#define TPC0_CFG_TPC_STALL_V_MASK 0x1 + +/* TPC0_CFG_ICACHE_BASE_ADDERESS_LOW */ +#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_SHIFT 0 +#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH */ +#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_MSS_CONFIG */ +#define TPC0_CFG_MSS_CONFIG_AWCACHE_SHIFT 0 +#define TPC0_CFG_MSS_CONFIG_AWCACHE_MASK 0xF +#define TPC0_CFG_MSS_CONFIG_ARCACHE_SHIFT 4 +#define TPC0_CFG_MSS_CONFIG_ARCACHE_MASK 0xF0 +#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_SHIFT 8 +#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_MASK 0x300 +#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_SHIFT 10 +#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_MASK 
0x400 + +/* TPC0_CFG_TPC_INTR_CAUSE */ +#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_SHIFT 0 +#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK 0xFFFFFFFF + +/* TPC0_CFG_TPC_INTR_MASK */ +#define TPC0_CFG_TPC_INTR_MASK_MASK_SHIFT 0 +#define TPC0_CFG_TPC_INTR_MASK_MASK_MASK 0xFFFFFFFF + +/* TPC0_CFG_TSB_CONFIG */ +#define TPC0_CFG_TSB_CONFIG_TSB_AGU_MAX_CREDIT_SHIFT 0 +#define TPC0_CFG_TSB_CONFIG_TSB_AGU_MAX_CREDIT_MASK 0x1F +#define TPC0_CFG_TSB_CONFIG_TSB_EU_MAX_CREDIT_SHIFT 5 +#define TPC0_CFG_TSB_CONFIG_TSB_EU_MAX_CREDIT_MASK 0x3E0 +#define TPC0_CFG_TSB_CONFIG_MAX_OUTSTANDING_SHIFT 10 +#define TPC0_CFG_TSB_CONFIG_MAX_OUTSTANDING_MASK 0xFFC00 +#define TPC0_CFG_TSB_CONFIG_MAX_SIZE_SHIFT 20 +#define TPC0_CFG_TSB_CONFIG_MAX_SIZE_MASK 0x3FF00000 + +/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define 
TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* 
TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET */ +#define 
TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0 +#define 
TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* 
TPC0_CFG_QM_TENSOR_5_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG */ +#define 
TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define 
TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW */ +#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH */ +#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_0 */ +#define TPC0_CFG_QM_TID_BASE_DIM_0_V_SHIFT 0 +#define TPC0_CFG_QM_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_0 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_1 */ +#define TPC0_CFG_QM_TID_BASE_DIM_1_V_SHIFT 0 +#define TPC0_CFG_QM_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_1 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_2 */ +#define TPC0_CFG_QM_TID_BASE_DIM_2_V_SHIFT 0 +#define 
TPC0_CFG_QM_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_2 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_3 */ +#define TPC0_CFG_QM_TID_BASE_DIM_3_V_SHIFT 0 +#define TPC0_CFG_QM_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_3 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_4 */ +#define TPC0_CFG_QM_TID_BASE_DIM_4_V_SHIFT 0 +#define TPC0_CFG_QM_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_4 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_SRF */ +#define TPC0_CFG_QM_SRF_V_SHIFT 0 +#define TPC0_CFG_QM_SRF_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_KERNEL_CONFIG */ +#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_SHIFT 0 +#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_MASK 0x1 +#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1 +#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2 +#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 8 +#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0x3F00 + +/* TPC0_CFG_QM_SYNC_OBJECT_MESSAGE */ +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16 +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000 +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31 +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000 + +/* TPC0_CFG_ARUSER */ +#define TPC0_CFG_ARUSER_ASID_SHIFT 0 +#define TPC0_CFG_ARUSER_ASID_MASK 0x3FF +#define TPC0_CFG_ARUSER_MMBP_SHIFT 10 +#define TPC0_CFG_ARUSER_MMBP_MASK 0x400 +#define TPC0_CFG_ARUSER_V_SHIFT 11 +#define TPC0_CFG_ARUSER_V_MASK 0xFFFFF800 + +/* TPC0_CFG_AWUSER */ +#define TPC0_CFG_AWUSER_ASID_SHIFT 0 +#define TPC0_CFG_AWUSER_ASID_MASK 0x3FF +#define TPC0_CFG_AWUSER_MMBP_SHIFT 10 +#define TPC0_CFG_AWUSER_MMBP_MASK 0x400 +#define TPC0_CFG_AWUSER_V_SHIFT 11 +#define TPC0_CFG_AWUSER_V_MASK 0xFFFFF800 + +/* TPC0_CFG_FUNC_MBIST_CNTRL */ +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT 0 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_MASK 0x1 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_SHIFT 1 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK 0x2 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_SHIFT 2 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK 0x4 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_SHIFT 16 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_MASK 0x3FF0000 + +/* TPC0_CFG_FUNC_MBIST_PAT */ +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_SHIFT 0 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_MASK 0x3 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_SHIFT 2 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_MASK 0xC +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_SHIFT 4 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_MASK 0x30 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_SHIFT 6 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_MASK 0xC0 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_SHIFT 8 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_MASK 0x300 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_SHIFT 10 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_MASK 0xC00 + +/* TPC0_CFG_FUNC_MBIST_MEM */ +#define TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_SHIFT 0 +#define 
TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_MASK 0x7FF +#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_SHIFT 12 +#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_MASK 0x7000 +#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_SHIFT 16 +#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_MASK 0x7FF0000 +#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_SHIFT 28 +#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_MASK 0x70000000 + +#endif /* ASIC_REG_TPC0_CFG_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h new file mode 100644 index 000000000..7d71c4b73 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h @@ -0,0 +1,886 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_CFG_REGS_H_ +#define ASIC_REG_TPC0_CFG_REGS_H_ + +/* + ***************************************** + * TPC0_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE06400 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE06404 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE06408 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE0640C + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE06410 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE06414 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE06418 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE0641C + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE06420 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE06424 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE06428 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE0642C + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE06430 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE06434 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE06438 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE0643C + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE06440 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE06444 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE06448 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE0644C + +#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE06450 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE06454 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE06458 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE0645C + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE06460 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE06464 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE06468 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE0646C + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE06470 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE06474 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE06478 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE0647C + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE06480 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE06484 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE06488 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE0648C + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE06490 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE06494 + +#define 
mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE06498 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE0649C + +#define mmTPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE064A0 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE064A4 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE064A8 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE064AC + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE064B0 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE064B4 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE064B8 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE064BC + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE064C0 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE064C4 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE064C8 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE064CC + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE064D0 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE064D4 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE064D8 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE064DC + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE064E0 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE064E4 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE064E8 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE064EC + +#define mmTPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE064F0 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE064F4 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE064F8 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE064FC + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE06500 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE06504 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE06508 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE0650C + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE06510 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE06514 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE06518 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE0651C + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE06520 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE06524 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE06528 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE0652C + +#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE06530 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE06534 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE06538 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE0653C + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE06540 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE06544 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE06548 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE0654C + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE06550 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE06554 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE06558 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE0655C + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE06560 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE06564 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE06568 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE0656C + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE06570 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE06574 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE06578 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE0657C + +#define 
mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE06580 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE06584 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE06588 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE0658C + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE06590 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE06594 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE06598 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE0659C + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE065A0 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE065A4 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE065A8 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE065AC + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE065B0 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE065B4 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE065B8 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE065BC + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE065C0 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE065C4 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE065C8 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE065CC + +#define mmTPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE065D0 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE065D4 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE065D8 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE065DC + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE065E0 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE065E4 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE065E8 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE065EC + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE065F0 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE065F4 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE065F8 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE065FC + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE06600 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE06604 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE06608 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE0660C + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE06610 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE06614 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE06618 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE0661C + +#define mmTPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE06620 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE06624 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE06628 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE0662C + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE06630 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE06634 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE06638 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE0663C + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE06640 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE06644 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE06648 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE0664C + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE06650 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE06654 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE06658 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE0665C + +#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE06660 + +#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE06664 + +#define 
mmTPC0_CFG_KERNEL_TID_BASE_DIM_0 0xE06668 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_0 0xE0666C + +#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_1 0xE06670 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_1 0xE06674 + +#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_2 0xE06678 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_2 0xE0667C + +#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_3 0xE06680 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_3 0xE06684 + +#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_4 0xE06688 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_4 0xE0668C + +#define mmTPC0_CFG_KERNEL_SRF_0 0xE06690 + +#define mmTPC0_CFG_KERNEL_SRF_1 0xE06694 + +#define mmTPC0_CFG_KERNEL_SRF_2 0xE06698 + +#define mmTPC0_CFG_KERNEL_SRF_3 0xE0669C + +#define mmTPC0_CFG_KERNEL_SRF_4 0xE066A0 + +#define mmTPC0_CFG_KERNEL_SRF_5 0xE066A4 + +#define mmTPC0_CFG_KERNEL_SRF_6 0xE066A8 + +#define mmTPC0_CFG_KERNEL_SRF_7 0xE066AC + +#define mmTPC0_CFG_KERNEL_SRF_8 0xE066B0 + +#define mmTPC0_CFG_KERNEL_SRF_9 0xE066B4 + +#define mmTPC0_CFG_KERNEL_SRF_10 0xE066B8 + +#define mmTPC0_CFG_KERNEL_SRF_11 0xE066BC + +#define mmTPC0_CFG_KERNEL_SRF_12 0xE066C0 + +#define mmTPC0_CFG_KERNEL_SRF_13 0xE066C4 + +#define mmTPC0_CFG_KERNEL_SRF_14 0xE066C8 + +#define mmTPC0_CFG_KERNEL_SRF_15 0xE066CC + +#define mmTPC0_CFG_KERNEL_SRF_16 0xE066D0 + +#define mmTPC0_CFG_KERNEL_SRF_17 0xE066D4 + +#define mmTPC0_CFG_KERNEL_SRF_18 0xE066D8 + +#define mmTPC0_CFG_KERNEL_SRF_19 0xE066DC + +#define mmTPC0_CFG_KERNEL_SRF_20 0xE066E0 + +#define mmTPC0_CFG_KERNEL_SRF_21 0xE066E4 + +#define mmTPC0_CFG_KERNEL_SRF_22 0xE066E8 + +#define mmTPC0_CFG_KERNEL_SRF_23 0xE066EC + +#define mmTPC0_CFG_KERNEL_SRF_24 0xE066F0 + +#define mmTPC0_CFG_KERNEL_SRF_25 0xE066F4 + +#define mmTPC0_CFG_KERNEL_SRF_26 0xE066F8 + +#define mmTPC0_CFG_KERNEL_SRF_27 0xE066FC + +#define mmTPC0_CFG_KERNEL_SRF_28 0xE06700 + +#define mmTPC0_CFG_KERNEL_SRF_29 0xE06704 + +#define mmTPC0_CFG_KERNEL_SRF_30 0xE06708 + +#define mmTPC0_CFG_KERNEL_SRF_31 0xE0670C + +#define mmTPC0_CFG_KERNEL_KERNEL_CONFIG 0xE06710 + +#define mmTPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE06714 + +#define mmTPC0_CFG_RESERVED_DESC_END 0xE06738 + +#define mmTPC0_CFG_ROUND_CSR 0xE067FC + +#define mmTPC0_CFG_TBUF_BASE_ADDR_LOW 0xE06800 + +#define mmTPC0_CFG_TBUF_BASE_ADDR_HIGH 0xE06804 + +#define mmTPC0_CFG_SEMAPHORE 0xE06808 + +#define mmTPC0_CFG_VFLAGS 0xE0680C + +#define mmTPC0_CFG_SFLAGS 0xE06810 + +#define mmTPC0_CFG_LFSR_POLYNOM 0xE06818 + +#define mmTPC0_CFG_STATUS 0xE0681C + +#define mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH 0xE06820 + +#define mmTPC0_CFG_CFG_SUBTRACT_VALUE 0xE06824 + +#define mmTPC0_CFG_SM_BASE_ADDRESS_LOW 0xE06828 + +#define mmTPC0_CFG_SM_BASE_ADDRESS_HIGH 0xE0682C + +#define mmTPC0_CFG_TPC_CMD 0xE06830 + +#define mmTPC0_CFG_TPC_EXECUTE 0xE06838 + +#define mmTPC0_CFG_TPC_STALL 0xE0683C + +#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_LOW 0xE06840 + +#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE06844 + +#define mmTPC0_CFG_MSS_CONFIG 0xE06854 + +#define mmTPC0_CFG_TPC_INTR_CAUSE 0xE06858 + +#define mmTPC0_CFG_TPC_INTR_MASK 0xE0685C + +#define mmTPC0_CFG_TSB_CONFIG 0xE06860 + +#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE06A00 + +#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE06A04 + +#define mmTPC0_CFG_QM_TENSOR_0_PADDING_VALUE 0xE06A08 + +#define mmTPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE06A0C + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE06A10 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE06A14 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE06A18 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE06A1C + +#define 
mmTPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE06A20 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE06A24 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE06A28 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE06A2C + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE06A30 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE06A34 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE06A38 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE06A3C + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE06A40 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE06A44 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE06A48 + +#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE06A4C + +#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE06A50 + +#define mmTPC0_CFG_QM_TENSOR_1_PADDING_VALUE 0xE06A54 + +#define mmTPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE06A58 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE06A5C + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE06A60 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE06A64 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE06A68 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE06A6C + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE06A70 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE06A74 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE06A78 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE06A7C + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE06A80 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE06A84 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE06A88 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE06A8C + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE06A90 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE06A94 + +#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE06A98 + +#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE06A9C + +#define mmTPC0_CFG_QM_TENSOR_2_PADDING_VALUE 0xE06AA0 + +#define mmTPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE06AA4 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE06AA8 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE06AAC + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE06AB0 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE06AB4 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE06AB8 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE06ABC + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE06AC0 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE06AC4 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE06AC8 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE06ACC + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE06AD0 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE06AD4 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE06AD8 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE06ADC + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE06AE0 + +#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE06AE4 + +#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE06AE8 + +#define mmTPC0_CFG_QM_TENSOR_3_PADDING_VALUE 0xE06AEC + +#define mmTPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE06AF0 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE06AF4 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE06AF8 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE06AFC + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE06B00 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE06B04 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE06B08 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE06B0C + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE06B10 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE06B14 + +#define 
mmTPC0_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE06B18 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE06B1C + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE06B20 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE06B24 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE06B28 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE06B2C + +#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE06B30 + +#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE06B34 + +#define mmTPC0_CFG_QM_TENSOR_4_PADDING_VALUE 0xE06B38 + +#define mmTPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE06B3C + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE06B40 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE06B44 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE06B48 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE06B4C + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE06B50 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE06B54 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE06B58 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE06B5C + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE06B60 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE06B64 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE06B68 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE06B6C + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE06B70 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE06B74 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE06B78 + +#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE06B7C + +#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE06B80 + +#define mmTPC0_CFG_QM_TENSOR_5_PADDING_VALUE 0xE06B84 + +#define mmTPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE06B88 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE06B8C + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE06B90 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE06B94 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE06B98 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE06B9C + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE06BA0 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE06BA4 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE06BA8 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE06BAC + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE06BB0 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE06BB4 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE06BB8 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE06BBC + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE06BC0 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE06BC4 + +#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE06BC8 + +#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE06BCC + +#define mmTPC0_CFG_QM_TENSOR_6_PADDING_VALUE 0xE06BD0 + +#define mmTPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE06BD4 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE06BD8 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE06BDC + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE06BE0 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE06BE4 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE06BE8 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE06BEC + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE06BF0 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE06BF4 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE06BF8 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE06BFC + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE06C00 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE06C04 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE06C08 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE06C0C + +#define 
mmTPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE06C10 + +#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE06C14 + +#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE06C18 + +#define mmTPC0_CFG_QM_TENSOR_7_PADDING_VALUE 0xE06C1C + +#define mmTPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE06C20 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE06C24 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE06C28 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE06C2C + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE06C30 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE06C34 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE06C38 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE06C3C + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE06C40 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE06C44 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE06C48 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE06C4C + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE06C50 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE06C54 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE06C58 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE06C5C + +#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE06C60 + +#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE06C64 + +#define mmTPC0_CFG_QM_TID_BASE_DIM_0 0xE06C68 + +#define mmTPC0_CFG_QM_TID_SIZE_DIM_0 0xE06C6C + +#define mmTPC0_CFG_QM_TID_BASE_DIM_1 0xE06C70 + +#define mmTPC0_CFG_QM_TID_SIZE_DIM_1 0xE06C74 + +#define mmTPC0_CFG_QM_TID_BASE_DIM_2 0xE06C78 + +#define mmTPC0_CFG_QM_TID_SIZE_DIM_2 0xE06C7C + +#define mmTPC0_CFG_QM_TID_BASE_DIM_3 0xE06C80 + +#define mmTPC0_CFG_QM_TID_SIZE_DIM_3 0xE06C84 + +#define mmTPC0_CFG_QM_TID_BASE_DIM_4 0xE06C88 + +#define mmTPC0_CFG_QM_TID_SIZE_DIM_4 0xE06C8C + +#define mmTPC0_CFG_QM_SRF_0 0xE06C90 + +#define mmTPC0_CFG_QM_SRF_1 0xE06C94 + +#define mmTPC0_CFG_QM_SRF_2 0xE06C98 + +#define mmTPC0_CFG_QM_SRF_3 0xE06C9C + +#define mmTPC0_CFG_QM_SRF_4 0xE06CA0 + +#define mmTPC0_CFG_QM_SRF_5 0xE06CA4 + +#define mmTPC0_CFG_QM_SRF_6 0xE06CA8 + +#define mmTPC0_CFG_QM_SRF_7 0xE06CAC + +#define mmTPC0_CFG_QM_SRF_8 0xE06CB0 + +#define mmTPC0_CFG_QM_SRF_9 0xE06CB4 + +#define mmTPC0_CFG_QM_SRF_10 0xE06CB8 + +#define mmTPC0_CFG_QM_SRF_11 0xE06CBC + +#define mmTPC0_CFG_QM_SRF_12 0xE06CC0 + +#define mmTPC0_CFG_QM_SRF_13 0xE06CC4 + +#define mmTPC0_CFG_QM_SRF_14 0xE06CC8 + +#define mmTPC0_CFG_QM_SRF_15 0xE06CCC + +#define mmTPC0_CFG_QM_SRF_16 0xE06CD0 + +#define mmTPC0_CFG_QM_SRF_17 0xE06CD4 + +#define mmTPC0_CFG_QM_SRF_18 0xE06CD8 + +#define mmTPC0_CFG_QM_SRF_19 0xE06CDC + +#define mmTPC0_CFG_QM_SRF_20 0xE06CE0 + +#define mmTPC0_CFG_QM_SRF_21 0xE06CE4 + +#define mmTPC0_CFG_QM_SRF_22 0xE06CE8 + +#define mmTPC0_CFG_QM_SRF_23 0xE06CEC + +#define mmTPC0_CFG_QM_SRF_24 0xE06CF0 + +#define mmTPC0_CFG_QM_SRF_25 0xE06CF4 + +#define mmTPC0_CFG_QM_SRF_26 0xE06CF8 + +#define mmTPC0_CFG_QM_SRF_27 0xE06CFC + +#define mmTPC0_CFG_QM_SRF_28 0xE06D00 + +#define mmTPC0_CFG_QM_SRF_29 0xE06D04 + +#define mmTPC0_CFG_QM_SRF_30 0xE06D08 + +#define mmTPC0_CFG_QM_SRF_31 0xE06D0C + +#define mmTPC0_CFG_QM_KERNEL_CONFIG 0xE06D10 + +#define mmTPC0_CFG_QM_SYNC_OBJECT_MESSAGE 0xE06D14 + +#define mmTPC0_CFG_ARUSER 0xE06D18 + +#define mmTPC0_CFG_AWUSER 0xE06D1C + +#define mmTPC0_CFG_FUNC_MBIST_CNTRL 0xE06E00 + +#define mmTPC0_CFG_FUNC_MBIST_PAT 0xE06E04 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_0 0xE06E08 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_1 0xE06E0C + +#define mmTPC0_CFG_FUNC_MBIST_MEM_2 0xE06E10 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_3 0xE06E14 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_4 0xE06E18 
+ +#define mmTPC0_CFG_FUNC_MBIST_MEM_5 0xE06E1C + +#define mmTPC0_CFG_FUNC_MBIST_MEM_6 0xE06E20 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_7 0xE06E24 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_8 0xE06E28 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_9 0xE06E2C + +#endif /* ASIC_REG_TPC0_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h new file mode 100644 index 000000000..9395f2458 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h @@ -0,0 +1,372 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_CMDQ_MASKS_H_ +#define ASIC_REG_TPC0_CMDQ_MASKS_H_ + +/* + ***************************************** + * TPC0_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +/* TPC0_CMDQ_GLBL_CFG0 */ +#define TPC0_CMDQ_GLBL_CFG0_PQF_EN_SHIFT 0 +#define TPC0_CMDQ_GLBL_CFG0_PQF_EN_MASK 0x1 +#define TPC0_CMDQ_GLBL_CFG0_CQF_EN_SHIFT 1 +#define TPC0_CMDQ_GLBL_CFG0_CQF_EN_MASK 0x2 +#define TPC0_CMDQ_GLBL_CFG0_CP_EN_SHIFT 2 +#define TPC0_CMDQ_GLBL_CFG0_CP_EN_MASK 0x4 +#define TPC0_CMDQ_GLBL_CFG0_DMA_EN_SHIFT 3 +#define TPC0_CMDQ_GLBL_CFG0_DMA_EN_MASK 0x8 + +/* TPC0_CMDQ_GLBL_CFG1 */ +#define TPC0_CMDQ_GLBL_CFG1_PQF_STOP_SHIFT 0 +#define TPC0_CMDQ_GLBL_CFG1_PQF_STOP_MASK 0x1 +#define TPC0_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT 1 +#define TPC0_CMDQ_GLBL_CFG1_CQF_STOP_MASK 0x2 +#define TPC0_CMDQ_GLBL_CFG1_CP_STOP_SHIFT 2 +#define TPC0_CMDQ_GLBL_CFG1_CP_STOP_MASK 0x4 +#define TPC0_CMDQ_GLBL_CFG1_DMA_STOP_SHIFT 3 +#define TPC0_CMDQ_GLBL_CFG1_DMA_STOP_MASK 0x8 +#define TPC0_CMDQ_GLBL_CFG1_PQF_FLUSH_SHIFT 8 +#define TPC0_CMDQ_GLBL_CFG1_PQF_FLUSH_MASK 0x100 +#define TPC0_CMDQ_GLBL_CFG1_CQF_FLUSH_SHIFT 9 +#define TPC0_CMDQ_GLBL_CFG1_CQF_FLUSH_MASK 0x200 +#define TPC0_CMDQ_GLBL_CFG1_CP_FLUSH_SHIFT 10 +#define TPC0_CMDQ_GLBL_CFG1_CP_FLUSH_MASK 0x400 +#define TPC0_CMDQ_GLBL_CFG1_DMA_FLUSH_SHIFT 11 +#define TPC0_CMDQ_GLBL_CFG1_DMA_FLUSH_MASK 0x800 + +/* TPC0_CMDQ_GLBL_PROT */ +#define TPC0_CMDQ_GLBL_PROT_PQF_PROT_SHIFT 0 +#define TPC0_CMDQ_GLBL_PROT_PQF_PROT_MASK 0x1 +#define TPC0_CMDQ_GLBL_PROT_CQF_PROT_SHIFT 1 +#define TPC0_CMDQ_GLBL_PROT_CQF_PROT_MASK 0x2 +#define TPC0_CMDQ_GLBL_PROT_CP_PROT_SHIFT 2 +#define TPC0_CMDQ_GLBL_PROT_CP_PROT_MASK 0x4 +#define TPC0_CMDQ_GLBL_PROT_DMA_PROT_SHIFT 3 +#define TPC0_CMDQ_GLBL_PROT_DMA_PROT_MASK 0x8 +#define TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT 4 +#define TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_MASK 0x10 +#define TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT 5 +#define TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_MASK 0x20 +#define TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT 6 +#define TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_MASK 0x40 +#define TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT 7 +#define TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_MASK 0x80 + +/* TPC0_CMDQ_GLBL_ERR_CFG */ +#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0 +#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1 +#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1 +#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2 +#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2 +#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4 +#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3 +#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8 +#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4 
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
+
+/* TPC0_CMDQ_GLBL_ERR_ADDR_LO */
+#define TPC0_CMDQ_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_GLBL_ERR_ADDR_HI */
+#define TPC0_CMDQ_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_GLBL_ERR_WDATA */
+#define TPC0_CMDQ_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define TPC0_CMDQ_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_GLBL_SECURE_PROPS */
+#define TPC0_CMDQ_GLBL_SECURE_PROPS_ASID_SHIFT 0
+#define TPC0_CMDQ_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
+#define TPC0_CMDQ_GLBL_SECURE_PROPS_MMBP_SHIFT 10
+#define TPC0_CMDQ_GLBL_SECURE_PROPS_MMBP_MASK 0x400
+
+/* TPC0_CMDQ_GLBL_NON_SECURE_PROPS */
+#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
+#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
+#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
+#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
+
+/* TPC0_CMDQ_GLBL_STS0 */
+#define TPC0_CMDQ_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define TPC0_CMDQ_GLBL_STS0_PQF_IDLE_MASK 0x1
+#define TPC0_CMDQ_GLBL_STS0_CQF_IDLE_SHIFT 1
+#define TPC0_CMDQ_GLBL_STS0_CQF_IDLE_MASK 0x2
+#define TPC0_CMDQ_GLBL_STS0_CP_IDLE_SHIFT 2
+#define TPC0_CMDQ_GLBL_STS0_CP_IDLE_MASK 0x4
+#define TPC0_CMDQ_GLBL_STS0_DMA_IDLE_SHIFT 3
+#define TPC0_CMDQ_GLBL_STS0_DMA_IDLE_MASK 0x8
+#define TPC0_CMDQ_GLBL_STS0_PQF_IS_STOP_SHIFT 4
+#define TPC0_CMDQ_GLBL_STS0_PQF_IS_STOP_MASK 0x10
+#define TPC0_CMDQ_GLBL_STS0_CQF_IS_STOP_SHIFT 5
+#define TPC0_CMDQ_GLBL_STS0_CQF_IS_STOP_MASK 0x20
+#define TPC0_CMDQ_GLBL_STS0_CP_IS_STOP_SHIFT 6
+#define TPC0_CMDQ_GLBL_STS0_CP_IS_STOP_MASK 0x40
+#define TPC0_CMDQ_GLBL_STS0_DMA_IS_STOP_SHIFT 7
+#define TPC0_CMDQ_GLBL_STS0_DMA_IS_STOP_MASK 0x80
+
+/* TPC0_CMDQ_GLBL_STS1 */
+#define TPC0_CMDQ_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define TPC0_CMDQ_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define TPC0_CMDQ_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define TPC0_CMDQ_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define TPC0_CMDQ_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define TPC0_CMDQ_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define TPC0_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define TPC0_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define TPC0_CMDQ_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define TPC0_CMDQ_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define TPC0_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define TPC0_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define TPC0_CMDQ_GLBL_STS1_DMA_RD_ERR_SHIFT 8
+#define TPC0_CMDQ_GLBL_STS1_DMA_RD_ERR_MASK 0x100
+#define TPC0_CMDQ_GLBL_STS1_DMA_WR_ERR_SHIFT 9
+#define TPC0_CMDQ_GLBL_STS1_DMA_WR_ERR_MASK 0x200
+#define TPC0_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
+#define TPC0_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
+#define TPC0_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
+#define TPC0_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
+
+/* TPC0_CMDQ_CQ_CFG0 */
+#define TPC0_CMDQ_CQ_CFG0_RESERVED_SHIFT 0
+#define TPC0_CMDQ_CQ_CFG0_RESERVED_MASK 0x1
+
+/* TPC0_CMDQ_CQ_CFG1 */
+#define TPC0_CMDQ_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define TPC0_CMDQ_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define TPC0_CMDQ_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* TPC0_CMDQ_CQ_ARUSER */
+#define TPC0_CMDQ_CQ_ARUSER_NOSNOOP_SHIFT 0
+#define TPC0_CMDQ_CQ_ARUSER_NOSNOOP_MASK 0x1
+#define TPC0_CMDQ_CQ_ARUSER_WORD_SHIFT 1
+#define TPC0_CMDQ_CQ_ARUSER_WORD_MASK 0x2
+
+/* TPC0_CMDQ_CQ_PTR_LO */
+#define TPC0_CMDQ_CQ_PTR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_PTR_HI */
+#define TPC0_CMDQ_CQ_PTR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_TSIZE */
+#define TPC0_CMDQ_CQ_TSIZE_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_CTL */
+#define TPC0_CMDQ_CQ_CTL_RPT_SHIFT 0
+#define TPC0_CMDQ_CQ_CTL_RPT_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_CTL_CTL_SHIFT 16
+#define TPC0_CMDQ_CQ_CTL_CTL_MASK 0xFFFF0000
+
+/* TPC0_CMDQ_CQ_PTR_LO_STS */
+#define TPC0_CMDQ_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_PTR_HI_STS */
+#define TPC0_CMDQ_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_TSIZE_STS */
+#define TPC0_CMDQ_CQ_TSIZE_STS_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_CTL_STS */
+#define TPC0_CMDQ_CQ_CTL_STS_RPT_SHIFT 0
+#define TPC0_CMDQ_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_CTL_STS_CTL_SHIFT 16
+#define TPC0_CMDQ_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* TPC0_CMDQ_CQ_STS0 */
+#define TPC0_CMDQ_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define TPC0_CMDQ_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define TPC0_CMDQ_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* TPC0_CMDQ_CQ_STS1 */
+#define TPC0_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define TPC0_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define TPC0_CMDQ_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define TPC0_CMDQ_CQ_STS1_CQ_BUSY_SHIFT 31
+#define TPC0_CMDQ_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* TPC0_CMDQ_CQ_RD_RATE_LIM_EN */
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN */
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* TPC0_CMDQ_CQ_RD_RATE_LIM_SAT */
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT */
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* TPC0_CMDQ_CQ_IFIFO_CNT */
+#define TPC0_CMDQ_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO */
+#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI */
+#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO */
+#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI */
+#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO */
+#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI */
+#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO */
+#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI */
+#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_FENCE0_RDATA */
+#define TPC0_CMDQ_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_CMDQ_CP_FENCE1_RDATA */
+#define TPC0_CMDQ_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_CMDQ_CP_FENCE2_RDATA */
+#define TPC0_CMDQ_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_CMDQ_CP_FENCE3_RDATA */
+#define TPC0_CMDQ_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_CMDQ_CP_FENCE0_CNT */
+#define TPC0_CMDQ_CP_FENCE0_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE0_CNT_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CP_FENCE1_CNT */
+#define TPC0_CMDQ_CP_FENCE1_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE1_CNT_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CP_FENCE2_CNT */
+#define TPC0_CMDQ_CP_FENCE2_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE2_CNT_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CP_FENCE3_CNT */
+#define TPC0_CMDQ_CP_FENCE3_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE3_CNT_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CP_STS */
+#define TPC0_CMDQ_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define TPC0_CMDQ_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_CMDQ_CP_STS_ERDY_SHIFT 16
+#define TPC0_CMDQ_CP_STS_ERDY_MASK 0x10000
+#define TPC0_CMDQ_CP_STS_RRDY_SHIFT 17
+#define TPC0_CMDQ_CP_STS_RRDY_MASK 0x20000
+#define TPC0_CMDQ_CP_STS_MRDY_SHIFT 18
+#define TPC0_CMDQ_CP_STS_MRDY_MASK 0x40000
+#define TPC0_CMDQ_CP_STS_SW_STOP_SHIFT 19
+#define TPC0_CMDQ_CP_STS_SW_STOP_MASK 0x80000
+#define TPC0_CMDQ_CP_STS_FENCE_ID_SHIFT 20
+#define TPC0_CMDQ_CP_STS_FENCE_ID_MASK 0x300000
+#define TPC0_CMDQ_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define TPC0_CMDQ_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* TPC0_CMDQ_CP_CURRENT_INST_LO */
+#define TPC0_CMDQ_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_CURRENT_INST_HI */
+#define TPC0_CMDQ_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_BARRIER_CFG */
+#define TPC0_CMDQ_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define TPC0_CMDQ_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+
+/* TPC0_CMDQ_CP_DBG_0 */
+#define TPC0_CMDQ_CP_DBG_0_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_DBG_0_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CQ_BUF_ADDR */
+#define TPC0_CMDQ_CQ_BUF_ADDR_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_BUF_RDATA */
+#define TPC0_CMDQ_CQ_BUF_RDATA_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_TPC0_CMDQ_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
new file mode 100644
index 000000000..bc51df573
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_CMDQ_REGS_H_
+#define ASIC_REG_TPC0_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC0_CMDQ_GLBL_CFG0 0xE09000
+
+#define mmTPC0_CMDQ_GLBL_CFG1 0xE09004
+
+#define mmTPC0_CMDQ_GLBL_PROT 0xE09008
+
+#define mmTPC0_CMDQ_GLBL_ERR_CFG 0xE0900C
+
+#define mmTPC0_CMDQ_GLBL_ERR_ADDR_LO 0xE09010
+
+#define mmTPC0_CMDQ_GLBL_ERR_ADDR_HI 0xE09014
+
+#define mmTPC0_CMDQ_GLBL_ERR_WDATA 0xE09018
+
+#define mmTPC0_CMDQ_GLBL_SECURE_PROPS 0xE0901C
+
+#define mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS 0xE09020
+
+#define mmTPC0_CMDQ_GLBL_STS0 0xE09024
+
+#define mmTPC0_CMDQ_GLBL_STS1 0xE09028
+
+#define mmTPC0_CMDQ_CQ_CFG0 0xE090B0
+
+#define mmTPC0_CMDQ_CQ_CFG1 0xE090B4
+
+#define mmTPC0_CMDQ_CQ_ARUSER 0xE090B8
+
+#define mmTPC0_CMDQ_CQ_PTR_LO 0xE090C0
+
+#define mmTPC0_CMDQ_CQ_PTR_HI 0xE090C4
+
+#define mmTPC0_CMDQ_CQ_TSIZE 0xE090C8
+
+#define mmTPC0_CMDQ_CQ_CTL 0xE090CC
+
+#define mmTPC0_CMDQ_CQ_PTR_LO_STS 0xE090D4
+
+#define mmTPC0_CMDQ_CQ_PTR_HI_STS 0xE090D8
+
+#define mmTPC0_CMDQ_CQ_TSIZE_STS 0xE090DC
+
+#define mmTPC0_CMDQ_CQ_CTL_STS 0xE090E0
+
+#define mmTPC0_CMDQ_CQ_STS0 0xE090E4
+
+#define mmTPC0_CMDQ_CQ_STS1 0xE090E8
+
+#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_EN 0xE090F0
+
+#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE090F4
+
+#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_SAT 0xE090F8
+
+#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE090FC
+
+#define mmTPC0_CMDQ_CQ_IFIFO_CNT 0xE09108
+
+#define mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE09120
+
+#define mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE09124
+
+#define mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE09128
+
+#define mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE0912C
+
+#define mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE09130
+
+#define mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE09134
+
+#define mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE09138
+
+#define mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE0913C
+
+#define mmTPC0_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE09140
+
+#define mmTPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE09144
+
+#define mmTPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE09148
+
+#define mmTPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE0914C
+
+#define mmTPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE09150
+
+#define mmTPC0_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE09154
+
+#define mmTPC0_CMDQ_CP_FENCE0_RDATA 0xE09158
+
+#define mmTPC0_CMDQ_CP_FENCE1_RDATA 0xE0915C
+
+#define mmTPC0_CMDQ_CP_FENCE2_RDATA 0xE09160
+
+#define mmTPC0_CMDQ_CP_FENCE3_RDATA 0xE09164
+
+#define mmTPC0_CMDQ_CP_FENCE0_CNT 0xE09168
+
+#define mmTPC0_CMDQ_CP_FENCE1_CNT 0xE0916C
+
+#define mmTPC0_CMDQ_CP_FENCE2_CNT 0xE09170
+
+#define mmTPC0_CMDQ_CP_FENCE3_CNT 0xE09174
+
+#define mmTPC0_CMDQ_CP_STS 0xE09178
+
+#define mmTPC0_CMDQ_CP_CURRENT_INST_LO 0xE0917C
+
+#define mmTPC0_CMDQ_CP_CURRENT_INST_HI 0xE09180
+
+#define mmTPC0_CMDQ_CP_BARRIER_CFG 0xE09184
+
+#define mmTPC0_CMDQ_CP_DBG_0 0xE09188
+
+#define mmTPC0_CMDQ_CQ_BUF_ADDR 0xE09308
+
+#define mmTPC0_CMDQ_CQ_BUF_RDATA 0xE0930C
+
+#endif /* ASIC_REG_TPC0_CMDQ_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
new file mode 100644
index 000000000..553c6b6bd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_EML_CFG_MASKS_H_
+#define ASIC_REG_TPC0_EML_CFG_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_EML_CFG (Prototype: TPC_EML_CFG)
+ *****************************************
+ */
+
+/* TPC0_EML_CFG_DBG_CNT */
+#define TPC0_EML_CFG_DBG_CNT_DBG_ENTER_SHIFT 0
+#define TPC0_EML_CFG_DBG_CNT_DBG_ENTER_MASK 0x1
+#define TPC0_EML_CFG_DBG_CNT_DBG_EN_SHIFT 1
+#define TPC0_EML_CFG_DBG_CNT_DBG_EN_MASK 0x2
+#define TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT 2
+#define TPC0_EML_CFG_DBG_CNT_CORE_RST_MASK 0x4
+#define TPC0_EML_CFG_DBG_CNT_DCACHE_INV_SHIFT 4
+#define TPC0_EML_CFG_DBG_CNT_DCACHE_INV_MASK 0x10
+#define TPC0_EML_CFG_DBG_CNT_ICACHE_INV_SHIFT 5
+#define TPC0_EML_CFG_DBG_CNT_ICACHE_INV_MASK 0x20
+#define TPC0_EML_CFG_DBG_CNT_DBG_EXIT_SHIFT 6
+#define TPC0_EML_CFG_DBG_CNT_DBG_EXIT_MASK 0x40
+#define TPC0_EML_CFG_DBG_CNT_SNG_STEP_SHIFT 7
+#define TPC0_EML_CFG_DBG_CNT_SNG_STEP_MASK 0x80
+#define TPC0_EML_CFG_DBG_CNT_BP_DBGSW_EN_SHIFT 16
+#define TPC0_EML_CFG_DBG_CNT_BP_DBGSW_EN_MASK 0x10000
+
+/* TPC0_EML_CFG_DBG_STS */
+#define TPC0_EML_CFG_DBG_STS_DBG_MODE_SHIFT 0
+#define TPC0_EML_CFG_DBG_STS_DBG_MODE_MASK 0x1
+#define TPC0_EML_CFG_DBG_STS_CORE_READY_SHIFT 1
+#define TPC0_EML_CFG_DBG_STS_CORE_READY_MASK 0x2
+#define TPC0_EML_CFG_DBG_STS_DURING_KERNEL_SHIFT 2
+#define TPC0_EML_CFG_DBG_STS_DURING_KERNEL_MASK 0x4
+#define TPC0_EML_CFG_DBG_STS_ICACHE_IDLE_SHIFT 3
+#define TPC0_EML_CFG_DBG_STS_ICACHE_IDLE_MASK 0x8
+#define TPC0_EML_CFG_DBG_STS_DCACHE_IDLE_SHIFT 4
+#define TPC0_EML_CFG_DBG_STS_DCACHE_IDLE_MASK 0x10
+#define TPC0_EML_CFG_DBG_STS_QM_IDLE_SHIFT 5
+#define TPC0_EML_CFG_DBG_STS_QM_IDLE_MASK 0x20
+#define TPC0_EML_CFG_DBG_STS_WQ_IDLE_SHIFT 6
+#define TPC0_EML_CFG_DBG_STS_WQ_IDLE_MASK 0x40
+#define TPC0_EML_CFG_DBG_STS_MSS_IDLE_SHIFT 7
+#define TPC0_EML_CFG_DBG_STS_MSS_IDLE_MASK 0x80
+#define TPC0_EML_CFG_DBG_STS_DBG_CAUSE_SHIFT 8
+#define TPC0_EML_CFG_DBG_STS_DBG_CAUSE_MASK 0xFFFFFF00
+
+/* TPC0_EML_CFG_DBG_PADD */
+#define TPC0_EML_CFG_DBG_PADD_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_PADD_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_PADD_COUNT */
+#define TPC0_EML_CFG_DBG_PADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_PADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_PADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_PADD_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_PADD_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_PADD_EN */
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE2_SHIFT 2
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE2_MASK 0x4
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE3_SHIFT 3
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE3_MASK 0x8
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE4_SHIFT 4
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE4_MASK 0x10
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE5_SHIFT 5
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE5_MASK 0x20
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE6_SHIFT 6
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE6_MASK 0x40
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE7_SHIFT 7
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE7_MASK 0x80
+
+/* TPC0_EML_CFG_DBG_VPADD_HIGH */
+#define TPC0_EML_CFG_DBG_VPADD_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_HIGH_ADDRESS_MASK 0x1FF
+
+/* TPC0_EML_CFG_DBG_VPADD_LOW */
+#define TPC0_EML_CFG_DBG_VPADD_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_LOW_ADDRESS_MASK 0x1FF
+
+/* TPC0_EML_CFG_DBG_VPADD_COUNT */
+#define TPC0_EML_CFG_DBG_VPADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_VPADD_EN */
+#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_SPADD_HIGH */
+#define TPC0_EML_CFG_DBG_SPADD_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_HIGH_ADDRESS_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPADD_LOW */
+#define TPC0_EML_CFG_DBG_SPADD_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_LOW_ADDRESS_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPADD_COUNT */
+#define TPC0_EML_CFG_DBG_SPADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPADD_EN */
+#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH */
+#define TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_MSB_LOW */
+#define TPC0_EML_CFG_DBG_AGUADD_MSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH */
+#define TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_LSB_LOW */
+#define TPC0_EML_CFG_DBG_AGUADD_LSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_COUNT */
+#define TPC0_EML_CFG_DBG_AGUADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_EN */
+#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_COUNT */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_MATCH_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_MATCH_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_EN */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH */
+#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW */
+#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH */
+#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW */
+#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_COUNT */
+#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_MATCH_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_MATCH_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_EN */
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_SPDATA */
+#define TPC0_EML_CFG_DBG_SPDATA_DATA_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPDATA_DATA_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_SPDATA_COUNT */
+#define TPC0_EML_CFG_DBG_SPDATA_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPDATA_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_MATCH_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_MATCH_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPDATA_EN */
+#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_AXIHBWDATA */
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_DATA_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_DATA_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT */
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWDATA_EN */
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_ENABLE_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_ENABLE_MASK 0x1
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_RW_N_SHIFT 1
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_RW_N_MASK 0x2
+
+/* TPC0_EML_CFG_DBG_AXILBWDATA */
+#define TPC0_EML_CFG_DBG_AXILBWDATA_DATA_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWDATA_DATA_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWDATA_COUNT */
+#define TPC0_EML_CFG_DBG_AXILBWDATA_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWDATA_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH_MATCH_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH_MATCH_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXILBWDATA_EN */
+#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_ENABLE_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_ENABLE_MASK 0x1
+#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_RW_N_SHIFT 1
+#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_RW_N_MASK 0x2
+
+/* TPC0_EML_CFG_DBG_D0_PC */
+#define TPC0_EML_CFG_DBG_D0_PC_PC_SHIFT 0
+#define TPC0_EML_CFG_DBG_D0_PC_PC_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_RTTCONFIG */
+#define TPC0_EML_CFG_RTTCONFIG_TR_EN_SHIFT 0
+#define TPC0_EML_CFG_RTTCONFIG_TR_EN_MASK 0x1
+#define TPC0_EML_CFG_RTTCONFIG_PRIO_SHIFT 1
+#define TPC0_EML_CFG_RTTCONFIG_PRIO_MASK 0x2
+
+/* TPC0_EML_CFG_RTTPREDICATE */
+#define TPC0_EML_CFG_RTTPREDICATE_TR_EN_SHIFT 0
+#define TPC0_EML_CFG_RTTPREDICATE_TR_EN_MASK 0x1
+#define TPC0_EML_CFG_RTTPREDICATE_GEN_SHIFT 1
+#define TPC0_EML_CFG_RTTPREDICATE_GEN_MASK 0x2
+#define TPC0_EML_CFG_RTTPREDICATE_USE_INTERVAL_SHIFT 2
+#define TPC0_EML_CFG_RTTPREDICATE_USE_INTERVAL_MASK 0x4
+#define TPC0_EML_CFG_RTTPREDICATE_SPRF_MASK_SHIFT 16
+#define TPC0_EML_CFG_RTTPREDICATE_SPRF_MASK_MASK 0xFFFF0000
+
+/* TPC0_EML_CFG_RTTPREDICATE_INTV */
+#define TPC0_EML_CFG_RTTPREDICATE_INTV_INTERVAL_SHIFT 0
+#define TPC0_EML_CFG_RTTPREDICATE_INTV_INTERVAL_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_RTTTS */
+#define TPC0_EML_CFG_RTTTS_TR_EN_SHIFT 0
+#define TPC0_EML_CFG_RTTTS_TR_EN_MASK 0x1
+#define TPC0_EML_CFG_RTTTS_GEN_SHIFT 1
+#define TPC0_EML_CFG_RTTTS_GEN_MASK 0x2
+#define TPC0_EML_CFG_RTTTS_COMPRESS_EN_SHIFT 2
+#define TPC0_EML_CFG_RTTTS_COMPRESS_EN_MASK 0x4
+
+/* TPC0_EML_CFG_RTTTS_INTV */
+#define TPC0_EML_CFG_RTTTS_INTV_INTERVAL_SHIFT 0
+#define TPC0_EML_CFG_RTTTS_INTV_INTERVAL_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_INST_INSERT */
+#define TPC0_EML_CFG_DBG_INST_INSERT_INST_SHIFT 0
+#define TPC0_EML_CFG_DBG_INST_INSERT_INST_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_INST_INSERT_CTL */
+#define TPC0_EML_CFG_DBG_INST_INSERT_CTL_INSERT_SHIFT 0
+#define TPC0_EML_CFG_DBG_INST_INSERT_CTL_INSERT_MASK 0x1
+
+#endif /* ASIC_REG_TPC0_EML_CFG_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
new file mode 100644
index 000000000..8495479c3
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_EML_CFG_REGS_H_
+#define ASIC_REG_TPC0_EML_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_EML_CFG (Prototype: TPC_EML_CFG)
+ *****************************************
+ */
+
+#define mmTPC0_EML_CFG_DBG_CNT 0x3040000
+
+#define mmTPC0_EML_CFG_DBG_STS 0x3040004
+
+#define mmTPC0_EML_CFG_DBG_PADD_0 0x3040008
+
+#define mmTPC0_EML_CFG_DBG_PADD_1 0x304000C
+
+#define mmTPC0_EML_CFG_DBG_PADD_2 0x3040010
+
+#define mmTPC0_EML_CFG_DBG_PADD_3 0x3040014
+
+#define mmTPC0_EML_CFG_DBG_PADD_4 0x3040018
+
+#define mmTPC0_EML_CFG_DBG_PADD_5 0x304001C
+
+#define mmTPC0_EML_CFG_DBG_PADD_6 0x3040020
+
+#define mmTPC0_EML_CFG_DBG_PADD_7 0x3040024
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_0 0x3040028
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_1 0x304002C
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_2 0x3040030
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_3 0x3040034
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_4 0x3040038
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_5 0x304003C
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_6 0x3040040
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_7 0x3040044
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_0 0x3040048
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_1 0x304004C
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_2 0x3040050
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_3 0x3040054
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_4 0x3040058
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_5 0x304005C
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_6 0x3040060
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_7 0x3040064
+
+#define mmTPC0_EML_CFG_DBG_PADD_EN 0x3040068
+
+#define mmTPC0_EML_CFG_DBG_VPADD_HIGH_0 0x304006C
+
+#define mmTPC0_EML_CFG_DBG_VPADD_HIGH_1 0x3040070
+
+#define mmTPC0_EML_CFG_DBG_VPADD_LOW_0 0x3040074
+
+#define mmTPC0_EML_CFG_DBG_VPADD_LOW_1 0x3040078
+
+#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_0 0x304007C
+
+#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_1 0x3040080
+
+#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_0 0x3040084
+
+#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_1 0x3040088
+
+#define mmTPC0_EML_CFG_DBG_VPADD_EN 0x304008C
+
+#define mmTPC0_EML_CFG_DBG_SPADD_HIGH_0 0x3040090
+
+#define mmTPC0_EML_CFG_DBG_SPADD_HIGH_1 0x3040094
+
+#define mmTPC0_EML_CFG_DBG_SPADD_LOW_0 0x3040098
+
+#define mmTPC0_EML_CFG_DBG_SPADD_LOW_1 0x304009C
+
+#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_0 0x30400A0
+
+#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_1 0x30400A4
+
+#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_0 0x30400A8
+
+#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_1 0x30400AC
+
+#define mmTPC0_EML_CFG_DBG_SPADD_EN 0x30400B0
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_0 0x30400B4
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_1 0x30400B8
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_LOW_0 0x30400BC
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_LOW_1 0x30400C0
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_0 0x30400C4
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_1 0x30400C8
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_LOW_0 0x30400CC
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_LOW_1 0x30400D0
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_0 0x30400D4
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_1 0x30400D8
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_0 0x30400DC
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_1 0x30400E0
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_EN 0x30400E4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_0 0x30400E8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_1 0x30400EC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_0 0x30400F0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_1 0x30400F4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_0 0x30400F8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_1 0x30400FC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_0 0x3040100
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_1 0x3040104
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_0 0x3040108
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_1 0x304010C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_0 0x3040110
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_1 0x3040114
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_EN 0x3040118
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_0 0x304011C
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_1 0x3040120
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_0 0x3040124
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_1 0x3040128
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_0 0x304012C
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_1 0x3040130
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_0 0x3040134
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_1 0x3040138
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_0 0x304013C
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_1 0x3040140
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_0 0x3040144
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_1 0x3040148
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_EN 0x304014C
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_0 0x3040150
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_1 0x3040154
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_0 0x3040158
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_1 0x304015C
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_0 0x3040160
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_1 0x3040164
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_EN 0x3040168
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_0 0x304016C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_1 0x3040170
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_2 0x3040174
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_3 0x3040178
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_4 0x304017C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_5 0x3040180
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_6 0x3040184
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_7 0x3040188
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_8 0x304018C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_9 0x3040190
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_10 0x3040194
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_11 0x3040198
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_12 0x304019C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_13 0x30401A0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_14 0x30401A4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_15 0x30401A8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_16 0x30401AC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_17 0x30401B0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_18 0x30401B4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_19 0x30401B8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_20 0x30401BC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_21 0x30401C0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_22 0x30401C4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_23 0x30401C8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_24 0x30401CC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_25 0x30401D0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_26 0x30401D4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_27 0x30401D8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_28 0x30401DC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_29 0x30401E0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_30 0x30401E4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_31 0x30401E8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_COUNT 0x30401EC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH 0x30401F0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_EN 0x30401F4
+
+#define mmTPC0_EML_CFG_DBG_AXILBWDATA 0x30401F8
+
+#define mmTPC0_EML_CFG_DBG_AXILBWDATA_COUNT 0x30401FC
+
+#define mmTPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH 0x3040200
+
+#define mmTPC0_EML_CFG_DBG_AXILBWDATA_EN 0x3040204
+
+#define mmTPC0_EML_CFG_DBG_D0_PC 0x3040208
+
+#define mmTPC0_EML_CFG_RTTCONFIG 0x3040300
+
+#define mmTPC0_EML_CFG_RTTPREDICATE 0x3040304
+
+#define mmTPC0_EML_CFG_RTTPREDICATE_INTV 0x3040308
+
+#define mmTPC0_EML_CFG_RTTTS 0x304030C
+
+#define mmTPC0_EML_CFG_RTTTS_INTV 0x3040310
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_0 0x3040314
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_1 0x3040318
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_2 0x304031C
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_3 0x3040320
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_4 0x3040324
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_5 0x3040328
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_6 0x304032C
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_7 0x3040330
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_CTL 0x3040334
+
+#endif /* ASIC_REG_TPC0_EML_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
new file mode 100644
index 000000000..43fafcf01
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_NRTR_MASKS_H_
+#define ASIC_REG_TPC0_NRTR_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+/* TPC0_NRTR_HBW_MAX_CRED */
+#define TPC0_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0
+#define TPC0_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F
+#define TPC0_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8
+#define TPC0_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00
+#define TPC0_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16
+#define TPC0_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000
+#define TPC0_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24
+#define TPC0_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000
+
+/* TPC0_NRTR_LBW_MAX_CRED */
+#define TPC0_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0
+#define TPC0_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F
+#define TPC0_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8
+#define TPC0_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00
+#define TPC0_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16
+#define TPC0_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000
+#define TPC0_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24
+#define TPC0_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000
+
+/* TPC0_NRTR_DBG_E_ARB */
+#define TPC0_NRTR_DBG_E_ARB_W_SHIFT 0
+#define TPC0_NRTR_DBG_E_ARB_W_MASK 0x7
+#define TPC0_NRTR_DBG_E_ARB_S_SHIFT 8
+#define TPC0_NRTR_DBG_E_ARB_S_MASK 0x700
+#define TPC0_NRTR_DBG_E_ARB_N_SHIFT 16
+#define TPC0_NRTR_DBG_E_ARB_N_MASK 0x70000
+#define TPC0_NRTR_DBG_E_ARB_L_SHIFT 24
+#define TPC0_NRTR_DBG_E_ARB_L_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_W_ARB */
+#define TPC0_NRTR_DBG_W_ARB_E_SHIFT 0
+#define TPC0_NRTR_DBG_W_ARB_E_MASK 0x7
+#define TPC0_NRTR_DBG_W_ARB_S_SHIFT 8
+#define TPC0_NRTR_DBG_W_ARB_S_MASK 0x700
+#define TPC0_NRTR_DBG_W_ARB_N_SHIFT 16
+#define TPC0_NRTR_DBG_W_ARB_N_MASK 0x70000
+#define TPC0_NRTR_DBG_W_ARB_L_SHIFT 24
+#define TPC0_NRTR_DBG_W_ARB_L_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_N_ARB */
+#define TPC0_NRTR_DBG_N_ARB_W_SHIFT 0
+#define TPC0_NRTR_DBG_N_ARB_W_MASK 0x7
+#define TPC0_NRTR_DBG_N_ARB_E_SHIFT 8
+#define TPC0_NRTR_DBG_N_ARB_E_MASK 0x700
+#define TPC0_NRTR_DBG_N_ARB_S_SHIFT 16
+#define TPC0_NRTR_DBG_N_ARB_S_MASK 0x70000
+#define TPC0_NRTR_DBG_N_ARB_L_SHIFT 24
+#define TPC0_NRTR_DBG_N_ARB_L_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_S_ARB */
+#define TPC0_NRTR_DBG_S_ARB_W_SHIFT 0
+#define TPC0_NRTR_DBG_S_ARB_W_MASK 0x7
+#define TPC0_NRTR_DBG_S_ARB_E_SHIFT 8
+#define TPC0_NRTR_DBG_S_ARB_E_MASK 0x700
+#define TPC0_NRTR_DBG_S_ARB_N_SHIFT 16
+#define TPC0_NRTR_DBG_S_ARB_N_MASK 0x70000
+#define TPC0_NRTR_DBG_S_ARB_L_SHIFT 24
+#define TPC0_NRTR_DBG_S_ARB_L_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_L_ARB */
+#define TPC0_NRTR_DBG_L_ARB_W_SHIFT 0
+#define TPC0_NRTR_DBG_L_ARB_W_MASK 0x7
+#define TPC0_NRTR_DBG_L_ARB_E_SHIFT 8
+#define TPC0_NRTR_DBG_L_ARB_E_MASK 0x700
+#define TPC0_NRTR_DBG_L_ARB_S_SHIFT 16
+#define TPC0_NRTR_DBG_L_ARB_S_MASK 0x70000
+#define TPC0_NRTR_DBG_L_ARB_N_SHIFT 24
+#define TPC0_NRTR_DBG_L_ARB_N_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_E_ARB_MAX */
+#define TPC0_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_DBG_W_ARB_MAX */
+#define TPC0_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_DBG_N_ARB_MAX */
+#define TPC0_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_DBG_S_ARB_MAX */
+#define TPC0_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_DBG_L_ARB_MAX */
+#define TPC0_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_SPLIT_COEF */
+#define TPC0_NRTR_SPLIT_COEF_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_SPLIT_CFG */
+#define TPC0_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0
+#define TPC0_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1
+#define TPC0_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1
+#define TPC0_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2
+#define TPC0_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2
+#define TPC0_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC
+#define TPC0_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4
+#define TPC0_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10
+#define TPC0_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5
+#define TPC0_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20
+#define TPC0_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6
+#define TPC0_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0
+
+/* TPC0_NRTR_SPLIT_RD_SAT */
+#define TPC0_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_SPLIT_RD_RST_TOKEN */
+#define TPC0_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_SPLIT_RD_TIMEOUT */
+#define TPC0_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_NRTR_SPLIT_WR_SAT */
+#define TPC0_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_WPLIT_WR_TST_TOLEN */
+#define TPC0_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0
+#define TPC0_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_SPLIT_WR_TIMEOUT */
+#define TPC0_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_NRTR_HBW_RANGE_HIT */
+#define TPC0_NRTR_HBW_RANGE_HIT_IND_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF
+
+/* TPC0_NRTR_HBW_RANGE_MASK_L */
+#define TPC0_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_NRTR_HBW_RANGE_MASK_H */
+#define TPC0_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF
+
+/* TPC0_NRTR_HBW_RANGE_BASE_L */
+#define TPC0_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_NRTR_HBW_RANGE_BASE_H */
+#define TPC0_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF
+
+/* TPC0_NRTR_LBW_RANGE_HIT */
+#define TPC0_NRTR_LBW_RANGE_HIT_IND_SHIFT 0
+#define TPC0_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF
+
+/* TPC0_NRTR_LBW_RANGE_MASK */
+#define TPC0_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0
+#define TPC0_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF
+
+/* TPC0_NRTR_LBW_RANGE_BASE */
+#define TPC0_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0
+#define TPC0_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF
+
+/* TPC0_NRTR_RGLTR */
+#define TPC0_NRTR_RGLTR_WR_EN_SHIFT 0
+#define TPC0_NRTR_RGLTR_WR_EN_MASK 0x1
+#define TPC0_NRTR_RGLTR_RD_EN_SHIFT 4
+#define TPC0_NRTR_RGLTR_RD_EN_MASK 0x10
+
+/* TPC0_NRTR_RGLTR_WR_RESULT */
+#define TPC0_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0
+#define TPC0_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF
+
+/* TPC0_NRTR_RGLTR_RD_RESULT */
+#define TPC0_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0
+#define TPC0_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF
+
+/* TPC0_NRTR_SCRAMB_EN */
+#define TPC0_NRTR_SCRAMB_EN_VAL_SHIFT 0
+#define TPC0_NRTR_SCRAMB_EN_VAL_MASK 0x1
+
+/* TPC0_NRTR_NON_LIN_SCRAMB */
+#define TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0
+#define TPC0_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
+
+#endif /* ASIC_REG_TPC0_NRTR_MASKS_H_ */
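[Editorial note, not part of the upstream diff: the SHIFT/MASK pairs in these auto-generated headers follow the usual read-modify-write convention for packed register fields. As a minimal sketch only, assuming "base" is the ioremapped start of the device's configuration space so that the mm* offsets from the companion *_regs.h files can be added directly, updating a single field looks like this:]

#include <linux/io.h>	/* readl()/writel() MMIO accessors */

/* Sketch: enable the read rate limiter bit in TPC0_NRTR_SPLIT_CFG
 * without disturbing the other fields of the register.
 */
static void tpc0_nrtr_enable_rd_rate_lim(void __iomem *base)
{
	u32 val = readl(base + mmTPC0_NRTR_SPLIT_CFG);

	val &= ~TPC0_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK;	/* clear field */
	val |= 1 << TPC0_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT;	/* set new value */
	writel(val, base + mmTPC0_NRTR_SPLIT_CFG);
}

[The same pattern applies to every SHIFT/MASK pair in these files; multi-bit fields such as DEFAULT_MESH are written as (value << SHIFT) & MASK.]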
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
new file mode 100644
index 000000000..ce3346dd2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_NRTR_REGS_H_
+#define ASIC_REG_TPC0_NRTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+#define mmTPC0_NRTR_HBW_MAX_CRED 0xE00100
+
+#define mmTPC0_NRTR_LBW_MAX_CRED 0xE00120
+
+#define mmTPC0_NRTR_DBG_E_ARB 0xE00300
+
+#define mmTPC0_NRTR_DBG_W_ARB 0xE00304
+
+#define mmTPC0_NRTR_DBG_N_ARB 0xE00308
+
+#define mmTPC0_NRTR_DBG_S_ARB 0xE0030C
+
+#define mmTPC0_NRTR_DBG_L_ARB 0xE00310
+
+#define mmTPC0_NRTR_DBG_E_ARB_MAX 0xE00320
+
+#define mmTPC0_NRTR_DBG_W_ARB_MAX 0xE00324
+
+#define mmTPC0_NRTR_DBG_N_ARB_MAX 0xE00328
+
+#define mmTPC0_NRTR_DBG_S_ARB_MAX 0xE0032C
+
+#define mmTPC0_NRTR_DBG_L_ARB_MAX 0xE00330
+
+#define mmTPC0_NRTR_SPLIT_COEF_0 0xE00400
+
+#define mmTPC0_NRTR_SPLIT_COEF_1 0xE00404
+
+#define mmTPC0_NRTR_SPLIT_COEF_2 0xE00408
+
+#define mmTPC0_NRTR_SPLIT_COEF_3 0xE0040C
+
+#define mmTPC0_NRTR_SPLIT_COEF_4 0xE00410
+
+#define mmTPC0_NRTR_SPLIT_COEF_5 0xE00414
+
+#define mmTPC0_NRTR_SPLIT_COEF_6 0xE00418
+
+#define mmTPC0_NRTR_SPLIT_COEF_7 0xE0041C
+
+#define mmTPC0_NRTR_SPLIT_COEF_8 0xE00420
+
+#define mmTPC0_NRTR_SPLIT_COEF_9 0xE00424
+
+#define mmTPC0_NRTR_SPLIT_CFG 0xE00440
+
+#define mmTPC0_NRTR_SPLIT_RD_SAT 0xE00444
+
+#define mmTPC0_NRTR_SPLIT_RD_RST_TOKEN 0xE00448
+
+#define mmTPC0_NRTR_SPLIT_RD_TIMEOUT_0 0xE0044C
+
+#define mmTPC0_NRTR_SPLIT_RD_TIMEOUT_1 0xE00450
+
+#define mmTPC0_NRTR_SPLIT_WR_SAT 0xE00454
+
+#define mmTPC0_NRTR_WPLIT_WR_TST_TOLEN 0xE00458
+
+#define mmTPC0_NRTR_SPLIT_WR_TIMEOUT_0 0xE0045C
+
+#define mmTPC0_NRTR_SPLIT_WR_TIMEOUT_1 0xE00460
+
+#define mmTPC0_NRTR_HBW_RANGE_HIT 0xE00470
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_0 0xE00480
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_1 0xE00484
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_2 0xE00488
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_3 0xE0048C
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_4 0xE00490
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_5 0xE00494
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_6 0xE00498
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_7 0xE0049C
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_0 0xE004A0
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_1 0xE004A4
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_2 0xE004A8
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_3 0xE004AC
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_4 0xE004B0
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_5 0xE004B4
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_6 0xE004B8
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_7 0xE004BC
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_0 0xE004C0
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_1 0xE004C4
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_2 0xE004C8
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_3 0xE004CC
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_4 0xE004D0
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_5 0xE004D4
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_6 0xE004D8
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_7 0xE004DC
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_0 0xE004E0
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_1 0xE004E4
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_2 0xE004E8
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_3 0xE004EC
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_4 0xE004F0
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_5 0xE004F4
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_6 0xE004F8
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_7 0xE004FC
+
+#define mmTPC0_NRTR_LBW_RANGE_HIT 0xE00500
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_0 0xE00510
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_1 0xE00514
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_2 0xE00518
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_3 0xE0051C
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_4 0xE00520
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_5 0xE00524
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_6 0xE00528
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_7 0xE0052C
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_8 0xE00530
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_9 0xE00534
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_10 0xE00538
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_11 0xE0053C
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_12 0xE00540
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_13 0xE00544
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_14 0xE00548
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_15 0xE0054C
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_0 0xE00550
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_1 0xE00554
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_2 0xE00558
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_3 0xE0055C
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_4 0xE00560
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_5 0xE00564
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_6 0xE00568
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_7 0xE0056C
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_8 0xE00570
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_9 0xE00574
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_10 0xE00578
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_11 0xE0057C
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_12 0xE00580
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_13 0xE00584
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_14 0xE00588
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_15 0xE0058C
+
+#define mmTPC0_NRTR_RGLTR 0xE00590
+
+#define mmTPC0_NRTR_RGLTR_WR_RESULT 0xE00594
+
+#define mmTPC0_NRTR_RGLTR_RD_RESULT 0xE00598
+
+#define mmTPC0_NRTR_SCRAMB_EN 0xE00600
+
+#define mmTPC0_NRTR_NON_LIN_SCRAMB 0xE00604
+
+#endif /* ASIC_REG_TPC0_NRTR_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
new file mode 100644
index 000000000..2e4b45947
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_QM_MASKS_H_
+#define ASIC_REG_TPC0_QM_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+/* TPC0_QM_GLBL_CFG0 */
+#define TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT 0
+#define TPC0_QM_GLBL_CFG0_PQF_EN_MASK 0x1
+#define TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT 1
+#define TPC0_QM_GLBL_CFG0_CQF_EN_MASK 0x2
+#define TPC0_QM_GLBL_CFG0_CP_EN_SHIFT 2
+#define TPC0_QM_GLBL_CFG0_CP_EN_MASK 0x4
+#define TPC0_QM_GLBL_CFG0_DMA_EN_SHIFT 3
+#define TPC0_QM_GLBL_CFG0_DMA_EN_MASK 0x8
+
+/* TPC0_QM_GLBL_CFG1 */
+#define TPC0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define TPC0_QM_GLBL_CFG1_PQF_STOP_MASK 0x1
+#define TPC0_QM_GLBL_CFG1_CQF_STOP_SHIFT 1
+#define TPC0_QM_GLBL_CFG1_CQF_STOP_MASK 0x2
+#define TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT 2
+#define TPC0_QM_GLBL_CFG1_CP_STOP_MASK 0x4
+#define TPC0_QM_GLBL_CFG1_DMA_STOP_SHIFT 3
+#define TPC0_QM_GLBL_CFG1_DMA_STOP_MASK 0x8
+#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 8
+#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0x100
+#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 9
+#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x200
+#define TPC0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 10
+#define TPC0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x400
+#define TPC0_QM_GLBL_CFG1_DMA_FLUSH_SHIFT 11
+#define TPC0_QM_GLBL_CFG1_DMA_FLUSH_MASK 0x800
+
+/* TPC0_QM_GLBL_PROT */
+#define TPC0_QM_GLBL_PROT_PQF_PROT_SHIFT 0
+#define TPC0_QM_GLBL_PROT_PQF_PROT_MASK 0x1
+#define TPC0_QM_GLBL_PROT_CQF_PROT_SHIFT 1
+#define TPC0_QM_GLBL_PROT_CQF_PROT_MASK 0x2
+#define TPC0_QM_GLBL_PROT_CP_PROT_SHIFT 2
+#define TPC0_QM_GLBL_PROT_CP_PROT_MASK 0x4
+#define TPC0_QM_GLBL_PROT_DMA_PROT_SHIFT 3
+#define TPC0_QM_GLBL_PROT_DMA_PROT_MASK 0x8
+#define TPC0_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
+#define TPC0_QM_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
+#define TPC0_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
+#define TPC0_QM_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
+#define TPC0_QM_GLBL_PROT_CP_ERR_PROT_SHIFT 6
+#define TPC0_QM_GLBL_PROT_CP_ERR_PROT_MASK 0x40
+#define TPC0_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
+#define TPC0_QM_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
+
+/* TPC0_QM_GLBL_ERR_CFG */
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
+#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
+#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
+#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
+#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
+#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
+#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
+#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
+#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
+#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
+#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
+#define TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
+#define TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
+
+/* TPC0_QM_GLBL_ERR_ADDR_LO */
+#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_ERR_ADDR_HI */
+#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_ERR_WDATA */
+#define TPC0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_SECURE_PROPS */
+#define TPC0_QM_GLBL_SECURE_PROPS_ASID_SHIFT 0
+#define TPC0_QM_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_SECURE_PROPS_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_SECURE_PROPS_MMBP_MASK 0x400
+
+/* TPC0_QM_GLBL_NON_SECURE_PROPS */
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
+
+/* TPC0_QM_GLBL_STS0 */
+#define TPC0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define TPC0_QM_GLBL_STS0_PQF_IDLE_MASK 0x1
+#define TPC0_QM_GLBL_STS0_CQF_IDLE_SHIFT 1
+#define TPC0_QM_GLBL_STS0_CQF_IDLE_MASK 0x2
+#define TPC0_QM_GLBL_STS0_CP_IDLE_SHIFT 2
+#define TPC0_QM_GLBL_STS0_CP_IDLE_MASK 0x4
+#define TPC0_QM_GLBL_STS0_DMA_IDLE_SHIFT 3
+#define TPC0_QM_GLBL_STS0_DMA_IDLE_MASK 0x8
+#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 4
+#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0x10
+#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 5
+#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x20
+#define TPC0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 6
+#define TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x40
+#define TPC0_QM_GLBL_STS0_DMA_IS_STOP_SHIFT 7
+#define TPC0_QM_GLBL_STS0_DMA_IS_STOP_MASK 0x80
+
+/* TPC0_QM_GLBL_STS1 */
+#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define TPC0_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define TPC0_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define TPC0_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define TPC0_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define TPC0_QM_GLBL_STS1_DMA_RD_ERR_SHIFT 8
+#define TPC0_QM_GLBL_STS1_DMA_RD_ERR_MASK 0x100
+#define TPC0_QM_GLBL_STS1_DMA_WR_ERR_SHIFT 9
+#define TPC0_QM_GLBL_STS1_DMA_WR_ERR_MASK 0x200
+#define TPC0_QM_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
+#define TPC0_QM_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
+#define TPC0_QM_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
+#define TPC0_QM_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
+
+/* TPC0_QM_PQ_BASE_LO */
+#define TPC0_QM_PQ_BASE_LO_VAL_SHIFT 0
+#define TPC0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_BASE_HI */
+#define TPC0_QM_PQ_BASE_HI_VAL_SHIFT 0
+#define TPC0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_SIZE */
+#define TPC0_QM_PQ_SIZE_VAL_SHIFT 0
+#define TPC0_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_PI */
+#define TPC0_QM_PQ_PI_VAL_SHIFT 0
+#define TPC0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_CI */
+#define TPC0_QM_PQ_CI_VAL_SHIFT 0
+#define TPC0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_CFG0 */
+#define TPC0_QM_PQ_CFG0_RESERVED_SHIFT 0
+#define TPC0_QM_PQ_CFG0_RESERVED_MASK 0x1
+
+/* TPC0_QM_PQ_CFG1 */
+#define TPC0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define TPC0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
0xFFFF0000 + +/* TPC0_QM_PQ_ARUSER */ +#define TPC0_QM_PQ_ARUSER_NOSNOOP_SHIFT 0 +#define TPC0_QM_PQ_ARUSER_NOSNOOP_MASK 0x1 +#define TPC0_QM_PQ_ARUSER_WORD_SHIFT 1 +#define TPC0_QM_PQ_ARUSER_WORD_MASK 0x2 + +/* TPC0_QM_PQ_PUSH0 */ +#define TPC0_QM_PQ_PUSH0_PTR_LO_SHIFT 0 +#define TPC0_QM_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_PUSH1 */ +#define TPC0_QM_PQ_PUSH1_PTR_HI_SHIFT 0 +#define TPC0_QM_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_PUSH2 */ +#define TPC0_QM_PQ_PUSH2_TSIZE_SHIFT 0 +#define TPC0_QM_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_PUSH3 */ +#define TPC0_QM_PQ_PUSH3_RPT_SHIFT 0 +#define TPC0_QM_PQ_PUSH3_RPT_MASK 0xFFFF +#define TPC0_QM_PQ_PUSH3_CTL_SHIFT 16 +#define TPC0_QM_PQ_PUSH3_CTL_MASK 0xFFFF0000 + +/* TPC0_QM_PQ_STS0 */ +#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0 +#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF +#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16 +#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000 + +/* TPC0_QM_PQ_STS1 */ +#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0 +#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF +#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30 +#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000 +#define TPC0_QM_PQ_STS1_PQ_BUSY_SHIFT 31 +#define TPC0_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000 + +/* TPC0_QM_PQ_RD_RATE_LIM_EN */ +#define TPC0_QM_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define TPC0_QM_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN */ +#define TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* TPC0_QM_PQ_RD_RATE_LIM_SAT */ +#define TPC0_QM_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define TPC0_QM_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* TPC0_QM_PQ_RD_RATE_LIM_TOUT */ +#define TPC0_QM_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define TPC0_QM_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* TPC0_QM_CQ_CFG0 */ +#define TPC0_QM_CQ_CFG0_RESERVED_SHIFT 0 +#define TPC0_QM_CQ_CFG0_RESERVED_MASK 0x1 + +/* TPC0_QM_CQ_CFG1 */ +#define TPC0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0 +#define TPC0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_ARUSER */ +#define TPC0_QM_CQ_ARUSER_NOSNOOP_SHIFT 0 +#define TPC0_QM_CQ_ARUSER_NOSNOOP_MASK 0x1 +#define TPC0_QM_CQ_ARUSER_WORD_SHIFT 1 +#define TPC0_QM_CQ_ARUSER_WORD_MASK 0x2 + +/* TPC0_QM_CQ_PTR_LO */ +#define TPC0_QM_CQ_PTR_LO_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_PTR_HI */ +#define TPC0_QM_CQ_PTR_HI_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_TSIZE */ +#define TPC0_QM_CQ_TSIZE_VAL_SHIFT 0 +#define TPC0_QM_CQ_TSIZE_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_CTL */ +#define TPC0_QM_CQ_CTL_RPT_SHIFT 0 +#define TPC0_QM_CQ_CTL_RPT_MASK 0xFFFF +#define TPC0_QM_CQ_CTL_CTL_SHIFT 16 +#define TPC0_QM_CQ_CTL_CTL_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_PTR_LO_STS */ +#define TPC0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_PTR_HI_STS */ +#define TPC0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_TSIZE_STS */ +#define TPC0_QM_CQ_TSIZE_STS_VAL_SHIFT 0 +#define TPC0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_CTL_STS */ +#define TPC0_QM_CQ_CTL_STS_RPT_SHIFT 0 +#define TPC0_QM_CQ_CTL_STS_RPT_MASK 0xFFFF +#define TPC0_QM_CQ_CTL_STS_CTL_SHIFT 16 +#define TPC0_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_STS0 */ +#define TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0 +#define 
TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF +#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16 +#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_STS1 */ +#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0 +#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF +#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30 +#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000 +#define TPC0_QM_CQ_STS1_CQ_BUSY_SHIFT 31 +#define TPC0_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000 + +/* TPC0_QM_CQ_RD_RATE_LIM_EN */ +#define TPC0_QM_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define TPC0_QM_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN */ +#define TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* TPC0_QM_CQ_RD_RATE_LIM_SAT */ +#define TPC0_QM_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define TPC0_QM_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* TPC0_QM_CQ_RD_RATE_LIM_TOUT */ +#define TPC0_QM_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define TPC0_QM_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* TPC0_QM_CQ_IFIFO_CNT */ +#define TPC0_QM_CQ_IFIFO_CNT_VAL_SHIFT 0 +#define TPC0_QM_CQ_IFIFO_CNT_VAL_MASK 0x3 + +/* TPC0_QM_CP_MSG_BASE0_ADDR_LO */ +#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE0_ADDR_HI */ +#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE1_ADDR_LO */ +#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE1_ADDR_HI */ +#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE2_ADDR_LO */ +#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE2_ADDR_HI */ +#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE3_ADDR_LO */ +#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE3_ADDR_HI */ +#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_TSIZE_OFFSET */ +#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */ +#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET */ +#define TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET */ +#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET */ +#define TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_COMMIT_OFFSET */ +#define TPC0_QM_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_FENCE0_RDATA */ +#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF + +/* TPC0_QM_CP_FENCE1_RDATA */ +#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF + +/* TPC0_QM_CP_FENCE2_RDATA */ 
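(A minimal editorial sketch, not part of the patch: every field in these auto-generated headers is described by a _SHIFT/_MASK pair, so software reads the 32-bit register, ANDs with the mask, and shifts right to recover the field. The illustration below uses only the TPC0_QM definitions from this file; it assumes tpc0_qm_masks.h is included and that the raw register values were obtained through the driver's own MMIO read helpers, which live outside these headers.)

#include <linux/types.h>

/* True when GLBL_STS0 reports the PQF, CQF, CP and DMA engines all idle. */
static bool tpc0_qm_is_idle(u32 glbl_sts0)
{
	u32 idle = TPC0_QM_GLBL_STS0_PQF_IDLE_MASK |
		   TPC0_QM_GLBL_STS0_CQF_IDLE_MASK |
		   TPC0_QM_GLBL_STS0_CP_IDLE_MASK |
		   TPC0_QM_GLBL_STS0_DMA_IDLE_MASK;

	return (glbl_sts0 & idle) == idle;
}

/* Extract the 16-bit free-entry count from PQ_STS0 (bits 31:16). */
static u32 tpc0_qm_pq_free_cnt(u32 pq_sts0)
{
	return (pq_sts0 & TPC0_QM_PQ_STS0_PQ_FREE_CNT_MASK) >>
	       TPC0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT;
}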
+#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF + +/* TPC0_QM_CP_FENCE3_RDATA */ +#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF + +/* TPC0_QM_CP_FENCE0_CNT */ +#define TPC0_QM_CP_FENCE0_CNT_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE0_CNT_VAL_MASK 0xFF + +/* TPC0_QM_CP_FENCE1_CNT */ +#define TPC0_QM_CP_FENCE1_CNT_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE1_CNT_VAL_MASK 0xFF + +/* TPC0_QM_CP_FENCE2_CNT */ +#define TPC0_QM_CP_FENCE2_CNT_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE2_CNT_VAL_MASK 0xFF + +/* TPC0_QM_CP_FENCE3_CNT */ +#define TPC0_QM_CP_FENCE3_CNT_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE3_CNT_VAL_MASK 0xFF + +/* TPC0_QM_CP_STS */ +#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0 +#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF +#define TPC0_QM_CP_STS_ERDY_SHIFT 16 +#define TPC0_QM_CP_STS_ERDY_MASK 0x10000 +#define TPC0_QM_CP_STS_RRDY_SHIFT 17 +#define TPC0_QM_CP_STS_RRDY_MASK 0x20000 +#define TPC0_QM_CP_STS_MRDY_SHIFT 18 +#define TPC0_QM_CP_STS_MRDY_MASK 0x40000 +#define TPC0_QM_CP_STS_SW_STOP_SHIFT 19 +#define TPC0_QM_CP_STS_SW_STOP_MASK 0x80000 +#define TPC0_QM_CP_STS_FENCE_ID_SHIFT 20 +#define TPC0_QM_CP_STS_FENCE_ID_MASK 0x300000 +#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22 +#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000 + +/* TPC0_QM_CP_CURRENT_INST_LO */ +#define TPC0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_CURRENT_INST_HI */ +#define TPC0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0 +#define TPC0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_BARRIER_CFG */ +#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0 +#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF + +/* TPC0_QM_CP_DBG_0 */ +#define TPC0_QM_CP_DBG_0_VAL_SHIFT 0 +#define TPC0_QM_CP_DBG_0_VAL_MASK 0xFF + +/* TPC0_QM_PQ_BUF_ADDR */ +#define TPC0_QM_PQ_BUF_ADDR_VAL_SHIFT 0 +#define TPC0_QM_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_BUF_RDATA */ +#define TPC0_QM_PQ_BUF_RDATA_VAL_SHIFT 0 +#define TPC0_QM_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_BUF_ADDR */ +#define TPC0_QM_CQ_BUF_ADDR_VAL_SHIFT 0 +#define TPC0_QM_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_BUF_RDATA */ +#define TPC0_QM_CQ_BUF_RDATA_VAL_SHIFT 0 +#define TPC0_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +#endif /* ASIC_REG_TPC0_QM_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h new file mode 100644 index 000000000..4fa09eb88 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_QM_REGS_H_ +#define ASIC_REG_TPC0_QM_REGS_H_ + +/* + ***************************************** + * TPC0_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC0_QM_GLBL_CFG0 0xE08000 + +#define mmTPC0_QM_GLBL_CFG1 0xE08004 + +#define mmTPC0_QM_GLBL_PROT 0xE08008 + +#define mmTPC0_QM_GLBL_ERR_CFG 0xE0800C + +#define mmTPC0_QM_GLBL_ERR_ADDR_LO 0xE08010 + +#define mmTPC0_QM_GLBL_ERR_ADDR_HI 0xE08014 + +#define mmTPC0_QM_GLBL_ERR_WDATA 0xE08018 + +#define mmTPC0_QM_GLBL_SECURE_PROPS 0xE0801C + +#define mmTPC0_QM_GLBL_NON_SECURE_PROPS 0xE08020 + +#define mmTPC0_QM_GLBL_STS0 0xE08024 + +#define mmTPC0_QM_GLBL_STS1 0xE08028 + +#define mmTPC0_QM_PQ_BASE_LO 0xE08060 + +#define mmTPC0_QM_PQ_BASE_HI 0xE08064 + +#define mmTPC0_QM_PQ_SIZE 0xE08068 + +#define mmTPC0_QM_PQ_PI 0xE0806C + +#define mmTPC0_QM_PQ_CI 0xE08070 + +#define mmTPC0_QM_PQ_CFG0 0xE08074 + +#define mmTPC0_QM_PQ_CFG1 0xE08078 + +#define mmTPC0_QM_PQ_ARUSER 0xE0807C + +#define mmTPC0_QM_PQ_PUSH0 0xE08080 + +#define mmTPC0_QM_PQ_PUSH1 0xE08084 + +#define mmTPC0_QM_PQ_PUSH2 0xE08088 + +#define mmTPC0_QM_PQ_PUSH3 0xE0808C + +#define mmTPC0_QM_PQ_STS0 0xE08090 + +#define mmTPC0_QM_PQ_STS1 0xE08094 + +#define mmTPC0_QM_PQ_RD_RATE_LIM_EN 0xE080A0 + +#define mmTPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE080A4 + +#define mmTPC0_QM_PQ_RD_RATE_LIM_SAT 0xE080A8 + +#define mmTPC0_QM_PQ_RD_RATE_LIM_TOUT 0xE080AC + +#define mmTPC0_QM_CQ_CFG0 0xE080B0 + +#define mmTPC0_QM_CQ_CFG1 0xE080B4 + +#define mmTPC0_QM_CQ_ARUSER 0xE080B8 + +#define mmTPC0_QM_CQ_PTR_LO 0xE080C0 + +#define mmTPC0_QM_CQ_PTR_HI 0xE080C4 + +#define mmTPC0_QM_CQ_TSIZE 0xE080C8 + +#define mmTPC0_QM_CQ_CTL 0xE080CC + +#define mmTPC0_QM_CQ_PTR_LO_STS 0xE080D4 + +#define mmTPC0_QM_CQ_PTR_HI_STS 0xE080D8 + +#define mmTPC0_QM_CQ_TSIZE_STS 0xE080DC + +#define mmTPC0_QM_CQ_CTL_STS 0xE080E0 + +#define mmTPC0_QM_CQ_STS0 0xE080E4 + +#define mmTPC0_QM_CQ_STS1 0xE080E8 + +#define mmTPC0_QM_CQ_RD_RATE_LIM_EN 0xE080F0 + +#define mmTPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE080F4 + +#define mmTPC0_QM_CQ_RD_RATE_LIM_SAT 0xE080F8 + +#define mmTPC0_QM_CQ_RD_RATE_LIM_TOUT 0xE080FC + +#define mmTPC0_QM_CQ_IFIFO_CNT 0xE08108 + +#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO 0xE08120 + +#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI 0xE08124 + +#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO 0xE08128 + +#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI 0xE0812C + +#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO 0xE08130 + +#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI 0xE08134 + +#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO 0xE08138 + +#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI 0xE0813C + +#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET 0xE08140 + +#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE08144 + +#define mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE08148 + +#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE0814C + +#define mmTPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE08150 + +#define mmTPC0_QM_CP_LDMA_COMMIT_OFFSET 0xE08154 + +#define mmTPC0_QM_CP_FENCE0_RDATA 0xE08158 + +#define mmTPC0_QM_CP_FENCE1_RDATA 0xE0815C + +#define mmTPC0_QM_CP_FENCE2_RDATA 0xE08160 + +#define mmTPC0_QM_CP_FENCE3_RDATA 0xE08164 + +#define mmTPC0_QM_CP_FENCE0_CNT 0xE08168 + +#define mmTPC0_QM_CP_FENCE1_CNT 0xE0816C + +#define mmTPC0_QM_CP_FENCE2_CNT 0xE08170 + +#define mmTPC0_QM_CP_FENCE3_CNT 0xE08174 + +#define mmTPC0_QM_CP_STS 0xE08178 + +#define mmTPC0_QM_CP_CURRENT_INST_LO 0xE0817C + +#define 
mmTPC0_QM_CP_CURRENT_INST_HI 0xE08180 + +#define mmTPC0_QM_CP_BARRIER_CFG 0xE08184 + +#define mmTPC0_QM_CP_DBG_0 0xE08188 + +#define mmTPC0_QM_PQ_BUF_ADDR 0xE08300 + +#define mmTPC0_QM_PQ_BUF_RDATA 0xE08304 + +#define mmTPC0_QM_CQ_BUF_ADDR 0xE08308 + +#define mmTPC0_QM_CQ_BUF_RDATA 0xE0830C + +#endif /* ASIC_REG_TPC0_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h new file mode 100644 index 000000000..928eef180 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h @@ -0,0 +1,886 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC1_CFG_REGS_H_ +#define ASIC_REG_TPC1_CFG_REGS_H_ + +/* + ***************************************** + * TPC1_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE46400 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE46404 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE46408 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE4640C + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE46410 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE46414 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE46418 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE4641C + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE46420 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE46424 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE46428 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE4642C + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE46430 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE46434 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE46438 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE4643C + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE46440 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE46444 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE46448 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE4644C + +#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE46450 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE46454 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE46458 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE4645C + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE46460 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE46464 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE46468 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE4646C + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE46470 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE46474 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE46478 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE4647C + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE46480 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE46484 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE46488 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE4648C + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE46490 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE46494 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE46498 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE4649C + +#define 
mmTPC1_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE464A0 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE464A4 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE464A8 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE464AC + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE464B0 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE464B4 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE464B8 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE464BC + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE464C0 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE464C4 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE464C8 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE464CC + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE464D0 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE464D4 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE464D8 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE464DC + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE464E0 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE464E4 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE464E8 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE464EC + +#define mmTPC1_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE464F0 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE464F4 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE464F8 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE464FC + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE46500 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE46504 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE46508 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE4650C + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE46510 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE46514 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE46518 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE4651C + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE46520 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE46524 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE46528 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE4652C + +#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE46530 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE46534 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE46538 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE4653C + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE46540 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE46544 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE46548 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE4654C + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE46550 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE46554 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE46558 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE4655C + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE46560 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE46564 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE46568 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE4656C + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE46570 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE46574 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE46578 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE4657C + +#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE46580 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE46584 + +#define 
mmTPC1_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE46588 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE4658C + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE46590 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE46594 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE46598 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE4659C + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE465A0 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE465A4 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE465A8 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE465AC + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE465B0 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE465B4 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE465B8 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE465BC + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE465C0 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE465C4 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE465C8 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE465CC + +#define mmTPC1_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE465D0 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE465D4 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE465D8 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE465DC + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE465E0 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE465E4 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE465E8 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE465EC + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE465F0 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE465F4 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE465F8 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE465FC + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE46600 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE46604 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE46608 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE4660C + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE46610 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE46614 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE46618 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE4661C + +#define mmTPC1_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE46620 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE46624 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE46628 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE4662C + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE46630 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE46634 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE46638 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE4663C + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE46640 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE46644 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE46648 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE4664C + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE46650 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE46654 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE46658 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE4665C + +#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE46660 + +#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE46664 + +#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_0 0xE46668 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_0 0xE4666C + +#define 
mmTPC1_CFG_KERNEL_TID_BASE_DIM_1 0xE46670 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_1 0xE46674 + +#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_2 0xE46678 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_2 0xE4667C + +#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_3 0xE46680 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_3 0xE46684 + +#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_4 0xE46688 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_4 0xE4668C + +#define mmTPC1_CFG_KERNEL_SRF_0 0xE46690 + +#define mmTPC1_CFG_KERNEL_SRF_1 0xE46694 + +#define mmTPC1_CFG_KERNEL_SRF_2 0xE46698 + +#define mmTPC1_CFG_KERNEL_SRF_3 0xE4669C + +#define mmTPC1_CFG_KERNEL_SRF_4 0xE466A0 + +#define mmTPC1_CFG_KERNEL_SRF_5 0xE466A4 + +#define mmTPC1_CFG_KERNEL_SRF_6 0xE466A8 + +#define mmTPC1_CFG_KERNEL_SRF_7 0xE466AC + +#define mmTPC1_CFG_KERNEL_SRF_8 0xE466B0 + +#define mmTPC1_CFG_KERNEL_SRF_9 0xE466B4 + +#define mmTPC1_CFG_KERNEL_SRF_10 0xE466B8 + +#define mmTPC1_CFG_KERNEL_SRF_11 0xE466BC + +#define mmTPC1_CFG_KERNEL_SRF_12 0xE466C0 + +#define mmTPC1_CFG_KERNEL_SRF_13 0xE466C4 + +#define mmTPC1_CFG_KERNEL_SRF_14 0xE466C8 + +#define mmTPC1_CFG_KERNEL_SRF_15 0xE466CC + +#define mmTPC1_CFG_KERNEL_SRF_16 0xE466D0 + +#define mmTPC1_CFG_KERNEL_SRF_17 0xE466D4 + +#define mmTPC1_CFG_KERNEL_SRF_18 0xE466D8 + +#define mmTPC1_CFG_KERNEL_SRF_19 0xE466DC + +#define mmTPC1_CFG_KERNEL_SRF_20 0xE466E0 + +#define mmTPC1_CFG_KERNEL_SRF_21 0xE466E4 + +#define mmTPC1_CFG_KERNEL_SRF_22 0xE466E8 + +#define mmTPC1_CFG_KERNEL_SRF_23 0xE466EC + +#define mmTPC1_CFG_KERNEL_SRF_24 0xE466F0 + +#define mmTPC1_CFG_KERNEL_SRF_25 0xE466F4 + +#define mmTPC1_CFG_KERNEL_SRF_26 0xE466F8 + +#define mmTPC1_CFG_KERNEL_SRF_27 0xE466FC + +#define mmTPC1_CFG_KERNEL_SRF_28 0xE46700 + +#define mmTPC1_CFG_KERNEL_SRF_29 0xE46704 + +#define mmTPC1_CFG_KERNEL_SRF_30 0xE46708 + +#define mmTPC1_CFG_KERNEL_SRF_31 0xE4670C + +#define mmTPC1_CFG_KERNEL_KERNEL_CONFIG 0xE46710 + +#define mmTPC1_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE46714 + +#define mmTPC1_CFG_RESERVED_DESC_END 0xE46738 + +#define mmTPC1_CFG_ROUND_CSR 0xE467FC + +#define mmTPC1_CFG_TBUF_BASE_ADDR_LOW 0xE46800 + +#define mmTPC1_CFG_TBUF_BASE_ADDR_HIGH 0xE46804 + +#define mmTPC1_CFG_SEMAPHORE 0xE46808 + +#define mmTPC1_CFG_VFLAGS 0xE4680C + +#define mmTPC1_CFG_SFLAGS 0xE46810 + +#define mmTPC1_CFG_LFSR_POLYNOM 0xE46818 + +#define mmTPC1_CFG_STATUS 0xE4681C + +#define mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH 0xE46820 + +#define mmTPC1_CFG_CFG_SUBTRACT_VALUE 0xE46824 + +#define mmTPC1_CFG_SM_BASE_ADDRESS_LOW 0xE46828 + +#define mmTPC1_CFG_SM_BASE_ADDRESS_HIGH 0xE4682C + +#define mmTPC1_CFG_TPC_CMD 0xE46830 + +#define mmTPC1_CFG_TPC_EXECUTE 0xE46838 + +#define mmTPC1_CFG_TPC_STALL 0xE4683C + +#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_LOW 0xE46840 + +#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE46844 + +#define mmTPC1_CFG_MSS_CONFIG 0xE46854 + +#define mmTPC1_CFG_TPC_INTR_CAUSE 0xE46858 + +#define mmTPC1_CFG_TPC_INTR_MASK 0xE4685C + +#define mmTPC1_CFG_TSB_CONFIG 0xE46860 + +#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE46A00 + +#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE46A04 + +#define mmTPC1_CFG_QM_TENSOR_0_PADDING_VALUE 0xE46A08 + +#define mmTPC1_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE46A0C + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE46A10 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE46A14 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE46A18 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE46A1C + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE46A20 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE46A24 + +#define 
mmTPC1_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE46A28 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE46A2C + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE46A30 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE46A34 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE46A38 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE46A3C + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE46A40 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE46A44 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE46A48 + +#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE46A4C + +#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE46A50 + +#define mmTPC1_CFG_QM_TENSOR_1_PADDING_VALUE 0xE46A54 + +#define mmTPC1_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE46A58 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE46A5C + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE46A60 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE46A64 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE46A68 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE46A6C + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE46A70 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE46A74 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE46A78 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE46A7C + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE46A80 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE46A84 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE46A88 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE46A8C + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE46A90 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE46A94 + +#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE46A98 + +#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE46A9C + +#define mmTPC1_CFG_QM_TENSOR_2_PADDING_VALUE 0xE46AA0 + +#define mmTPC1_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE46AA4 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE46AA8 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE46AAC + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE46AB0 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE46AB4 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE46AB8 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE46ABC + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE46AC0 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE46AC4 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE46AC8 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE46ACC + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE46AD0 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE46AD4 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE46AD8 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE46ADC + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE46AE0 + +#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE46AE4 + +#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE46AE8 + +#define mmTPC1_CFG_QM_TENSOR_3_PADDING_VALUE 0xE46AEC + +#define mmTPC1_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE46AF0 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE46AF4 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE46AF8 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE46AFC + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE46B00 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE46B04 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE46B08 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE46B0C + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE46B10 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE46B14 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE46B18 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE46B1C + +#define 
mmTPC1_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE46B20 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE46B24 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE46B28 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE46B2C + +#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE46B30 + +#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE46B34 + +#define mmTPC1_CFG_QM_TENSOR_4_PADDING_VALUE 0xE46B38 + +#define mmTPC1_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE46B3C + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE46B40 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE46B44 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE46B48 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE46B4C + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE46B50 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE46B54 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE46B58 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE46B5C + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE46B60 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE46B64 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE46B68 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE46B6C + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE46B70 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE46B74 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE46B78 + +#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE46B7C + +#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE46B80 + +#define mmTPC1_CFG_QM_TENSOR_5_PADDING_VALUE 0xE46B84 + +#define mmTPC1_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE46B88 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE46B8C + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE46B90 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE46B94 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE46B98 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE46B9C + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE46BA0 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE46BA4 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE46BA8 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE46BAC + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE46BB0 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE46BB4 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE46BB8 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE46BBC + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE46BC0 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE46BC4 + +#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE46BC8 + +#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE46BCC + +#define mmTPC1_CFG_QM_TENSOR_6_PADDING_VALUE 0xE46BD0 + +#define mmTPC1_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE46BD4 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE46BD8 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE46BDC + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE46BE0 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE46BE4 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE46BE8 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE46BEC + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE46BF0 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE46BF4 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE46BF8 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE46BFC + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE46C00 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE46C04 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE46C08 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE46C0C + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE46C10 + +#define mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE46C14 + +#define 
mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE46C18 + +#define mmTPC1_CFG_QM_TENSOR_7_PADDING_VALUE 0xE46C1C + +#define mmTPC1_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE46C20 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE46C24 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE46C28 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE46C2C + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE46C30 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE46C34 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE46C38 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE46C3C + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE46C40 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE46C44 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE46C48 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE46C4C + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE46C50 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE46C54 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE46C58 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE46C5C + +#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE46C60 + +#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE46C64 + +#define mmTPC1_CFG_QM_TID_BASE_DIM_0 0xE46C68 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_0 0xE46C6C + +#define mmTPC1_CFG_QM_TID_BASE_DIM_1 0xE46C70 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_1 0xE46C74 + +#define mmTPC1_CFG_QM_TID_BASE_DIM_2 0xE46C78 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_2 0xE46C7C + +#define mmTPC1_CFG_QM_TID_BASE_DIM_3 0xE46C80 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_3 0xE46C84 + +#define mmTPC1_CFG_QM_TID_BASE_DIM_4 0xE46C88 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_4 0xE46C8C + +#define mmTPC1_CFG_QM_SRF_0 0xE46C90 + +#define mmTPC1_CFG_QM_SRF_1 0xE46C94 + +#define mmTPC1_CFG_QM_SRF_2 0xE46C98 + +#define mmTPC1_CFG_QM_SRF_3 0xE46C9C + +#define mmTPC1_CFG_QM_SRF_4 0xE46CA0 + +#define mmTPC1_CFG_QM_SRF_5 0xE46CA4 + +#define mmTPC1_CFG_QM_SRF_6 0xE46CA8 + +#define mmTPC1_CFG_QM_SRF_7 0xE46CAC + +#define mmTPC1_CFG_QM_SRF_8 0xE46CB0 + +#define mmTPC1_CFG_QM_SRF_9 0xE46CB4 + +#define mmTPC1_CFG_QM_SRF_10 0xE46CB8 + +#define mmTPC1_CFG_QM_SRF_11 0xE46CBC + +#define mmTPC1_CFG_QM_SRF_12 0xE46CC0 + +#define mmTPC1_CFG_QM_SRF_13 0xE46CC4 + +#define mmTPC1_CFG_QM_SRF_14 0xE46CC8 + +#define mmTPC1_CFG_QM_SRF_15 0xE46CCC + +#define mmTPC1_CFG_QM_SRF_16 0xE46CD0 + +#define mmTPC1_CFG_QM_SRF_17 0xE46CD4 + +#define mmTPC1_CFG_QM_SRF_18 0xE46CD8 + +#define mmTPC1_CFG_QM_SRF_19 0xE46CDC + +#define mmTPC1_CFG_QM_SRF_20 0xE46CE0 + +#define mmTPC1_CFG_QM_SRF_21 0xE46CE4 + +#define mmTPC1_CFG_QM_SRF_22 0xE46CE8 + +#define mmTPC1_CFG_QM_SRF_23 0xE46CEC + +#define mmTPC1_CFG_QM_SRF_24 0xE46CF0 + +#define mmTPC1_CFG_QM_SRF_25 0xE46CF4 + +#define mmTPC1_CFG_QM_SRF_26 0xE46CF8 + +#define mmTPC1_CFG_QM_SRF_27 0xE46CFC + +#define mmTPC1_CFG_QM_SRF_28 0xE46D00 + +#define mmTPC1_CFG_QM_SRF_29 0xE46D04 + +#define mmTPC1_CFG_QM_SRF_30 0xE46D08 + +#define mmTPC1_CFG_QM_SRF_31 0xE46D0C + +#define mmTPC1_CFG_QM_KERNEL_CONFIG 0xE46D10 + +#define mmTPC1_CFG_QM_SYNC_OBJECT_MESSAGE 0xE46D14 + +#define mmTPC1_CFG_ARUSER 0xE46D18 + +#define mmTPC1_CFG_AWUSER 0xE46D1C + +#define mmTPC1_CFG_FUNC_MBIST_CNTRL 0xE46E00 + +#define mmTPC1_CFG_FUNC_MBIST_PAT 0xE46E04 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_0 0xE46E08 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_1 0xE46E0C + +#define mmTPC1_CFG_FUNC_MBIST_MEM_2 0xE46E10 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_3 0xE46E14 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_4 0xE46E18 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_5 0xE46E1C + +#define mmTPC1_CFG_FUNC_MBIST_MEM_6 0xE46E20 + +#define 
mmTPC1_CFG_FUNC_MBIST_MEM_7 0xE46E24 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_8 0xE46E28 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_9 0xE46E2C + +#endif /* ASIC_REG_TPC1_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h new file mode 100644 index 000000000..30ae0f307 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC1_CMDQ_REGS_H_ +#define ASIC_REG_TPC1_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC1_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC1_CMDQ_GLBL_CFG0 0xE49000 + +#define mmTPC1_CMDQ_GLBL_CFG1 0xE49004 + +#define mmTPC1_CMDQ_GLBL_PROT 0xE49008 + +#define mmTPC1_CMDQ_GLBL_ERR_CFG 0xE4900C + +#define mmTPC1_CMDQ_GLBL_ERR_ADDR_LO 0xE49010 + +#define mmTPC1_CMDQ_GLBL_ERR_ADDR_HI 0xE49014 + +#define mmTPC1_CMDQ_GLBL_ERR_WDATA 0xE49018 + +#define mmTPC1_CMDQ_GLBL_SECURE_PROPS 0xE4901C + +#define mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS 0xE49020 + +#define mmTPC1_CMDQ_GLBL_STS0 0xE49024 + +#define mmTPC1_CMDQ_GLBL_STS1 0xE49028 + +#define mmTPC1_CMDQ_CQ_CFG0 0xE490B0 + +#define mmTPC1_CMDQ_CQ_CFG1 0xE490B4 + +#define mmTPC1_CMDQ_CQ_ARUSER 0xE490B8 + +#define mmTPC1_CMDQ_CQ_PTR_LO 0xE490C0 + +#define mmTPC1_CMDQ_CQ_PTR_HI 0xE490C4 + +#define mmTPC1_CMDQ_CQ_TSIZE 0xE490C8 + +#define mmTPC1_CMDQ_CQ_CTL 0xE490CC + +#define mmTPC1_CMDQ_CQ_PTR_LO_STS 0xE490D4 + +#define mmTPC1_CMDQ_CQ_PTR_HI_STS 0xE490D8 + +#define mmTPC1_CMDQ_CQ_TSIZE_STS 0xE490DC + +#define mmTPC1_CMDQ_CQ_CTL_STS 0xE490E0 + +#define mmTPC1_CMDQ_CQ_STS0 0xE490E4 + +#define mmTPC1_CMDQ_CQ_STS1 0xE490E8 + +#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_EN 0xE490F0 + +#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE490F4 + +#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_SAT 0xE490F8 + +#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE490FC + +#define mmTPC1_CMDQ_CQ_IFIFO_CNT 0xE49108 + +#define mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE49120 + +#define mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE49124 + +#define mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE49128 + +#define mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE4912C + +#define mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE49130 + +#define mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE49134 + +#define mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE49138 + +#define mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE4913C + +#define mmTPC1_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE49140 + +#define mmTPC1_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE49144 + +#define mmTPC1_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE49148 + +#define mmTPC1_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE4914C + +#define mmTPC1_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE49150 + +#define mmTPC1_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE49154 + +#define mmTPC1_CMDQ_CP_FENCE0_RDATA 0xE49158 + +#define mmTPC1_CMDQ_CP_FENCE1_RDATA 0xE4915C + +#define mmTPC1_CMDQ_CP_FENCE2_RDATA 0xE49160 + +#define mmTPC1_CMDQ_CP_FENCE3_RDATA 0xE49164 + +#define mmTPC1_CMDQ_CP_FENCE0_CNT 0xE49168 + +#define mmTPC1_CMDQ_CP_FENCE1_CNT 0xE4916C + +#define mmTPC1_CMDQ_CP_FENCE2_CNT 0xE49170 + +#define mmTPC1_CMDQ_CP_FENCE3_CNT 0xE49174 + +#define mmTPC1_CMDQ_CP_STS 0xE49178 + +#define mmTPC1_CMDQ_CP_CURRENT_INST_LO 0xE4917C + +#define mmTPC1_CMDQ_CP_CURRENT_INST_HI 0xE49180 + +#define 
mmTPC1_CMDQ_CP_BARRIER_CFG 0xE49184 + +#define mmTPC1_CMDQ_CP_DBG_0 0xE49188 + +#define mmTPC1_CMDQ_CQ_BUF_ADDR 0xE49308 + +#define mmTPC1_CMDQ_CQ_BUF_RDATA 0xE4930C + +#endif /* ASIC_REG_TPC1_CMDQ_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h new file mode 100644 index 000000000..b95de4f95 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC1_QM_REGS_H_ +#define ASIC_REG_TPC1_QM_REGS_H_ + +/* + ***************************************** + * TPC1_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC1_QM_GLBL_CFG0 0xE48000 + +#define mmTPC1_QM_GLBL_CFG1 0xE48004 + +#define mmTPC1_QM_GLBL_PROT 0xE48008 + +#define mmTPC1_QM_GLBL_ERR_CFG 0xE4800C + +#define mmTPC1_QM_GLBL_ERR_ADDR_LO 0xE48010 + +#define mmTPC1_QM_GLBL_ERR_ADDR_HI 0xE48014 + +#define mmTPC1_QM_GLBL_ERR_WDATA 0xE48018 + +#define mmTPC1_QM_GLBL_SECURE_PROPS 0xE4801C + +#define mmTPC1_QM_GLBL_NON_SECURE_PROPS 0xE48020 + +#define mmTPC1_QM_GLBL_STS0 0xE48024 + +#define mmTPC1_QM_GLBL_STS1 0xE48028 + +#define mmTPC1_QM_PQ_BASE_LO 0xE48060 + +#define mmTPC1_QM_PQ_BASE_HI 0xE48064 + +#define mmTPC1_QM_PQ_SIZE 0xE48068 + +#define mmTPC1_QM_PQ_PI 0xE4806C + +#define mmTPC1_QM_PQ_CI 0xE48070 + +#define mmTPC1_QM_PQ_CFG0 0xE48074 + +#define mmTPC1_QM_PQ_CFG1 0xE48078 + +#define mmTPC1_QM_PQ_ARUSER 0xE4807C + +#define mmTPC1_QM_PQ_PUSH0 0xE48080 + +#define mmTPC1_QM_PQ_PUSH1 0xE48084 + +#define mmTPC1_QM_PQ_PUSH2 0xE48088 + +#define mmTPC1_QM_PQ_PUSH3 0xE4808C + +#define mmTPC1_QM_PQ_STS0 0xE48090 + +#define mmTPC1_QM_PQ_STS1 0xE48094 + +#define mmTPC1_QM_PQ_RD_RATE_LIM_EN 0xE480A0 + +#define mmTPC1_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE480A4 + +#define mmTPC1_QM_PQ_RD_RATE_LIM_SAT 0xE480A8 + +#define mmTPC1_QM_PQ_RD_RATE_LIM_TOUT 0xE480AC + +#define mmTPC1_QM_CQ_CFG0 0xE480B0 + +#define mmTPC1_QM_CQ_CFG1 0xE480B4 + +#define mmTPC1_QM_CQ_ARUSER 0xE480B8 + +#define mmTPC1_QM_CQ_PTR_LO 0xE480C0 + +#define mmTPC1_QM_CQ_PTR_HI 0xE480C4 + +#define mmTPC1_QM_CQ_TSIZE 0xE480C8 + +#define mmTPC1_QM_CQ_CTL 0xE480CC + +#define mmTPC1_QM_CQ_PTR_LO_STS 0xE480D4 + +#define mmTPC1_QM_CQ_PTR_HI_STS 0xE480D8 + +#define mmTPC1_QM_CQ_TSIZE_STS 0xE480DC + +#define mmTPC1_QM_CQ_CTL_STS 0xE480E0 + +#define mmTPC1_QM_CQ_STS0 0xE480E4 + +#define mmTPC1_QM_CQ_STS1 0xE480E8 + +#define mmTPC1_QM_CQ_RD_RATE_LIM_EN 0xE480F0 + +#define mmTPC1_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE480F4 + +#define mmTPC1_QM_CQ_RD_RATE_LIM_SAT 0xE480F8 + +#define mmTPC1_QM_CQ_RD_RATE_LIM_TOUT 0xE480FC + +#define mmTPC1_QM_CQ_IFIFO_CNT 0xE48108 + +#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO 0xE48120 + +#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI 0xE48124 + +#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO 0xE48128 + +#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI 0xE4812C + +#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO 0xE48130 + +#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI 0xE48134 + +#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO 0xE48138 + +#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI 0xE4813C + +#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET 0xE48140 + +#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE48144 + +#define mmTPC1_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE48148 + +#define 
mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE4814C + +#define mmTPC1_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE48150 + +#define mmTPC1_QM_CP_LDMA_COMMIT_OFFSET 0xE48154 + +#define mmTPC1_QM_CP_FENCE0_RDATA 0xE48158 + +#define mmTPC1_QM_CP_FENCE1_RDATA 0xE4815C + +#define mmTPC1_QM_CP_FENCE2_RDATA 0xE48160 + +#define mmTPC1_QM_CP_FENCE3_RDATA 0xE48164 + +#define mmTPC1_QM_CP_FENCE0_CNT 0xE48168 + +#define mmTPC1_QM_CP_FENCE1_CNT 0xE4816C + +#define mmTPC1_QM_CP_FENCE2_CNT 0xE48170 + +#define mmTPC1_QM_CP_FENCE3_CNT 0xE48174 + +#define mmTPC1_QM_CP_STS 0xE48178 + +#define mmTPC1_QM_CP_CURRENT_INST_LO 0xE4817C + +#define mmTPC1_QM_CP_CURRENT_INST_HI 0xE48180 + +#define mmTPC1_QM_CP_BARRIER_CFG 0xE48184 + +#define mmTPC1_QM_CP_DBG_0 0xE48188 + +#define mmTPC1_QM_PQ_BUF_ADDR 0xE48300 + +#define mmTPC1_QM_PQ_BUF_RDATA 0xE48304 + +#define mmTPC1_QM_CQ_BUF_ADDR 0xE48308 + +#define mmTPC1_QM_CQ_BUF_RDATA 0xE4830C + +#endif /* ASIC_REG_TPC1_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h new file mode 100644 index 000000000..0f91e3078 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h @@ -0,0 +1,322 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC1_RTR_REGS_H_ +#define ASIC_REG_TPC1_RTR_REGS_H_ + +/* + ***************************************** + * TPC1_RTR (Prototype: TPC_RTR) + ***************************************** + */ + +#define mmTPC1_RTR_HBW_RD_RQ_E_ARB 0xE40100 + +#define mmTPC1_RTR_HBW_RD_RQ_W_ARB 0xE40104 + +#define mmTPC1_RTR_HBW_RD_RQ_N_ARB 0xE40108 + +#define mmTPC1_RTR_HBW_RD_RQ_S_ARB 0xE4010C + +#define mmTPC1_RTR_HBW_RD_RQ_L_ARB 0xE40110 + +#define mmTPC1_RTR_HBW_E_ARB_MAX 0xE40120 + +#define mmTPC1_RTR_HBW_W_ARB_MAX 0xE40124 + +#define mmTPC1_RTR_HBW_N_ARB_MAX 0xE40128 + +#define mmTPC1_RTR_HBW_S_ARB_MAX 0xE4012C + +#define mmTPC1_RTR_HBW_L_ARB_MAX 0xE40130 + +#define mmTPC1_RTR_HBW_RD_RS_E_ARB 0xE40140 + +#define mmTPC1_RTR_HBW_RD_RS_W_ARB 0xE40144 + +#define mmTPC1_RTR_HBW_RD_RS_N_ARB 0xE40148 + +#define mmTPC1_RTR_HBW_RD_RS_S_ARB 0xE4014C + +#define mmTPC1_RTR_HBW_RD_RS_L_ARB 0xE40150 + +#define mmTPC1_RTR_HBW_WR_RQ_E_ARB 0xE40170 + +#define mmTPC1_RTR_HBW_WR_RQ_W_ARB 0xE40174 + +#define mmTPC1_RTR_HBW_WR_RQ_N_ARB 0xE40178 + +#define mmTPC1_RTR_HBW_WR_RQ_S_ARB 0xE4017C + +#define mmTPC1_RTR_HBW_WR_RQ_L_ARB 0xE40180 + +#define mmTPC1_RTR_HBW_WR_RS_E_ARB 0xE40190 + +#define mmTPC1_RTR_HBW_WR_RS_W_ARB 0xE40194 + +#define mmTPC1_RTR_HBW_WR_RS_N_ARB 0xE40198 + +#define mmTPC1_RTR_HBW_WR_RS_S_ARB 0xE4019C + +#define mmTPC1_RTR_HBW_WR_RS_L_ARB 0xE401A0 + +#define mmTPC1_RTR_LBW_RD_RQ_E_ARB 0xE40200 + +#define mmTPC1_RTR_LBW_RD_RQ_W_ARB 0xE40204 + +#define mmTPC1_RTR_LBW_RD_RQ_N_ARB 0xE40208 + +#define mmTPC1_RTR_LBW_RD_RQ_S_ARB 0xE4020C + +#define mmTPC1_RTR_LBW_RD_RQ_L_ARB 0xE40210 + +#define mmTPC1_RTR_LBW_E_ARB_MAX 0xE40220 + +#define mmTPC1_RTR_LBW_W_ARB_MAX 0xE40224 + +#define mmTPC1_RTR_LBW_N_ARB_MAX 0xE40228 + +#define mmTPC1_RTR_LBW_S_ARB_MAX 0xE4022C + +#define mmTPC1_RTR_LBW_L_ARB_MAX 0xE40230 + +#define mmTPC1_RTR_LBW_RD_RS_E_ARB 0xE40250 + +#define mmTPC1_RTR_LBW_RD_RS_W_ARB 0xE40254 + +#define mmTPC1_RTR_LBW_RD_RS_N_ARB 0xE40258 + +#define mmTPC1_RTR_LBW_RD_RS_S_ARB 0xE4025C + +#define mmTPC1_RTR_LBW_RD_RS_L_ARB 
0xE40260 + +#define mmTPC1_RTR_LBW_WR_RQ_E_ARB 0xE40270 + +#define mmTPC1_RTR_LBW_WR_RQ_W_ARB 0xE40274 + +#define mmTPC1_RTR_LBW_WR_RQ_N_ARB 0xE40278 + +#define mmTPC1_RTR_LBW_WR_RQ_S_ARB 0xE4027C + +#define mmTPC1_RTR_LBW_WR_RQ_L_ARB 0xE40280 + +#define mmTPC1_RTR_LBW_WR_RS_E_ARB 0xE40290 + +#define mmTPC1_RTR_LBW_WR_RS_W_ARB 0xE40294 + +#define mmTPC1_RTR_LBW_WR_RS_N_ARB 0xE40298 + +#define mmTPC1_RTR_LBW_WR_RS_S_ARB 0xE4029C + +#define mmTPC1_RTR_LBW_WR_RS_L_ARB 0xE402A0 + +#define mmTPC1_RTR_DBG_E_ARB 0xE40300 + +#define mmTPC1_RTR_DBG_W_ARB 0xE40304 + +#define mmTPC1_RTR_DBG_N_ARB 0xE40308 + +#define mmTPC1_RTR_DBG_S_ARB 0xE4030C + +#define mmTPC1_RTR_DBG_L_ARB 0xE40310 + +#define mmTPC1_RTR_DBG_E_ARB_MAX 0xE40320 + +#define mmTPC1_RTR_DBG_W_ARB_MAX 0xE40324 + +#define mmTPC1_RTR_DBG_N_ARB_MAX 0xE40328 + +#define mmTPC1_RTR_DBG_S_ARB_MAX 0xE4032C + +#define mmTPC1_RTR_DBG_L_ARB_MAX 0xE40330 + +#define mmTPC1_RTR_SPLIT_COEF_0 0xE40400 + +#define mmTPC1_RTR_SPLIT_COEF_1 0xE40404 + +#define mmTPC1_RTR_SPLIT_COEF_2 0xE40408 + +#define mmTPC1_RTR_SPLIT_COEF_3 0xE4040C + +#define mmTPC1_RTR_SPLIT_COEF_4 0xE40410 + +#define mmTPC1_RTR_SPLIT_COEF_5 0xE40414 + +#define mmTPC1_RTR_SPLIT_COEF_6 0xE40418 + +#define mmTPC1_RTR_SPLIT_COEF_7 0xE4041C + +#define mmTPC1_RTR_SPLIT_COEF_8 0xE40420 + +#define mmTPC1_RTR_SPLIT_COEF_9 0xE40424 + +#define mmTPC1_RTR_SPLIT_CFG 0xE40440 + +#define mmTPC1_RTR_SPLIT_RD_SAT 0xE40444 + +#define mmTPC1_RTR_SPLIT_RD_RST_TOKEN 0xE40448 + +#define mmTPC1_RTR_SPLIT_RD_TIMEOUT_0 0xE4044C + +#define mmTPC1_RTR_SPLIT_RD_TIMEOUT_1 0xE40450 + +#define mmTPC1_RTR_SPLIT_WR_SAT 0xE40454 + +#define mmTPC1_RTR_WPLIT_WR_TST_TOLEN 0xE40458 + +#define mmTPC1_RTR_SPLIT_WR_TIMEOUT_0 0xE4045C + +#define mmTPC1_RTR_SPLIT_WR_TIMEOUT_1 0xE40460 + +#define mmTPC1_RTR_HBW_RANGE_HIT 0xE40470 + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_0 0xE40480 + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_1 0xE40484 + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_2 0xE40488 + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_3 0xE4048C + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_4 0xE40490 + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_5 0xE40494 + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_6 0xE40498 + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_7 0xE4049C + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_0 0xE404A0 + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_1 0xE404A4 + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_2 0xE404A8 + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_3 0xE404AC + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_4 0xE404B0 + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_5 0xE404B4 + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_6 0xE404B8 + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_7 0xE404BC + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_0 0xE404C0 + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_1 0xE404C4 + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_2 0xE404C8 + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_3 0xE404CC + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_4 0xE404D0 + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_5 0xE404D4 + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_6 0xE404D8 + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_7 0xE404DC + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_0 0xE404E0 + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_1 0xE404E4 + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_2 0xE404E8 + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_3 0xE404EC + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_4 0xE404F0 + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_5 0xE404F4 + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_6 0xE404F8 + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_7 0xE404FC + +#define mmTPC1_RTR_LBW_RANGE_HIT 0xE40500 + +#define mmTPC1_RTR_LBW_RANGE_MASK_0 0xE40510 + +#define mmTPC1_RTR_LBW_RANGE_MASK_1 
0xE40514 + +#define mmTPC1_RTR_LBW_RANGE_MASK_2 0xE40518 + +#define mmTPC1_RTR_LBW_RANGE_MASK_3 0xE4051C + +#define mmTPC1_RTR_LBW_RANGE_MASK_4 0xE40520 + +#define mmTPC1_RTR_LBW_RANGE_MASK_5 0xE40524 + +#define mmTPC1_RTR_LBW_RANGE_MASK_6 0xE40528 + +#define mmTPC1_RTR_LBW_RANGE_MASK_7 0xE4052C + +#define mmTPC1_RTR_LBW_RANGE_MASK_8 0xE40530 + +#define mmTPC1_RTR_LBW_RANGE_MASK_9 0xE40534 + +#define mmTPC1_RTR_LBW_RANGE_MASK_10 0xE40538 + +#define mmTPC1_RTR_LBW_RANGE_MASK_11 0xE4053C + +#define mmTPC1_RTR_LBW_RANGE_MASK_12 0xE40540 + +#define mmTPC1_RTR_LBW_RANGE_MASK_13 0xE40544 + +#define mmTPC1_RTR_LBW_RANGE_MASK_14 0xE40548 + +#define mmTPC1_RTR_LBW_RANGE_MASK_15 0xE4054C + +#define mmTPC1_RTR_LBW_RANGE_BASE_0 0xE40550 + +#define mmTPC1_RTR_LBW_RANGE_BASE_1 0xE40554 + +#define mmTPC1_RTR_LBW_RANGE_BASE_2 0xE40558 + +#define mmTPC1_RTR_LBW_RANGE_BASE_3 0xE4055C + +#define mmTPC1_RTR_LBW_RANGE_BASE_4 0xE40560 + +#define mmTPC1_RTR_LBW_RANGE_BASE_5 0xE40564 + +#define mmTPC1_RTR_LBW_RANGE_BASE_6 0xE40568 + +#define mmTPC1_RTR_LBW_RANGE_BASE_7 0xE4056C + +#define mmTPC1_RTR_LBW_RANGE_BASE_8 0xE40570 + +#define mmTPC1_RTR_LBW_RANGE_BASE_9 0xE40574 + +#define mmTPC1_RTR_LBW_RANGE_BASE_10 0xE40578 + +#define mmTPC1_RTR_LBW_RANGE_BASE_11 0xE4057C + +#define mmTPC1_RTR_LBW_RANGE_BASE_12 0xE40580 + +#define mmTPC1_RTR_LBW_RANGE_BASE_13 0xE40584 + +#define mmTPC1_RTR_LBW_RANGE_BASE_14 0xE40588 + +#define mmTPC1_RTR_LBW_RANGE_BASE_15 0xE4058C + +#define mmTPC1_RTR_RGLTR 0xE40590 + +#define mmTPC1_RTR_RGLTR_WR_RESULT 0xE40594 + +#define mmTPC1_RTR_RGLTR_RD_RESULT 0xE40598 + +#define mmTPC1_RTR_SCRAMB_EN 0xE40600 + +#define mmTPC1_RTR_NON_LIN_SCRAMB 0xE40604 + +#endif /* ASIC_REG_TPC1_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h new file mode 100644 index 000000000..73421227f --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h @@ -0,0 +1,886 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC2_CFG_REGS_H_ +#define ASIC_REG_TPC2_CFG_REGS_H_ + +/* + ***************************************** + * TPC2_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE86400 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE86404 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE86408 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE8640C + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE86410 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE86414 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE86418 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE8641C + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE86420 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE86424 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE86428 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE8642C + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE86430 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE86434 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE86438 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE8643C + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE86440 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE86444 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE86448 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE8644C + +#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE86450 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE86454 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE86458 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE8645C + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE86460 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE86464 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE86468 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE8646C + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE86470 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE86474 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE86478 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE8647C + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE86480 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE86484 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE86488 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE8648C + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE86490 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE86494 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE86498 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE8649C + +#define mmTPC2_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE864A0 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE864A4 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE864A8 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE864AC + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE864B0 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE864B4 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE864B8 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE864BC + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE864C0 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE864C4 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE864C8 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE864CC + +#define 
mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE864D0 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE864D4 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE864D8 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE864DC + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE864E0 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE864E4 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE864E8 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE864EC + +#define mmTPC2_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE864F0 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE864F4 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE864F8 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE864FC + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE86500 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE86504 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE86508 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE8650C + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE86510 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE86514 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE86518 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE8651C + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE86520 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE86524 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE86528 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE8652C + +#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE86530 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE86534 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE86538 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE8653C + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE86540 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE86544 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE86548 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE8654C + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE86550 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE86554 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE86558 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE8655C + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE86560 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE86564 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE86568 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE8656C + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE86570 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE86574 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE86578 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE8657C + +#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE86580 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE86584 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE86588 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE8658C + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE86590 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE86594 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE86598 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE8659C + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE865A0 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE865A4 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE865A8 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE865AC + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE865B0 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE865B4 + +#define 
mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE865B8 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE865BC + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE865C0 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE865C4 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE865C8 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE865CC + +#define mmTPC2_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE865D0 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE865D4 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE865D8 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE865DC + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE865E0 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE865E4 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE865E8 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE865EC + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE865F0 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE865F4 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE865F8 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE865FC + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE86600 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE86604 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE86608 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE8660C + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE86610 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE86614 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE86618 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE8661C + +#define mmTPC2_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE86620 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE86624 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE86628 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE8662C + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE86630 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE86634 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE86638 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE8663C + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE86640 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE86644 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE86648 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE8664C + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE86650 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE86654 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE86658 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE8665C + +#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE86660 + +#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE86664 + +#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_0 0xE86668 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_0 0xE8666C + +#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_1 0xE86670 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_1 0xE86674 + +#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_2 0xE86678 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_2 0xE8667C + +#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_3 0xE86680 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_3 0xE86684 + +#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_4 0xE86688 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_4 0xE8668C + +#define mmTPC2_CFG_KERNEL_SRF_0 0xE86690 + +#define mmTPC2_CFG_KERNEL_SRF_1 0xE86694 + +#define mmTPC2_CFG_KERNEL_SRF_2 0xE86698 + +#define mmTPC2_CFG_KERNEL_SRF_3 0xE8669C + +#define mmTPC2_CFG_KERNEL_SRF_4 0xE866A0 + +#define mmTPC2_CFG_KERNEL_SRF_5 0xE866A4 + +#define mmTPC2_CFG_KERNEL_SRF_6 0xE866A8 + +#define mmTPC2_CFG_KERNEL_SRF_7 
0xE866AC + +#define mmTPC2_CFG_KERNEL_SRF_8 0xE866B0 + +#define mmTPC2_CFG_KERNEL_SRF_9 0xE866B4 + +#define mmTPC2_CFG_KERNEL_SRF_10 0xE866B8 + +#define mmTPC2_CFG_KERNEL_SRF_11 0xE866BC + +#define mmTPC2_CFG_KERNEL_SRF_12 0xE866C0 + +#define mmTPC2_CFG_KERNEL_SRF_13 0xE866C4 + +#define mmTPC2_CFG_KERNEL_SRF_14 0xE866C8 + +#define mmTPC2_CFG_KERNEL_SRF_15 0xE866CC + +#define mmTPC2_CFG_KERNEL_SRF_16 0xE866D0 + +#define mmTPC2_CFG_KERNEL_SRF_17 0xE866D4 + +#define mmTPC2_CFG_KERNEL_SRF_18 0xE866D8 + +#define mmTPC2_CFG_KERNEL_SRF_19 0xE866DC + +#define mmTPC2_CFG_KERNEL_SRF_20 0xE866E0 + +#define mmTPC2_CFG_KERNEL_SRF_21 0xE866E4 + +#define mmTPC2_CFG_KERNEL_SRF_22 0xE866E8 + +#define mmTPC2_CFG_KERNEL_SRF_23 0xE866EC + +#define mmTPC2_CFG_KERNEL_SRF_24 0xE866F0 + +#define mmTPC2_CFG_KERNEL_SRF_25 0xE866F4 + +#define mmTPC2_CFG_KERNEL_SRF_26 0xE866F8 + +#define mmTPC2_CFG_KERNEL_SRF_27 0xE866FC + +#define mmTPC2_CFG_KERNEL_SRF_28 0xE86700 + +#define mmTPC2_CFG_KERNEL_SRF_29 0xE86704 + +#define mmTPC2_CFG_KERNEL_SRF_30 0xE86708 + +#define mmTPC2_CFG_KERNEL_SRF_31 0xE8670C + +#define mmTPC2_CFG_KERNEL_KERNEL_CONFIG 0xE86710 + +#define mmTPC2_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE86714 + +#define mmTPC2_CFG_RESERVED_DESC_END 0xE86738 + +#define mmTPC2_CFG_ROUND_CSR 0xE867FC + +#define mmTPC2_CFG_TBUF_BASE_ADDR_LOW 0xE86800 + +#define mmTPC2_CFG_TBUF_BASE_ADDR_HIGH 0xE86804 + +#define mmTPC2_CFG_SEMAPHORE 0xE86808 + +#define mmTPC2_CFG_VFLAGS 0xE8680C + +#define mmTPC2_CFG_SFLAGS 0xE86810 + +#define mmTPC2_CFG_LFSR_POLYNOM 0xE86818 + +#define mmTPC2_CFG_STATUS 0xE8681C + +#define mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH 0xE86820 + +#define mmTPC2_CFG_CFG_SUBTRACT_VALUE 0xE86824 + +#define mmTPC2_CFG_SM_BASE_ADDRESS_LOW 0xE86828 + +#define mmTPC2_CFG_SM_BASE_ADDRESS_HIGH 0xE8682C + +#define mmTPC2_CFG_TPC_CMD 0xE86830 + +#define mmTPC2_CFG_TPC_EXECUTE 0xE86838 + +#define mmTPC2_CFG_TPC_STALL 0xE8683C + +#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_LOW 0xE86840 + +#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE86844 + +#define mmTPC2_CFG_MSS_CONFIG 0xE86854 + +#define mmTPC2_CFG_TPC_INTR_CAUSE 0xE86858 + +#define mmTPC2_CFG_TPC_INTR_MASK 0xE8685C + +#define mmTPC2_CFG_TSB_CONFIG 0xE86860 + +#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE86A00 + +#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE86A04 + +#define mmTPC2_CFG_QM_TENSOR_0_PADDING_VALUE 0xE86A08 + +#define mmTPC2_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE86A0C + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE86A10 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE86A14 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE86A18 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE86A1C + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE86A20 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE86A24 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE86A28 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE86A2C + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE86A30 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE86A34 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE86A38 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE86A3C + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE86A40 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE86A44 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE86A48 + +#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE86A4C + +#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE86A50 + +#define mmTPC2_CFG_QM_TENSOR_1_PADDING_VALUE 0xE86A54 + +#define mmTPC2_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE86A58 + +#define 
mmTPC2_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE86A5C + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE86A60 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE86A64 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE86A68 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE86A6C + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE86A70 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE86A74 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE86A78 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE86A7C + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE86A80 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE86A84 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE86A88 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE86A8C + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE86A90 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE86A94 + +#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE86A98 + +#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE86A9C + +#define mmTPC2_CFG_QM_TENSOR_2_PADDING_VALUE 0xE86AA0 + +#define mmTPC2_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE86AA4 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE86AA8 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE86AAC + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE86AB0 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE86AB4 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE86AB8 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE86ABC + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE86AC0 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE86AC4 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE86AC8 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE86ACC + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE86AD0 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE86AD4 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE86AD8 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE86ADC + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE86AE0 + +#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE86AE4 + +#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE86AE8 + +#define mmTPC2_CFG_QM_TENSOR_3_PADDING_VALUE 0xE86AEC + +#define mmTPC2_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE86AF0 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE86AF4 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE86AF8 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE86AFC + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE86B00 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE86B04 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE86B08 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE86B0C + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE86B10 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE86B14 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE86B18 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE86B1C + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE86B20 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE86B24 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE86B28 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE86B2C + +#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE86B30 + +#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE86B34 + +#define mmTPC2_CFG_QM_TENSOR_4_PADDING_VALUE 0xE86B38 + +#define mmTPC2_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE86B3C + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE86B40 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE86B44 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE86B48 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE86B4C + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE86B50 + +#define 
mmTPC2_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE86B54 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE86B58 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE86B5C + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE86B60 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE86B64 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE86B68 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE86B6C + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE86B70 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE86B74 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE86B78 + +#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE86B7C + +#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE86B80 + +#define mmTPC2_CFG_QM_TENSOR_5_PADDING_VALUE 0xE86B84 + +#define mmTPC2_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE86B88 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE86B8C + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE86B90 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE86B94 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE86B98 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE86B9C + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE86BA0 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE86BA4 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE86BA8 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE86BAC + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE86BB0 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE86BB4 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE86BB8 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE86BBC + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE86BC0 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE86BC4 + +#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE86BC8 + +#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE86BCC + +#define mmTPC2_CFG_QM_TENSOR_6_PADDING_VALUE 0xE86BD0 + +#define mmTPC2_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE86BD4 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE86BD8 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE86BDC + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE86BE0 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE86BE4 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE86BE8 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE86BEC + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE86BF0 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE86BF4 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE86BF8 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE86BFC + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE86C00 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE86C04 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE86C08 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE86C0C + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE86C10 + +#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE86C14 + +#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE86C18 + +#define mmTPC2_CFG_QM_TENSOR_7_PADDING_VALUE 0xE86C1C + +#define mmTPC2_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE86C20 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE86C24 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE86C28 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE86C2C + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE86C30 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE86C34 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE86C38 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE86C3C + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE86C40 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE86C44 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE86C48 + +#define 
mmTPC2_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE86C4C + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE86C50 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE86C54 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE86C58 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE86C5C + +#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE86C60 + +#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE86C64 + +#define mmTPC2_CFG_QM_TID_BASE_DIM_0 0xE86C68 + +#define mmTPC2_CFG_QM_TID_SIZE_DIM_0 0xE86C6C + +#define mmTPC2_CFG_QM_TID_BASE_DIM_1 0xE86C70 + +#define mmTPC2_CFG_QM_TID_SIZE_DIM_1 0xE86C74 + +#define mmTPC2_CFG_QM_TID_BASE_DIM_2 0xE86C78 + +#define mmTPC2_CFG_QM_TID_SIZE_DIM_2 0xE86C7C + +#define mmTPC2_CFG_QM_TID_BASE_DIM_3 0xE86C80 + +#define mmTPC2_CFG_QM_TID_SIZE_DIM_3 0xE86C84 + +#define mmTPC2_CFG_QM_TID_BASE_DIM_4 0xE86C88 + +#define mmTPC2_CFG_QM_TID_SIZE_DIM_4 0xE86C8C + +#define mmTPC2_CFG_QM_SRF_0 0xE86C90 + +#define mmTPC2_CFG_QM_SRF_1 0xE86C94 + +#define mmTPC2_CFG_QM_SRF_2 0xE86C98 + +#define mmTPC2_CFG_QM_SRF_3 0xE86C9C + +#define mmTPC2_CFG_QM_SRF_4 0xE86CA0 + +#define mmTPC2_CFG_QM_SRF_5 0xE86CA4 + +#define mmTPC2_CFG_QM_SRF_6 0xE86CA8 + +#define mmTPC2_CFG_QM_SRF_7 0xE86CAC + +#define mmTPC2_CFG_QM_SRF_8 0xE86CB0 + +#define mmTPC2_CFG_QM_SRF_9 0xE86CB4 + +#define mmTPC2_CFG_QM_SRF_10 0xE86CB8 + +#define mmTPC2_CFG_QM_SRF_11 0xE86CBC + +#define mmTPC2_CFG_QM_SRF_12 0xE86CC0 + +#define mmTPC2_CFG_QM_SRF_13 0xE86CC4 + +#define mmTPC2_CFG_QM_SRF_14 0xE86CC8 + +#define mmTPC2_CFG_QM_SRF_15 0xE86CCC + +#define mmTPC2_CFG_QM_SRF_16 0xE86CD0 + +#define mmTPC2_CFG_QM_SRF_17 0xE86CD4 + +#define mmTPC2_CFG_QM_SRF_18 0xE86CD8 + +#define mmTPC2_CFG_QM_SRF_19 0xE86CDC + +#define mmTPC2_CFG_QM_SRF_20 0xE86CE0 + +#define mmTPC2_CFG_QM_SRF_21 0xE86CE4 + +#define mmTPC2_CFG_QM_SRF_22 0xE86CE8 + +#define mmTPC2_CFG_QM_SRF_23 0xE86CEC + +#define mmTPC2_CFG_QM_SRF_24 0xE86CF0 + +#define mmTPC2_CFG_QM_SRF_25 0xE86CF4 + +#define mmTPC2_CFG_QM_SRF_26 0xE86CF8 + +#define mmTPC2_CFG_QM_SRF_27 0xE86CFC + +#define mmTPC2_CFG_QM_SRF_28 0xE86D00 + +#define mmTPC2_CFG_QM_SRF_29 0xE86D04 + +#define mmTPC2_CFG_QM_SRF_30 0xE86D08 + +#define mmTPC2_CFG_QM_SRF_31 0xE86D0C + +#define mmTPC2_CFG_QM_KERNEL_CONFIG 0xE86D10 + +#define mmTPC2_CFG_QM_SYNC_OBJECT_MESSAGE 0xE86D14 + +#define mmTPC2_CFG_ARUSER 0xE86D18 + +#define mmTPC2_CFG_AWUSER 0xE86D1C + +#define mmTPC2_CFG_FUNC_MBIST_CNTRL 0xE86E00 + +#define mmTPC2_CFG_FUNC_MBIST_PAT 0xE86E04 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_0 0xE86E08 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_1 0xE86E0C + +#define mmTPC2_CFG_FUNC_MBIST_MEM_2 0xE86E10 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_3 0xE86E14 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_4 0xE86E18 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_5 0xE86E1C + +#define mmTPC2_CFG_FUNC_MBIST_MEM_6 0xE86E20 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_7 0xE86E24 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_8 0xE86E28 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_9 0xE86E2C + +#endif /* ASIC_REG_TPC2_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h new file mode 100644 index 000000000..27b66bf2d --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC2_CMDQ_REGS_H_ +#define ASIC_REG_TPC2_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC2_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC2_CMDQ_GLBL_CFG0 0xE89000 + +#define mmTPC2_CMDQ_GLBL_CFG1 0xE89004 + +#define mmTPC2_CMDQ_GLBL_PROT 0xE89008 + +#define mmTPC2_CMDQ_GLBL_ERR_CFG 0xE8900C + +#define mmTPC2_CMDQ_GLBL_ERR_ADDR_LO 0xE89010 + +#define mmTPC2_CMDQ_GLBL_ERR_ADDR_HI 0xE89014 + +#define mmTPC2_CMDQ_GLBL_ERR_WDATA 0xE89018 + +#define mmTPC2_CMDQ_GLBL_SECURE_PROPS 0xE8901C + +#define mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS 0xE89020 + +#define mmTPC2_CMDQ_GLBL_STS0 0xE89024 + +#define mmTPC2_CMDQ_GLBL_STS1 0xE89028 + +#define mmTPC2_CMDQ_CQ_CFG0 0xE890B0 + +#define mmTPC2_CMDQ_CQ_CFG1 0xE890B4 + +#define mmTPC2_CMDQ_CQ_ARUSER 0xE890B8 + +#define mmTPC2_CMDQ_CQ_PTR_LO 0xE890C0 + +#define mmTPC2_CMDQ_CQ_PTR_HI 0xE890C4 + +#define mmTPC2_CMDQ_CQ_TSIZE 0xE890C8 + +#define mmTPC2_CMDQ_CQ_CTL 0xE890CC + +#define mmTPC2_CMDQ_CQ_PTR_LO_STS 0xE890D4 + +#define mmTPC2_CMDQ_CQ_PTR_HI_STS 0xE890D8 + +#define mmTPC2_CMDQ_CQ_TSIZE_STS 0xE890DC + +#define mmTPC2_CMDQ_CQ_CTL_STS 0xE890E0 + +#define mmTPC2_CMDQ_CQ_STS0 0xE890E4 + +#define mmTPC2_CMDQ_CQ_STS1 0xE890E8 + +#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_EN 0xE890F0 + +#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE890F4 + +#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_SAT 0xE890F8 + +#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE890FC + +#define mmTPC2_CMDQ_CQ_IFIFO_CNT 0xE89108 + +#define mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE89120 + +#define mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE89124 + +#define mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE89128 + +#define mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE8912C + +#define mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE89130 + +#define mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE89134 + +#define mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE89138 + +#define mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE8913C + +#define mmTPC2_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE89140 + +#define mmTPC2_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE89144 + +#define mmTPC2_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE89148 + +#define mmTPC2_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE8914C + +#define mmTPC2_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE89150 + +#define mmTPC2_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE89154 + +#define mmTPC2_CMDQ_CP_FENCE0_RDATA 0xE89158 + +#define mmTPC2_CMDQ_CP_FENCE1_RDATA 0xE8915C + +#define mmTPC2_CMDQ_CP_FENCE2_RDATA 0xE89160 + +#define mmTPC2_CMDQ_CP_FENCE3_RDATA 0xE89164 + +#define mmTPC2_CMDQ_CP_FENCE0_CNT 0xE89168 + +#define mmTPC2_CMDQ_CP_FENCE1_CNT 0xE8916C + +#define mmTPC2_CMDQ_CP_FENCE2_CNT 0xE89170 + +#define mmTPC2_CMDQ_CP_FENCE3_CNT 0xE89174 + +#define mmTPC2_CMDQ_CP_STS 0xE89178 + +#define mmTPC2_CMDQ_CP_CURRENT_INST_LO 0xE8917C + +#define mmTPC2_CMDQ_CP_CURRENT_INST_HI 0xE89180 + +#define mmTPC2_CMDQ_CP_BARRIER_CFG 0xE89184 + +#define mmTPC2_CMDQ_CP_DBG_0 0xE89188 + +#define mmTPC2_CMDQ_CQ_BUF_ADDR 0xE89308 + +#define mmTPC2_CMDQ_CQ_BUF_RDATA 0xE8930C + +#endif /* ASIC_REG_TPC2_CMDQ_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h new file mode 100644 index 000000000..31e5b2f53 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. 
+ * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC2_QM_REGS_H_ +#define ASIC_REG_TPC2_QM_REGS_H_ + +/* + ***************************************** + * TPC2_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC2_QM_GLBL_CFG0 0xE88000 + +#define mmTPC2_QM_GLBL_CFG1 0xE88004 + +#define mmTPC2_QM_GLBL_PROT 0xE88008 + +#define mmTPC2_QM_GLBL_ERR_CFG 0xE8800C + +#define mmTPC2_QM_GLBL_ERR_ADDR_LO 0xE88010 + +#define mmTPC2_QM_GLBL_ERR_ADDR_HI 0xE88014 + +#define mmTPC2_QM_GLBL_ERR_WDATA 0xE88018 + +#define mmTPC2_QM_GLBL_SECURE_PROPS 0xE8801C + +#define mmTPC2_QM_GLBL_NON_SECURE_PROPS 0xE88020 + +#define mmTPC2_QM_GLBL_STS0 0xE88024 + +#define mmTPC2_QM_GLBL_STS1 0xE88028 + +#define mmTPC2_QM_PQ_BASE_LO 0xE88060 + +#define mmTPC2_QM_PQ_BASE_HI 0xE88064 + +#define mmTPC2_QM_PQ_SIZE 0xE88068 + +#define mmTPC2_QM_PQ_PI 0xE8806C + +#define mmTPC2_QM_PQ_CI 0xE88070 + +#define mmTPC2_QM_PQ_CFG0 0xE88074 + +#define mmTPC2_QM_PQ_CFG1 0xE88078 + +#define mmTPC2_QM_PQ_ARUSER 0xE8807C + +#define mmTPC2_QM_PQ_PUSH0 0xE88080 + +#define mmTPC2_QM_PQ_PUSH1 0xE88084 + +#define mmTPC2_QM_PQ_PUSH2 0xE88088 + +#define mmTPC2_QM_PQ_PUSH3 0xE8808C + +#define mmTPC2_QM_PQ_STS0 0xE88090 + +#define mmTPC2_QM_PQ_STS1 0xE88094 + +#define mmTPC2_QM_PQ_RD_RATE_LIM_EN 0xE880A0 + +#define mmTPC2_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE880A4 + +#define mmTPC2_QM_PQ_RD_RATE_LIM_SAT 0xE880A8 + +#define mmTPC2_QM_PQ_RD_RATE_LIM_TOUT 0xE880AC + +#define mmTPC2_QM_CQ_CFG0 0xE880B0 + +#define mmTPC2_QM_CQ_CFG1 0xE880B4 + +#define mmTPC2_QM_CQ_ARUSER 0xE880B8 + +#define mmTPC2_QM_CQ_PTR_LO 0xE880C0 + +#define mmTPC2_QM_CQ_PTR_HI 0xE880C4 + +#define mmTPC2_QM_CQ_TSIZE 0xE880C8 + +#define mmTPC2_QM_CQ_CTL 0xE880CC + +#define mmTPC2_QM_CQ_PTR_LO_STS 0xE880D4 + +#define mmTPC2_QM_CQ_PTR_HI_STS 0xE880D8 + +#define mmTPC2_QM_CQ_TSIZE_STS 0xE880DC + +#define mmTPC2_QM_CQ_CTL_STS 0xE880E0 + +#define mmTPC2_QM_CQ_STS0 0xE880E4 + +#define mmTPC2_QM_CQ_STS1 0xE880E8 + +#define mmTPC2_QM_CQ_RD_RATE_LIM_EN 0xE880F0 + +#define mmTPC2_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE880F4 + +#define mmTPC2_QM_CQ_RD_RATE_LIM_SAT 0xE880F8 + +#define mmTPC2_QM_CQ_RD_RATE_LIM_TOUT 0xE880FC + +#define mmTPC2_QM_CQ_IFIFO_CNT 0xE88108 + +#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO 0xE88120 + +#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI 0xE88124 + +#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO 0xE88128 + +#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI 0xE8812C + +#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO 0xE88130 + +#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI 0xE88134 + +#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO 0xE88138 + +#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI 0xE8813C + +#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET 0xE88140 + +#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE88144 + +#define mmTPC2_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE88148 + +#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE8814C + +#define mmTPC2_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE88150 + +#define mmTPC2_QM_CP_LDMA_COMMIT_OFFSET 0xE88154 + +#define mmTPC2_QM_CP_FENCE0_RDATA 0xE88158 + +#define mmTPC2_QM_CP_FENCE1_RDATA 0xE8815C + +#define mmTPC2_QM_CP_FENCE2_RDATA 0xE88160 + +#define mmTPC2_QM_CP_FENCE3_RDATA 0xE88164 + +#define mmTPC2_QM_CP_FENCE0_CNT 0xE88168 + +#define mmTPC2_QM_CP_FENCE1_CNT 0xE8816C + +#define mmTPC2_QM_CP_FENCE2_CNT 0xE88170 + +#define mmTPC2_QM_CP_FENCE3_CNT 0xE88174 + +#define mmTPC2_QM_CP_STS 0xE88178 + +#define mmTPC2_QM_CP_CURRENT_INST_LO 0xE8817C + 
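+/*
+ * Editorial sketch (not part of the auto-generated file): the PQ_* group
+ * above (PQ_BASE_LO/HI, PQ_SIZE, PQ_PI, PQ_CI) describes a producer/consumer
+ * ring: a driver programs the ring's DMA base and size once, then publishes
+ * work by advancing the producer index while hardware reflects progress in
+ * the consumer index. A minimal illustration under stated assumptions: the
+ * function names are invented, `regs` is an ioremap()ed window laid out so
+ * that these mm* values are valid byte offsets into it, and whether PQ_SIZE
+ * takes a raw entry count or an encoded size is assumed, not confirmed here.
+ */
+#include <linux/io.h>      /* writel()                                  */
+#include <linux/kernel.h>  /* lower_32_bits()/upper_32_bits(), u32, u64 */
+
+static void tpc2_qm_init_ring(void __iomem *regs, u64 dma_addr, u32 nentries)
+{
+	/* Point the queue manager at the ring buffer in device-visible memory */
+	writel(lower_32_bits(dma_addr), regs + mmTPC2_QM_PQ_BASE_LO);
+	writel(upper_32_bits(dma_addr), regs + mmTPC2_QM_PQ_BASE_HI);
+	writel(nentries, regs + mmTPC2_QM_PQ_SIZE);
+}
+
+static void tpc2_qm_ring_doorbell(void __iomem *regs, u32 pi)
+{
+	/* Publish the new producer index; hardware consumes up to it and
+	 * advances the value readable back at mmTPC2_QM_PQ_CI.
+	 */
+	writel(pi, regs + mmTPC2_QM_PQ_PI);
+}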
+#define mmTPC2_QM_CP_CURRENT_INST_HI 0xE88180 + +#define mmTPC2_QM_CP_BARRIER_CFG 0xE88184 + +#define mmTPC2_QM_CP_DBG_0 0xE88188 + +#define mmTPC2_QM_PQ_BUF_ADDR 0xE88300 + +#define mmTPC2_QM_PQ_BUF_RDATA 0xE88304 + +#define mmTPC2_QM_CQ_BUF_ADDR 0xE88308 + +#define mmTPC2_QM_CQ_BUF_RDATA 0xE8830C + +#endif /* ASIC_REG_TPC2_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h new file mode 100644 index 000000000..4eddeaa15 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h @@ -0,0 +1,322 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC2_RTR_REGS_H_ +#define ASIC_REG_TPC2_RTR_REGS_H_ + +/* + ***************************************** + * TPC2_RTR (Prototype: TPC_RTR) + ***************************************** + */ + +#define mmTPC2_RTR_HBW_RD_RQ_E_ARB 0xE80100 + +#define mmTPC2_RTR_HBW_RD_RQ_W_ARB 0xE80104 + +#define mmTPC2_RTR_HBW_RD_RQ_N_ARB 0xE80108 + +#define mmTPC2_RTR_HBW_RD_RQ_S_ARB 0xE8010C + +#define mmTPC2_RTR_HBW_RD_RQ_L_ARB 0xE80110 + +#define mmTPC2_RTR_HBW_E_ARB_MAX 0xE80120 + +#define mmTPC2_RTR_HBW_W_ARB_MAX 0xE80124 + +#define mmTPC2_RTR_HBW_N_ARB_MAX 0xE80128 + +#define mmTPC2_RTR_HBW_S_ARB_MAX 0xE8012C + +#define mmTPC2_RTR_HBW_L_ARB_MAX 0xE80130 + +#define mmTPC2_RTR_HBW_RD_RS_E_ARB 0xE80140 + +#define mmTPC2_RTR_HBW_RD_RS_W_ARB 0xE80144 + +#define mmTPC2_RTR_HBW_RD_RS_N_ARB 0xE80148 + +#define mmTPC2_RTR_HBW_RD_RS_S_ARB 0xE8014C + +#define mmTPC2_RTR_HBW_RD_RS_L_ARB 0xE80150 + +#define mmTPC2_RTR_HBW_WR_RQ_E_ARB 0xE80170 + +#define mmTPC2_RTR_HBW_WR_RQ_W_ARB 0xE80174 + +#define mmTPC2_RTR_HBW_WR_RQ_N_ARB 0xE80178 + +#define mmTPC2_RTR_HBW_WR_RQ_S_ARB 0xE8017C + +#define mmTPC2_RTR_HBW_WR_RQ_L_ARB 0xE80180 + +#define mmTPC2_RTR_HBW_WR_RS_E_ARB 0xE80190 + +#define mmTPC2_RTR_HBW_WR_RS_W_ARB 0xE80194 + +#define mmTPC2_RTR_HBW_WR_RS_N_ARB 0xE80198 + +#define mmTPC2_RTR_HBW_WR_RS_S_ARB 0xE8019C + +#define mmTPC2_RTR_HBW_WR_RS_L_ARB 0xE801A0 + +#define mmTPC2_RTR_LBW_RD_RQ_E_ARB 0xE80200 + +#define mmTPC2_RTR_LBW_RD_RQ_W_ARB 0xE80204 + +#define mmTPC2_RTR_LBW_RD_RQ_N_ARB 0xE80208 + +#define mmTPC2_RTR_LBW_RD_RQ_S_ARB 0xE8020C + +#define mmTPC2_RTR_LBW_RD_RQ_L_ARB 0xE80210 + +#define mmTPC2_RTR_LBW_E_ARB_MAX 0xE80220 + +#define mmTPC2_RTR_LBW_W_ARB_MAX 0xE80224 + +#define mmTPC2_RTR_LBW_N_ARB_MAX 0xE80228 + +#define mmTPC2_RTR_LBW_S_ARB_MAX 0xE8022C + +#define mmTPC2_RTR_LBW_L_ARB_MAX 0xE80230 + +#define mmTPC2_RTR_LBW_RD_RS_E_ARB 0xE80250 + +#define mmTPC2_RTR_LBW_RD_RS_W_ARB 0xE80254 + +#define mmTPC2_RTR_LBW_RD_RS_N_ARB 0xE80258 + +#define mmTPC2_RTR_LBW_RD_RS_S_ARB 0xE8025C + +#define mmTPC2_RTR_LBW_RD_RS_L_ARB 0xE80260 + +#define mmTPC2_RTR_LBW_WR_RQ_E_ARB 0xE80270 + +#define mmTPC2_RTR_LBW_WR_RQ_W_ARB 0xE80274 + +#define mmTPC2_RTR_LBW_WR_RQ_N_ARB 0xE80278 + +#define mmTPC2_RTR_LBW_WR_RQ_S_ARB 0xE8027C + +#define mmTPC2_RTR_LBW_WR_RQ_L_ARB 0xE80280 + +#define mmTPC2_RTR_LBW_WR_RS_E_ARB 0xE80290 + +#define mmTPC2_RTR_LBW_WR_RS_W_ARB 0xE80294 + +#define mmTPC2_RTR_LBW_WR_RS_N_ARB 0xE80298 + +#define mmTPC2_RTR_LBW_WR_RS_S_ARB 0xE8029C + +#define mmTPC2_RTR_LBW_WR_RS_L_ARB 0xE802A0 + +#define mmTPC2_RTR_DBG_E_ARB 0xE80300 + +#define mmTPC2_RTR_DBG_W_ARB 0xE80304 + +#define mmTPC2_RTR_DBG_N_ARB 0xE80308 + 
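+/*
+ * Editorial note (not part of the auto-generated file): two identifier
+ * spellings in these headers are kept verbatim rather than corrected. The
+ * router SPLIT group below defines mmTPC2_RTR_WPLIT_WR_TST_TOLEN, evidently
+ * a generator typo for SPLIT_WR_RST_TOKEN (compare the read-side
+ * mmTPC2_RTR_SPLIT_RD_RST_TOKEN), and the CFG blocks spell
+ * ICACHE_BASE_ADDERESS (sic). These are the names the upstream generated
+ * headers actually define and the driver includes, so "fixing" them in this
+ * patch would desynchronize the header from the code that uses it.
+ */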
+#define mmTPC2_RTR_DBG_S_ARB 0xE8030C + +#define mmTPC2_RTR_DBG_L_ARB 0xE80310 + +#define mmTPC2_RTR_DBG_E_ARB_MAX 0xE80320 + +#define mmTPC2_RTR_DBG_W_ARB_MAX 0xE80324 + +#define mmTPC2_RTR_DBG_N_ARB_MAX 0xE80328 + +#define mmTPC2_RTR_DBG_S_ARB_MAX 0xE8032C + +#define mmTPC2_RTR_DBG_L_ARB_MAX 0xE80330 + +#define mmTPC2_RTR_SPLIT_COEF_0 0xE80400 + +#define mmTPC2_RTR_SPLIT_COEF_1 0xE80404 + +#define mmTPC2_RTR_SPLIT_COEF_2 0xE80408 + +#define mmTPC2_RTR_SPLIT_COEF_3 0xE8040C + +#define mmTPC2_RTR_SPLIT_COEF_4 0xE80410 + +#define mmTPC2_RTR_SPLIT_COEF_5 0xE80414 + +#define mmTPC2_RTR_SPLIT_COEF_6 0xE80418 + +#define mmTPC2_RTR_SPLIT_COEF_7 0xE8041C + +#define mmTPC2_RTR_SPLIT_COEF_8 0xE80420 + +#define mmTPC2_RTR_SPLIT_COEF_9 0xE80424 + +#define mmTPC2_RTR_SPLIT_CFG 0xE80440 + +#define mmTPC2_RTR_SPLIT_RD_SAT 0xE80444 + +#define mmTPC2_RTR_SPLIT_RD_RST_TOKEN 0xE80448 + +#define mmTPC2_RTR_SPLIT_RD_TIMEOUT_0 0xE8044C + +#define mmTPC2_RTR_SPLIT_RD_TIMEOUT_1 0xE80450 + +#define mmTPC2_RTR_SPLIT_WR_SAT 0xE80454 + +#define mmTPC2_RTR_WPLIT_WR_TST_TOLEN 0xE80458 + +#define mmTPC2_RTR_SPLIT_WR_TIMEOUT_0 0xE8045C + +#define mmTPC2_RTR_SPLIT_WR_TIMEOUT_1 0xE80460 + +#define mmTPC2_RTR_HBW_RANGE_HIT 0xE80470 + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_0 0xE80480 + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_1 0xE80484 + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_2 0xE80488 + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_3 0xE8048C + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_4 0xE80490 + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_5 0xE80494 + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_6 0xE80498 + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_7 0xE8049C + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_0 0xE804A0 + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_1 0xE804A4 + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_2 0xE804A8 + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_3 0xE804AC + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_4 0xE804B0 + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_5 0xE804B4 + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_6 0xE804B8 + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_7 0xE804BC + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_0 0xE804C0 + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_1 0xE804C4 + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_2 0xE804C8 + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_3 0xE804CC + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_4 0xE804D0 + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_5 0xE804D4 + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_6 0xE804D8 + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_7 0xE804DC + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_0 0xE804E0 + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_1 0xE804E4 + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_2 0xE804E8 + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_3 0xE804EC + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_4 0xE804F0 + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_5 0xE804F4 + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_6 0xE804F8 + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_7 0xE804FC + +#define mmTPC2_RTR_LBW_RANGE_HIT 0xE80500 + +#define mmTPC2_RTR_LBW_RANGE_MASK_0 0xE80510 + +#define mmTPC2_RTR_LBW_RANGE_MASK_1 0xE80514 + +#define mmTPC2_RTR_LBW_RANGE_MASK_2 0xE80518 + +#define mmTPC2_RTR_LBW_RANGE_MASK_3 0xE8051C + +#define mmTPC2_RTR_LBW_RANGE_MASK_4 0xE80520 + +#define mmTPC2_RTR_LBW_RANGE_MASK_5 0xE80524 + +#define mmTPC2_RTR_LBW_RANGE_MASK_6 0xE80528 + +#define mmTPC2_RTR_LBW_RANGE_MASK_7 0xE8052C + +#define mmTPC2_RTR_LBW_RANGE_MASK_8 0xE80530 + +#define mmTPC2_RTR_LBW_RANGE_MASK_9 0xE80534 + +#define mmTPC2_RTR_LBW_RANGE_MASK_10 0xE80538 + +#define mmTPC2_RTR_LBW_RANGE_MASK_11 0xE8053C + +#define mmTPC2_RTR_LBW_RANGE_MASK_12 0xE80540 + +#define mmTPC2_RTR_LBW_RANGE_MASK_13 0xE80544 + +#define 
mmTPC2_RTR_LBW_RANGE_MASK_14 0xE80548 + +#define mmTPC2_RTR_LBW_RANGE_MASK_15 0xE8054C + +#define mmTPC2_RTR_LBW_RANGE_BASE_0 0xE80550 + +#define mmTPC2_RTR_LBW_RANGE_BASE_1 0xE80554 + +#define mmTPC2_RTR_LBW_RANGE_BASE_2 0xE80558 + +#define mmTPC2_RTR_LBW_RANGE_BASE_3 0xE8055C + +#define mmTPC2_RTR_LBW_RANGE_BASE_4 0xE80560 + +#define mmTPC2_RTR_LBW_RANGE_BASE_5 0xE80564 + +#define mmTPC2_RTR_LBW_RANGE_BASE_6 0xE80568 + +#define mmTPC2_RTR_LBW_RANGE_BASE_7 0xE8056C + +#define mmTPC2_RTR_LBW_RANGE_BASE_8 0xE80570 + +#define mmTPC2_RTR_LBW_RANGE_BASE_9 0xE80574 + +#define mmTPC2_RTR_LBW_RANGE_BASE_10 0xE80578 + +#define mmTPC2_RTR_LBW_RANGE_BASE_11 0xE8057C + +#define mmTPC2_RTR_LBW_RANGE_BASE_12 0xE80580 + +#define mmTPC2_RTR_LBW_RANGE_BASE_13 0xE80584 + +#define mmTPC2_RTR_LBW_RANGE_BASE_14 0xE80588 + +#define mmTPC2_RTR_LBW_RANGE_BASE_15 0xE8058C + +#define mmTPC2_RTR_RGLTR 0xE80590 + +#define mmTPC2_RTR_RGLTR_WR_RESULT 0xE80594 + +#define mmTPC2_RTR_RGLTR_RD_RESULT 0xE80598 + +#define mmTPC2_RTR_SCRAMB_EN 0xE80600 + +#define mmTPC2_RTR_NON_LIN_SCRAMB 0xE80604 + +#endif /* ASIC_REG_TPC2_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h new file mode 100644 index 000000000..ce573a1a8 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h @@ -0,0 +1,886 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC3_CFG_REGS_H_ +#define ASIC_REG_TPC3_CFG_REGS_H_ + +/* + ***************************************** + * TPC3_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xEC6400 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xEC6404 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xEC6408 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xEC640C + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xEC6410 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xEC6414 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xEC6418 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xEC641C + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xEC6420 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xEC6424 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xEC6428 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xEC642C + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xEC6430 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xEC6434 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xEC6438 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xEC643C + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xEC6440 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xEC6444 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xEC6448 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xEC644C + +#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xEC6450 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xEC6454 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xEC6458 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xEC645C + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xEC6460 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xEC6464 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xEC6468 + +#define 
mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xEC646C + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xEC6470 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xEC6474 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xEC6478 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xEC647C + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xEC6480 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xEC6484 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xEC6488 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xEC648C + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xEC6490 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xEC6494 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xEC6498 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xEC649C + +#define mmTPC3_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xEC64A0 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xEC64A4 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xEC64A8 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xEC64AC + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xEC64B0 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xEC64B4 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xEC64B8 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xEC64BC + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xEC64C0 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xEC64C4 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xEC64C8 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xEC64CC + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xEC64D0 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xEC64D4 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xEC64D8 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xEC64DC + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xEC64E0 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xEC64E4 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xEC64E8 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xEC64EC + +#define mmTPC3_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xEC64F0 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xEC64F4 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xEC64F8 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xEC64FC + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xEC6500 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xEC6504 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xEC6508 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xEC650C + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xEC6510 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xEC6514 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xEC6518 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xEC651C + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xEC6520 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xEC6524 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xEC6528 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xEC652C + +#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xEC6530 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xEC6534 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xEC6538 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xEC653C + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xEC6540 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xEC6544 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xEC6548 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xEC654C + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xEC6550 + +#define 
mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xEC6554 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xEC6558 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xEC655C + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xEC6560 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xEC6564 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xEC6568 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xEC656C + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xEC6570 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xEC6574 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xEC6578 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xEC657C + +#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xEC6580 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xEC6584 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xEC6588 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xEC658C + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xEC6590 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xEC6594 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xEC6598 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xEC659C + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xEC65A0 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xEC65A4 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xEC65A8 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xEC65AC + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xEC65B0 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xEC65B4 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xEC65B8 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xEC65BC + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xEC65C0 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xEC65C4 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xEC65C8 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xEC65CC + +#define mmTPC3_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xEC65D0 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xEC65D4 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xEC65D8 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xEC65DC + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xEC65E0 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xEC65E4 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xEC65E8 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xEC65EC + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xEC65F0 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xEC65F4 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xEC65F8 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xEC65FC + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xEC6600 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xEC6604 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xEC6608 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xEC660C + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xEC6610 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xEC6614 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xEC6618 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xEC661C + +#define mmTPC3_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xEC6620 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xEC6624 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xEC6628 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xEC662C + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xEC6630 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xEC6634 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xEC6638 + +#define 
mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xEC663C + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xEC6640 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xEC6644 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xEC6648 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xEC664C + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xEC6650 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xEC6654 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xEC6658 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xEC665C + +#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xEC6660 + +#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xEC6664 + +#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_0 0xEC6668 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_0 0xEC666C + +#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_1 0xEC6670 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_1 0xEC6674 + +#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_2 0xEC6678 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_2 0xEC667C + +#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_3 0xEC6680 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_3 0xEC6684 + +#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_4 0xEC6688 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_4 0xEC668C + +#define mmTPC3_CFG_KERNEL_SRF_0 0xEC6690 + +#define mmTPC3_CFG_KERNEL_SRF_1 0xEC6694 + +#define mmTPC3_CFG_KERNEL_SRF_2 0xEC6698 + +#define mmTPC3_CFG_KERNEL_SRF_3 0xEC669C + +#define mmTPC3_CFG_KERNEL_SRF_4 0xEC66A0 + +#define mmTPC3_CFG_KERNEL_SRF_5 0xEC66A4 + +#define mmTPC3_CFG_KERNEL_SRF_6 0xEC66A8 + +#define mmTPC3_CFG_KERNEL_SRF_7 0xEC66AC + +#define mmTPC3_CFG_KERNEL_SRF_8 0xEC66B0 + +#define mmTPC3_CFG_KERNEL_SRF_9 0xEC66B4 + +#define mmTPC3_CFG_KERNEL_SRF_10 0xEC66B8 + +#define mmTPC3_CFG_KERNEL_SRF_11 0xEC66BC + +#define mmTPC3_CFG_KERNEL_SRF_12 0xEC66C0 + +#define mmTPC3_CFG_KERNEL_SRF_13 0xEC66C4 + +#define mmTPC3_CFG_KERNEL_SRF_14 0xEC66C8 + +#define mmTPC3_CFG_KERNEL_SRF_15 0xEC66CC + +#define mmTPC3_CFG_KERNEL_SRF_16 0xEC66D0 + +#define mmTPC3_CFG_KERNEL_SRF_17 0xEC66D4 + +#define mmTPC3_CFG_KERNEL_SRF_18 0xEC66D8 + +#define mmTPC3_CFG_KERNEL_SRF_19 0xEC66DC + +#define mmTPC3_CFG_KERNEL_SRF_20 0xEC66E0 + +#define mmTPC3_CFG_KERNEL_SRF_21 0xEC66E4 + +#define mmTPC3_CFG_KERNEL_SRF_22 0xEC66E8 + +#define mmTPC3_CFG_KERNEL_SRF_23 0xEC66EC + +#define mmTPC3_CFG_KERNEL_SRF_24 0xEC66F0 + +#define mmTPC3_CFG_KERNEL_SRF_25 0xEC66F4 + +#define mmTPC3_CFG_KERNEL_SRF_26 0xEC66F8 + +#define mmTPC3_CFG_KERNEL_SRF_27 0xEC66FC + +#define mmTPC3_CFG_KERNEL_SRF_28 0xEC6700 + +#define mmTPC3_CFG_KERNEL_SRF_29 0xEC6704 + +#define mmTPC3_CFG_KERNEL_SRF_30 0xEC6708 + +#define mmTPC3_CFG_KERNEL_SRF_31 0xEC670C + +#define mmTPC3_CFG_KERNEL_KERNEL_CONFIG 0xEC6710 + +#define mmTPC3_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xEC6714 + +#define mmTPC3_CFG_RESERVED_DESC_END 0xEC6738 + +#define mmTPC3_CFG_ROUND_CSR 0xEC67FC + +#define mmTPC3_CFG_TBUF_BASE_ADDR_LOW 0xEC6800 + +#define mmTPC3_CFG_TBUF_BASE_ADDR_HIGH 0xEC6804 + +#define mmTPC3_CFG_SEMAPHORE 0xEC6808 + +#define mmTPC3_CFG_VFLAGS 0xEC680C + +#define mmTPC3_CFG_SFLAGS 0xEC6810 + +#define mmTPC3_CFG_LFSR_POLYNOM 0xEC6818 + +#define mmTPC3_CFG_STATUS 0xEC681C + +#define mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH 0xEC6820 + +#define mmTPC3_CFG_CFG_SUBTRACT_VALUE 0xEC6824 + +#define mmTPC3_CFG_SM_BASE_ADDRESS_LOW 0xEC6828 + +#define mmTPC3_CFG_SM_BASE_ADDRESS_HIGH 0xEC682C + +#define mmTPC3_CFG_TPC_CMD 0xEC6830 + +#define mmTPC3_CFG_TPC_EXECUTE 0xEC6838 + +#define mmTPC3_CFG_TPC_STALL 0xEC683C + +#define mmTPC3_CFG_ICACHE_BASE_ADDERESS_LOW 0xEC6840 + +#define 
mmTPC3_CFG_ICACHE_BASE_ADDERESS_HIGH 0xEC6844 + +#define mmTPC3_CFG_MSS_CONFIG 0xEC6854 + +#define mmTPC3_CFG_TPC_INTR_CAUSE 0xEC6858 + +#define mmTPC3_CFG_TPC_INTR_MASK 0xEC685C + +#define mmTPC3_CFG_TSB_CONFIG 0xEC6860 + +#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xEC6A00 + +#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xEC6A04 + +#define mmTPC3_CFG_QM_TENSOR_0_PADDING_VALUE 0xEC6A08 + +#define mmTPC3_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xEC6A0C + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_SIZE 0xEC6A10 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xEC6A14 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xEC6A18 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_SIZE 0xEC6A1C + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xEC6A20 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xEC6A24 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_SIZE 0xEC6A28 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xEC6A2C + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xEC6A30 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_SIZE 0xEC6A34 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xEC6A38 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xEC6A3C + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_SIZE 0xEC6A40 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xEC6A44 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xEC6A48 + +#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xEC6A4C + +#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xEC6A50 + +#define mmTPC3_CFG_QM_TENSOR_1_PADDING_VALUE 0xEC6A54 + +#define mmTPC3_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xEC6A58 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_SIZE 0xEC6A5C + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xEC6A60 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xEC6A64 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_SIZE 0xEC6A68 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xEC6A6C + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xEC6A70 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_SIZE 0xEC6A74 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xEC6A78 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xEC6A7C + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_SIZE 0xEC6A80 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xEC6A84 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xEC6A88 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_SIZE 0xEC6A8C + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xEC6A90 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xEC6A94 + +#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xEC6A98 + +#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xEC6A9C + +#define mmTPC3_CFG_QM_TENSOR_2_PADDING_VALUE 0xEC6AA0 + +#define mmTPC3_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xEC6AA4 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_SIZE 0xEC6AA8 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xEC6AAC + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xEC6AB0 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_SIZE 0xEC6AB4 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xEC6AB8 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xEC6ABC + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_SIZE 0xEC6AC0 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xEC6AC4 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xEC6AC8 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_SIZE 0xEC6ACC + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xEC6AD0 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xEC6AD4 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_SIZE 0xEC6AD8 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xEC6ADC + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xEC6AE0 + +#define mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xEC6AE4 + +#define 
mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xEC6AE8 + +#define mmTPC3_CFG_QM_TENSOR_3_PADDING_VALUE 0xEC6AEC + +#define mmTPC3_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xEC6AF0 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_SIZE 0xEC6AF4 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xEC6AF8 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xEC6AFC + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_SIZE 0xEC6B00 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xEC6B04 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xEC6B08 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_SIZE 0xEC6B0C + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xEC6B10 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xEC6B14 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_SIZE 0xEC6B18 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xEC6B1C + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xEC6B20 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_SIZE 0xEC6B24 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xEC6B28 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xEC6B2C + +#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xEC6B30 + +#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xEC6B34 + +#define mmTPC3_CFG_QM_TENSOR_4_PADDING_VALUE 0xEC6B38 + +#define mmTPC3_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xEC6B3C + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_SIZE 0xEC6B40 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xEC6B44 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xEC6B48 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_SIZE 0xEC6B4C + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xEC6B50 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xEC6B54 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_SIZE 0xEC6B58 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xEC6B5C + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xEC6B60 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_SIZE 0xEC6B64 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xEC6B68 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xEC6B6C + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_SIZE 0xEC6B70 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xEC6B74 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xEC6B78 + +#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xEC6B7C + +#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xEC6B80 + +#define mmTPC3_CFG_QM_TENSOR_5_PADDING_VALUE 0xEC6B84 + +#define mmTPC3_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xEC6B88 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_SIZE 0xEC6B8C + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xEC6B90 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xEC6B94 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_SIZE 0xEC6B98 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xEC6B9C + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xEC6BA0 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_SIZE 0xEC6BA4 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xEC6BA8 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xEC6BAC + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_SIZE 0xEC6BB0 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xEC6BB4 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xEC6BB8 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_SIZE 0xEC6BBC + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xEC6BC0 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xEC6BC4 + +#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xEC6BC8 + +#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xEC6BCC + +#define mmTPC3_CFG_QM_TENSOR_6_PADDING_VALUE 0xEC6BD0 + +#define mmTPC3_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xEC6BD4 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_SIZE 0xEC6BD8 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xEC6BDC + +#define 
mmTPC3_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xEC6BE0 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_SIZE 0xEC6BE4 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xEC6BE8 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xEC6BEC + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_SIZE 0xEC6BF0 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xEC6BF4 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xEC6BF8 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_SIZE 0xEC6BFC + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xEC6C00 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xEC6C04 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_SIZE 0xEC6C08 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xEC6C0C + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xEC6C10 + +#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xEC6C14 + +#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xEC6C18 + +#define mmTPC3_CFG_QM_TENSOR_7_PADDING_VALUE 0xEC6C1C + +#define mmTPC3_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xEC6C20 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_SIZE 0xEC6C24 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xEC6C28 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xEC6C2C + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_SIZE 0xEC6C30 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xEC6C34 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xEC6C38 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_SIZE 0xEC6C3C + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xEC6C40 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xEC6C44 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_SIZE 0xEC6C48 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xEC6C4C + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xEC6C50 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_SIZE 0xEC6C54 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xEC6C58 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xEC6C5C + +#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xEC6C60 + +#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xEC6C64 + +#define mmTPC3_CFG_QM_TID_BASE_DIM_0 0xEC6C68 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_0 0xEC6C6C + +#define mmTPC3_CFG_QM_TID_BASE_DIM_1 0xEC6C70 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_1 0xEC6C74 + +#define mmTPC3_CFG_QM_TID_BASE_DIM_2 0xEC6C78 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_2 0xEC6C7C + +#define mmTPC3_CFG_QM_TID_BASE_DIM_3 0xEC6C80 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_3 0xEC6C84 + +#define mmTPC3_CFG_QM_TID_BASE_DIM_4 0xEC6C88 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_4 0xEC6C8C + +#define mmTPC3_CFG_QM_SRF_0 0xEC6C90 + +#define mmTPC3_CFG_QM_SRF_1 0xEC6C94 + +#define mmTPC3_CFG_QM_SRF_2 0xEC6C98 + +#define mmTPC3_CFG_QM_SRF_3 0xEC6C9C + +#define mmTPC3_CFG_QM_SRF_4 0xEC6CA0 + +#define mmTPC3_CFG_QM_SRF_5 0xEC6CA4 + +#define mmTPC3_CFG_QM_SRF_6 0xEC6CA8 + +#define mmTPC3_CFG_QM_SRF_7 0xEC6CAC + +#define mmTPC3_CFG_QM_SRF_8 0xEC6CB0 + +#define mmTPC3_CFG_QM_SRF_9 0xEC6CB4 + +#define mmTPC3_CFG_QM_SRF_10 0xEC6CB8 + +#define mmTPC3_CFG_QM_SRF_11 0xEC6CBC + +#define mmTPC3_CFG_QM_SRF_12 0xEC6CC0 + +#define mmTPC3_CFG_QM_SRF_13 0xEC6CC4 + +#define mmTPC3_CFG_QM_SRF_14 0xEC6CC8 + +#define mmTPC3_CFG_QM_SRF_15 0xEC6CCC + +#define mmTPC3_CFG_QM_SRF_16 0xEC6CD0 + +#define mmTPC3_CFG_QM_SRF_17 0xEC6CD4 + +#define mmTPC3_CFG_QM_SRF_18 0xEC6CD8 + +#define mmTPC3_CFG_QM_SRF_19 0xEC6CDC + +#define mmTPC3_CFG_QM_SRF_20 0xEC6CE0 + +#define mmTPC3_CFG_QM_SRF_21 0xEC6CE4 + +#define mmTPC3_CFG_QM_SRF_22 0xEC6CE8 + +#define mmTPC3_CFG_QM_SRF_23 0xEC6CEC + +#define mmTPC3_CFG_QM_SRF_24 0xEC6CF0 + +#define mmTPC3_CFG_QM_SRF_25 0xEC6CF4 + +#define mmTPC3_CFG_QM_SRF_26 0xEC6CF8 + +#define mmTPC3_CFG_QM_SRF_27 
0xEC6CFC + +#define mmTPC3_CFG_QM_SRF_28 0xEC6D00 + +#define mmTPC3_CFG_QM_SRF_29 0xEC6D04 + +#define mmTPC3_CFG_QM_SRF_30 0xEC6D08 + +#define mmTPC3_CFG_QM_SRF_31 0xEC6D0C + +#define mmTPC3_CFG_QM_KERNEL_CONFIG 0xEC6D10 + +#define mmTPC3_CFG_QM_SYNC_OBJECT_MESSAGE 0xEC6D14 + +#define mmTPC3_CFG_ARUSER 0xEC6D18 + +#define mmTPC3_CFG_AWUSER 0xEC6D1C + +#define mmTPC3_CFG_FUNC_MBIST_CNTRL 0xEC6E00 + +#define mmTPC3_CFG_FUNC_MBIST_PAT 0xEC6E04 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_0 0xEC6E08 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_1 0xEC6E0C + +#define mmTPC3_CFG_FUNC_MBIST_MEM_2 0xEC6E10 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_3 0xEC6E14 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_4 0xEC6E18 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_5 0xEC6E1C + +#define mmTPC3_CFG_FUNC_MBIST_MEM_6 0xEC6E20 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_7 0xEC6E24 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_8 0xEC6E28 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_9 0xEC6E2C + +#endif /* ASIC_REG_TPC3_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h new file mode 100644 index 000000000..11d81fca0 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC3_CMDQ_REGS_H_ +#define ASIC_REG_TPC3_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC3_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC3_CMDQ_GLBL_CFG0 0xEC9000 + +#define mmTPC3_CMDQ_GLBL_CFG1 0xEC9004 + +#define mmTPC3_CMDQ_GLBL_PROT 0xEC9008 + +#define mmTPC3_CMDQ_GLBL_ERR_CFG 0xEC900C + +#define mmTPC3_CMDQ_GLBL_ERR_ADDR_LO 0xEC9010 + +#define mmTPC3_CMDQ_GLBL_ERR_ADDR_HI 0xEC9014 + +#define mmTPC3_CMDQ_GLBL_ERR_WDATA 0xEC9018 + +#define mmTPC3_CMDQ_GLBL_SECURE_PROPS 0xEC901C + +#define mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS 0xEC9020 + +#define mmTPC3_CMDQ_GLBL_STS0 0xEC9024 + +#define mmTPC3_CMDQ_GLBL_STS1 0xEC9028 + +#define mmTPC3_CMDQ_CQ_CFG0 0xEC90B0 + +#define mmTPC3_CMDQ_CQ_CFG1 0xEC90B4 + +#define mmTPC3_CMDQ_CQ_ARUSER 0xEC90B8 + +#define mmTPC3_CMDQ_CQ_PTR_LO 0xEC90C0 + +#define mmTPC3_CMDQ_CQ_PTR_HI 0xEC90C4 + +#define mmTPC3_CMDQ_CQ_TSIZE 0xEC90C8 + +#define mmTPC3_CMDQ_CQ_CTL 0xEC90CC + +#define mmTPC3_CMDQ_CQ_PTR_LO_STS 0xEC90D4 + +#define mmTPC3_CMDQ_CQ_PTR_HI_STS 0xEC90D8 + +#define mmTPC3_CMDQ_CQ_TSIZE_STS 0xEC90DC + +#define mmTPC3_CMDQ_CQ_CTL_STS 0xEC90E0 + +#define mmTPC3_CMDQ_CQ_STS0 0xEC90E4 + +#define mmTPC3_CMDQ_CQ_STS1 0xEC90E8 + +#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_EN 0xEC90F0 + +#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xEC90F4 + +#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_SAT 0xEC90F8 + +#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_TOUT 0xEC90FC + +#define mmTPC3_CMDQ_CQ_IFIFO_CNT 0xEC9108 + +#define mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_LO 0xEC9120 + +#define mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_HI 0xEC9124 + +#define mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_LO 0xEC9128 + +#define mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_HI 0xEC912C + +#define mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_LO 0xEC9130 + +#define mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_HI 0xEC9134 + +#define mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_LO 0xEC9138 + +#define mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_HI 0xEC913C + +#define mmTPC3_CMDQ_CP_LDMA_TSIZE_OFFSET 0xEC9140 + +#define 
mmTPC3_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xEC9144 + +#define mmTPC3_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xEC9148 + +#define mmTPC3_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xEC914C + +#define mmTPC3_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xEC9150 + +#define mmTPC3_CMDQ_CP_LDMA_COMMIT_OFFSET 0xEC9154 + +#define mmTPC3_CMDQ_CP_FENCE0_RDATA 0xEC9158 + +#define mmTPC3_CMDQ_CP_FENCE1_RDATA 0xEC915C + +#define mmTPC3_CMDQ_CP_FENCE2_RDATA 0xEC9160 + +#define mmTPC3_CMDQ_CP_FENCE3_RDATA 0xEC9164 + +#define mmTPC3_CMDQ_CP_FENCE0_CNT 0xEC9168 + +#define mmTPC3_CMDQ_CP_FENCE1_CNT 0xEC916C + +#define mmTPC3_CMDQ_CP_FENCE2_CNT 0xEC9170 + +#define mmTPC3_CMDQ_CP_FENCE3_CNT 0xEC9174 + +#define mmTPC3_CMDQ_CP_STS 0xEC9178 + +#define mmTPC3_CMDQ_CP_CURRENT_INST_LO 0xEC917C + +#define mmTPC3_CMDQ_CP_CURRENT_INST_HI 0xEC9180 + +#define mmTPC3_CMDQ_CP_BARRIER_CFG 0xEC9184 + +#define mmTPC3_CMDQ_CP_DBG_0 0xEC9188 + +#define mmTPC3_CMDQ_CQ_BUF_ADDR 0xEC9308 + +#define mmTPC3_CMDQ_CQ_BUF_RDATA 0xEC930C + +#endif /* ASIC_REG_TPC3_CMDQ_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h new file mode 100644 index 000000000..e41595a19 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC3_QM_REGS_H_ +#define ASIC_REG_TPC3_QM_REGS_H_ + +/* + ***************************************** + * TPC3_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC3_QM_GLBL_CFG0 0xEC8000 + +#define mmTPC3_QM_GLBL_CFG1 0xEC8004 + +#define mmTPC3_QM_GLBL_PROT 0xEC8008 + +#define mmTPC3_QM_GLBL_ERR_CFG 0xEC800C + +#define mmTPC3_QM_GLBL_ERR_ADDR_LO 0xEC8010 + +#define mmTPC3_QM_GLBL_ERR_ADDR_HI 0xEC8014 + +#define mmTPC3_QM_GLBL_ERR_WDATA 0xEC8018 + +#define mmTPC3_QM_GLBL_SECURE_PROPS 0xEC801C + +#define mmTPC3_QM_GLBL_NON_SECURE_PROPS 0xEC8020 + +#define mmTPC3_QM_GLBL_STS0 0xEC8024 + +#define mmTPC3_QM_GLBL_STS1 0xEC8028 + +#define mmTPC3_QM_PQ_BASE_LO 0xEC8060 + +#define mmTPC3_QM_PQ_BASE_HI 0xEC8064 + +#define mmTPC3_QM_PQ_SIZE 0xEC8068 + +#define mmTPC3_QM_PQ_PI 0xEC806C + +#define mmTPC3_QM_PQ_CI 0xEC8070 + +#define mmTPC3_QM_PQ_CFG0 0xEC8074 + +#define mmTPC3_QM_PQ_CFG1 0xEC8078 + +#define mmTPC3_QM_PQ_ARUSER 0xEC807C + +#define mmTPC3_QM_PQ_PUSH0 0xEC8080 + +#define mmTPC3_QM_PQ_PUSH1 0xEC8084 + +#define mmTPC3_QM_PQ_PUSH2 0xEC8088 + +#define mmTPC3_QM_PQ_PUSH3 0xEC808C + +#define mmTPC3_QM_PQ_STS0 0xEC8090 + +#define mmTPC3_QM_PQ_STS1 0xEC8094 + +#define mmTPC3_QM_PQ_RD_RATE_LIM_EN 0xEC80A0 + +#define mmTPC3_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xEC80A4 + +#define mmTPC3_QM_PQ_RD_RATE_LIM_SAT 0xEC80A8 + +#define mmTPC3_QM_PQ_RD_RATE_LIM_TOUT 0xEC80AC + +#define mmTPC3_QM_CQ_CFG0 0xEC80B0 + +#define mmTPC3_QM_CQ_CFG1 0xEC80B4 + +#define mmTPC3_QM_CQ_ARUSER 0xEC80B8 + +#define mmTPC3_QM_CQ_PTR_LO 0xEC80C0 + +#define mmTPC3_QM_CQ_PTR_HI 0xEC80C4 + +#define mmTPC3_QM_CQ_TSIZE 0xEC80C8 + +#define mmTPC3_QM_CQ_CTL 0xEC80CC + +#define mmTPC3_QM_CQ_PTR_LO_STS 0xEC80D4 + +#define mmTPC3_QM_CQ_PTR_HI_STS 0xEC80D8 + +#define mmTPC3_QM_CQ_TSIZE_STS 0xEC80DC + +#define mmTPC3_QM_CQ_CTL_STS 0xEC80E0 + +#define mmTPC3_QM_CQ_STS0 0xEC80E4 + +#define mmTPC3_QM_CQ_STS1 0xEC80E8 + +#define mmTPC3_QM_CQ_RD_RATE_LIM_EN 0xEC80F0 + 
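
As a rough illustration of how these generated constants are consumed: each mm* value is the byte offset of a 32-bit register from the device's MMIO configuration base, and 64-bit quantities are split across _LO/_HI (or _LOW/_HIGH) register pairs, as the listings above show. The sketch below is not part of the patch; cfg_base and the accessor names are assumptions for illustration only, while the in-tree driver routes such accesses through its own RREG32/WREG32 wrappers.

#include <stdint.h>
#include "tpc3_qm_regs.h"   /* the generated offsets shown above */

/* Hypothetical pointer to the mapped configuration BAR (assumed set up
 * elsewhere by the probe path). */
static volatile uint8_t *cfg_base;

static inline void reg_write32(uint32_t offset, uint32_t val)
{
	*(volatile uint32_t *)(cfg_base + offset) = val;
}

/* Point the TPC3 queue manager at a queue buffer: the 64-bit base
 * address goes into the _LO/_HI register pair, followed by the size. */
static void tpc3_qm_set_pq(uint64_t pq_addr, uint32_t pq_size)
{
	reg_write32(mmTPC3_QM_PQ_BASE_LO, (uint32_t)pq_addr);
	reg_write32(mmTPC3_QM_PQ_BASE_HI, (uint32_t)(pq_addr >> 32));
	reg_write32(mmTPC3_QM_PQ_SIZE, pq_size);
}

The same low/high split explains the many *_BASE_ADDR_LOW/_HIGH and *_ADDR_LO/_HI pairs throughout these headers: the register file is 32 bits wide, so every address-sized field occupies two consecutive words.
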
+#define mmTPC3_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xEC80F4 + +#define mmTPC3_QM_CQ_RD_RATE_LIM_SAT 0xEC80F8 + +#define mmTPC3_QM_CQ_RD_RATE_LIM_TOUT 0xEC80FC + +#define mmTPC3_QM_CQ_IFIFO_CNT 0xEC8108 + +#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO 0xEC8120 + +#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI 0xEC8124 + +#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO 0xEC8128 + +#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI 0xEC812C + +#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO 0xEC8130 + +#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI 0xEC8134 + +#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO 0xEC8138 + +#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI 0xEC813C + +#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET 0xEC8140 + +#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xEC8144 + +#define mmTPC3_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xEC8148 + +#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xEC814C + +#define mmTPC3_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xEC8150 + +#define mmTPC3_QM_CP_LDMA_COMMIT_OFFSET 0xEC8154 + +#define mmTPC3_QM_CP_FENCE0_RDATA 0xEC8158 + +#define mmTPC3_QM_CP_FENCE1_RDATA 0xEC815C + +#define mmTPC3_QM_CP_FENCE2_RDATA 0xEC8160 + +#define mmTPC3_QM_CP_FENCE3_RDATA 0xEC8164 + +#define mmTPC3_QM_CP_FENCE0_CNT 0xEC8168 + +#define mmTPC3_QM_CP_FENCE1_CNT 0xEC816C + +#define mmTPC3_QM_CP_FENCE2_CNT 0xEC8170 + +#define mmTPC3_QM_CP_FENCE3_CNT 0xEC8174 + +#define mmTPC3_QM_CP_STS 0xEC8178 + +#define mmTPC3_QM_CP_CURRENT_INST_LO 0xEC817C + +#define mmTPC3_QM_CP_CURRENT_INST_HI 0xEC8180 + +#define mmTPC3_QM_CP_BARRIER_CFG 0xEC8184 + +#define mmTPC3_QM_CP_DBG_0 0xEC8188 + +#define mmTPC3_QM_PQ_BUF_ADDR 0xEC8300 + +#define mmTPC3_QM_PQ_BUF_RDATA 0xEC8304 + +#define mmTPC3_QM_CQ_BUF_ADDR 0xEC8308 + +#define mmTPC3_QM_CQ_BUF_RDATA 0xEC830C + +#endif /* ASIC_REG_TPC3_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h new file mode 100644 index 000000000..34a438b1e --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h @@ -0,0 +1,322 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC3_RTR_REGS_H_ +#define ASIC_REG_TPC3_RTR_REGS_H_ + +/* + ***************************************** + * TPC3_RTR (Prototype: TPC_RTR) + ***************************************** + */ + +#define mmTPC3_RTR_HBW_RD_RQ_E_ARB 0xEC0100 + +#define mmTPC3_RTR_HBW_RD_RQ_W_ARB 0xEC0104 + +#define mmTPC3_RTR_HBW_RD_RQ_N_ARB 0xEC0108 + +#define mmTPC3_RTR_HBW_RD_RQ_S_ARB 0xEC010C + +#define mmTPC3_RTR_HBW_RD_RQ_L_ARB 0xEC0110 + +#define mmTPC3_RTR_HBW_E_ARB_MAX 0xEC0120 + +#define mmTPC3_RTR_HBW_W_ARB_MAX 0xEC0124 + +#define mmTPC3_RTR_HBW_N_ARB_MAX 0xEC0128 + +#define mmTPC3_RTR_HBW_S_ARB_MAX 0xEC012C + +#define mmTPC3_RTR_HBW_L_ARB_MAX 0xEC0130 + +#define mmTPC3_RTR_HBW_RD_RS_E_ARB 0xEC0140 + +#define mmTPC3_RTR_HBW_RD_RS_W_ARB 0xEC0144 + +#define mmTPC3_RTR_HBW_RD_RS_N_ARB 0xEC0148 + +#define mmTPC3_RTR_HBW_RD_RS_S_ARB 0xEC014C + +#define mmTPC3_RTR_HBW_RD_RS_L_ARB 0xEC0150 + +#define mmTPC3_RTR_HBW_WR_RQ_E_ARB 0xEC0170 + +#define mmTPC3_RTR_HBW_WR_RQ_W_ARB 0xEC0174 + +#define mmTPC3_RTR_HBW_WR_RQ_N_ARB 0xEC0178 + +#define mmTPC3_RTR_HBW_WR_RQ_S_ARB 0xEC017C + +#define mmTPC3_RTR_HBW_WR_RQ_L_ARB 0xEC0180 + +#define mmTPC3_RTR_HBW_WR_RS_E_ARB 0xEC0190 + +#define mmTPC3_RTR_HBW_WR_RS_W_ARB 0xEC0194 + +#define mmTPC3_RTR_HBW_WR_RS_N_ARB 0xEC0198 + +#define mmTPC3_RTR_HBW_WR_RS_S_ARB 0xEC019C + +#define mmTPC3_RTR_HBW_WR_RS_L_ARB 0xEC01A0 + +#define mmTPC3_RTR_LBW_RD_RQ_E_ARB 0xEC0200 + +#define mmTPC3_RTR_LBW_RD_RQ_W_ARB 0xEC0204 + +#define mmTPC3_RTR_LBW_RD_RQ_N_ARB 0xEC0208 + +#define mmTPC3_RTR_LBW_RD_RQ_S_ARB 0xEC020C + +#define mmTPC3_RTR_LBW_RD_RQ_L_ARB 0xEC0210 + +#define mmTPC3_RTR_LBW_E_ARB_MAX 0xEC0220 + +#define mmTPC3_RTR_LBW_W_ARB_MAX 0xEC0224 + +#define mmTPC3_RTR_LBW_N_ARB_MAX 0xEC0228 + +#define mmTPC3_RTR_LBW_S_ARB_MAX 0xEC022C + +#define mmTPC3_RTR_LBW_L_ARB_MAX 0xEC0230 + +#define mmTPC3_RTR_LBW_RD_RS_E_ARB 0xEC0250 + +#define mmTPC3_RTR_LBW_RD_RS_W_ARB 0xEC0254 + +#define mmTPC3_RTR_LBW_RD_RS_N_ARB 0xEC0258 + +#define mmTPC3_RTR_LBW_RD_RS_S_ARB 0xEC025C + +#define mmTPC3_RTR_LBW_RD_RS_L_ARB 0xEC0260 + +#define mmTPC3_RTR_LBW_WR_RQ_E_ARB 0xEC0270 + +#define mmTPC3_RTR_LBW_WR_RQ_W_ARB 0xEC0274 + +#define mmTPC3_RTR_LBW_WR_RQ_N_ARB 0xEC0278 + +#define mmTPC3_RTR_LBW_WR_RQ_S_ARB 0xEC027C + +#define mmTPC3_RTR_LBW_WR_RQ_L_ARB 0xEC0280 + +#define mmTPC3_RTR_LBW_WR_RS_E_ARB 0xEC0290 + +#define mmTPC3_RTR_LBW_WR_RS_W_ARB 0xEC0294 + +#define mmTPC3_RTR_LBW_WR_RS_N_ARB 0xEC0298 + +#define mmTPC3_RTR_LBW_WR_RS_S_ARB 0xEC029C + +#define mmTPC3_RTR_LBW_WR_RS_L_ARB 0xEC02A0 + +#define mmTPC3_RTR_DBG_E_ARB 0xEC0300 + +#define mmTPC3_RTR_DBG_W_ARB 0xEC0304 + +#define mmTPC3_RTR_DBG_N_ARB 0xEC0308 + +#define mmTPC3_RTR_DBG_S_ARB 0xEC030C + +#define mmTPC3_RTR_DBG_L_ARB 0xEC0310 + +#define mmTPC3_RTR_DBG_E_ARB_MAX 0xEC0320 + +#define mmTPC3_RTR_DBG_W_ARB_MAX 0xEC0324 + +#define mmTPC3_RTR_DBG_N_ARB_MAX 0xEC0328 + +#define mmTPC3_RTR_DBG_S_ARB_MAX 0xEC032C + +#define mmTPC3_RTR_DBG_L_ARB_MAX 0xEC0330 + +#define mmTPC3_RTR_SPLIT_COEF_0 0xEC0400 + +#define mmTPC3_RTR_SPLIT_COEF_1 0xEC0404 + +#define mmTPC3_RTR_SPLIT_COEF_2 0xEC0408 + +#define mmTPC3_RTR_SPLIT_COEF_3 0xEC040C + +#define mmTPC3_RTR_SPLIT_COEF_4 0xEC0410 + +#define mmTPC3_RTR_SPLIT_COEF_5 0xEC0414 + +#define mmTPC3_RTR_SPLIT_COEF_6 0xEC0418 + +#define mmTPC3_RTR_SPLIT_COEF_7 0xEC041C + +#define mmTPC3_RTR_SPLIT_COEF_8 0xEC0420 + +#define mmTPC3_RTR_SPLIT_COEF_9 
0xEC0424 + +#define mmTPC3_RTR_SPLIT_CFG 0xEC0440 + +#define mmTPC3_RTR_SPLIT_RD_SAT 0xEC0444 + +#define mmTPC3_RTR_SPLIT_RD_RST_TOKEN 0xEC0448 + +#define mmTPC3_RTR_SPLIT_RD_TIMEOUT_0 0xEC044C + +#define mmTPC3_RTR_SPLIT_RD_TIMEOUT_1 0xEC0450 + +#define mmTPC3_RTR_SPLIT_WR_SAT 0xEC0454 + +#define mmTPC3_RTR_WPLIT_WR_TST_TOLEN 0xEC0458 + +#define mmTPC3_RTR_SPLIT_WR_TIMEOUT_0 0xEC045C + +#define mmTPC3_RTR_SPLIT_WR_TIMEOUT_1 0xEC0460 + +#define mmTPC3_RTR_HBW_RANGE_HIT 0xEC0470 + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_0 0xEC0480 + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_1 0xEC0484 + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_2 0xEC0488 + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_3 0xEC048C + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_4 0xEC0490 + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_5 0xEC0494 + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_6 0xEC0498 + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_7 0xEC049C + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_0 0xEC04A0 + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_1 0xEC04A4 + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_2 0xEC04A8 + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_3 0xEC04AC + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_4 0xEC04B0 + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_5 0xEC04B4 + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_6 0xEC04B8 + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_7 0xEC04BC + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_0 0xEC04C0 + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_1 0xEC04C4 + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_2 0xEC04C8 + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_3 0xEC04CC + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_4 0xEC04D0 + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_5 0xEC04D4 + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_6 0xEC04D8 + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_7 0xEC04DC + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_0 0xEC04E0 + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_1 0xEC04E4 + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_2 0xEC04E8 + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_3 0xEC04EC + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_4 0xEC04F0 + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_5 0xEC04F4 + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_6 0xEC04F8 + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_7 0xEC04FC + +#define mmTPC3_RTR_LBW_RANGE_HIT 0xEC0500 + +#define mmTPC3_RTR_LBW_RANGE_MASK_0 0xEC0510 + +#define mmTPC3_RTR_LBW_RANGE_MASK_1 0xEC0514 + +#define mmTPC3_RTR_LBW_RANGE_MASK_2 0xEC0518 + +#define mmTPC3_RTR_LBW_RANGE_MASK_3 0xEC051C + +#define mmTPC3_RTR_LBW_RANGE_MASK_4 0xEC0520 + +#define mmTPC3_RTR_LBW_RANGE_MASK_5 0xEC0524 + +#define mmTPC3_RTR_LBW_RANGE_MASK_6 0xEC0528 + +#define mmTPC3_RTR_LBW_RANGE_MASK_7 0xEC052C + +#define mmTPC3_RTR_LBW_RANGE_MASK_8 0xEC0530 + +#define mmTPC3_RTR_LBW_RANGE_MASK_9 0xEC0534 + +#define mmTPC3_RTR_LBW_RANGE_MASK_10 0xEC0538 + +#define mmTPC3_RTR_LBW_RANGE_MASK_11 0xEC053C + +#define mmTPC3_RTR_LBW_RANGE_MASK_12 0xEC0540 + +#define mmTPC3_RTR_LBW_RANGE_MASK_13 0xEC0544 + +#define mmTPC3_RTR_LBW_RANGE_MASK_14 0xEC0548 + +#define mmTPC3_RTR_LBW_RANGE_MASK_15 0xEC054C + +#define mmTPC3_RTR_LBW_RANGE_BASE_0 0xEC0550 + +#define mmTPC3_RTR_LBW_RANGE_BASE_1 0xEC0554 + +#define mmTPC3_RTR_LBW_RANGE_BASE_2 0xEC0558 + +#define mmTPC3_RTR_LBW_RANGE_BASE_3 0xEC055C + +#define mmTPC3_RTR_LBW_RANGE_BASE_4 0xEC0560 + +#define mmTPC3_RTR_LBW_RANGE_BASE_5 0xEC0564 + +#define mmTPC3_RTR_LBW_RANGE_BASE_6 0xEC0568 + +#define mmTPC3_RTR_LBW_RANGE_BASE_7 0xEC056C + +#define mmTPC3_RTR_LBW_RANGE_BASE_8 0xEC0570 + +#define mmTPC3_RTR_LBW_RANGE_BASE_9 0xEC0574 + +#define mmTPC3_RTR_LBW_RANGE_BASE_10 0xEC0578 + +#define mmTPC3_RTR_LBW_RANGE_BASE_11 0xEC057C + +#define mmTPC3_RTR_LBW_RANGE_BASE_12 0xEC0580 + +#define 
mmTPC3_RTR_LBW_RANGE_BASE_13 0xEC0584 + +#define mmTPC3_RTR_LBW_RANGE_BASE_14 0xEC0588 + +#define mmTPC3_RTR_LBW_RANGE_BASE_15 0xEC058C + +#define mmTPC3_RTR_RGLTR 0xEC0590 + +#define mmTPC3_RTR_RGLTR_WR_RESULT 0xEC0594 + +#define mmTPC3_RTR_RGLTR_RD_RESULT 0xEC0598 + +#define mmTPC3_RTR_SCRAMB_EN 0xEC0600 + +#define mmTPC3_RTR_NON_LIN_SCRAMB 0xEC0604 + +#endif /* ASIC_REG_TPC3_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h new file mode 100644 index 000000000..d44caf0fc --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h @@ -0,0 +1,886 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC4_CFG_REGS_H_ +#define ASIC_REG_TPC4_CFG_REGS_H_ + +/* + ***************************************** + * TPC4_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF06400 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF06404 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF06408 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF0640C + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF06410 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF06414 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF06418 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF0641C + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF06420 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF06424 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF06428 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF0642C + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF06430 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF06434 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF06438 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF0643C + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF06440 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF06444 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF06448 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF0644C + +#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF06450 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF06454 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF06458 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF0645C + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF06460 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF06464 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF06468 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF0646C + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF06470 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF06474 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF06478 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF0647C + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF06480 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF06484 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF06488 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF0648C + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF06490 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF06494 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF06498 + +#define 
mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF0649C + +#define mmTPC4_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF064A0 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF064A4 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF064A8 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF064AC + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF064B0 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF064B4 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF064B8 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF064BC + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF064C0 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF064C4 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF064C8 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF064CC + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF064D0 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF064D4 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF064D8 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF064DC + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF064E0 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF064E4 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF064E8 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF064EC + +#define mmTPC4_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF064F0 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF064F4 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF064F8 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF064FC + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF06500 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF06504 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF06508 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF0650C + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF06510 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF06514 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF06518 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF0651C + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF06520 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF06524 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF06528 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF0652C + +#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF06530 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF06534 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF06538 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF0653C + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF06540 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF06544 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF06548 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF0654C + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF06550 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF06554 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF06558 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF0655C + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF06560 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF06564 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF06568 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF0656C + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF06570 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF06574 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF06578 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF0657C + +#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF06580 + +#define 
mmTPC4_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF06584 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF06588 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF0658C + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF06590 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF06594 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF06598 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF0659C + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF065A0 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF065A4 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF065A8 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF065AC + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF065B0 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF065B4 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF065B8 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF065BC + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF065C0 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF065C4 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF065C8 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF065CC + +#define mmTPC4_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF065D0 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF065D4 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF065D8 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF065DC + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF065E0 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF065E4 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF065E8 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF065EC + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF065F0 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF065F4 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF065F8 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF065FC + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF06600 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF06604 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF06608 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF0660C + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF06610 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF06614 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF06618 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF0661C + +#define mmTPC4_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF06620 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF06624 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF06628 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF0662C + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF06630 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF06634 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF06638 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF0663C + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF06640 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF06644 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF06648 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF0664C + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF06650 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF06654 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF06658 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF0665C + +#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF06660 + +#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF06664 + +#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_0 0xF06668 + +#define 
mmTPC4_CFG_KERNEL_TID_SIZE_DIM_0 0xF0666C + +#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_1 0xF06670 + +#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_1 0xF06674 + +#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_2 0xF06678 + +#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_2 0xF0667C + +#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_3 0xF06680 + +#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_3 0xF06684 + +#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_4 0xF06688 + +#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_4 0xF0668C + +#define mmTPC4_CFG_KERNEL_SRF_0 0xF06690 + +#define mmTPC4_CFG_KERNEL_SRF_1 0xF06694 + +#define mmTPC4_CFG_KERNEL_SRF_2 0xF06698 + +#define mmTPC4_CFG_KERNEL_SRF_3 0xF0669C + +#define mmTPC4_CFG_KERNEL_SRF_4 0xF066A0 + +#define mmTPC4_CFG_KERNEL_SRF_5 0xF066A4 + +#define mmTPC4_CFG_KERNEL_SRF_6 0xF066A8 + +#define mmTPC4_CFG_KERNEL_SRF_7 0xF066AC + +#define mmTPC4_CFG_KERNEL_SRF_8 0xF066B0 + +#define mmTPC4_CFG_KERNEL_SRF_9 0xF066B4 + +#define mmTPC4_CFG_KERNEL_SRF_10 0xF066B8 + +#define mmTPC4_CFG_KERNEL_SRF_11 0xF066BC + +#define mmTPC4_CFG_KERNEL_SRF_12 0xF066C0 + +#define mmTPC4_CFG_KERNEL_SRF_13 0xF066C4 + +#define mmTPC4_CFG_KERNEL_SRF_14 0xF066C8 + +#define mmTPC4_CFG_KERNEL_SRF_15 0xF066CC + +#define mmTPC4_CFG_KERNEL_SRF_16 0xF066D0 + +#define mmTPC4_CFG_KERNEL_SRF_17 0xF066D4 + +#define mmTPC4_CFG_KERNEL_SRF_18 0xF066D8 + +#define mmTPC4_CFG_KERNEL_SRF_19 0xF066DC + +#define mmTPC4_CFG_KERNEL_SRF_20 0xF066E0 + +#define mmTPC4_CFG_KERNEL_SRF_21 0xF066E4 + +#define mmTPC4_CFG_KERNEL_SRF_22 0xF066E8 + +#define mmTPC4_CFG_KERNEL_SRF_23 0xF066EC + +#define mmTPC4_CFG_KERNEL_SRF_24 0xF066F0 + +#define mmTPC4_CFG_KERNEL_SRF_25 0xF066F4 + +#define mmTPC4_CFG_KERNEL_SRF_26 0xF066F8 + +#define mmTPC4_CFG_KERNEL_SRF_27 0xF066FC + +#define mmTPC4_CFG_KERNEL_SRF_28 0xF06700 + +#define mmTPC4_CFG_KERNEL_SRF_29 0xF06704 + +#define mmTPC4_CFG_KERNEL_SRF_30 0xF06708 + +#define mmTPC4_CFG_KERNEL_SRF_31 0xF0670C + +#define mmTPC4_CFG_KERNEL_KERNEL_CONFIG 0xF06710 + +#define mmTPC4_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF06714 + +#define mmTPC4_CFG_RESERVED_DESC_END 0xF06738 + +#define mmTPC4_CFG_ROUND_CSR 0xF067FC + +#define mmTPC4_CFG_TBUF_BASE_ADDR_LOW 0xF06800 + +#define mmTPC4_CFG_TBUF_BASE_ADDR_HIGH 0xF06804 + +#define mmTPC4_CFG_SEMAPHORE 0xF06808 + +#define mmTPC4_CFG_VFLAGS 0xF0680C + +#define mmTPC4_CFG_SFLAGS 0xF06810 + +#define mmTPC4_CFG_LFSR_POLYNOM 0xF06818 + +#define mmTPC4_CFG_STATUS 0xF0681C + +#define mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH 0xF06820 + +#define mmTPC4_CFG_CFG_SUBTRACT_VALUE 0xF06824 + +#define mmTPC4_CFG_SM_BASE_ADDRESS_LOW 0xF06828 + +#define mmTPC4_CFG_SM_BASE_ADDRESS_HIGH 0xF0682C + +#define mmTPC4_CFG_TPC_CMD 0xF06830 + +#define mmTPC4_CFG_TPC_EXECUTE 0xF06838 + +#define mmTPC4_CFG_TPC_STALL 0xF0683C + +#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_LOW 0xF06840 + +#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF06844 + +#define mmTPC4_CFG_MSS_CONFIG 0xF06854 + +#define mmTPC4_CFG_TPC_INTR_CAUSE 0xF06858 + +#define mmTPC4_CFG_TPC_INTR_MASK 0xF0685C + +#define mmTPC4_CFG_TSB_CONFIG 0xF06860 + +#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF06A00 + +#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF06A04 + +#define mmTPC4_CFG_QM_TENSOR_0_PADDING_VALUE 0xF06A08 + +#define mmTPC4_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF06A0C + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF06A10 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF06A14 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF06A18 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF06A1C + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF06A20 + +#define 
mmTPC4_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF06A24 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF06A28 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF06A2C + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF06A30 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF06A34 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF06A38 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF06A3C + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF06A40 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF06A44 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF06A48 + +#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF06A4C + +#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF06A50 + +#define mmTPC4_CFG_QM_TENSOR_1_PADDING_VALUE 0xF06A54 + +#define mmTPC4_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF06A58 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF06A5C + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF06A60 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF06A64 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF06A68 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF06A6C + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF06A70 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF06A74 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF06A78 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF06A7C + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF06A80 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF06A84 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF06A88 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF06A8C + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF06A90 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF06A94 + +#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF06A98 + +#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF06A9C + +#define mmTPC4_CFG_QM_TENSOR_2_PADDING_VALUE 0xF06AA0 + +#define mmTPC4_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF06AA4 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF06AA8 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF06AAC + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF06AB0 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF06AB4 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF06AB8 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF06ABC + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF06AC0 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF06AC4 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF06AC8 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF06ACC + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF06AD0 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF06AD4 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF06AD8 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF06ADC + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF06AE0 + +#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF06AE4 + +#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF06AE8 + +#define mmTPC4_CFG_QM_TENSOR_3_PADDING_VALUE 0xF06AEC + +#define mmTPC4_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF06AF0 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF06AF4 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF06AF8 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF06AFC + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF06B00 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF06B04 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF06B08 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF06B0C + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF06B10 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF06B14 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF06B18 + +#define 
mmTPC4_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF06B1C + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF06B20 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF06B24 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF06B28 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF06B2C + +#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF06B30 + +#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF06B34 + +#define mmTPC4_CFG_QM_TENSOR_4_PADDING_VALUE 0xF06B38 + +#define mmTPC4_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF06B3C + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF06B40 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF06B44 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF06B48 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF06B4C + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF06B50 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF06B54 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF06B58 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF06B5C + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF06B60 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF06B64 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF06B68 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF06B6C + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF06B70 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF06B74 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF06B78 + +#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF06B7C + +#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF06B80 + +#define mmTPC4_CFG_QM_TENSOR_5_PADDING_VALUE 0xF06B84 + +#define mmTPC4_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF06B88 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF06B8C + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF06B90 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF06B94 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF06B98 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF06B9C + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF06BA0 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF06BA4 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF06BA8 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF06BAC + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF06BB0 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF06BB4 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF06BB8 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF06BBC + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF06BC0 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF06BC4 + +#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF06BC8 + +#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF06BCC + +#define mmTPC4_CFG_QM_TENSOR_6_PADDING_VALUE 0xF06BD0 + +#define mmTPC4_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF06BD4 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF06BD8 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF06BDC + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF06BE0 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF06BE4 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF06BE8 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF06BEC + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF06BF0 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF06BF4 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF06BF8 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF06BFC + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF06C00 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF06C04 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF06C08 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF06C0C + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF06C10 + +#define 
mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF06C14 + +#define mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF06C18 + +#define mmTPC4_CFG_QM_TENSOR_7_PADDING_VALUE 0xF06C1C + +#define mmTPC4_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF06C20 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF06C24 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF06C28 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF06C2C + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF06C30 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF06C34 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF06C38 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF06C3C + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF06C40 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF06C44 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF06C48 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF06C4C + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF06C50 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF06C54 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF06C58 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF06C5C + +#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF06C60 + +#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF06C64 + +#define mmTPC4_CFG_QM_TID_BASE_DIM_0 0xF06C68 + +#define mmTPC4_CFG_QM_TID_SIZE_DIM_0 0xF06C6C + +#define mmTPC4_CFG_QM_TID_BASE_DIM_1 0xF06C70 + +#define mmTPC4_CFG_QM_TID_SIZE_DIM_1 0xF06C74 + +#define mmTPC4_CFG_QM_TID_BASE_DIM_2 0xF06C78 + +#define mmTPC4_CFG_QM_TID_SIZE_DIM_2 0xF06C7C + +#define mmTPC4_CFG_QM_TID_BASE_DIM_3 0xF06C80 + +#define mmTPC4_CFG_QM_TID_SIZE_DIM_3 0xF06C84 + +#define mmTPC4_CFG_QM_TID_BASE_DIM_4 0xF06C88 + +#define mmTPC4_CFG_QM_TID_SIZE_DIM_4 0xF06C8C + +#define mmTPC4_CFG_QM_SRF_0 0xF06C90 + +#define mmTPC4_CFG_QM_SRF_1 0xF06C94 + +#define mmTPC4_CFG_QM_SRF_2 0xF06C98 + +#define mmTPC4_CFG_QM_SRF_3 0xF06C9C + +#define mmTPC4_CFG_QM_SRF_4 0xF06CA0 + +#define mmTPC4_CFG_QM_SRF_5 0xF06CA4 + +#define mmTPC4_CFG_QM_SRF_6 0xF06CA8 + +#define mmTPC4_CFG_QM_SRF_7 0xF06CAC + +#define mmTPC4_CFG_QM_SRF_8 0xF06CB0 + +#define mmTPC4_CFG_QM_SRF_9 0xF06CB4 + +#define mmTPC4_CFG_QM_SRF_10 0xF06CB8 + +#define mmTPC4_CFG_QM_SRF_11 0xF06CBC + +#define mmTPC4_CFG_QM_SRF_12 0xF06CC0 + +#define mmTPC4_CFG_QM_SRF_13 0xF06CC4 + +#define mmTPC4_CFG_QM_SRF_14 0xF06CC8 + +#define mmTPC4_CFG_QM_SRF_15 0xF06CCC + +#define mmTPC4_CFG_QM_SRF_16 0xF06CD0 + +#define mmTPC4_CFG_QM_SRF_17 0xF06CD4 + +#define mmTPC4_CFG_QM_SRF_18 0xF06CD8 + +#define mmTPC4_CFG_QM_SRF_19 0xF06CDC + +#define mmTPC4_CFG_QM_SRF_20 0xF06CE0 + +#define mmTPC4_CFG_QM_SRF_21 0xF06CE4 + +#define mmTPC4_CFG_QM_SRF_22 0xF06CE8 + +#define mmTPC4_CFG_QM_SRF_23 0xF06CEC + +#define mmTPC4_CFG_QM_SRF_24 0xF06CF0 + +#define mmTPC4_CFG_QM_SRF_25 0xF06CF4 + +#define mmTPC4_CFG_QM_SRF_26 0xF06CF8 + +#define mmTPC4_CFG_QM_SRF_27 0xF06CFC + +#define mmTPC4_CFG_QM_SRF_28 0xF06D00 + +#define mmTPC4_CFG_QM_SRF_29 0xF06D04 + +#define mmTPC4_CFG_QM_SRF_30 0xF06D08 + +#define mmTPC4_CFG_QM_SRF_31 0xF06D0C + +#define mmTPC4_CFG_QM_KERNEL_CONFIG 0xF06D10 + +#define mmTPC4_CFG_QM_SYNC_OBJECT_MESSAGE 0xF06D14 + +#define mmTPC4_CFG_ARUSER 0xF06D18 + +#define mmTPC4_CFG_AWUSER 0xF06D1C + +#define mmTPC4_CFG_FUNC_MBIST_CNTRL 0xF06E00 + +#define mmTPC4_CFG_FUNC_MBIST_PAT 0xF06E04 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_0 0xF06E08 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_1 0xF06E0C + +#define mmTPC4_CFG_FUNC_MBIST_MEM_2 0xF06E10 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_3 0xF06E14 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_4 0xF06E18 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_5 0xF06E1C + +#define 
mmTPC4_CFG_FUNC_MBIST_MEM_6 0xF06E20 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_7 0xF06E24 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_8 0xF06E28 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_9 0xF06E2C + +#endif /* ASIC_REG_TPC4_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h new file mode 100644 index 000000000..f13a65329 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC4_CMDQ_REGS_H_ +#define ASIC_REG_TPC4_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC4_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC4_CMDQ_GLBL_CFG0 0xF09000 + +#define mmTPC4_CMDQ_GLBL_CFG1 0xF09004 + +#define mmTPC4_CMDQ_GLBL_PROT 0xF09008 + +#define mmTPC4_CMDQ_GLBL_ERR_CFG 0xF0900C + +#define mmTPC4_CMDQ_GLBL_ERR_ADDR_LO 0xF09010 + +#define mmTPC4_CMDQ_GLBL_ERR_ADDR_HI 0xF09014 + +#define mmTPC4_CMDQ_GLBL_ERR_WDATA 0xF09018 + +#define mmTPC4_CMDQ_GLBL_SECURE_PROPS 0xF0901C + +#define mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS 0xF09020 + +#define mmTPC4_CMDQ_GLBL_STS0 0xF09024 + +#define mmTPC4_CMDQ_GLBL_STS1 0xF09028 + +#define mmTPC4_CMDQ_CQ_CFG0 0xF090B0 + +#define mmTPC4_CMDQ_CQ_CFG1 0xF090B4 + +#define mmTPC4_CMDQ_CQ_ARUSER 0xF090B8 + +#define mmTPC4_CMDQ_CQ_PTR_LO 0xF090C0 + +#define mmTPC4_CMDQ_CQ_PTR_HI 0xF090C4 + +#define mmTPC4_CMDQ_CQ_TSIZE 0xF090C8 + +#define mmTPC4_CMDQ_CQ_CTL 0xF090CC + +#define mmTPC4_CMDQ_CQ_PTR_LO_STS 0xF090D4 + +#define mmTPC4_CMDQ_CQ_PTR_HI_STS 0xF090D8 + +#define mmTPC4_CMDQ_CQ_TSIZE_STS 0xF090DC + +#define mmTPC4_CMDQ_CQ_CTL_STS 0xF090E0 + +#define mmTPC4_CMDQ_CQ_STS0 0xF090E4 + +#define mmTPC4_CMDQ_CQ_STS1 0xF090E8 + +#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_EN 0xF090F0 + +#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF090F4 + +#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_SAT 0xF090F8 + +#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF090FC + +#define mmTPC4_CMDQ_CQ_IFIFO_CNT 0xF09108 + +#define mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF09120 + +#define mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF09124 + +#define mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF09128 + +#define mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF0912C + +#define mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF09130 + +#define mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF09134 + +#define mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF09138 + +#define mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF0913C + +#define mmTPC4_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF09140 + +#define mmTPC4_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF09144 + +#define mmTPC4_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF09148 + +#define mmTPC4_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF0914C + +#define mmTPC4_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF09150 + +#define mmTPC4_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF09154 + +#define mmTPC4_CMDQ_CP_FENCE0_RDATA 0xF09158 + +#define mmTPC4_CMDQ_CP_FENCE1_RDATA 0xF0915C + +#define mmTPC4_CMDQ_CP_FENCE2_RDATA 0xF09160 + +#define mmTPC4_CMDQ_CP_FENCE3_RDATA 0xF09164 + +#define mmTPC4_CMDQ_CP_FENCE0_CNT 0xF09168 + +#define mmTPC4_CMDQ_CP_FENCE1_CNT 0xF0916C + +#define mmTPC4_CMDQ_CP_FENCE2_CNT 0xF09170 + +#define mmTPC4_CMDQ_CP_FENCE3_CNT 0xF09174 + +#define mmTPC4_CMDQ_CP_STS 0xF09178 + +#define mmTPC4_CMDQ_CP_CURRENT_INST_LO 0xF0917C + +#define 
mmTPC4_CMDQ_CP_CURRENT_INST_HI 0xF09180 + +#define mmTPC4_CMDQ_CP_BARRIER_CFG 0xF09184 + +#define mmTPC4_CMDQ_CP_DBG_0 0xF09188 + +#define mmTPC4_CMDQ_CQ_BUF_ADDR 0xF09308 + +#define mmTPC4_CMDQ_CQ_BUF_RDATA 0xF0930C + +#endif /* ASIC_REG_TPC4_CMDQ_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h new file mode 100644 index 000000000..db081fc17 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC4_QM_REGS_H_ +#define ASIC_REG_TPC4_QM_REGS_H_ + +/* + ***************************************** + * TPC4_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC4_QM_GLBL_CFG0 0xF08000 + +#define mmTPC4_QM_GLBL_CFG1 0xF08004 + +#define mmTPC4_QM_GLBL_PROT 0xF08008 + +#define mmTPC4_QM_GLBL_ERR_CFG 0xF0800C + +#define mmTPC4_QM_GLBL_ERR_ADDR_LO 0xF08010 + +#define mmTPC4_QM_GLBL_ERR_ADDR_HI 0xF08014 + +#define mmTPC4_QM_GLBL_ERR_WDATA 0xF08018 + +#define mmTPC4_QM_GLBL_SECURE_PROPS 0xF0801C + +#define mmTPC4_QM_GLBL_NON_SECURE_PROPS 0xF08020 + +#define mmTPC4_QM_GLBL_STS0 0xF08024 + +#define mmTPC4_QM_GLBL_STS1 0xF08028 + +#define mmTPC4_QM_PQ_BASE_LO 0xF08060 + +#define mmTPC4_QM_PQ_BASE_HI 0xF08064 + +#define mmTPC4_QM_PQ_SIZE 0xF08068 + +#define mmTPC4_QM_PQ_PI 0xF0806C + +#define mmTPC4_QM_PQ_CI 0xF08070 + +#define mmTPC4_QM_PQ_CFG0 0xF08074 + +#define mmTPC4_QM_PQ_CFG1 0xF08078 + +#define mmTPC4_QM_PQ_ARUSER 0xF0807C + +#define mmTPC4_QM_PQ_PUSH0 0xF08080 + +#define mmTPC4_QM_PQ_PUSH1 0xF08084 + +#define mmTPC4_QM_PQ_PUSH2 0xF08088 + +#define mmTPC4_QM_PQ_PUSH3 0xF0808C + +#define mmTPC4_QM_PQ_STS0 0xF08090 + +#define mmTPC4_QM_PQ_STS1 0xF08094 + +#define mmTPC4_QM_PQ_RD_RATE_LIM_EN 0xF080A0 + +#define mmTPC4_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF080A4 + +#define mmTPC4_QM_PQ_RD_RATE_LIM_SAT 0xF080A8 + +#define mmTPC4_QM_PQ_RD_RATE_LIM_TOUT 0xF080AC + +#define mmTPC4_QM_CQ_CFG0 0xF080B0 + +#define mmTPC4_QM_CQ_CFG1 0xF080B4 + +#define mmTPC4_QM_CQ_ARUSER 0xF080B8 + +#define mmTPC4_QM_CQ_PTR_LO 0xF080C0 + +#define mmTPC4_QM_CQ_PTR_HI 0xF080C4 + +#define mmTPC4_QM_CQ_TSIZE 0xF080C8 + +#define mmTPC4_QM_CQ_CTL 0xF080CC + +#define mmTPC4_QM_CQ_PTR_LO_STS 0xF080D4 + +#define mmTPC4_QM_CQ_PTR_HI_STS 0xF080D8 + +#define mmTPC4_QM_CQ_TSIZE_STS 0xF080DC + +#define mmTPC4_QM_CQ_CTL_STS 0xF080E0 + +#define mmTPC4_QM_CQ_STS0 0xF080E4 + +#define mmTPC4_QM_CQ_STS1 0xF080E8 + +#define mmTPC4_QM_CQ_RD_RATE_LIM_EN 0xF080F0 + +#define mmTPC4_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF080F4 + +#define mmTPC4_QM_CQ_RD_RATE_LIM_SAT 0xF080F8 + +#define mmTPC4_QM_CQ_RD_RATE_LIM_TOUT 0xF080FC + +#define mmTPC4_QM_CQ_IFIFO_CNT 0xF08108 + +#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO 0xF08120 + +#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI 0xF08124 + +#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO 0xF08128 + +#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI 0xF0812C + +#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO 0xF08130 + +#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI 0xF08134 + +#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO 0xF08138 + +#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI 0xF0813C + +#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET 0xF08140 + +#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF08144 + +#define 
mmTPC4_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF08148 + +#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF0814C + +#define mmTPC4_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF08150 + +#define mmTPC4_QM_CP_LDMA_COMMIT_OFFSET 0xF08154 + +#define mmTPC4_QM_CP_FENCE0_RDATA 0xF08158 + +#define mmTPC4_QM_CP_FENCE1_RDATA 0xF0815C + +#define mmTPC4_QM_CP_FENCE2_RDATA 0xF08160 + +#define mmTPC4_QM_CP_FENCE3_RDATA 0xF08164 + +#define mmTPC4_QM_CP_FENCE0_CNT 0xF08168 + +#define mmTPC4_QM_CP_FENCE1_CNT 0xF0816C + +#define mmTPC4_QM_CP_FENCE2_CNT 0xF08170 + +#define mmTPC4_QM_CP_FENCE3_CNT 0xF08174 + +#define mmTPC4_QM_CP_STS 0xF08178 + +#define mmTPC4_QM_CP_CURRENT_INST_LO 0xF0817C + +#define mmTPC4_QM_CP_CURRENT_INST_HI 0xF08180 + +#define mmTPC4_QM_CP_BARRIER_CFG 0xF08184 + +#define mmTPC4_QM_CP_DBG_0 0xF08188 + +#define mmTPC4_QM_PQ_BUF_ADDR 0xF08300 + +#define mmTPC4_QM_PQ_BUF_RDATA 0xF08304 + +#define mmTPC4_QM_CQ_BUF_ADDR 0xF08308 + +#define mmTPC4_QM_CQ_BUF_RDATA 0xF0830C + +#endif /* ASIC_REG_TPC4_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h new file mode 100644 index 000000000..8c5372303 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h @@ -0,0 +1,322 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC4_RTR_REGS_H_ +#define ASIC_REG_TPC4_RTR_REGS_H_ + +/* + ***************************************** + * TPC4_RTR (Prototype: TPC_RTR) + ***************************************** + */ + +#define mmTPC4_RTR_HBW_RD_RQ_E_ARB 0xF00100 + +#define mmTPC4_RTR_HBW_RD_RQ_W_ARB 0xF00104 + +#define mmTPC4_RTR_HBW_RD_RQ_N_ARB 0xF00108 + +#define mmTPC4_RTR_HBW_RD_RQ_S_ARB 0xF0010C + +#define mmTPC4_RTR_HBW_RD_RQ_L_ARB 0xF00110 + +#define mmTPC4_RTR_HBW_E_ARB_MAX 0xF00120 + +#define mmTPC4_RTR_HBW_W_ARB_MAX 0xF00124 + +#define mmTPC4_RTR_HBW_N_ARB_MAX 0xF00128 + +#define mmTPC4_RTR_HBW_S_ARB_MAX 0xF0012C + +#define mmTPC4_RTR_HBW_L_ARB_MAX 0xF00130 + +#define mmTPC4_RTR_HBW_RD_RS_E_ARB 0xF00140 + +#define mmTPC4_RTR_HBW_RD_RS_W_ARB 0xF00144 + +#define mmTPC4_RTR_HBW_RD_RS_N_ARB 0xF00148 + +#define mmTPC4_RTR_HBW_RD_RS_S_ARB 0xF0014C + +#define mmTPC4_RTR_HBW_RD_RS_L_ARB 0xF00150 + +#define mmTPC4_RTR_HBW_WR_RQ_E_ARB 0xF00170 + +#define mmTPC4_RTR_HBW_WR_RQ_W_ARB 0xF00174 + +#define mmTPC4_RTR_HBW_WR_RQ_N_ARB 0xF00178 + +#define mmTPC4_RTR_HBW_WR_RQ_S_ARB 0xF0017C + +#define mmTPC4_RTR_HBW_WR_RQ_L_ARB 0xF00180 + +#define mmTPC4_RTR_HBW_WR_RS_E_ARB 0xF00190 + +#define mmTPC4_RTR_HBW_WR_RS_W_ARB 0xF00194 + +#define mmTPC4_RTR_HBW_WR_RS_N_ARB 0xF00198 + +#define mmTPC4_RTR_HBW_WR_RS_S_ARB 0xF0019C + +#define mmTPC4_RTR_HBW_WR_RS_L_ARB 0xF001A0 + +#define mmTPC4_RTR_LBW_RD_RQ_E_ARB 0xF00200 + +#define mmTPC4_RTR_LBW_RD_RQ_W_ARB 0xF00204 + +#define mmTPC4_RTR_LBW_RD_RQ_N_ARB 0xF00208 + +#define mmTPC4_RTR_LBW_RD_RQ_S_ARB 0xF0020C + +#define mmTPC4_RTR_LBW_RD_RQ_L_ARB 0xF00210 + +#define mmTPC4_RTR_LBW_E_ARB_MAX 0xF00220 + +#define mmTPC4_RTR_LBW_W_ARB_MAX 0xF00224 + +#define mmTPC4_RTR_LBW_N_ARB_MAX 0xF00228 + +#define mmTPC4_RTR_LBW_S_ARB_MAX 0xF0022C + +#define mmTPC4_RTR_LBW_L_ARB_MAX 0xF00230 + +#define mmTPC4_RTR_LBW_RD_RS_E_ARB 0xF00250 + +#define mmTPC4_RTR_LBW_RD_RS_W_ARB 0xF00254 + +#define mmTPC4_RTR_LBW_RD_RS_N_ARB 0xF00258 + +#define 
mmTPC4_RTR_LBW_RD_RS_S_ARB 0xF0025C + +#define mmTPC4_RTR_LBW_RD_RS_L_ARB 0xF00260 + +#define mmTPC4_RTR_LBW_WR_RQ_E_ARB 0xF00270 + +#define mmTPC4_RTR_LBW_WR_RQ_W_ARB 0xF00274 + +#define mmTPC4_RTR_LBW_WR_RQ_N_ARB 0xF00278 + +#define mmTPC4_RTR_LBW_WR_RQ_S_ARB 0xF0027C + +#define mmTPC4_RTR_LBW_WR_RQ_L_ARB 0xF00280 + +#define mmTPC4_RTR_LBW_WR_RS_E_ARB 0xF00290 + +#define mmTPC4_RTR_LBW_WR_RS_W_ARB 0xF00294 + +#define mmTPC4_RTR_LBW_WR_RS_N_ARB 0xF00298 + +#define mmTPC4_RTR_LBW_WR_RS_S_ARB 0xF0029C + +#define mmTPC4_RTR_LBW_WR_RS_L_ARB 0xF002A0 + +#define mmTPC4_RTR_DBG_E_ARB 0xF00300 + +#define mmTPC4_RTR_DBG_W_ARB 0xF00304 + +#define mmTPC4_RTR_DBG_N_ARB 0xF00308 + +#define mmTPC4_RTR_DBG_S_ARB 0xF0030C + +#define mmTPC4_RTR_DBG_L_ARB 0xF00310 + +#define mmTPC4_RTR_DBG_E_ARB_MAX 0xF00320 + +#define mmTPC4_RTR_DBG_W_ARB_MAX 0xF00324 + +#define mmTPC4_RTR_DBG_N_ARB_MAX 0xF00328 + +#define mmTPC4_RTR_DBG_S_ARB_MAX 0xF0032C + +#define mmTPC4_RTR_DBG_L_ARB_MAX 0xF00330 + +#define mmTPC4_RTR_SPLIT_COEF_0 0xF00400 + +#define mmTPC4_RTR_SPLIT_COEF_1 0xF00404 + +#define mmTPC4_RTR_SPLIT_COEF_2 0xF00408 + +#define mmTPC4_RTR_SPLIT_COEF_3 0xF0040C + +#define mmTPC4_RTR_SPLIT_COEF_4 0xF00410 + +#define mmTPC4_RTR_SPLIT_COEF_5 0xF00414 + +#define mmTPC4_RTR_SPLIT_COEF_6 0xF00418 + +#define mmTPC4_RTR_SPLIT_COEF_7 0xF0041C + +#define mmTPC4_RTR_SPLIT_COEF_8 0xF00420 + +#define mmTPC4_RTR_SPLIT_COEF_9 0xF00424 + +#define mmTPC4_RTR_SPLIT_CFG 0xF00440 + +#define mmTPC4_RTR_SPLIT_RD_SAT 0xF00444 + +#define mmTPC4_RTR_SPLIT_RD_RST_TOKEN 0xF00448 + +#define mmTPC4_RTR_SPLIT_RD_TIMEOUT_0 0xF0044C + +#define mmTPC4_RTR_SPLIT_RD_TIMEOUT_1 0xF00450 + +#define mmTPC4_RTR_SPLIT_WR_SAT 0xF00454 + +#define mmTPC4_RTR_WPLIT_WR_TST_TOLEN 0xF00458 + +#define mmTPC4_RTR_SPLIT_WR_TIMEOUT_0 0xF0045C + +#define mmTPC4_RTR_SPLIT_WR_TIMEOUT_1 0xF00460 + +#define mmTPC4_RTR_HBW_RANGE_HIT 0xF00470 + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_0 0xF00480 + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_1 0xF00484 + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_2 0xF00488 + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_3 0xF0048C + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_4 0xF00490 + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_5 0xF00494 + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_6 0xF00498 + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_7 0xF0049C + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_0 0xF004A0 + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_1 0xF004A4 + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_2 0xF004A8 + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_3 0xF004AC + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_4 0xF004B0 + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_5 0xF004B4 + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_6 0xF004B8 + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_7 0xF004BC + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_0 0xF004C0 + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_1 0xF004C4 + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_2 0xF004C8 + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_3 0xF004CC + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_4 0xF004D0 + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_5 0xF004D4 + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_6 0xF004D8 + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_7 0xF004DC + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_0 0xF004E0 + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_1 0xF004E4 + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_2 0xF004E8 + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_3 0xF004EC + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_4 0xF004F0 + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_5 0xF004F4 + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_6 0xF004F8 + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_7 0xF004FC + +#define mmTPC4_RTR_LBW_RANGE_HIT 0xF00500 + +#define 
mmTPC4_RTR_LBW_RANGE_MASK_0 0xF00510 + +#define mmTPC4_RTR_LBW_RANGE_MASK_1 0xF00514 + +#define mmTPC4_RTR_LBW_RANGE_MASK_2 0xF00518 + +#define mmTPC4_RTR_LBW_RANGE_MASK_3 0xF0051C + +#define mmTPC4_RTR_LBW_RANGE_MASK_4 0xF00520 + +#define mmTPC4_RTR_LBW_RANGE_MASK_5 0xF00524 + +#define mmTPC4_RTR_LBW_RANGE_MASK_6 0xF00528 + +#define mmTPC4_RTR_LBW_RANGE_MASK_7 0xF0052C + +#define mmTPC4_RTR_LBW_RANGE_MASK_8 0xF00530 + +#define mmTPC4_RTR_LBW_RANGE_MASK_9 0xF00534 + +#define mmTPC4_RTR_LBW_RANGE_MASK_10 0xF00538 + +#define mmTPC4_RTR_LBW_RANGE_MASK_11 0xF0053C + +#define mmTPC4_RTR_LBW_RANGE_MASK_12 0xF00540 + +#define mmTPC4_RTR_LBW_RANGE_MASK_13 0xF00544 + +#define mmTPC4_RTR_LBW_RANGE_MASK_14 0xF00548 + +#define mmTPC4_RTR_LBW_RANGE_MASK_15 0xF0054C + +#define mmTPC4_RTR_LBW_RANGE_BASE_0 0xF00550 + +#define mmTPC4_RTR_LBW_RANGE_BASE_1 0xF00554 + +#define mmTPC4_RTR_LBW_RANGE_BASE_2 0xF00558 + +#define mmTPC4_RTR_LBW_RANGE_BASE_3 0xF0055C + +#define mmTPC4_RTR_LBW_RANGE_BASE_4 0xF00560 + +#define mmTPC4_RTR_LBW_RANGE_BASE_5 0xF00564 + +#define mmTPC4_RTR_LBW_RANGE_BASE_6 0xF00568 + +#define mmTPC4_RTR_LBW_RANGE_BASE_7 0xF0056C + +#define mmTPC4_RTR_LBW_RANGE_BASE_8 0xF00570 + +#define mmTPC4_RTR_LBW_RANGE_BASE_9 0xF00574 + +#define mmTPC4_RTR_LBW_RANGE_BASE_10 0xF00578 + +#define mmTPC4_RTR_LBW_RANGE_BASE_11 0xF0057C + +#define mmTPC4_RTR_LBW_RANGE_BASE_12 0xF00580 + +#define mmTPC4_RTR_LBW_RANGE_BASE_13 0xF00584 + +#define mmTPC4_RTR_LBW_RANGE_BASE_14 0xF00588 + +#define mmTPC4_RTR_LBW_RANGE_BASE_15 0xF0058C + +#define mmTPC4_RTR_RGLTR 0xF00590 + +#define mmTPC4_RTR_RGLTR_WR_RESULT 0xF00594 + +#define mmTPC4_RTR_RGLTR_RD_RESULT 0xF00598 + +#define mmTPC4_RTR_SCRAMB_EN 0xF00600 + +#define mmTPC4_RTR_NON_LIN_SCRAMB 0xF00604 + +#endif /* ASIC_REG_TPC4_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h new file mode 100644 index 000000000..5139fde71 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h @@ -0,0 +1,886 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC5_CFG_REGS_H_ +#define ASIC_REG_TPC5_CFG_REGS_H_ + +/* + ***************************************** + * TPC5_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF46400 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF46404 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF46408 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF4640C + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF46410 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF46414 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF46418 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF4641C + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF46420 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF46424 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF46428 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF4642C + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF46430 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF46434 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF46438 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF4643C + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF46440 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF46444 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF46448 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF4644C + +#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF46450 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF46454 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF46458 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF4645C + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF46460 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF46464 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF46468 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF4646C + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF46470 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF46474 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF46478 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF4647C + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF46480 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF46484 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF46488 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF4648C + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF46490 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF46494 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF46498 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF4649C + +#define mmTPC5_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF464A0 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF464A4 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF464A8 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF464AC + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF464B0 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF464B4 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF464B8 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF464BC + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF464C0 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF464C4 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF464C8 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF464CC + +#define 
mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF464D0 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF464D4 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF464D8 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF464DC + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF464E0 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF464E4 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF464E8 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF464EC + +#define mmTPC5_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF464F0 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF464F4 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF464F8 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF464FC + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF46500 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF46504 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF46508 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF4650C + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF46510 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF46514 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF46518 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF4651C + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF46520 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF46524 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF46528 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF4652C + +#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF46530 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF46534 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF46538 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF4653C + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF46540 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF46544 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF46548 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF4654C + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF46550 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF46554 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF46558 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF4655C + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF46560 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF46564 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF46568 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF4656C + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF46570 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF46574 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF46578 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF4657C + +#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF46580 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF46584 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF46588 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF4658C + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF46590 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF46594 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF46598 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF4659C + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF465A0 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF465A4 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF465A8 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF465AC + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF465B0 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF465B4 + +#define 
mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF465B8 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF465BC + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF465C0 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF465C4 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF465C8 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF465CC + +#define mmTPC5_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF465D0 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF465D4 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF465D8 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF465DC + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF465E0 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF465E4 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF465E8 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF465EC + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF465F0 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF465F4 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF465F8 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF465FC + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF46600 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF46604 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF46608 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF4660C + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF46610 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF46614 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF46618 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF4661C + +#define mmTPC5_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF46620 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF46624 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF46628 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF4662C + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF46630 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF46634 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF46638 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF4663C + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF46640 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF46644 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF46648 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF4664C + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF46650 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF46654 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF46658 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF4665C + +#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF46660 + +#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF46664 + +#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_0 0xF46668 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_0 0xF4666C + +#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_1 0xF46670 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_1 0xF46674 + +#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_2 0xF46678 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_2 0xF4667C + +#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_3 0xF46680 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_3 0xF46684 + +#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_4 0xF46688 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_4 0xF4668C + +#define mmTPC5_CFG_KERNEL_SRF_0 0xF46690 + +#define mmTPC5_CFG_KERNEL_SRF_1 0xF46694 + +#define mmTPC5_CFG_KERNEL_SRF_2 0xF46698 + +#define mmTPC5_CFG_KERNEL_SRF_3 0xF4669C + +#define mmTPC5_CFG_KERNEL_SRF_4 0xF466A0 + +#define mmTPC5_CFG_KERNEL_SRF_5 0xF466A4 + +#define mmTPC5_CFG_KERNEL_SRF_6 0xF466A8 + +#define mmTPC5_CFG_KERNEL_SRF_7 
0xF466AC + +#define mmTPC5_CFG_KERNEL_SRF_8 0xF466B0 + +#define mmTPC5_CFG_KERNEL_SRF_9 0xF466B4 + +#define mmTPC5_CFG_KERNEL_SRF_10 0xF466B8 + +#define mmTPC5_CFG_KERNEL_SRF_11 0xF466BC + +#define mmTPC5_CFG_KERNEL_SRF_12 0xF466C0 + +#define mmTPC5_CFG_KERNEL_SRF_13 0xF466C4 + +#define mmTPC5_CFG_KERNEL_SRF_14 0xF466C8 + +#define mmTPC5_CFG_KERNEL_SRF_15 0xF466CC + +#define mmTPC5_CFG_KERNEL_SRF_16 0xF466D0 + +#define mmTPC5_CFG_KERNEL_SRF_17 0xF466D4 + +#define mmTPC5_CFG_KERNEL_SRF_18 0xF466D8 + +#define mmTPC5_CFG_KERNEL_SRF_19 0xF466DC + +#define mmTPC5_CFG_KERNEL_SRF_20 0xF466E0 + +#define mmTPC5_CFG_KERNEL_SRF_21 0xF466E4 + +#define mmTPC5_CFG_KERNEL_SRF_22 0xF466E8 + +#define mmTPC5_CFG_KERNEL_SRF_23 0xF466EC + +#define mmTPC5_CFG_KERNEL_SRF_24 0xF466F0 + +#define mmTPC5_CFG_KERNEL_SRF_25 0xF466F4 + +#define mmTPC5_CFG_KERNEL_SRF_26 0xF466F8 + +#define mmTPC5_CFG_KERNEL_SRF_27 0xF466FC + +#define mmTPC5_CFG_KERNEL_SRF_28 0xF46700 + +#define mmTPC5_CFG_KERNEL_SRF_29 0xF46704 + +#define mmTPC5_CFG_KERNEL_SRF_30 0xF46708 + +#define mmTPC5_CFG_KERNEL_SRF_31 0xF4670C + +#define mmTPC5_CFG_KERNEL_KERNEL_CONFIG 0xF46710 + +#define mmTPC5_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF46714 + +#define mmTPC5_CFG_RESERVED_DESC_END 0xF46738 + +#define mmTPC5_CFG_ROUND_CSR 0xF467FC + +#define mmTPC5_CFG_TBUF_BASE_ADDR_LOW 0xF46800 + +#define mmTPC5_CFG_TBUF_BASE_ADDR_HIGH 0xF46804 + +#define mmTPC5_CFG_SEMAPHORE 0xF46808 + +#define mmTPC5_CFG_VFLAGS 0xF4680C + +#define mmTPC5_CFG_SFLAGS 0xF46810 + +#define mmTPC5_CFG_LFSR_POLYNOM 0xF46818 + +#define mmTPC5_CFG_STATUS 0xF4681C + +#define mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH 0xF46820 + +#define mmTPC5_CFG_CFG_SUBTRACT_VALUE 0xF46824 + +#define mmTPC5_CFG_SM_BASE_ADDRESS_LOW 0xF46828 + +#define mmTPC5_CFG_SM_BASE_ADDRESS_HIGH 0xF4682C + +#define mmTPC5_CFG_TPC_CMD 0xF46830 + +#define mmTPC5_CFG_TPC_EXECUTE 0xF46838 + +#define mmTPC5_CFG_TPC_STALL 0xF4683C + +#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_LOW 0xF46840 + +#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF46844 + +#define mmTPC5_CFG_MSS_CONFIG 0xF46854 + +#define mmTPC5_CFG_TPC_INTR_CAUSE 0xF46858 + +#define mmTPC5_CFG_TPC_INTR_MASK 0xF4685C + +#define mmTPC5_CFG_TSB_CONFIG 0xF46860 + +#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF46A00 + +#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF46A04 + +#define mmTPC5_CFG_QM_TENSOR_0_PADDING_VALUE 0xF46A08 + +#define mmTPC5_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF46A0C + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF46A10 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF46A14 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF46A18 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF46A1C + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF46A20 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF46A24 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF46A28 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF46A2C + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF46A30 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF46A34 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF46A38 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF46A3C + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF46A40 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF46A44 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF46A48 + +#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF46A4C + +#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF46A50 + +#define mmTPC5_CFG_QM_TENSOR_1_PADDING_VALUE 0xF46A54 + +#define mmTPC5_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF46A58 + +#define 
mmTPC5_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF46A5C + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF46A60 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF46A64 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF46A68 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF46A6C + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF46A70 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF46A74 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF46A78 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF46A7C + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF46A80 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF46A84 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF46A88 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF46A8C + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF46A90 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF46A94 + +#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF46A98 + +#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF46A9C + +#define mmTPC5_CFG_QM_TENSOR_2_PADDING_VALUE 0xF46AA0 + +#define mmTPC5_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF46AA4 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF46AA8 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF46AAC + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF46AB0 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF46AB4 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF46AB8 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF46ABC + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF46AC0 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF46AC4 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF46AC8 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF46ACC + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF46AD0 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF46AD4 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF46AD8 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF46ADC + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF46AE0 + +#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF46AE4 + +#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF46AE8 + +#define mmTPC5_CFG_QM_TENSOR_3_PADDING_VALUE 0xF46AEC + +#define mmTPC5_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF46AF0 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF46AF4 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF46AF8 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF46AFC + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF46B00 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF46B04 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF46B08 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF46B0C + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF46B10 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF46B14 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF46B18 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF46B1C + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF46B20 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF46B24 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF46B28 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF46B2C + +#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF46B30 + +#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF46B34 + +#define mmTPC5_CFG_QM_TENSOR_4_PADDING_VALUE 0xF46B38 + +#define mmTPC5_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF46B3C + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF46B40 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF46B44 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF46B48 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF46B4C + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF46B50 + +#define 
mmTPC5_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF46B54 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF46B58 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF46B5C + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF46B60 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF46B64 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF46B68 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF46B6C + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF46B70 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF46B74 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF46B78 + +#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF46B7C + +#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF46B80 + +#define mmTPC5_CFG_QM_TENSOR_5_PADDING_VALUE 0xF46B84 + +#define mmTPC5_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF46B88 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF46B8C + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF46B90 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF46B94 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF46B98 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF46B9C + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF46BA0 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF46BA4 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF46BA8 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF46BAC + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF46BB0 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF46BB4 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF46BB8 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF46BBC + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF46BC0 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF46BC4 + +#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF46BC8 + +#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF46BCC + +#define mmTPC5_CFG_QM_TENSOR_6_PADDING_VALUE 0xF46BD0 + +#define mmTPC5_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF46BD4 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF46BD8 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF46BDC + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF46BE0 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF46BE4 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF46BE8 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF46BEC + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF46BF0 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF46BF4 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF46BF8 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF46BFC + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF46C00 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF46C04 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF46C08 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF46C0C + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF46C10 + +#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF46C14 + +#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF46C18 + +#define mmTPC5_CFG_QM_TENSOR_7_PADDING_VALUE 0xF46C1C + +#define mmTPC5_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF46C20 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF46C24 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF46C28 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF46C2C + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF46C30 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF46C34 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF46C38 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF46C3C + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF46C40 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF46C44 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF46C48 + +#define 
mmTPC5_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF46C4C + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF46C50 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF46C54 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF46C58 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF46C5C + +#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF46C60 + +#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF46C64 + +#define mmTPC5_CFG_QM_TID_BASE_DIM_0 0xF46C68 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_0 0xF46C6C + +#define mmTPC5_CFG_QM_TID_BASE_DIM_1 0xF46C70 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_1 0xF46C74 + +#define mmTPC5_CFG_QM_TID_BASE_DIM_2 0xF46C78 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_2 0xF46C7C + +#define mmTPC5_CFG_QM_TID_BASE_DIM_3 0xF46C80 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_3 0xF46C84 + +#define mmTPC5_CFG_QM_TID_BASE_DIM_4 0xF46C88 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_4 0xF46C8C + +#define mmTPC5_CFG_QM_SRF_0 0xF46C90 + +#define mmTPC5_CFG_QM_SRF_1 0xF46C94 + +#define mmTPC5_CFG_QM_SRF_2 0xF46C98 + +#define mmTPC5_CFG_QM_SRF_3 0xF46C9C + +#define mmTPC5_CFG_QM_SRF_4 0xF46CA0 + +#define mmTPC5_CFG_QM_SRF_5 0xF46CA4 + +#define mmTPC5_CFG_QM_SRF_6 0xF46CA8 + +#define mmTPC5_CFG_QM_SRF_7 0xF46CAC + +#define mmTPC5_CFG_QM_SRF_8 0xF46CB0 + +#define mmTPC5_CFG_QM_SRF_9 0xF46CB4 + +#define mmTPC5_CFG_QM_SRF_10 0xF46CB8 + +#define mmTPC5_CFG_QM_SRF_11 0xF46CBC + +#define mmTPC5_CFG_QM_SRF_12 0xF46CC0 + +#define mmTPC5_CFG_QM_SRF_13 0xF46CC4 + +#define mmTPC5_CFG_QM_SRF_14 0xF46CC8 + +#define mmTPC5_CFG_QM_SRF_15 0xF46CCC + +#define mmTPC5_CFG_QM_SRF_16 0xF46CD0 + +#define mmTPC5_CFG_QM_SRF_17 0xF46CD4 + +#define mmTPC5_CFG_QM_SRF_18 0xF46CD8 + +#define mmTPC5_CFG_QM_SRF_19 0xF46CDC + +#define mmTPC5_CFG_QM_SRF_20 0xF46CE0 + +#define mmTPC5_CFG_QM_SRF_21 0xF46CE4 + +#define mmTPC5_CFG_QM_SRF_22 0xF46CE8 + +#define mmTPC5_CFG_QM_SRF_23 0xF46CEC + +#define mmTPC5_CFG_QM_SRF_24 0xF46CF0 + +#define mmTPC5_CFG_QM_SRF_25 0xF46CF4 + +#define mmTPC5_CFG_QM_SRF_26 0xF46CF8 + +#define mmTPC5_CFG_QM_SRF_27 0xF46CFC + +#define mmTPC5_CFG_QM_SRF_28 0xF46D00 + +#define mmTPC5_CFG_QM_SRF_29 0xF46D04 + +#define mmTPC5_CFG_QM_SRF_30 0xF46D08 + +#define mmTPC5_CFG_QM_SRF_31 0xF46D0C + +#define mmTPC5_CFG_QM_KERNEL_CONFIG 0xF46D10 + +#define mmTPC5_CFG_QM_SYNC_OBJECT_MESSAGE 0xF46D14 + +#define mmTPC5_CFG_ARUSER 0xF46D18 + +#define mmTPC5_CFG_AWUSER 0xF46D1C + +#define mmTPC5_CFG_FUNC_MBIST_CNTRL 0xF46E00 + +#define mmTPC5_CFG_FUNC_MBIST_PAT 0xF46E04 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_0 0xF46E08 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_1 0xF46E0C + +#define mmTPC5_CFG_FUNC_MBIST_MEM_2 0xF46E10 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_3 0xF46E14 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_4 0xF46E18 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_5 0xF46E1C + +#define mmTPC5_CFG_FUNC_MBIST_MEM_6 0xF46E20 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_7 0xF46E24 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_8 0xF46E28 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_9 0xF46E2C + +#endif /* ASIC_REG_TPC5_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h new file mode 100644 index 000000000..1e7cd6e1e --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC5_CMDQ_REGS_H_ +#define ASIC_REG_TPC5_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC5_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC5_CMDQ_GLBL_CFG0 0xF49000 + +#define mmTPC5_CMDQ_GLBL_CFG1 0xF49004 + +#define mmTPC5_CMDQ_GLBL_PROT 0xF49008 + +#define mmTPC5_CMDQ_GLBL_ERR_CFG 0xF4900C + +#define mmTPC5_CMDQ_GLBL_ERR_ADDR_LO 0xF49010 + +#define mmTPC5_CMDQ_GLBL_ERR_ADDR_HI 0xF49014 + +#define mmTPC5_CMDQ_GLBL_ERR_WDATA 0xF49018 + +#define mmTPC5_CMDQ_GLBL_SECURE_PROPS 0xF4901C + +#define mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS 0xF49020 + +#define mmTPC5_CMDQ_GLBL_STS0 0xF49024 + +#define mmTPC5_CMDQ_GLBL_STS1 0xF49028 + +#define mmTPC5_CMDQ_CQ_CFG0 0xF490B0 + +#define mmTPC5_CMDQ_CQ_CFG1 0xF490B4 + +#define mmTPC5_CMDQ_CQ_ARUSER 0xF490B8 + +#define mmTPC5_CMDQ_CQ_PTR_LO 0xF490C0 + +#define mmTPC5_CMDQ_CQ_PTR_HI 0xF490C4 + +#define mmTPC5_CMDQ_CQ_TSIZE 0xF490C8 + +#define mmTPC5_CMDQ_CQ_CTL 0xF490CC + +#define mmTPC5_CMDQ_CQ_PTR_LO_STS 0xF490D4 + +#define mmTPC5_CMDQ_CQ_PTR_HI_STS 0xF490D8 + +#define mmTPC5_CMDQ_CQ_TSIZE_STS 0xF490DC + +#define mmTPC5_CMDQ_CQ_CTL_STS 0xF490E0 + +#define mmTPC5_CMDQ_CQ_STS0 0xF490E4 + +#define mmTPC5_CMDQ_CQ_STS1 0xF490E8 + +#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_EN 0xF490F0 + +#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF490F4 + +#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_SAT 0xF490F8 + +#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF490FC + +#define mmTPC5_CMDQ_CQ_IFIFO_CNT 0xF49108 + +#define mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF49120 + +#define mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF49124 + +#define mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF49128 + +#define mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF4912C + +#define mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF49130 + +#define mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF49134 + +#define mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF49138 + +#define mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF4913C + +#define mmTPC5_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF49140 + +#define mmTPC5_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF49144 + +#define mmTPC5_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF49148 + +#define mmTPC5_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF4914C + +#define mmTPC5_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF49150 + +#define mmTPC5_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF49154 + +#define mmTPC5_CMDQ_CP_FENCE0_RDATA 0xF49158 + +#define mmTPC5_CMDQ_CP_FENCE1_RDATA 0xF4915C + +#define mmTPC5_CMDQ_CP_FENCE2_RDATA 0xF49160 + +#define mmTPC5_CMDQ_CP_FENCE3_RDATA 0xF49164 + +#define mmTPC5_CMDQ_CP_FENCE0_CNT 0xF49168 + +#define mmTPC5_CMDQ_CP_FENCE1_CNT 0xF4916C + +#define mmTPC5_CMDQ_CP_FENCE2_CNT 0xF49170 + +#define mmTPC5_CMDQ_CP_FENCE3_CNT 0xF49174 + +#define mmTPC5_CMDQ_CP_STS 0xF49178 + +#define mmTPC5_CMDQ_CP_CURRENT_INST_LO 0xF4917C + +#define mmTPC5_CMDQ_CP_CURRENT_INST_HI 0xF49180 + +#define mmTPC5_CMDQ_CP_BARRIER_CFG 0xF49184 + +#define mmTPC5_CMDQ_CP_DBG_0 0xF49188 + +#define mmTPC5_CMDQ_CQ_BUF_ADDR 0xF49308 + +#define mmTPC5_CMDQ_CQ_BUF_RDATA 0xF4930C + +#endif /* ASIC_REG_TPC5_CMDQ_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h new file mode 100644 index 000000000..ac0d3820c --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. 
+ * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC5_QM_REGS_H_ +#define ASIC_REG_TPC5_QM_REGS_H_ + +/* + ***************************************** + * TPC5_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC5_QM_GLBL_CFG0 0xF48000 + +#define mmTPC5_QM_GLBL_CFG1 0xF48004 + +#define mmTPC5_QM_GLBL_PROT 0xF48008 + +#define mmTPC5_QM_GLBL_ERR_CFG 0xF4800C + +#define mmTPC5_QM_GLBL_ERR_ADDR_LO 0xF48010 + +#define mmTPC5_QM_GLBL_ERR_ADDR_HI 0xF48014 + +#define mmTPC5_QM_GLBL_ERR_WDATA 0xF48018 + +#define mmTPC5_QM_GLBL_SECURE_PROPS 0xF4801C + +#define mmTPC5_QM_GLBL_NON_SECURE_PROPS 0xF48020 + +#define mmTPC5_QM_GLBL_STS0 0xF48024 + +#define mmTPC5_QM_GLBL_STS1 0xF48028 + +#define mmTPC5_QM_PQ_BASE_LO 0xF48060 + +#define mmTPC5_QM_PQ_BASE_HI 0xF48064 + +#define mmTPC5_QM_PQ_SIZE 0xF48068 + +#define mmTPC5_QM_PQ_PI 0xF4806C + +#define mmTPC5_QM_PQ_CI 0xF48070 + +#define mmTPC5_QM_PQ_CFG0 0xF48074 + +#define mmTPC5_QM_PQ_CFG1 0xF48078 + +#define mmTPC5_QM_PQ_ARUSER 0xF4807C + +#define mmTPC5_QM_PQ_PUSH0 0xF48080 + +#define mmTPC5_QM_PQ_PUSH1 0xF48084 + +#define mmTPC5_QM_PQ_PUSH2 0xF48088 + +#define mmTPC5_QM_PQ_PUSH3 0xF4808C + +#define mmTPC5_QM_PQ_STS0 0xF48090 + +#define mmTPC5_QM_PQ_STS1 0xF48094 + +#define mmTPC5_QM_PQ_RD_RATE_LIM_EN 0xF480A0 + +#define mmTPC5_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF480A4 + +#define mmTPC5_QM_PQ_RD_RATE_LIM_SAT 0xF480A8 + +#define mmTPC5_QM_PQ_RD_RATE_LIM_TOUT 0xF480AC + +#define mmTPC5_QM_CQ_CFG0 0xF480B0 + +#define mmTPC5_QM_CQ_CFG1 0xF480B4 + +#define mmTPC5_QM_CQ_ARUSER 0xF480B8 + +#define mmTPC5_QM_CQ_PTR_LO 0xF480C0 + +#define mmTPC5_QM_CQ_PTR_HI 0xF480C4 + +#define mmTPC5_QM_CQ_TSIZE 0xF480C8 + +#define mmTPC5_QM_CQ_CTL 0xF480CC + +#define mmTPC5_QM_CQ_PTR_LO_STS 0xF480D4 + +#define mmTPC5_QM_CQ_PTR_HI_STS 0xF480D8 + +#define mmTPC5_QM_CQ_TSIZE_STS 0xF480DC + +#define mmTPC5_QM_CQ_CTL_STS 0xF480E0 + +#define mmTPC5_QM_CQ_STS0 0xF480E4 + +#define mmTPC5_QM_CQ_STS1 0xF480E8 + +#define mmTPC5_QM_CQ_RD_RATE_LIM_EN 0xF480F0 + +#define mmTPC5_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF480F4 + +#define mmTPC5_QM_CQ_RD_RATE_LIM_SAT 0xF480F8 + +#define mmTPC5_QM_CQ_RD_RATE_LIM_TOUT 0xF480FC + +#define mmTPC5_QM_CQ_IFIFO_CNT 0xF48108 + +#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO 0xF48120 + +#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI 0xF48124 + +#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO 0xF48128 + +#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI 0xF4812C + +#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO 0xF48130 + +#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI 0xF48134 + +#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO 0xF48138 + +#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI 0xF4813C + +#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET 0xF48140 + +#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF48144 + +#define mmTPC5_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF48148 + +#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF4814C + +#define mmTPC5_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF48150 + +#define mmTPC5_QM_CP_LDMA_COMMIT_OFFSET 0xF48154 + +#define mmTPC5_QM_CP_FENCE0_RDATA 0xF48158 + +#define mmTPC5_QM_CP_FENCE1_RDATA 0xF4815C + +#define mmTPC5_QM_CP_FENCE2_RDATA 0xF48160 + +#define mmTPC5_QM_CP_FENCE3_RDATA 0xF48164 + +#define mmTPC5_QM_CP_FENCE0_CNT 0xF48168 + +#define mmTPC5_QM_CP_FENCE1_CNT 0xF4816C + +#define mmTPC5_QM_CP_FENCE2_CNT 0xF48170 + +#define mmTPC5_QM_CP_FENCE3_CNT 0xF48174 + +#define mmTPC5_QM_CP_STS 0xF48178 + +#define mmTPC5_QM_CP_CURRENT_INST_LO 0xF4817C + 
+#define mmTPC5_QM_CP_CURRENT_INST_HI 0xF48180 + +#define mmTPC5_QM_CP_BARRIER_CFG 0xF48184 + +#define mmTPC5_QM_CP_DBG_0 0xF48188 + +#define mmTPC5_QM_PQ_BUF_ADDR 0xF48300 + +#define mmTPC5_QM_PQ_BUF_RDATA 0xF48304 + +#define mmTPC5_QM_CQ_BUF_ADDR 0xF48308 + +#define mmTPC5_QM_CQ_BUF_RDATA 0xF4830C + +#endif /* ASIC_REG_TPC5_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h new file mode 100644 index 000000000..57f83bc3b --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h @@ -0,0 +1,322 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC5_RTR_REGS_H_ +#define ASIC_REG_TPC5_RTR_REGS_H_ + +/* + ***************************************** + * TPC5_RTR (Prototype: TPC_RTR) + ***************************************** + */ + +#define mmTPC5_RTR_HBW_RD_RQ_E_ARB 0xF40100 + +#define mmTPC5_RTR_HBW_RD_RQ_W_ARB 0xF40104 + +#define mmTPC5_RTR_HBW_RD_RQ_N_ARB 0xF40108 + +#define mmTPC5_RTR_HBW_RD_RQ_S_ARB 0xF4010C + +#define mmTPC5_RTR_HBW_RD_RQ_L_ARB 0xF40110 + +#define mmTPC5_RTR_HBW_E_ARB_MAX 0xF40120 + +#define mmTPC5_RTR_HBW_W_ARB_MAX 0xF40124 + +#define mmTPC5_RTR_HBW_N_ARB_MAX 0xF40128 + +#define mmTPC5_RTR_HBW_S_ARB_MAX 0xF4012C + +#define mmTPC5_RTR_HBW_L_ARB_MAX 0xF40130 + +#define mmTPC5_RTR_HBW_RD_RS_E_ARB 0xF40140 + +#define mmTPC5_RTR_HBW_RD_RS_W_ARB 0xF40144 + +#define mmTPC5_RTR_HBW_RD_RS_N_ARB 0xF40148 + +#define mmTPC5_RTR_HBW_RD_RS_S_ARB 0xF4014C + +#define mmTPC5_RTR_HBW_RD_RS_L_ARB 0xF40150 + +#define mmTPC5_RTR_HBW_WR_RQ_E_ARB 0xF40170 + +#define mmTPC5_RTR_HBW_WR_RQ_W_ARB 0xF40174 + +#define mmTPC5_RTR_HBW_WR_RQ_N_ARB 0xF40178 + +#define mmTPC5_RTR_HBW_WR_RQ_S_ARB 0xF4017C + +#define mmTPC5_RTR_HBW_WR_RQ_L_ARB 0xF40180 + +#define mmTPC5_RTR_HBW_WR_RS_E_ARB 0xF40190 + +#define mmTPC5_RTR_HBW_WR_RS_W_ARB 0xF40194 + +#define mmTPC5_RTR_HBW_WR_RS_N_ARB 0xF40198 + +#define mmTPC5_RTR_HBW_WR_RS_S_ARB 0xF4019C + +#define mmTPC5_RTR_HBW_WR_RS_L_ARB 0xF401A0 + +#define mmTPC5_RTR_LBW_RD_RQ_E_ARB 0xF40200 + +#define mmTPC5_RTR_LBW_RD_RQ_W_ARB 0xF40204 + +#define mmTPC5_RTR_LBW_RD_RQ_N_ARB 0xF40208 + +#define mmTPC5_RTR_LBW_RD_RQ_S_ARB 0xF4020C + +#define mmTPC5_RTR_LBW_RD_RQ_L_ARB 0xF40210 + +#define mmTPC5_RTR_LBW_E_ARB_MAX 0xF40220 + +#define mmTPC5_RTR_LBW_W_ARB_MAX 0xF40224 + +#define mmTPC5_RTR_LBW_N_ARB_MAX 0xF40228 + +#define mmTPC5_RTR_LBW_S_ARB_MAX 0xF4022C + +#define mmTPC5_RTR_LBW_L_ARB_MAX 0xF40230 + +#define mmTPC5_RTR_LBW_RD_RS_E_ARB 0xF40250 + +#define mmTPC5_RTR_LBW_RD_RS_W_ARB 0xF40254 + +#define mmTPC5_RTR_LBW_RD_RS_N_ARB 0xF40258 + +#define mmTPC5_RTR_LBW_RD_RS_S_ARB 0xF4025C + +#define mmTPC5_RTR_LBW_RD_RS_L_ARB 0xF40260 + +#define mmTPC5_RTR_LBW_WR_RQ_E_ARB 0xF40270 + +#define mmTPC5_RTR_LBW_WR_RQ_W_ARB 0xF40274 + +#define mmTPC5_RTR_LBW_WR_RQ_N_ARB 0xF40278 + +#define mmTPC5_RTR_LBW_WR_RQ_S_ARB 0xF4027C + +#define mmTPC5_RTR_LBW_WR_RQ_L_ARB 0xF40280 + +#define mmTPC5_RTR_LBW_WR_RS_E_ARB 0xF40290 + +#define mmTPC5_RTR_LBW_WR_RS_W_ARB 0xF40294 + +#define mmTPC5_RTR_LBW_WR_RS_N_ARB 0xF40298 + +#define mmTPC5_RTR_LBW_WR_RS_S_ARB 0xF4029C + +#define mmTPC5_RTR_LBW_WR_RS_L_ARB 0xF402A0 + +#define mmTPC5_RTR_DBG_E_ARB 0xF40300 + +#define mmTPC5_RTR_DBG_W_ARB 0xF40304 + +#define mmTPC5_RTR_DBG_N_ARB 0xF40308 + 
+#define mmTPC5_RTR_DBG_S_ARB 0xF4030C + +#define mmTPC5_RTR_DBG_L_ARB 0xF40310 + +#define mmTPC5_RTR_DBG_E_ARB_MAX 0xF40320 + +#define mmTPC5_RTR_DBG_W_ARB_MAX 0xF40324 + +#define mmTPC5_RTR_DBG_N_ARB_MAX 0xF40328 + +#define mmTPC5_RTR_DBG_S_ARB_MAX 0xF4032C + +#define mmTPC5_RTR_DBG_L_ARB_MAX 0xF40330 + +#define mmTPC5_RTR_SPLIT_COEF_0 0xF40400 + +#define mmTPC5_RTR_SPLIT_COEF_1 0xF40404 + +#define mmTPC5_RTR_SPLIT_COEF_2 0xF40408 + +#define mmTPC5_RTR_SPLIT_COEF_3 0xF4040C + +#define mmTPC5_RTR_SPLIT_COEF_4 0xF40410 + +#define mmTPC5_RTR_SPLIT_COEF_5 0xF40414 + +#define mmTPC5_RTR_SPLIT_COEF_6 0xF40418 + +#define mmTPC5_RTR_SPLIT_COEF_7 0xF4041C + +#define mmTPC5_RTR_SPLIT_COEF_8 0xF40420 + +#define mmTPC5_RTR_SPLIT_COEF_9 0xF40424 + +#define mmTPC5_RTR_SPLIT_CFG 0xF40440 + +#define mmTPC5_RTR_SPLIT_RD_SAT 0xF40444 + +#define mmTPC5_RTR_SPLIT_RD_RST_TOKEN 0xF40448 + +#define mmTPC5_RTR_SPLIT_RD_TIMEOUT_0 0xF4044C + +#define mmTPC5_RTR_SPLIT_RD_TIMEOUT_1 0xF40450 + +#define mmTPC5_RTR_SPLIT_WR_SAT 0xF40454 + +#define mmTPC5_RTR_WPLIT_WR_TST_TOLEN 0xF40458 + +#define mmTPC5_RTR_SPLIT_WR_TIMEOUT_0 0xF4045C + +#define mmTPC5_RTR_SPLIT_WR_TIMEOUT_1 0xF40460 + +#define mmTPC5_RTR_HBW_RANGE_HIT 0xF40470 + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_0 0xF40480 + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_1 0xF40484 + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_2 0xF40488 + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_3 0xF4048C + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_4 0xF40490 + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_5 0xF40494 + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_6 0xF40498 + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_7 0xF4049C + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_0 0xF404A0 + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_1 0xF404A4 + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_2 0xF404A8 + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_3 0xF404AC + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_4 0xF404B0 + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_5 0xF404B4 + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_6 0xF404B8 + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_7 0xF404BC + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_0 0xF404C0 + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_1 0xF404C4 + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_2 0xF404C8 + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_3 0xF404CC + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_4 0xF404D0 + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_5 0xF404D4 + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_6 0xF404D8 + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_7 0xF404DC + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_0 0xF404E0 + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_1 0xF404E4 + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_2 0xF404E8 + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_3 0xF404EC + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_4 0xF404F0 + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_5 0xF404F4 + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_6 0xF404F8 + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_7 0xF404FC + +#define mmTPC5_RTR_LBW_RANGE_HIT 0xF40500 + +#define mmTPC5_RTR_LBW_RANGE_MASK_0 0xF40510 + +#define mmTPC5_RTR_LBW_RANGE_MASK_1 0xF40514 + +#define mmTPC5_RTR_LBW_RANGE_MASK_2 0xF40518 + +#define mmTPC5_RTR_LBW_RANGE_MASK_3 0xF4051C + +#define mmTPC5_RTR_LBW_RANGE_MASK_4 0xF40520 + +#define mmTPC5_RTR_LBW_RANGE_MASK_5 0xF40524 + +#define mmTPC5_RTR_LBW_RANGE_MASK_6 0xF40528 + +#define mmTPC5_RTR_LBW_RANGE_MASK_7 0xF4052C + +#define mmTPC5_RTR_LBW_RANGE_MASK_8 0xF40530 + +#define mmTPC5_RTR_LBW_RANGE_MASK_9 0xF40534 + +#define mmTPC5_RTR_LBW_RANGE_MASK_10 0xF40538 + +#define mmTPC5_RTR_LBW_RANGE_MASK_11 0xF4053C + +#define mmTPC5_RTR_LBW_RANGE_MASK_12 0xF40540 + +#define mmTPC5_RTR_LBW_RANGE_MASK_13 0xF40544 + +#define 
mmTPC5_RTR_LBW_RANGE_MASK_14 0xF40548 + +#define mmTPC5_RTR_LBW_RANGE_MASK_15 0xF4054C + +#define mmTPC5_RTR_LBW_RANGE_BASE_0 0xF40550 + +#define mmTPC5_RTR_LBW_RANGE_BASE_1 0xF40554 + +#define mmTPC5_RTR_LBW_RANGE_BASE_2 0xF40558 + +#define mmTPC5_RTR_LBW_RANGE_BASE_3 0xF4055C + +#define mmTPC5_RTR_LBW_RANGE_BASE_4 0xF40560 + +#define mmTPC5_RTR_LBW_RANGE_BASE_5 0xF40564 + +#define mmTPC5_RTR_LBW_RANGE_BASE_6 0xF40568 + +#define mmTPC5_RTR_LBW_RANGE_BASE_7 0xF4056C + +#define mmTPC5_RTR_LBW_RANGE_BASE_8 0xF40570 + +#define mmTPC5_RTR_LBW_RANGE_BASE_9 0xF40574 + +#define mmTPC5_RTR_LBW_RANGE_BASE_10 0xF40578 + +#define mmTPC5_RTR_LBW_RANGE_BASE_11 0xF4057C + +#define mmTPC5_RTR_LBW_RANGE_BASE_12 0xF40580 + +#define mmTPC5_RTR_LBW_RANGE_BASE_13 0xF40584 + +#define mmTPC5_RTR_LBW_RANGE_BASE_14 0xF40588 + +#define mmTPC5_RTR_LBW_RANGE_BASE_15 0xF4058C + +#define mmTPC5_RTR_RGLTR 0xF40590 + +#define mmTPC5_RTR_RGLTR_WR_RESULT 0xF40594 + +#define mmTPC5_RTR_RGLTR_RD_RESULT 0xF40598 + +#define mmTPC5_RTR_SCRAMB_EN 0xF40600 + +#define mmTPC5_RTR_NON_LIN_SCRAMB 0xF40604 + +#endif /* ASIC_REG_TPC5_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h new file mode 100644 index 000000000..94e0191c0 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h @@ -0,0 +1,886 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC6_CFG_REGS_H_ +#define ASIC_REG_TPC6_CFG_REGS_H_ + +/* + ***************************************** + * TPC6_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF86400 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF86404 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF86408 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF8640C + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF86410 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF86414 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF86418 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF8641C + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF86420 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF86424 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF86428 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF8642C + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF86430 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF86434 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF86438 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF8643C + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF86440 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF86444 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF86448 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF8644C + +#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF86450 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF86454 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF86458 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF8645C + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF86460 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF86464 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF86468 + +#define 
mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF8646C + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF86470 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF86474 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF86478 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF8647C + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF86480 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF86484 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF86488 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF8648C + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF86490 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF86494 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF86498 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF8649C + +#define mmTPC6_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF864A0 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF864A4 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF864A8 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF864AC + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF864B0 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF864B4 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF864B8 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF864BC + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF864C0 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF864C4 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF864C8 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF864CC + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF864D0 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF864D4 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF864D8 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF864DC + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF864E0 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF864E4 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF864E8 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF864EC + +#define mmTPC6_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF864F0 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF864F4 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF864F8 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF864FC + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF86500 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF86504 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF86508 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF8650C + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF86510 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF86514 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF86518 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF8651C + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF86520 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF86524 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF86528 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF8652C + +#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF86530 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF86534 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF86538 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF8653C + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF86540 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF86544 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF86548 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF8654C + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF86550 + +#define 
mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF86554 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF86558 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF8655C + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF86560 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF86564 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF86568 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF8656C + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF86570 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF86574 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF86578 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF8657C + +#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF86580 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF86584 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF86588 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF8658C + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF86590 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF86594 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF86598 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF8659C + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF865A0 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF865A4 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF865A8 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF865AC + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF865B0 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF865B4 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF865B8 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF865BC + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF865C0 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF865C4 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF865C8 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF865CC + +#define mmTPC6_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF865D0 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF865D4 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF865D8 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF865DC + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF865E0 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF865E4 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF865E8 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF865EC + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF865F0 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF865F4 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF865F8 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF865FC + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF86600 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF86604 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF86608 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF8660C + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF86610 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF86614 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF86618 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF8661C + +#define mmTPC6_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF86620 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF86624 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF86628 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF8662C + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF86630 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF86634 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF86638 + +#define 
mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF8663C + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF86640 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF86644 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF86648 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF8664C + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF86650 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF86654 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF86658 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF8665C + +#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF86660 + +#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF86664 + +#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_0 0xF86668 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_0 0xF8666C + +#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_1 0xF86670 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_1 0xF86674 + +#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_2 0xF86678 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_2 0xF8667C + +#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_3 0xF86680 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_3 0xF86684 + +#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_4 0xF86688 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_4 0xF8668C + +#define mmTPC6_CFG_KERNEL_SRF_0 0xF86690 + +#define mmTPC6_CFG_KERNEL_SRF_1 0xF86694 + +#define mmTPC6_CFG_KERNEL_SRF_2 0xF86698 + +#define mmTPC6_CFG_KERNEL_SRF_3 0xF8669C + +#define mmTPC6_CFG_KERNEL_SRF_4 0xF866A0 + +#define mmTPC6_CFG_KERNEL_SRF_5 0xF866A4 + +#define mmTPC6_CFG_KERNEL_SRF_6 0xF866A8 + +#define mmTPC6_CFG_KERNEL_SRF_7 0xF866AC + +#define mmTPC6_CFG_KERNEL_SRF_8 0xF866B0 + +#define mmTPC6_CFG_KERNEL_SRF_9 0xF866B4 + +#define mmTPC6_CFG_KERNEL_SRF_10 0xF866B8 + +#define mmTPC6_CFG_KERNEL_SRF_11 0xF866BC + +#define mmTPC6_CFG_KERNEL_SRF_12 0xF866C0 + +#define mmTPC6_CFG_KERNEL_SRF_13 0xF866C4 + +#define mmTPC6_CFG_KERNEL_SRF_14 0xF866C8 + +#define mmTPC6_CFG_KERNEL_SRF_15 0xF866CC + +#define mmTPC6_CFG_KERNEL_SRF_16 0xF866D0 + +#define mmTPC6_CFG_KERNEL_SRF_17 0xF866D4 + +#define mmTPC6_CFG_KERNEL_SRF_18 0xF866D8 + +#define mmTPC6_CFG_KERNEL_SRF_19 0xF866DC + +#define mmTPC6_CFG_KERNEL_SRF_20 0xF866E0 + +#define mmTPC6_CFG_KERNEL_SRF_21 0xF866E4 + +#define mmTPC6_CFG_KERNEL_SRF_22 0xF866E8 + +#define mmTPC6_CFG_KERNEL_SRF_23 0xF866EC + +#define mmTPC6_CFG_KERNEL_SRF_24 0xF866F0 + +#define mmTPC6_CFG_KERNEL_SRF_25 0xF866F4 + +#define mmTPC6_CFG_KERNEL_SRF_26 0xF866F8 + +#define mmTPC6_CFG_KERNEL_SRF_27 0xF866FC + +#define mmTPC6_CFG_KERNEL_SRF_28 0xF86700 + +#define mmTPC6_CFG_KERNEL_SRF_29 0xF86704 + +#define mmTPC6_CFG_KERNEL_SRF_30 0xF86708 + +#define mmTPC6_CFG_KERNEL_SRF_31 0xF8670C + +#define mmTPC6_CFG_KERNEL_KERNEL_CONFIG 0xF86710 + +#define mmTPC6_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF86714 + +#define mmTPC6_CFG_RESERVED_DESC_END 0xF86738 + +#define mmTPC6_CFG_ROUND_CSR 0xF867FC + +#define mmTPC6_CFG_TBUF_BASE_ADDR_LOW 0xF86800 + +#define mmTPC6_CFG_TBUF_BASE_ADDR_HIGH 0xF86804 + +#define mmTPC6_CFG_SEMAPHORE 0xF86808 + +#define mmTPC6_CFG_VFLAGS 0xF8680C + +#define mmTPC6_CFG_SFLAGS 0xF86810 + +#define mmTPC6_CFG_LFSR_POLYNOM 0xF86818 + +#define mmTPC6_CFG_STATUS 0xF8681C + +#define mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH 0xF86820 + +#define mmTPC6_CFG_CFG_SUBTRACT_VALUE 0xF86824 + +#define mmTPC6_CFG_SM_BASE_ADDRESS_LOW 0xF86828 + +#define mmTPC6_CFG_SM_BASE_ADDRESS_HIGH 0xF8682C + +#define mmTPC6_CFG_TPC_CMD 0xF86830 + +#define mmTPC6_CFG_TPC_EXECUTE 0xF86838 + +#define mmTPC6_CFG_TPC_STALL 0xF8683C + +#define mmTPC6_CFG_ICACHE_BASE_ADDERESS_LOW 0xF86840 + +#define 
mmTPC6_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF86844 + +#define mmTPC6_CFG_MSS_CONFIG 0xF86854 + +#define mmTPC6_CFG_TPC_INTR_CAUSE 0xF86858 + +#define mmTPC6_CFG_TPC_INTR_MASK 0xF8685C + +#define mmTPC6_CFG_TSB_CONFIG 0xF86860 + +#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF86A00 + +#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF86A04 + +#define mmTPC6_CFG_QM_TENSOR_0_PADDING_VALUE 0xF86A08 + +#define mmTPC6_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF86A0C + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF86A10 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF86A14 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF86A18 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF86A1C + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF86A20 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF86A24 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF86A28 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF86A2C + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF86A30 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF86A34 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF86A38 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF86A3C + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF86A40 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF86A44 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF86A48 + +#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF86A4C + +#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF86A50 + +#define mmTPC6_CFG_QM_TENSOR_1_PADDING_VALUE 0xF86A54 + +#define mmTPC6_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF86A58 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF86A5C + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF86A60 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF86A64 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF86A68 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF86A6C + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF86A70 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF86A74 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF86A78 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF86A7C + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF86A80 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF86A84 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF86A88 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF86A8C + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF86A90 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF86A94 + +#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF86A98 + +#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF86A9C + +#define mmTPC6_CFG_QM_TENSOR_2_PADDING_VALUE 0xF86AA0 + +#define mmTPC6_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF86AA4 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF86AA8 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF86AAC + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF86AB0 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF86AB4 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF86AB8 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF86ABC + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF86AC0 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF86AC4 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF86AC8 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF86ACC + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF86AD0 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF86AD4 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF86AD8 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF86ADC + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF86AE0 + +#define mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF86AE4 + +#define 
mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF86AE8 + +#define mmTPC6_CFG_QM_TENSOR_3_PADDING_VALUE 0xF86AEC + +#define mmTPC6_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF86AF0 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF86AF4 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF86AF8 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF86AFC + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF86B00 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF86B04 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF86B08 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF86B0C + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF86B10 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF86B14 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF86B18 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF86B1C + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF86B20 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF86B24 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF86B28 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF86B2C + +#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF86B30 + +#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF86B34 + +#define mmTPC6_CFG_QM_TENSOR_4_PADDING_VALUE 0xF86B38 + +#define mmTPC6_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF86B3C + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF86B40 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF86B44 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF86B48 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF86B4C + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF86B50 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF86B54 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF86B58 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF86B5C + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF86B60 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF86B64 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF86B68 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF86B6C + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF86B70 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF86B74 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF86B78 + +#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF86B7C + +#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF86B80 + +#define mmTPC6_CFG_QM_TENSOR_5_PADDING_VALUE 0xF86B84 + +#define mmTPC6_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF86B88 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF86B8C + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF86B90 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF86B94 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF86B98 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF86B9C + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF86BA0 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF86BA4 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF86BA8 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF86BAC + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF86BB0 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF86BB4 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF86BB8 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF86BBC + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF86BC0 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF86BC4 + +#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF86BC8 + +#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF86BCC + +#define mmTPC6_CFG_QM_TENSOR_6_PADDING_VALUE 0xF86BD0 + +#define mmTPC6_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF86BD4 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF86BD8 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF86BDC + +#define 
mmTPC6_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF86BE0 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF86BE4 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF86BE8 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF86BEC + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF86BF0 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF86BF4 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF86BF8 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF86BFC + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF86C00 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF86C04 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF86C08 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF86C0C + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF86C10 + +#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF86C14 + +#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF86C18 + +#define mmTPC6_CFG_QM_TENSOR_7_PADDING_VALUE 0xF86C1C + +#define mmTPC6_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF86C20 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF86C24 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF86C28 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF86C2C + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF86C30 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF86C34 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF86C38 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF86C3C + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF86C40 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF86C44 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF86C48 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF86C4C + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF86C50 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF86C54 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF86C58 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF86C5C + +#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF86C60 + +#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF86C64 + +#define mmTPC6_CFG_QM_TID_BASE_DIM_0 0xF86C68 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_0 0xF86C6C + +#define mmTPC6_CFG_QM_TID_BASE_DIM_1 0xF86C70 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_1 0xF86C74 + +#define mmTPC6_CFG_QM_TID_BASE_DIM_2 0xF86C78 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_2 0xF86C7C + +#define mmTPC6_CFG_QM_TID_BASE_DIM_3 0xF86C80 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_3 0xF86C84 + +#define mmTPC6_CFG_QM_TID_BASE_DIM_4 0xF86C88 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_4 0xF86C8C + +#define mmTPC6_CFG_QM_SRF_0 0xF86C90 + +#define mmTPC6_CFG_QM_SRF_1 0xF86C94 + +#define mmTPC6_CFG_QM_SRF_2 0xF86C98 + +#define mmTPC6_CFG_QM_SRF_3 0xF86C9C + +#define mmTPC6_CFG_QM_SRF_4 0xF86CA0 + +#define mmTPC6_CFG_QM_SRF_5 0xF86CA4 + +#define mmTPC6_CFG_QM_SRF_6 0xF86CA8 + +#define mmTPC6_CFG_QM_SRF_7 0xF86CAC + +#define mmTPC6_CFG_QM_SRF_8 0xF86CB0 + +#define mmTPC6_CFG_QM_SRF_9 0xF86CB4 + +#define mmTPC6_CFG_QM_SRF_10 0xF86CB8 + +#define mmTPC6_CFG_QM_SRF_11 0xF86CBC + +#define mmTPC6_CFG_QM_SRF_12 0xF86CC0 + +#define mmTPC6_CFG_QM_SRF_13 0xF86CC4 + +#define mmTPC6_CFG_QM_SRF_14 0xF86CC8 + +#define mmTPC6_CFG_QM_SRF_15 0xF86CCC + +#define mmTPC6_CFG_QM_SRF_16 0xF86CD0 + +#define mmTPC6_CFG_QM_SRF_17 0xF86CD4 + +#define mmTPC6_CFG_QM_SRF_18 0xF86CD8 + +#define mmTPC6_CFG_QM_SRF_19 0xF86CDC + +#define mmTPC6_CFG_QM_SRF_20 0xF86CE0 + +#define mmTPC6_CFG_QM_SRF_21 0xF86CE4 + +#define mmTPC6_CFG_QM_SRF_22 0xF86CE8 + +#define mmTPC6_CFG_QM_SRF_23 0xF86CEC + +#define mmTPC6_CFG_QM_SRF_24 0xF86CF0 + +#define mmTPC6_CFG_QM_SRF_25 0xF86CF4 + +#define mmTPC6_CFG_QM_SRF_26 0xF86CF8 + +#define mmTPC6_CFG_QM_SRF_27 
0xF86CFC + +#define mmTPC6_CFG_QM_SRF_28 0xF86D00 + +#define mmTPC6_CFG_QM_SRF_29 0xF86D04 + +#define mmTPC6_CFG_QM_SRF_30 0xF86D08 + +#define mmTPC6_CFG_QM_SRF_31 0xF86D0C + +#define mmTPC6_CFG_QM_KERNEL_CONFIG 0xF86D10 + +#define mmTPC6_CFG_QM_SYNC_OBJECT_MESSAGE 0xF86D14 + +#define mmTPC6_CFG_ARUSER 0xF86D18 + +#define mmTPC6_CFG_AWUSER 0xF86D1C + +#define mmTPC6_CFG_FUNC_MBIST_CNTRL 0xF86E00 + +#define mmTPC6_CFG_FUNC_MBIST_PAT 0xF86E04 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_0 0xF86E08 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_1 0xF86E0C + +#define mmTPC6_CFG_FUNC_MBIST_MEM_2 0xF86E10 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_3 0xF86E14 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_4 0xF86E18 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_5 0xF86E1C + +#define mmTPC6_CFG_FUNC_MBIST_MEM_6 0xF86E20 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_7 0xF86E24 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_8 0xF86E28 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_9 0xF86E2C + +#endif /* ASIC_REG_TPC6_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h new file mode 100644 index 000000000..7a1a0e87b --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC6_CMDQ_REGS_H_ +#define ASIC_REG_TPC6_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC6_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC6_CMDQ_GLBL_CFG0 0xF89000 + +#define mmTPC6_CMDQ_GLBL_CFG1 0xF89004 + +#define mmTPC6_CMDQ_GLBL_PROT 0xF89008 + +#define mmTPC6_CMDQ_GLBL_ERR_CFG 0xF8900C + +#define mmTPC6_CMDQ_GLBL_ERR_ADDR_LO 0xF89010 + +#define mmTPC6_CMDQ_GLBL_ERR_ADDR_HI 0xF89014 + +#define mmTPC6_CMDQ_GLBL_ERR_WDATA 0xF89018 + +#define mmTPC6_CMDQ_GLBL_SECURE_PROPS 0xF8901C + +#define mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS 0xF89020 + +#define mmTPC6_CMDQ_GLBL_STS0 0xF89024 + +#define mmTPC6_CMDQ_GLBL_STS1 0xF89028 + +#define mmTPC6_CMDQ_CQ_CFG0 0xF890B0 + +#define mmTPC6_CMDQ_CQ_CFG1 0xF890B4 + +#define mmTPC6_CMDQ_CQ_ARUSER 0xF890B8 + +#define mmTPC6_CMDQ_CQ_PTR_LO 0xF890C0 + +#define mmTPC6_CMDQ_CQ_PTR_HI 0xF890C4 + +#define mmTPC6_CMDQ_CQ_TSIZE 0xF890C8 + +#define mmTPC6_CMDQ_CQ_CTL 0xF890CC + +#define mmTPC6_CMDQ_CQ_PTR_LO_STS 0xF890D4 + +#define mmTPC6_CMDQ_CQ_PTR_HI_STS 0xF890D8 + +#define mmTPC6_CMDQ_CQ_TSIZE_STS 0xF890DC + +#define mmTPC6_CMDQ_CQ_CTL_STS 0xF890E0 + +#define mmTPC6_CMDQ_CQ_STS0 0xF890E4 + +#define mmTPC6_CMDQ_CQ_STS1 0xF890E8 + +#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_EN 0xF890F0 + +#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF890F4 + +#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_SAT 0xF890F8 + +#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF890FC + +#define mmTPC6_CMDQ_CQ_IFIFO_CNT 0xF89108 + +#define mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF89120 + +#define mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF89124 + +#define mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF89128 + +#define mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF8912C + +#define mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF89130 + +#define mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF89134 + +#define mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF89138 + +#define mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF8913C + +#define mmTPC6_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF89140 + +#define 
mmTPC6_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF89144 + +#define mmTPC6_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF89148 + +#define mmTPC6_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF8914C + +#define mmTPC6_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF89150 + +#define mmTPC6_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF89154 + +#define mmTPC6_CMDQ_CP_FENCE0_RDATA 0xF89158 + +#define mmTPC6_CMDQ_CP_FENCE1_RDATA 0xF8915C + +#define mmTPC6_CMDQ_CP_FENCE2_RDATA 0xF89160 + +#define mmTPC6_CMDQ_CP_FENCE3_RDATA 0xF89164 + +#define mmTPC6_CMDQ_CP_FENCE0_CNT 0xF89168 + +#define mmTPC6_CMDQ_CP_FENCE1_CNT 0xF8916C + +#define mmTPC6_CMDQ_CP_FENCE2_CNT 0xF89170 + +#define mmTPC6_CMDQ_CP_FENCE3_CNT 0xF89174 + +#define mmTPC6_CMDQ_CP_STS 0xF89178 + +#define mmTPC6_CMDQ_CP_CURRENT_INST_LO 0xF8917C + +#define mmTPC6_CMDQ_CP_CURRENT_INST_HI 0xF89180 + +#define mmTPC6_CMDQ_CP_BARRIER_CFG 0xF89184 + +#define mmTPC6_CMDQ_CP_DBG_0 0xF89188 + +#define mmTPC6_CMDQ_CQ_BUF_ADDR 0xF89308 + +#define mmTPC6_CMDQ_CQ_BUF_RDATA 0xF8930C + +#endif /* ASIC_REG_TPC6_CMDQ_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h new file mode 100644 index 000000000..80fa0fe0f --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC6_QM_REGS_H_ +#define ASIC_REG_TPC6_QM_REGS_H_ + +/* + ***************************************** + * TPC6_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC6_QM_GLBL_CFG0 0xF88000 + +#define mmTPC6_QM_GLBL_CFG1 0xF88004 + +#define mmTPC6_QM_GLBL_PROT 0xF88008 + +#define mmTPC6_QM_GLBL_ERR_CFG 0xF8800C + +#define mmTPC6_QM_GLBL_ERR_ADDR_LO 0xF88010 + +#define mmTPC6_QM_GLBL_ERR_ADDR_HI 0xF88014 + +#define mmTPC6_QM_GLBL_ERR_WDATA 0xF88018 + +#define mmTPC6_QM_GLBL_SECURE_PROPS 0xF8801C + +#define mmTPC6_QM_GLBL_NON_SECURE_PROPS 0xF88020 + +#define mmTPC6_QM_GLBL_STS0 0xF88024 + +#define mmTPC6_QM_GLBL_STS1 0xF88028 + +#define mmTPC6_QM_PQ_BASE_LO 0xF88060 + +#define mmTPC6_QM_PQ_BASE_HI 0xF88064 + +#define mmTPC6_QM_PQ_SIZE 0xF88068 + +#define mmTPC6_QM_PQ_PI 0xF8806C + +#define mmTPC6_QM_PQ_CI 0xF88070 + +#define mmTPC6_QM_PQ_CFG0 0xF88074 + +#define mmTPC6_QM_PQ_CFG1 0xF88078 + +#define mmTPC6_QM_PQ_ARUSER 0xF8807C + +#define mmTPC6_QM_PQ_PUSH0 0xF88080 + +#define mmTPC6_QM_PQ_PUSH1 0xF88084 + +#define mmTPC6_QM_PQ_PUSH2 0xF88088 + +#define mmTPC6_QM_PQ_PUSH3 0xF8808C + +#define mmTPC6_QM_PQ_STS0 0xF88090 + +#define mmTPC6_QM_PQ_STS1 0xF88094 + +#define mmTPC6_QM_PQ_RD_RATE_LIM_EN 0xF880A0 + +#define mmTPC6_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF880A4 + +#define mmTPC6_QM_PQ_RD_RATE_LIM_SAT 0xF880A8 + +#define mmTPC6_QM_PQ_RD_RATE_LIM_TOUT 0xF880AC + +#define mmTPC6_QM_CQ_CFG0 0xF880B0 + +#define mmTPC6_QM_CQ_CFG1 0xF880B4 + +#define mmTPC6_QM_CQ_ARUSER 0xF880B8 + +#define mmTPC6_QM_CQ_PTR_LO 0xF880C0 + +#define mmTPC6_QM_CQ_PTR_HI 0xF880C4 + +#define mmTPC6_QM_CQ_TSIZE 0xF880C8 + +#define mmTPC6_QM_CQ_CTL 0xF880CC + +#define mmTPC6_QM_CQ_PTR_LO_STS 0xF880D4 + +#define mmTPC6_QM_CQ_PTR_HI_STS 0xF880D8 + +#define mmTPC6_QM_CQ_TSIZE_STS 0xF880DC + +#define mmTPC6_QM_CQ_CTL_STS 0xF880E0 + +#define mmTPC6_QM_CQ_STS0 0xF880E4 + +#define mmTPC6_QM_CQ_STS1 0xF880E8 + +#define mmTPC6_QM_CQ_RD_RATE_LIM_EN 0xF880F0 + 
+#define mmTPC6_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF880F4 + +#define mmTPC6_QM_CQ_RD_RATE_LIM_SAT 0xF880F8 + +#define mmTPC6_QM_CQ_RD_RATE_LIM_TOUT 0xF880FC + +#define mmTPC6_QM_CQ_IFIFO_CNT 0xF88108 + +#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO 0xF88120 + +#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI 0xF88124 + +#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO 0xF88128 + +#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI 0xF8812C + +#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO 0xF88130 + +#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI 0xF88134 + +#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO 0xF88138 + +#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI 0xF8813C + +#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET 0xF88140 + +#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF88144 + +#define mmTPC6_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF88148 + +#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF8814C + +#define mmTPC6_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF88150 + +#define mmTPC6_QM_CP_LDMA_COMMIT_OFFSET 0xF88154 + +#define mmTPC6_QM_CP_FENCE0_RDATA 0xF88158 + +#define mmTPC6_QM_CP_FENCE1_RDATA 0xF8815C + +#define mmTPC6_QM_CP_FENCE2_RDATA 0xF88160 + +#define mmTPC6_QM_CP_FENCE3_RDATA 0xF88164 + +#define mmTPC6_QM_CP_FENCE0_CNT 0xF88168 + +#define mmTPC6_QM_CP_FENCE1_CNT 0xF8816C + +#define mmTPC6_QM_CP_FENCE2_CNT 0xF88170 + +#define mmTPC6_QM_CP_FENCE3_CNT 0xF88174 + +#define mmTPC6_QM_CP_STS 0xF88178 + +#define mmTPC6_QM_CP_CURRENT_INST_LO 0xF8817C + +#define mmTPC6_QM_CP_CURRENT_INST_HI 0xF88180 + +#define mmTPC6_QM_CP_BARRIER_CFG 0xF88184 + +#define mmTPC6_QM_CP_DBG_0 0xF88188 + +#define mmTPC6_QM_PQ_BUF_ADDR 0xF88300 + +#define mmTPC6_QM_PQ_BUF_RDATA 0xF88304 + +#define mmTPC6_QM_CQ_BUF_ADDR 0xF88308 + +#define mmTPC6_QM_CQ_BUF_RDATA 0xF8830C + +#endif /* ASIC_REG_TPC6_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h new file mode 100644 index 000000000..d6cae8b8a --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h @@ -0,0 +1,322 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC6_RTR_REGS_H_ +#define ASIC_REG_TPC6_RTR_REGS_H_ + +/* + ***************************************** + * TPC6_RTR (Prototype: TPC_RTR) + ***************************************** + */ + +#define mmTPC6_RTR_HBW_RD_RQ_E_ARB 0xF80100 + +#define mmTPC6_RTR_HBW_RD_RQ_W_ARB 0xF80104 + +#define mmTPC6_RTR_HBW_RD_RQ_N_ARB 0xF80108 + +#define mmTPC6_RTR_HBW_RD_RQ_S_ARB 0xF8010C + +#define mmTPC6_RTR_HBW_RD_RQ_L_ARB 0xF80110 + +#define mmTPC6_RTR_HBW_E_ARB_MAX 0xF80120 + +#define mmTPC6_RTR_HBW_W_ARB_MAX 0xF80124 + +#define mmTPC6_RTR_HBW_N_ARB_MAX 0xF80128 + +#define mmTPC6_RTR_HBW_S_ARB_MAX 0xF8012C + +#define mmTPC6_RTR_HBW_L_ARB_MAX 0xF80130 + +#define mmTPC6_RTR_HBW_RD_RS_E_ARB 0xF80140 + +#define mmTPC6_RTR_HBW_RD_RS_W_ARB 0xF80144 + +#define mmTPC6_RTR_HBW_RD_RS_N_ARB 0xF80148 + +#define mmTPC6_RTR_HBW_RD_RS_S_ARB 0xF8014C + +#define mmTPC6_RTR_HBW_RD_RS_L_ARB 0xF80150 + +#define mmTPC6_RTR_HBW_WR_RQ_E_ARB 0xF80170 + +#define mmTPC6_RTR_HBW_WR_RQ_W_ARB 0xF80174 + +#define mmTPC6_RTR_HBW_WR_RQ_N_ARB 0xF80178 + +#define mmTPC6_RTR_HBW_WR_RQ_S_ARB 0xF8017C + +#define mmTPC6_RTR_HBW_WR_RQ_L_ARB 0xF80180 + +#define mmTPC6_RTR_HBW_WR_RS_E_ARB 0xF80190 + +#define mmTPC6_RTR_HBW_WR_RS_W_ARB 0xF80194 + +#define mmTPC6_RTR_HBW_WR_RS_N_ARB 0xF80198 + +#define mmTPC6_RTR_HBW_WR_RS_S_ARB 0xF8019C + +#define mmTPC6_RTR_HBW_WR_RS_L_ARB 0xF801A0 + +#define mmTPC6_RTR_LBW_RD_RQ_E_ARB 0xF80200 + +#define mmTPC6_RTR_LBW_RD_RQ_W_ARB 0xF80204 + +#define mmTPC6_RTR_LBW_RD_RQ_N_ARB 0xF80208 + +#define mmTPC6_RTR_LBW_RD_RQ_S_ARB 0xF8020C + +#define mmTPC6_RTR_LBW_RD_RQ_L_ARB 0xF80210 + +#define mmTPC6_RTR_LBW_E_ARB_MAX 0xF80220 + +#define mmTPC6_RTR_LBW_W_ARB_MAX 0xF80224 + +#define mmTPC6_RTR_LBW_N_ARB_MAX 0xF80228 + +#define mmTPC6_RTR_LBW_S_ARB_MAX 0xF8022C + +#define mmTPC6_RTR_LBW_L_ARB_MAX 0xF80230 + +#define mmTPC6_RTR_LBW_RD_RS_E_ARB 0xF80250 + +#define mmTPC6_RTR_LBW_RD_RS_W_ARB 0xF80254 + +#define mmTPC6_RTR_LBW_RD_RS_N_ARB 0xF80258 + +#define mmTPC6_RTR_LBW_RD_RS_S_ARB 0xF8025C + +#define mmTPC6_RTR_LBW_RD_RS_L_ARB 0xF80260 + +#define mmTPC6_RTR_LBW_WR_RQ_E_ARB 0xF80270 + +#define mmTPC6_RTR_LBW_WR_RQ_W_ARB 0xF80274 + +#define mmTPC6_RTR_LBW_WR_RQ_N_ARB 0xF80278 + +#define mmTPC6_RTR_LBW_WR_RQ_S_ARB 0xF8027C + +#define mmTPC6_RTR_LBW_WR_RQ_L_ARB 0xF80280 + +#define mmTPC6_RTR_LBW_WR_RS_E_ARB 0xF80290 + +#define mmTPC6_RTR_LBW_WR_RS_W_ARB 0xF80294 + +#define mmTPC6_RTR_LBW_WR_RS_N_ARB 0xF80298 + +#define mmTPC6_RTR_LBW_WR_RS_S_ARB 0xF8029C + +#define mmTPC6_RTR_LBW_WR_RS_L_ARB 0xF802A0 + +#define mmTPC6_RTR_DBG_E_ARB 0xF80300 + +#define mmTPC6_RTR_DBG_W_ARB 0xF80304 + +#define mmTPC6_RTR_DBG_N_ARB 0xF80308 + +#define mmTPC6_RTR_DBG_S_ARB 0xF8030C + +#define mmTPC6_RTR_DBG_L_ARB 0xF80310 + +#define mmTPC6_RTR_DBG_E_ARB_MAX 0xF80320 + +#define mmTPC6_RTR_DBG_W_ARB_MAX 0xF80324 + +#define mmTPC6_RTR_DBG_N_ARB_MAX 0xF80328 + +#define mmTPC6_RTR_DBG_S_ARB_MAX 0xF8032C + +#define mmTPC6_RTR_DBG_L_ARB_MAX 0xF80330 + +#define mmTPC6_RTR_SPLIT_COEF_0 0xF80400 + +#define mmTPC6_RTR_SPLIT_COEF_1 0xF80404 + +#define mmTPC6_RTR_SPLIT_COEF_2 0xF80408 + +#define mmTPC6_RTR_SPLIT_COEF_3 0xF8040C + +#define mmTPC6_RTR_SPLIT_COEF_4 0xF80410 + +#define mmTPC6_RTR_SPLIT_COEF_5 0xF80414 + +#define mmTPC6_RTR_SPLIT_COEF_6 0xF80418 + +#define mmTPC6_RTR_SPLIT_COEF_7 0xF8041C + +#define mmTPC6_RTR_SPLIT_COEF_8 0xF80420 + +#define mmTPC6_RTR_SPLIT_COEF_9 
0xF80424 + +#define mmTPC6_RTR_SPLIT_CFG 0xF80440 + +#define mmTPC6_RTR_SPLIT_RD_SAT 0xF80444 + +#define mmTPC6_RTR_SPLIT_RD_RST_TOKEN 0xF80448 + +#define mmTPC6_RTR_SPLIT_RD_TIMEOUT_0 0xF8044C + +#define mmTPC6_RTR_SPLIT_RD_TIMEOUT_1 0xF80450 + +#define mmTPC6_RTR_SPLIT_WR_SAT 0xF80454 + +#define mmTPC6_RTR_WPLIT_WR_TST_TOLEN 0xF80458 + +#define mmTPC6_RTR_SPLIT_WR_TIMEOUT_0 0xF8045C + +#define mmTPC6_RTR_SPLIT_WR_TIMEOUT_1 0xF80460 + +#define mmTPC6_RTR_HBW_RANGE_HIT 0xF80470 + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_0 0xF80480 + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_1 0xF80484 + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_2 0xF80488 + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_3 0xF8048C + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_4 0xF80490 + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_5 0xF80494 + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_6 0xF80498 + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_7 0xF8049C + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_0 0xF804A0 + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_1 0xF804A4 + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_2 0xF804A8 + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_3 0xF804AC + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_4 0xF804B0 + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_5 0xF804B4 + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_6 0xF804B8 + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_7 0xF804BC + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_0 0xF804C0 + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_1 0xF804C4 + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_2 0xF804C8 + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_3 0xF804CC + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_4 0xF804D0 + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_5 0xF804D4 + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_6 0xF804D8 + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_7 0xF804DC + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_0 0xF804E0 + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_1 0xF804E4 + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_2 0xF804E8 + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_3 0xF804EC + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_4 0xF804F0 + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_5 0xF804F4 + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_6 0xF804F8 + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_7 0xF804FC + +#define mmTPC6_RTR_LBW_RANGE_HIT 0xF80500 + +#define mmTPC6_RTR_LBW_RANGE_MASK_0 0xF80510 + +#define mmTPC6_RTR_LBW_RANGE_MASK_1 0xF80514 + +#define mmTPC6_RTR_LBW_RANGE_MASK_2 0xF80518 + +#define mmTPC6_RTR_LBW_RANGE_MASK_3 0xF8051C + +#define mmTPC6_RTR_LBW_RANGE_MASK_4 0xF80520 + +#define mmTPC6_RTR_LBW_RANGE_MASK_5 0xF80524 + +#define mmTPC6_RTR_LBW_RANGE_MASK_6 0xF80528 + +#define mmTPC6_RTR_LBW_RANGE_MASK_7 0xF8052C + +#define mmTPC6_RTR_LBW_RANGE_MASK_8 0xF80530 + +#define mmTPC6_RTR_LBW_RANGE_MASK_9 0xF80534 + +#define mmTPC6_RTR_LBW_RANGE_MASK_10 0xF80538 + +#define mmTPC6_RTR_LBW_RANGE_MASK_11 0xF8053C + +#define mmTPC6_RTR_LBW_RANGE_MASK_12 0xF80540 + +#define mmTPC6_RTR_LBW_RANGE_MASK_13 0xF80544 + +#define mmTPC6_RTR_LBW_RANGE_MASK_14 0xF80548 + +#define mmTPC6_RTR_LBW_RANGE_MASK_15 0xF8054C + +#define mmTPC6_RTR_LBW_RANGE_BASE_0 0xF80550 + +#define mmTPC6_RTR_LBW_RANGE_BASE_1 0xF80554 + +#define mmTPC6_RTR_LBW_RANGE_BASE_2 0xF80558 + +#define mmTPC6_RTR_LBW_RANGE_BASE_3 0xF8055C + +#define mmTPC6_RTR_LBW_RANGE_BASE_4 0xF80560 + +#define mmTPC6_RTR_LBW_RANGE_BASE_5 0xF80564 + +#define mmTPC6_RTR_LBW_RANGE_BASE_6 0xF80568 + +#define mmTPC6_RTR_LBW_RANGE_BASE_7 0xF8056C + +#define mmTPC6_RTR_LBW_RANGE_BASE_8 0xF80570 + +#define mmTPC6_RTR_LBW_RANGE_BASE_9 0xF80574 + +#define mmTPC6_RTR_LBW_RANGE_BASE_10 0xF80578 + +#define mmTPC6_RTR_LBW_RANGE_BASE_11 0xF8057C + +#define mmTPC6_RTR_LBW_RANGE_BASE_12 0xF80580 + +#define 
mmTPC6_RTR_LBW_RANGE_BASE_13 0xF80584 + +#define mmTPC6_RTR_LBW_RANGE_BASE_14 0xF80588 + +#define mmTPC6_RTR_LBW_RANGE_BASE_15 0xF8058C + +#define mmTPC6_RTR_RGLTR 0xF80590 + +#define mmTPC6_RTR_RGLTR_WR_RESULT 0xF80594 + +#define mmTPC6_RTR_RGLTR_RD_RESULT 0xF80598 + +#define mmTPC6_RTR_SCRAMB_EN 0xF80600 + +#define mmTPC6_RTR_NON_LIN_SCRAMB 0xF80604 + +#endif /* ASIC_REG_TPC6_RTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h new file mode 100644 index 000000000..234147adb --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h @@ -0,0 +1,886 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC7_CFG_REGS_H_ +#define ASIC_REG_TPC7_CFG_REGS_H_ + +/* + ***************************************** + * TPC7_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xFC6400 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xFC6404 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xFC6408 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xFC640C + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xFC6410 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xFC6414 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xFC6418 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xFC641C + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xFC6420 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xFC6424 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xFC6428 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xFC642C + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xFC6430 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xFC6434 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xFC6438 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xFC643C + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xFC6440 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xFC6444 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xFC6448 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xFC644C + +#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xFC6450 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xFC6454 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xFC6458 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xFC645C + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xFC6460 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xFC6464 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xFC6468 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xFC646C + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xFC6470 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xFC6474 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xFC6478 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xFC647C + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xFC6480 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xFC6484 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xFC6488 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xFC648C + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xFC6490 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xFC6494 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xFC6498 + +#define 
mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xFC649C + +#define mmTPC7_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xFC64A0 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xFC64A4 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xFC64A8 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xFC64AC + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xFC64B0 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xFC64B4 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xFC64B8 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xFC64BC + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xFC64C0 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xFC64C4 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xFC64C8 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xFC64CC + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xFC64D0 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xFC64D4 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xFC64D8 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xFC64DC + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xFC64E0 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xFC64E4 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xFC64E8 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xFC64EC + +#define mmTPC7_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xFC64F0 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xFC64F4 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xFC64F8 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xFC64FC + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xFC6500 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xFC6504 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xFC6508 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xFC650C + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xFC6510 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xFC6514 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xFC6518 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xFC651C + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xFC6520 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xFC6524 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xFC6528 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xFC652C + +#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xFC6530 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xFC6534 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xFC6538 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xFC653C + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xFC6540 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xFC6544 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xFC6548 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xFC654C + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xFC6550 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xFC6554 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xFC6558 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xFC655C + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xFC6560 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xFC6564 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xFC6568 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xFC656C + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xFC6570 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xFC6574 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xFC6578 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xFC657C + +#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xFC6580 + +#define 
mmTPC7_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xFC6584 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xFC6588 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xFC658C + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xFC6590 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xFC6594 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xFC6598 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xFC659C + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xFC65A0 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xFC65A4 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xFC65A8 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xFC65AC + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xFC65B0 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xFC65B4 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xFC65B8 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xFC65BC + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xFC65C0 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xFC65C4 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xFC65C8 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xFC65CC + +#define mmTPC7_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xFC65D0 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xFC65D4 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xFC65D8 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xFC65DC + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xFC65E0 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xFC65E4 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xFC65E8 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xFC65EC + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xFC65F0 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xFC65F4 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xFC65F8 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xFC65FC + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xFC6600 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xFC6604 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xFC6608 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xFC660C + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xFC6610 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xFC6614 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xFC6618 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xFC661C + +#define mmTPC7_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xFC6620 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xFC6624 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xFC6628 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xFC662C + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xFC6630 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xFC6634 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xFC6638 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xFC663C + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xFC6640 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xFC6644 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xFC6648 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xFC664C + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xFC6650 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xFC6654 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xFC6658 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xFC665C + +#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xFC6660 + +#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xFC6664 + +#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_0 0xFC6668 + +#define 
mmTPC7_CFG_KERNEL_TID_SIZE_DIM_0 0xFC666C + +#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_1 0xFC6670 + +#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_1 0xFC6674 + +#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_2 0xFC6678 + +#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_2 0xFC667C + +#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_3 0xFC6680 + +#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_3 0xFC6684 + +#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_4 0xFC6688 + +#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_4 0xFC668C + +#define mmTPC7_CFG_KERNEL_SRF_0 0xFC6690 + +#define mmTPC7_CFG_KERNEL_SRF_1 0xFC6694 + +#define mmTPC7_CFG_KERNEL_SRF_2 0xFC6698 + +#define mmTPC7_CFG_KERNEL_SRF_3 0xFC669C + +#define mmTPC7_CFG_KERNEL_SRF_4 0xFC66A0 + +#define mmTPC7_CFG_KERNEL_SRF_5 0xFC66A4 + +#define mmTPC7_CFG_KERNEL_SRF_6 0xFC66A8 + +#define mmTPC7_CFG_KERNEL_SRF_7 0xFC66AC + +#define mmTPC7_CFG_KERNEL_SRF_8 0xFC66B0 + +#define mmTPC7_CFG_KERNEL_SRF_9 0xFC66B4 + +#define mmTPC7_CFG_KERNEL_SRF_10 0xFC66B8 + +#define mmTPC7_CFG_KERNEL_SRF_11 0xFC66BC + +#define mmTPC7_CFG_KERNEL_SRF_12 0xFC66C0 + +#define mmTPC7_CFG_KERNEL_SRF_13 0xFC66C4 + +#define mmTPC7_CFG_KERNEL_SRF_14 0xFC66C8 + +#define mmTPC7_CFG_KERNEL_SRF_15 0xFC66CC + +#define mmTPC7_CFG_KERNEL_SRF_16 0xFC66D0 + +#define mmTPC7_CFG_KERNEL_SRF_17 0xFC66D4 + +#define mmTPC7_CFG_KERNEL_SRF_18 0xFC66D8 + +#define mmTPC7_CFG_KERNEL_SRF_19 0xFC66DC + +#define mmTPC7_CFG_KERNEL_SRF_20 0xFC66E0 + +#define mmTPC7_CFG_KERNEL_SRF_21 0xFC66E4 + +#define mmTPC7_CFG_KERNEL_SRF_22 0xFC66E8 + +#define mmTPC7_CFG_KERNEL_SRF_23 0xFC66EC + +#define mmTPC7_CFG_KERNEL_SRF_24 0xFC66F0 + +#define mmTPC7_CFG_KERNEL_SRF_25 0xFC66F4 + +#define mmTPC7_CFG_KERNEL_SRF_26 0xFC66F8 + +#define mmTPC7_CFG_KERNEL_SRF_27 0xFC66FC + +#define mmTPC7_CFG_KERNEL_SRF_28 0xFC6700 + +#define mmTPC7_CFG_KERNEL_SRF_29 0xFC6704 + +#define mmTPC7_CFG_KERNEL_SRF_30 0xFC6708 + +#define mmTPC7_CFG_KERNEL_SRF_31 0xFC670C + +#define mmTPC7_CFG_KERNEL_KERNEL_CONFIG 0xFC6710 + +#define mmTPC7_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xFC6714 + +#define mmTPC7_CFG_RESERVED_DESC_END 0xFC6738 + +#define mmTPC7_CFG_ROUND_CSR 0xFC67FC + +#define mmTPC7_CFG_TBUF_BASE_ADDR_LOW 0xFC6800 + +#define mmTPC7_CFG_TBUF_BASE_ADDR_HIGH 0xFC6804 + +#define mmTPC7_CFG_SEMAPHORE 0xFC6808 + +#define mmTPC7_CFG_VFLAGS 0xFC680C + +#define mmTPC7_CFG_SFLAGS 0xFC6810 + +#define mmTPC7_CFG_LFSR_POLYNOM 0xFC6818 + +#define mmTPC7_CFG_STATUS 0xFC681C + +#define mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH 0xFC6820 + +#define mmTPC7_CFG_CFG_SUBTRACT_VALUE 0xFC6824 + +#define mmTPC7_CFG_SM_BASE_ADDRESS_LOW 0xFC6828 + +#define mmTPC7_CFG_SM_BASE_ADDRESS_HIGH 0xFC682C + +#define mmTPC7_CFG_TPC_CMD 0xFC6830 + +#define mmTPC7_CFG_TPC_EXECUTE 0xFC6838 + +#define mmTPC7_CFG_TPC_STALL 0xFC683C + +#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_LOW 0xFC6840 + +#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_HIGH 0xFC6844 + +#define mmTPC7_CFG_MSS_CONFIG 0xFC6854 + +#define mmTPC7_CFG_TPC_INTR_CAUSE 0xFC6858 + +#define mmTPC7_CFG_TPC_INTR_MASK 0xFC685C + +#define mmTPC7_CFG_TSB_CONFIG 0xFC6860 + +#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xFC6A00 + +#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xFC6A04 + +#define mmTPC7_CFG_QM_TENSOR_0_PADDING_VALUE 0xFC6A08 + +#define mmTPC7_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xFC6A0C + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_SIZE 0xFC6A10 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xFC6A14 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xFC6A18 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_SIZE 0xFC6A1C + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xFC6A20 + +#define 
mmTPC7_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xFC6A24 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_SIZE 0xFC6A28 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xFC6A2C + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xFC6A30 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_SIZE 0xFC6A34 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xFC6A38 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xFC6A3C + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_SIZE 0xFC6A40 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xFC6A44 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xFC6A48 + +#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xFC6A4C + +#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xFC6A50 + +#define mmTPC7_CFG_QM_TENSOR_1_PADDING_VALUE 0xFC6A54 + +#define mmTPC7_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xFC6A58 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_SIZE 0xFC6A5C + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xFC6A60 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xFC6A64 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_SIZE 0xFC6A68 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xFC6A6C + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xFC6A70 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_SIZE 0xFC6A74 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xFC6A78 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xFC6A7C + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_SIZE 0xFC6A80 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xFC6A84 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xFC6A88 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_SIZE 0xFC6A8C + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xFC6A90 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xFC6A94 + +#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xFC6A98 + +#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xFC6A9C + +#define mmTPC7_CFG_QM_TENSOR_2_PADDING_VALUE 0xFC6AA0 + +#define mmTPC7_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xFC6AA4 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_SIZE 0xFC6AA8 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xFC6AAC + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xFC6AB0 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_SIZE 0xFC6AB4 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xFC6AB8 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xFC6ABC + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_SIZE 0xFC6AC0 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xFC6AC4 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xFC6AC8 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_SIZE 0xFC6ACC + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xFC6AD0 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xFC6AD4 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_SIZE 0xFC6AD8 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xFC6ADC + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xFC6AE0 + +#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xFC6AE4 + +#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xFC6AE8 + +#define mmTPC7_CFG_QM_TENSOR_3_PADDING_VALUE 0xFC6AEC + +#define mmTPC7_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xFC6AF0 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_SIZE 0xFC6AF4 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xFC6AF8 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xFC6AFC + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_SIZE 0xFC6B00 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xFC6B04 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xFC6B08 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_SIZE 0xFC6B0C + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xFC6B10 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xFC6B14 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_SIZE 0xFC6B18 + +#define 
mmTPC7_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xFC6B1C + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xFC6B20 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_SIZE 0xFC6B24 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xFC6B28 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xFC6B2C + +#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xFC6B30 + +#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xFC6B34 + +#define mmTPC7_CFG_QM_TENSOR_4_PADDING_VALUE 0xFC6B38 + +#define mmTPC7_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xFC6B3C + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_SIZE 0xFC6B40 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xFC6B44 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xFC6B48 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_SIZE 0xFC6B4C + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xFC6B50 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xFC6B54 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_SIZE 0xFC6B58 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xFC6B5C + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xFC6B60 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_SIZE 0xFC6B64 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xFC6B68 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xFC6B6C + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_SIZE 0xFC6B70 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xFC6B74 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xFC6B78 + +#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xFC6B7C + +#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xFC6B80 + +#define mmTPC7_CFG_QM_TENSOR_5_PADDING_VALUE 0xFC6B84 + +#define mmTPC7_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xFC6B88 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_SIZE 0xFC6B8C + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xFC6B90 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xFC6B94 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_SIZE 0xFC6B98 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xFC6B9C + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xFC6BA0 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_SIZE 0xFC6BA4 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xFC6BA8 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xFC6BAC + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_SIZE 0xFC6BB0 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xFC6BB4 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xFC6BB8 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_SIZE 0xFC6BBC + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xFC6BC0 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xFC6BC4 + +#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xFC6BC8 + +#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xFC6BCC + +#define mmTPC7_CFG_QM_TENSOR_6_PADDING_VALUE 0xFC6BD0 + +#define mmTPC7_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xFC6BD4 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_SIZE 0xFC6BD8 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xFC6BDC + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xFC6BE0 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_SIZE 0xFC6BE4 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xFC6BE8 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xFC6BEC + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_SIZE 0xFC6BF0 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xFC6BF4 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xFC6BF8 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_SIZE 0xFC6BFC + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xFC6C00 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xFC6C04 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_SIZE 0xFC6C08 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xFC6C0C + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xFC6C10 + +#define 
mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xFC6C14 + +#define mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xFC6C18 + +#define mmTPC7_CFG_QM_TENSOR_7_PADDING_VALUE 0xFC6C1C + +#define mmTPC7_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xFC6C20 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_SIZE 0xFC6C24 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xFC6C28 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xFC6C2C + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_SIZE 0xFC6C30 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xFC6C34 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xFC6C38 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_SIZE 0xFC6C3C + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xFC6C40 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xFC6C44 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_SIZE 0xFC6C48 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xFC6C4C + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xFC6C50 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_SIZE 0xFC6C54 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xFC6C58 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xFC6C5C + +#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xFC6C60 + +#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xFC6C64 + +#define mmTPC7_CFG_QM_TID_BASE_DIM_0 0xFC6C68 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_0 0xFC6C6C + +#define mmTPC7_CFG_QM_TID_BASE_DIM_1 0xFC6C70 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_1 0xFC6C74 + +#define mmTPC7_CFG_QM_TID_BASE_DIM_2 0xFC6C78 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_2 0xFC6C7C + +#define mmTPC7_CFG_QM_TID_BASE_DIM_3 0xFC6C80 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_3 0xFC6C84 + +#define mmTPC7_CFG_QM_TID_BASE_DIM_4 0xFC6C88 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_4 0xFC6C8C + +#define mmTPC7_CFG_QM_SRF_0 0xFC6C90 + +#define mmTPC7_CFG_QM_SRF_1 0xFC6C94 + +#define mmTPC7_CFG_QM_SRF_2 0xFC6C98 + +#define mmTPC7_CFG_QM_SRF_3 0xFC6C9C + +#define mmTPC7_CFG_QM_SRF_4 0xFC6CA0 + +#define mmTPC7_CFG_QM_SRF_5 0xFC6CA4 + +#define mmTPC7_CFG_QM_SRF_6 0xFC6CA8 + +#define mmTPC7_CFG_QM_SRF_7 0xFC6CAC + +#define mmTPC7_CFG_QM_SRF_8 0xFC6CB0 + +#define mmTPC7_CFG_QM_SRF_9 0xFC6CB4 + +#define mmTPC7_CFG_QM_SRF_10 0xFC6CB8 + +#define mmTPC7_CFG_QM_SRF_11 0xFC6CBC + +#define mmTPC7_CFG_QM_SRF_12 0xFC6CC0 + +#define mmTPC7_CFG_QM_SRF_13 0xFC6CC4 + +#define mmTPC7_CFG_QM_SRF_14 0xFC6CC8 + +#define mmTPC7_CFG_QM_SRF_15 0xFC6CCC + +#define mmTPC7_CFG_QM_SRF_16 0xFC6CD0 + +#define mmTPC7_CFG_QM_SRF_17 0xFC6CD4 + +#define mmTPC7_CFG_QM_SRF_18 0xFC6CD8 + +#define mmTPC7_CFG_QM_SRF_19 0xFC6CDC + +#define mmTPC7_CFG_QM_SRF_20 0xFC6CE0 + +#define mmTPC7_CFG_QM_SRF_21 0xFC6CE4 + +#define mmTPC7_CFG_QM_SRF_22 0xFC6CE8 + +#define mmTPC7_CFG_QM_SRF_23 0xFC6CEC + +#define mmTPC7_CFG_QM_SRF_24 0xFC6CF0 + +#define mmTPC7_CFG_QM_SRF_25 0xFC6CF4 + +#define mmTPC7_CFG_QM_SRF_26 0xFC6CF8 + +#define mmTPC7_CFG_QM_SRF_27 0xFC6CFC + +#define mmTPC7_CFG_QM_SRF_28 0xFC6D00 + +#define mmTPC7_CFG_QM_SRF_29 0xFC6D04 + +#define mmTPC7_CFG_QM_SRF_30 0xFC6D08 + +#define mmTPC7_CFG_QM_SRF_31 0xFC6D0C + +#define mmTPC7_CFG_QM_KERNEL_CONFIG 0xFC6D10 + +#define mmTPC7_CFG_QM_SYNC_OBJECT_MESSAGE 0xFC6D14 + +#define mmTPC7_CFG_ARUSER 0xFC6D18 + +#define mmTPC7_CFG_AWUSER 0xFC6D1C + +#define mmTPC7_CFG_FUNC_MBIST_CNTRL 0xFC6E00 + +#define mmTPC7_CFG_FUNC_MBIST_PAT 0xFC6E04 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_0 0xFC6E08 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_1 0xFC6E0C + +#define mmTPC7_CFG_FUNC_MBIST_MEM_2 0xFC6E10 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_3 0xFC6E14 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_4 0xFC6E18 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_5 0xFC6E1C + +#define 
mmTPC7_CFG_FUNC_MBIST_MEM_6 0xFC6E20 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_7 0xFC6E24 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_8 0xFC6E28 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_9 0xFC6E2C + +#endif /* ASIC_REG_TPC7_CFG_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h new file mode 100644 index 000000000..4c160632f --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC7_CMDQ_REGS_H_ +#define ASIC_REG_TPC7_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC7_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC7_CMDQ_GLBL_CFG0 0xFC9000 + +#define mmTPC7_CMDQ_GLBL_CFG1 0xFC9004 + +#define mmTPC7_CMDQ_GLBL_PROT 0xFC9008 + +#define mmTPC7_CMDQ_GLBL_ERR_CFG 0xFC900C + +#define mmTPC7_CMDQ_GLBL_ERR_ADDR_LO 0xFC9010 + +#define mmTPC7_CMDQ_GLBL_ERR_ADDR_HI 0xFC9014 + +#define mmTPC7_CMDQ_GLBL_ERR_WDATA 0xFC9018 + +#define mmTPC7_CMDQ_GLBL_SECURE_PROPS 0xFC901C + +#define mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS 0xFC9020 + +#define mmTPC7_CMDQ_GLBL_STS0 0xFC9024 + +#define mmTPC7_CMDQ_GLBL_STS1 0xFC9028 + +#define mmTPC7_CMDQ_CQ_CFG0 0xFC90B0 + +#define mmTPC7_CMDQ_CQ_CFG1 0xFC90B4 + +#define mmTPC7_CMDQ_CQ_ARUSER 0xFC90B8 + +#define mmTPC7_CMDQ_CQ_PTR_LO 0xFC90C0 + +#define mmTPC7_CMDQ_CQ_PTR_HI 0xFC90C4 + +#define mmTPC7_CMDQ_CQ_TSIZE 0xFC90C8 + +#define mmTPC7_CMDQ_CQ_CTL 0xFC90CC + +#define mmTPC7_CMDQ_CQ_PTR_LO_STS 0xFC90D4 + +#define mmTPC7_CMDQ_CQ_PTR_HI_STS 0xFC90D8 + +#define mmTPC7_CMDQ_CQ_TSIZE_STS 0xFC90DC + +#define mmTPC7_CMDQ_CQ_CTL_STS 0xFC90E0 + +#define mmTPC7_CMDQ_CQ_STS0 0xFC90E4 + +#define mmTPC7_CMDQ_CQ_STS1 0xFC90E8 + +#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_EN 0xFC90F0 + +#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xFC90F4 + +#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_SAT 0xFC90F8 + +#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_TOUT 0xFC90FC + +#define mmTPC7_CMDQ_CQ_IFIFO_CNT 0xFC9108 + +#define mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_LO 0xFC9120 + +#define mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_HI 0xFC9124 + +#define mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_LO 0xFC9128 + +#define mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_HI 0xFC912C + +#define mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_LO 0xFC9130 + +#define mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_HI 0xFC9134 + +#define mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_LO 0xFC9138 + +#define mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_HI 0xFC913C + +#define mmTPC7_CMDQ_CP_LDMA_TSIZE_OFFSET 0xFC9140 + +#define mmTPC7_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xFC9144 + +#define mmTPC7_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xFC9148 + +#define mmTPC7_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xFC914C + +#define mmTPC7_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xFC9150 + +#define mmTPC7_CMDQ_CP_LDMA_COMMIT_OFFSET 0xFC9154 + +#define mmTPC7_CMDQ_CP_FENCE0_RDATA 0xFC9158 + +#define mmTPC7_CMDQ_CP_FENCE1_RDATA 0xFC915C + +#define mmTPC7_CMDQ_CP_FENCE2_RDATA 0xFC9160 + +#define mmTPC7_CMDQ_CP_FENCE3_RDATA 0xFC9164 + +#define mmTPC7_CMDQ_CP_FENCE0_CNT 0xFC9168 + +#define mmTPC7_CMDQ_CP_FENCE1_CNT 0xFC916C + +#define mmTPC7_CMDQ_CP_FENCE2_CNT 0xFC9170 + +#define mmTPC7_CMDQ_CP_FENCE3_CNT 0xFC9174 + +#define mmTPC7_CMDQ_CP_STS 0xFC9178 + +#define mmTPC7_CMDQ_CP_CURRENT_INST_LO 0xFC917C + +#define 
mmTPC7_CMDQ_CP_CURRENT_INST_HI 0xFC9180 + +#define mmTPC7_CMDQ_CP_BARRIER_CFG 0xFC9184 + +#define mmTPC7_CMDQ_CP_DBG_0 0xFC9188 + +#define mmTPC7_CMDQ_CQ_BUF_ADDR 0xFC9308 + +#define mmTPC7_CMDQ_CQ_BUF_RDATA 0xFC930C + +#endif /* ASIC_REG_TPC7_CMDQ_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h new file mode 100644 index 000000000..0c13d4d16 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC7_NRTR_REGS_H_ +#define ASIC_REG_TPC7_NRTR_REGS_H_ + +/* + ***************************************** + * TPC7_NRTR (Prototype: IF_NRTR) + ***************************************** + */ + +#define mmTPC7_NRTR_HBW_MAX_CRED 0xFC0100 + +#define mmTPC7_NRTR_LBW_MAX_CRED 0xFC0120 + +#define mmTPC7_NRTR_DBG_E_ARB 0xFC0300 + +#define mmTPC7_NRTR_DBG_W_ARB 0xFC0304 + +#define mmTPC7_NRTR_DBG_N_ARB 0xFC0308 + +#define mmTPC7_NRTR_DBG_S_ARB 0xFC030C + +#define mmTPC7_NRTR_DBG_L_ARB 0xFC0310 + +#define mmTPC7_NRTR_DBG_E_ARB_MAX 0xFC0320 + +#define mmTPC7_NRTR_DBG_W_ARB_MAX 0xFC0324 + +#define mmTPC7_NRTR_DBG_N_ARB_MAX 0xFC0328 + +#define mmTPC7_NRTR_DBG_S_ARB_MAX 0xFC032C + +#define mmTPC7_NRTR_DBG_L_ARB_MAX 0xFC0330 + +#define mmTPC7_NRTR_SPLIT_COEF_0 0xFC0400 + +#define mmTPC7_NRTR_SPLIT_COEF_1 0xFC0404 + +#define mmTPC7_NRTR_SPLIT_COEF_2 0xFC0408 + +#define mmTPC7_NRTR_SPLIT_COEF_3 0xFC040C + +#define mmTPC7_NRTR_SPLIT_COEF_4 0xFC0410 + +#define mmTPC7_NRTR_SPLIT_COEF_5 0xFC0414 + +#define mmTPC7_NRTR_SPLIT_COEF_6 0xFC0418 + +#define mmTPC7_NRTR_SPLIT_COEF_7 0xFC041C + +#define mmTPC7_NRTR_SPLIT_COEF_8 0xFC0420 + +#define mmTPC7_NRTR_SPLIT_COEF_9 0xFC0424 + +#define mmTPC7_NRTR_SPLIT_CFG 0xFC0440 + +#define mmTPC7_NRTR_SPLIT_RD_SAT 0xFC0444 + +#define mmTPC7_NRTR_SPLIT_RD_RST_TOKEN 0xFC0448 + +#define mmTPC7_NRTR_SPLIT_RD_TIMEOUT_0 0xFC044C + +#define mmTPC7_NRTR_SPLIT_RD_TIMEOUT_1 0xFC0450 + +#define mmTPC7_NRTR_SPLIT_WR_SAT 0xFC0454 + +#define mmTPC7_NRTR_WPLIT_WR_TST_TOLEN 0xFC0458 + +#define mmTPC7_NRTR_SPLIT_WR_TIMEOUT_0 0xFC045C + +#define mmTPC7_NRTR_SPLIT_WR_TIMEOUT_1 0xFC0460 + +#define mmTPC7_NRTR_HBW_RANGE_HIT 0xFC0470 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_0 0xFC0480 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_1 0xFC0484 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_2 0xFC0488 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_3 0xFC048C + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_4 0xFC0490 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_5 0xFC0494 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_6 0xFC0498 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_7 0xFC049C + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_0 0xFC04A0 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_1 0xFC04A4 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_2 0xFC04A8 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_3 0xFC04AC + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_4 0xFC04B0 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_5 0xFC04B4 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_6 0xFC04B8 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_7 0xFC04BC + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_0 0xFC04C0 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_1 0xFC04C4 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_2 0xFC04C8 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_3 0xFC04CC + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_4 0xFC04D0 
+ +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_5 0xFC04D4 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_6 0xFC04D8 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_7 0xFC04DC + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_0 0xFC04E0 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_1 0xFC04E4 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_2 0xFC04E8 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_3 0xFC04EC + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_4 0xFC04F0 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_5 0xFC04F4 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_6 0xFC04F8 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_7 0xFC04FC + +#define mmTPC7_NRTR_LBW_RANGE_HIT 0xFC0500 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_0 0xFC0510 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_1 0xFC0514 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_2 0xFC0518 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_3 0xFC051C + +#define mmTPC7_NRTR_LBW_RANGE_MASK_4 0xFC0520 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_5 0xFC0524 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_6 0xFC0528 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_7 0xFC052C + +#define mmTPC7_NRTR_LBW_RANGE_MASK_8 0xFC0530 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_9 0xFC0534 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_10 0xFC0538 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_11 0xFC053C + +#define mmTPC7_NRTR_LBW_RANGE_MASK_12 0xFC0540 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_13 0xFC0544 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_14 0xFC0548 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_15 0xFC054C + +#define mmTPC7_NRTR_LBW_RANGE_BASE_0 0xFC0550 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_1 0xFC0554 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_2 0xFC0558 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_3 0xFC055C + +#define mmTPC7_NRTR_LBW_RANGE_BASE_4 0xFC0560 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_5 0xFC0564 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_6 0xFC0568 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_7 0xFC056C + +#define mmTPC7_NRTR_LBW_RANGE_BASE_8 0xFC0570 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_9 0xFC0574 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_10 0xFC0578 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_11 0xFC057C + +#define mmTPC7_NRTR_LBW_RANGE_BASE_12 0xFC0580 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_13 0xFC0584 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_14 0xFC0588 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_15 0xFC058C + +#define mmTPC7_NRTR_RGLTR 0xFC0590 + +#define mmTPC7_NRTR_RGLTR_WR_RESULT 0xFC0594 + +#define mmTPC7_NRTR_RGLTR_RD_RESULT 0xFC0598 + +#define mmTPC7_NRTR_SCRAMB_EN 0xFC0600 + +#define mmTPC7_NRTR_NON_LIN_SCRAMB 0xFC0604 + +#endif /* ASIC_REG_TPC7_NRTR_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h new file mode 100644 index 000000000..cbe11425b --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC7_QM_REGS_H_ +#define ASIC_REG_TPC7_QM_REGS_H_ + +/* + ***************************************** + * TPC7_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC7_QM_GLBL_CFG0 0xFC8000 + +#define mmTPC7_QM_GLBL_CFG1 0xFC8004 + +#define mmTPC7_QM_GLBL_PROT 0xFC8008 + +#define mmTPC7_QM_GLBL_ERR_CFG 0xFC800C + +#define mmTPC7_QM_GLBL_ERR_ADDR_LO 0xFC8010 + +#define mmTPC7_QM_GLBL_ERR_ADDR_HI 0xFC8014 + +#define mmTPC7_QM_GLBL_ERR_WDATA 0xFC8018 + +#define mmTPC7_QM_GLBL_SECURE_PROPS 0xFC801C + +#define mmTPC7_QM_GLBL_NON_SECURE_PROPS 0xFC8020 + +#define mmTPC7_QM_GLBL_STS0 0xFC8024 + +#define mmTPC7_QM_GLBL_STS1 0xFC8028 + +#define mmTPC7_QM_PQ_BASE_LO 0xFC8060 + +#define mmTPC7_QM_PQ_BASE_HI 0xFC8064 + +#define mmTPC7_QM_PQ_SIZE 0xFC8068 + +#define mmTPC7_QM_PQ_PI 0xFC806C + +#define mmTPC7_QM_PQ_CI 0xFC8070 + +#define mmTPC7_QM_PQ_CFG0 0xFC8074 + +#define mmTPC7_QM_PQ_CFG1 0xFC8078 + +#define mmTPC7_QM_PQ_ARUSER 0xFC807C + +#define mmTPC7_QM_PQ_PUSH0 0xFC8080 + +#define mmTPC7_QM_PQ_PUSH1 0xFC8084 + +#define mmTPC7_QM_PQ_PUSH2 0xFC8088 + +#define mmTPC7_QM_PQ_PUSH3 0xFC808C + +#define mmTPC7_QM_PQ_STS0 0xFC8090 + +#define mmTPC7_QM_PQ_STS1 0xFC8094 + +#define mmTPC7_QM_PQ_RD_RATE_LIM_EN 0xFC80A0 + +#define mmTPC7_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xFC80A4 + +#define mmTPC7_QM_PQ_RD_RATE_LIM_SAT 0xFC80A8 + +#define mmTPC7_QM_PQ_RD_RATE_LIM_TOUT 0xFC80AC + +#define mmTPC7_QM_CQ_CFG0 0xFC80B0 + +#define mmTPC7_QM_CQ_CFG1 0xFC80B4 + +#define mmTPC7_QM_CQ_ARUSER 0xFC80B8 + +#define mmTPC7_QM_CQ_PTR_LO 0xFC80C0 + +#define mmTPC7_QM_CQ_PTR_HI 0xFC80C4 + +#define mmTPC7_QM_CQ_TSIZE 0xFC80C8 + +#define mmTPC7_QM_CQ_CTL 0xFC80CC + +#define mmTPC7_QM_CQ_PTR_LO_STS 0xFC80D4 + +#define mmTPC7_QM_CQ_PTR_HI_STS 0xFC80D8 + +#define mmTPC7_QM_CQ_TSIZE_STS 0xFC80DC + +#define mmTPC7_QM_CQ_CTL_STS 0xFC80E0 + +#define mmTPC7_QM_CQ_STS0 0xFC80E4 + +#define mmTPC7_QM_CQ_STS1 0xFC80E8 + +#define mmTPC7_QM_CQ_RD_RATE_LIM_EN 0xFC80F0 + +#define mmTPC7_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xFC80F4 + +#define mmTPC7_QM_CQ_RD_RATE_LIM_SAT 0xFC80F8 + +#define mmTPC7_QM_CQ_RD_RATE_LIM_TOUT 0xFC80FC + +#define mmTPC7_QM_CQ_IFIFO_CNT 0xFC8108 + +#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO 0xFC8120 + +#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI 0xFC8124 + +#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO 0xFC8128 + +#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI 0xFC812C + +#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO 0xFC8130 + +#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI 0xFC8134 + +#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO 0xFC8138 + +#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI 0xFC813C + +#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET 0xFC8140 + +#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xFC8144 + +#define mmTPC7_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xFC8148 + +#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xFC814C + +#define mmTPC7_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xFC8150 + +#define mmTPC7_QM_CP_LDMA_COMMIT_OFFSET 0xFC8154 + +#define mmTPC7_QM_CP_FENCE0_RDATA 0xFC8158 + +#define mmTPC7_QM_CP_FENCE1_RDATA 0xFC815C + +#define mmTPC7_QM_CP_FENCE2_RDATA 0xFC8160 + +#define mmTPC7_QM_CP_FENCE3_RDATA 0xFC8164 + +#define mmTPC7_QM_CP_FENCE0_CNT 0xFC8168 + +#define mmTPC7_QM_CP_FENCE1_CNT 0xFC816C + +#define mmTPC7_QM_CP_FENCE2_CNT 0xFC8170 + +#define mmTPC7_QM_CP_FENCE3_CNT 0xFC8174 + +#define mmTPC7_QM_CP_STS 0xFC8178 + +#define mmTPC7_QM_CP_CURRENT_INST_LO 0xFC817C + +#define 
mmTPC7_QM_CP_CURRENT_INST_HI 0xFC8180 + +#define mmTPC7_QM_CP_BARRIER_CFG 0xFC8184 + +#define mmTPC7_QM_CP_DBG_0 0xFC8188 + +#define mmTPC7_QM_PQ_BUF_ADDR 0xFC8300 + +#define mmTPC7_QM_PQ_BUF_RDATA 0xFC8304 + +#define mmTPC7_QM_CQ_BUF_ADDR 0xFC8308 + +#define mmTPC7_QM_CQ_BUF_RDATA 0xFC830C + +#endif /* ASIC_REG_TPC7_QM_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h new file mode 100644 index 000000000..e25e19660 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC_PLL_REGS_H_ +#define ASIC_REG_TPC_PLL_REGS_H_ + +/* + ***************************************** + * TPC_PLL (Prototype: PLL) + ***************************************** + */ + +#define mmTPC_PLL_NR 0xE01100 + +#define mmTPC_PLL_NF 0xE01104 + +#define mmTPC_PLL_OD 0xE01108 + +#define mmTPC_PLL_NB 0xE0110C + +#define mmTPC_PLL_CFG 0xE01110 + +#define mmTPC_PLL_LOSE_MASK 0xE01120 + +#define mmTPC_PLL_LOCK_INTR 0xE01128 + +#define mmTPC_PLL_LOCK_BYPASS 0xE0112C + +#define mmTPC_PLL_DATA_CHNG 0xE01130 + +#define mmTPC_PLL_RST 0xE01134 + +#define mmTPC_PLL_SLIP_WD_CNTR 0xE01150 + +#define mmTPC_PLL_DIV_FACTOR_0 0xE01200 + +#define mmTPC_PLL_DIV_FACTOR_1 0xE01204 + +#define mmTPC_PLL_DIV_FACTOR_2 0xE01208 + +#define mmTPC_PLL_DIV_FACTOR_3 0xE0120C + +#define mmTPC_PLL_DIV_FACTOR_CMD_0 0xE01220 + +#define mmTPC_PLL_DIV_FACTOR_CMD_1 0xE01224 + +#define mmTPC_PLL_DIV_FACTOR_CMD_2 0xE01228 + +#define mmTPC_PLL_DIV_FACTOR_CMD_3 0xE0122C + +#define mmTPC_PLL_DIV_SEL_0 0xE01280 + +#define mmTPC_PLL_DIV_SEL_1 0xE01284 + +#define mmTPC_PLL_DIV_SEL_2 0xE01288 + +#define mmTPC_PLL_DIV_SEL_3 0xE0128C + +#define mmTPC_PLL_DIV_EN_0 0xE012A0 + +#define mmTPC_PLL_DIV_EN_1 0xE012A4 + +#define mmTPC_PLL_DIV_EN_2 0xE012A8 + +#define mmTPC_PLL_DIV_EN_3 0xE012AC + +#define mmTPC_PLL_DIV_FACTOR_BUSY_0 0xE012C0 + +#define mmTPC_PLL_DIV_FACTOR_BUSY_1 0xE012C4 + +#define mmTPC_PLL_DIV_FACTOR_BUSY_2 0xE012C8 + +#define mmTPC_PLL_DIV_FACTOR_BUSY_3 0xE012CC + +#define mmTPC_PLL_CLK_GATER 0xE01300 + +#define mmTPC_PLL_CLK_RLX_0 0xE01310 + +#define mmTPC_PLL_CLK_RLX_1 0xE01314 + +#define mmTPC_PLL_CLK_RLX_2 0xE01318 + +#define mmTPC_PLL_CLK_RLX_3 0xE0131C + +#define mmTPC_PLL_REF_CNTR_PERIOD 0xE01400 + +#define mmTPC_PLL_REF_LOW_THRESHOLD 0xE01410 + +#define mmTPC_PLL_REF_HIGH_THRESHOLD 0xE01420 + +#define mmTPC_PLL_PLL_NOT_STABLE 0xE01430 + +#define mmTPC_PLL_FREQ_CALC_EN 0xE01440 + +#endif /* ASIC_REG_TPC_PLL_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/misc/habanalabs/include/goya/goya.h new file mode 100644 index 000000000..43d241891 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/goya.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef GOYA_H +#define GOYA_H + +#define SRAM_CFG_BAR_ID 0 +#define MSIX_BAR_ID 2 +#define DDR_BAR_ID 4 + +#define CFG_BAR_SIZE 0x10000000ull /* 256MB */ +#define MSIX_BAR_SIZE 0x1000ull /* 4KB */ + +#define CFG_BASE 0x7FFC000000ull +#define CFG_SIZE 0x4000000 /* 32MB CFG + 32MB DBG*/ + +#define SRAM_BASE_ADDR 0x7FF0000000ull +#define SRAM_SIZE 0x32A0000 /* 50.625MB */ + +#define DRAM_PHYS_BASE 0x0ull + +#define HOST_PHYS_BASE 0x8000000000ull /* 0.5TB */ +#define HOST_PHYS_SIZE 0x1000000000000ull /* 0.25PB (48 bits) */ + +#define GOYA_MSIX_ENTRIES 8 + +#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */ + +#define MAX_ASID 1024 + +#define PROT_BITS_OFFS 0xF80 + +#define DMA_MAX_NUM 5 + +#define TPC_MAX_NUM 8 + +#define MME_MAX_NUM 1 + +#endif /* GOYA_H */ diff --git a/drivers/misc/habanalabs/include/goya/goya_async_events.h b/drivers/misc/habanalabs/include/goya/goya_async_events.h new file mode 100644 index 000000000..5fb92362f --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/goya_async_events.h @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef __GOYA_ASYNC_EVENTS_H_ +#define __GOYA_ASYNC_EVENTS_H_ + +enum goya_async_event_id { + GOYA_ASYNC_EVENT_ID_PCIE_CORE = 32, + GOYA_ASYNC_EVENT_ID_PCIE_IF = 33, + GOYA_ASYNC_EVENT_ID_PCIE_PHY = 34, + GOYA_ASYNC_EVENT_ID_TPC0_ECC = 36, + GOYA_ASYNC_EVENT_ID_TPC1_ECC = 39, + GOYA_ASYNC_EVENT_ID_TPC2_ECC = 42, + GOYA_ASYNC_EVENT_ID_TPC3_ECC = 45, + GOYA_ASYNC_EVENT_ID_TPC4_ECC = 48, + GOYA_ASYNC_EVENT_ID_TPC5_ECC = 51, + GOYA_ASYNC_EVENT_ID_TPC6_ECC = 54, + GOYA_ASYNC_EVENT_ID_TPC7_ECC = 57, + GOYA_ASYNC_EVENT_ID_MME_ECC = 60, + GOYA_ASYNC_EVENT_ID_MME_ECC_EXT = 61, + GOYA_ASYNC_EVENT_ID_MMU_ECC = 63, + GOYA_ASYNC_EVENT_ID_DMA_MACRO = 64, + GOYA_ASYNC_EVENT_ID_DMA_ECC = 66, + GOYA_ASYNC_EVENT_ID_DDR0_PARITY = 69, + GOYA_ASYNC_EVENT_ID_DDR1_PARITY = 72, + GOYA_ASYNC_EVENT_ID_CPU_IF_ECC = 75, + GOYA_ASYNC_EVENT_ID_PSOC_MEM = 78, + GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT = 79, + GOYA_ASYNC_EVENT_ID_SRAM0 = 81, + GOYA_ASYNC_EVENT_ID_SRAM1 = 82, + GOYA_ASYNC_EVENT_ID_SRAM2 = 83, + GOYA_ASYNC_EVENT_ID_SRAM3 = 84, + GOYA_ASYNC_EVENT_ID_SRAM4 = 85, + GOYA_ASYNC_EVENT_ID_SRAM5 = 86, + GOYA_ASYNC_EVENT_ID_SRAM6 = 87, + GOYA_ASYNC_EVENT_ID_SRAM7 = 88, + GOYA_ASYNC_EVENT_ID_SRAM8 = 89, + GOYA_ASYNC_EVENT_ID_SRAM9 = 90, + GOYA_ASYNC_EVENT_ID_SRAM10 = 91, + GOYA_ASYNC_EVENT_ID_SRAM11 = 92, + GOYA_ASYNC_EVENT_ID_SRAM12 = 93, + GOYA_ASYNC_EVENT_ID_SRAM13 = 94, + GOYA_ASYNC_EVENT_ID_SRAM14 = 95, + GOYA_ASYNC_EVENT_ID_SRAM15 = 96, + GOYA_ASYNC_EVENT_ID_SRAM16 = 97, + GOYA_ASYNC_EVENT_ID_SRAM17 = 98, + GOYA_ASYNC_EVENT_ID_SRAM18 = 99, + GOYA_ASYNC_EVENT_ID_SRAM19 = 100, + GOYA_ASYNC_EVENT_ID_SRAM20 = 101, + GOYA_ASYNC_EVENT_ID_SRAM21 = 102, + GOYA_ASYNC_EVENT_ID_SRAM22 = 103, + GOYA_ASYNC_EVENT_ID_SRAM23 = 104, + GOYA_ASYNC_EVENT_ID_SRAM24 = 105, + GOYA_ASYNC_EVENT_ID_SRAM25 = 106, + GOYA_ASYNC_EVENT_ID_SRAM26 = 107, + GOYA_ASYNC_EVENT_ID_SRAM27 = 108, + GOYA_ASYNC_EVENT_ID_SRAM28 = 109, + GOYA_ASYNC_EVENT_ID_SRAM29 = 110, + GOYA_ASYNC_EVENT_ID_GIC500 = 112, + GOYA_ASYNC_EVENT_ID_PCIE_DEC = 115, + GOYA_ASYNC_EVENT_ID_TPC0_DEC = 117, + GOYA_ASYNC_EVENT_ID_TPC1_DEC = 120, + GOYA_ASYNC_EVENT_ID_TPC2_DEC = 123, + GOYA_ASYNC_EVENT_ID_TPC3_DEC = 126, + GOYA_ASYNC_EVENT_ID_TPC4_DEC = 129, + GOYA_ASYNC_EVENT_ID_TPC5_DEC = 132, + GOYA_ASYNC_EVENT_ID_TPC6_DEC = 135, + GOYA_ASYNC_EVENT_ID_TPC7_DEC = 138, + GOYA_ASYNC_EVENT_ID_AXI_ECC = 139, + GOYA_ASYNC_EVENT_ID_L2_RAM_ECC = 140, + 
GOYA_ASYNC_EVENT_ID_MME_WACS = 141, + GOYA_ASYNC_EVENT_ID_MME_WACSD = 142, + GOYA_ASYNC_EVENT_ID_PLL0 = 143, + GOYA_ASYNC_EVENT_ID_PLL1 = 144, + GOYA_ASYNC_EVENT_ID_PLL2 = 145, + GOYA_ASYNC_EVENT_ID_PLL3 = 146, + GOYA_ASYNC_EVENT_ID_PLL4 = 147, + GOYA_ASYNC_EVENT_ID_PLL5 = 148, + GOYA_ASYNC_EVENT_ID_PLL6 = 149, + GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER = 155, + GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC = 159, + GOYA_ASYNC_EVENT_ID_PSOC = 160, + GOYA_ASYNC_EVENT_ID_PCIE_FLR = 171, + GOYA_ASYNC_EVENT_ID_PCIE_HOT_RESET = 172, + GOYA_ASYNC_EVENT_ID_PCIE_PERST = 173, + GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG0 = 174, + GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG1 = 175, + GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG2 = 176, + GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG3 = 177, + GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG0 = 178, + GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG1 = 179, + GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG2 = 180, + GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG3 = 181, + GOYA_ASYNC_EVENT_ID_PCIE_APB = 182, + GOYA_ASYNC_EVENT_ID_PCIE_QDB = 183, + GOYA_ASYNC_EVENT_ID_PCIE_BM_D_P_WR = 184, + GOYA_ASYNC_EVENT_ID_PCIE_BM_D_RD = 185, + GOYA_ASYNC_EVENT_ID_PCIE_BM_U_P_WR = 186, + GOYA_ASYNC_EVENT_ID_PCIE_BM_U_RD = 187, + GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU = 190, + GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR = 191, + GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU = 200, + GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR = 201, + GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU = 210, + GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR = 211, + GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU = 220, + GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR = 221, + GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU = 230, + GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR = 231, + GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU = 240, + GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR = 241, + GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU = 250, + GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR = 251, + GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU = 260, + GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR = 261, + GOYA_ASYNC_EVENT_ID_MMU_SBA_SPMU0 = 270, + GOYA_ASYNC_EVENT_ID_MMU_SBA_SPMU1 = 271, + GOYA_ASYNC_EVENT_ID_MME_WACS_UP = 272, + GOYA_ASYNC_EVENT_ID_MME_WACS_DOWN = 273, + GOYA_ASYNC_EVENT_ID_MMU_PAGE_FAULT = 280, + GOYA_ASYNC_EVENT_ID_MMU_WR_PERM = 281, + GOYA_ASYNC_EVENT_ID_MMU_DBG_BM = 282, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 = 290, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH1 = 291, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH2 = 292, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH3 = 293, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH4 = 294, + GOYA_ASYNC_EVENT_ID_DDR0_PHY_DFI = 300, + GOYA_ASYNC_EVENT_ID_DDR0_ECC_SCRUB = 301, + GOYA_ASYNC_EVENT_ID_DDR0_DB_ECC = 302, + GOYA_ASYNC_EVENT_ID_DDR0_SB_ECC = 303, + GOYA_ASYNC_EVENT_ID_DDR0_SB_ECC_MC = 304, + GOYA_ASYNC_EVENT_ID_DDR0_AXI_RD = 305, + GOYA_ASYNC_EVENT_ID_DDR0_AXI_WR = 306, + GOYA_ASYNC_EVENT_ID_DDR1_PHY_DFI = 310, + GOYA_ASYNC_EVENT_ID_DDR1_ECC_SCRUB = 311, + GOYA_ASYNC_EVENT_ID_DDR1_DB_ECC = 312, + GOYA_ASYNC_EVENT_ID_DDR1_SB_ECC = 313, + GOYA_ASYNC_EVENT_ID_DDR1_SB_ECC_MC = 314, + GOYA_ASYNC_EVENT_ID_DDR1_AXI_RD = 315, + GOYA_ASYNC_EVENT_ID_DDR1_AXI_WR = 316, + GOYA_ASYNC_EVENT_ID_CPU_BMON = 320, + GOYA_ASYNC_EVENT_ID_TS_EAST = 322, + GOYA_ASYNC_EVENT_ID_TS_WEST = 323, + GOYA_ASYNC_EVENT_ID_TS_NORTH = 324, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_0 = 330, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_1 = 331, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_2 = 332, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_3 = 333, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_4 = 334, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET = 356, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT = 361, + GOYA_ASYNC_EVENT_ID_FAN = 425, + GOYA_ASYNC_EVENT_ID_TPC0_CMDQ = 430, + GOYA_ASYNC_EVENT_ID_TPC1_CMDQ = 431, + GOYA_ASYNC_EVENT_ID_TPC2_CMDQ = 
432, + GOYA_ASYNC_EVENT_ID_TPC3_CMDQ = 433, + GOYA_ASYNC_EVENT_ID_TPC4_CMDQ = 434, + GOYA_ASYNC_EVENT_ID_TPC5_CMDQ = 435, + GOYA_ASYNC_EVENT_ID_TPC6_CMDQ = 436, + GOYA_ASYNC_EVENT_ID_TPC7_CMDQ = 437, + GOYA_ASYNC_EVENT_ID_TPC0_QM = 438, + GOYA_ASYNC_EVENT_ID_TPC1_QM = 439, + GOYA_ASYNC_EVENT_ID_TPC2_QM = 440, + GOYA_ASYNC_EVENT_ID_TPC3_QM = 441, + GOYA_ASYNC_EVENT_ID_TPC4_QM = 442, + GOYA_ASYNC_EVENT_ID_TPC5_QM = 443, + GOYA_ASYNC_EVENT_ID_TPC6_QM = 444, + GOYA_ASYNC_EVENT_ID_TPC7_QM = 445, + GOYA_ASYNC_EVENT_ID_MME_QM = 447, + GOYA_ASYNC_EVENT_ID_MME_CMDQ = 448, + GOYA_ASYNC_EVENT_ID_DMA0_QM = 449, + GOYA_ASYNC_EVENT_ID_DMA1_QM = 450, + GOYA_ASYNC_EVENT_ID_DMA2_QM = 451, + GOYA_ASYNC_EVENT_ID_DMA3_QM = 452, + GOYA_ASYNC_EVENT_ID_DMA4_QM = 453, + GOYA_ASYNC_EVENT_ID_DMA_ON_HBW = 454, + GOYA_ASYNC_EVENT_ID_DMA0_CH = 455, + GOYA_ASYNC_EVENT_ID_DMA1_CH = 456, + GOYA_ASYNC_EVENT_ID_DMA2_CH = 457, + GOYA_ASYNC_EVENT_ID_DMA3_CH = 458, + GOYA_ASYNC_EVENT_ID_DMA4_CH = 459, + GOYA_ASYNC_EVENT_ID_PI_UPDATE = 484, + GOYA_ASYNC_EVENT_ID_HALT_MACHINE = 485, + GOYA_ASYNC_EVENT_ID_INTS_REGISTER = 486, + GOYA_ASYNC_EVENT_ID_SOFT_RESET = 487, + GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S = 507, + GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E = 508, + GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S = 509, + GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E = 510, + GOYA_ASYNC_EVENT_ID_LAST_VALID_ID = 1023, + GOYA_ASYNC_EVENT_ID_SIZE +}; + +#endif /* __GOYA_ASYNC_EVENTS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/goya_coresight.h b/drivers/misc/habanalabs/include/goya/goya_coresight.h new file mode 100644 index 000000000..6e933c0ca --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/goya_coresight.h @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef GOYA_CORESIGHT_H +#define GOYA_CORESIGHT_H + +enum goya_debug_stm_regs_index { + GOYA_STM_FIRST = 0, + GOYA_STM_CPU = GOYA_STM_FIRST, + GOYA_STM_DMA_CH_0_CS, + GOYA_STM_DMA_CH_1_CS, + GOYA_STM_DMA_CH_2_CS, + GOYA_STM_DMA_CH_3_CS, + GOYA_STM_DMA_CH_4_CS, + GOYA_STM_DMA_MACRO_CS, + GOYA_STM_MME1_SBA, + GOYA_STM_MME3_SBB, + GOYA_STM_MME4_WACS2, + GOYA_STM_MME4_WACS, + GOYA_STM_MMU_CS, + GOYA_STM_PCIE, + GOYA_STM_PSOC, + GOYA_STM_TPC0_EML, + GOYA_STM_TPC1_EML, + GOYA_STM_TPC2_EML, + GOYA_STM_TPC3_EML, + GOYA_STM_TPC4_EML, + GOYA_STM_TPC5_EML, + GOYA_STM_TPC6_EML, + GOYA_STM_TPC7_EML, + GOYA_STM_LAST = GOYA_STM_TPC7_EML +}; + +enum goya_debug_etf_regs_index { + GOYA_ETF_FIRST = 0, + GOYA_ETF_CPU_0 = GOYA_ETF_FIRST, + GOYA_ETF_CPU_1, + GOYA_ETF_CPU_TRACE, + GOYA_ETF_DMA_CH_0_CS, + GOYA_ETF_DMA_CH_1_CS, + GOYA_ETF_DMA_CH_2_CS, + GOYA_ETF_DMA_CH_3_CS, + GOYA_ETF_DMA_CH_4_CS, + GOYA_ETF_DMA_MACRO_CS, + GOYA_ETF_MME1_SBA, + GOYA_ETF_MME3_SBB, + GOYA_ETF_MME4_WACS2, + GOYA_ETF_MME4_WACS, + GOYA_ETF_MMU_CS, + GOYA_ETF_PCIE, + GOYA_ETF_PSOC, + GOYA_ETF_TPC0_EML, + GOYA_ETF_TPC1_EML, + GOYA_ETF_TPC2_EML, + GOYA_ETF_TPC3_EML, + GOYA_ETF_TPC4_EML, + GOYA_ETF_TPC5_EML, + GOYA_ETF_TPC6_EML, + GOYA_ETF_TPC7_EML, + GOYA_ETF_LAST = GOYA_ETF_TPC7_EML +}; + +enum goya_debug_funnel_regs_index { + GOYA_FUNNEL_FIRST = 0, + GOYA_FUNNEL_CPU = GOYA_FUNNEL_FIRST, + GOYA_FUNNEL_DMA_CH_6_1, + GOYA_FUNNEL_DMA_MACRO_3_1, + GOYA_FUNNEL_MME0_RTR, + GOYA_FUNNEL_MME1_RTR, + GOYA_FUNNEL_MME2_RTR, + GOYA_FUNNEL_MME3_RTR, + GOYA_FUNNEL_MME4_RTR, + GOYA_FUNNEL_MME5_RTR, + GOYA_FUNNEL_PCIE, + GOYA_FUNNEL_PSOC, + GOYA_FUNNEL_TPC0_EML, + GOYA_FUNNEL_TPC1_EML, + GOYA_FUNNEL_TPC1_RTR, + GOYA_FUNNEL_TPC2_EML, + GOYA_FUNNEL_TPC2_RTR, + GOYA_FUNNEL_TPC3_EML, + GOYA_FUNNEL_TPC3_RTR, + GOYA_FUNNEL_TPC4_EML, + GOYA_FUNNEL_TPC4_RTR, + GOYA_FUNNEL_TPC5_EML, + GOYA_FUNNEL_TPC5_RTR, + GOYA_FUNNEL_TPC6_EML, + GOYA_FUNNEL_TPC6_RTR, + GOYA_FUNNEL_TPC7_EML, + GOYA_FUNNEL_LAST = GOYA_FUNNEL_TPC7_EML +}; + +enum goya_debug_bmon_regs_index { + GOYA_BMON_FIRST = 0, + GOYA_BMON_CPU_RD = GOYA_BMON_FIRST, + GOYA_BMON_CPU_WR, + GOYA_BMON_DMA_CH_0_0, + GOYA_BMON_DMA_CH_0_1, + GOYA_BMON_DMA_CH_1_0, + GOYA_BMON_DMA_CH_1_1, + GOYA_BMON_DMA_CH_2_0, + GOYA_BMON_DMA_CH_2_1, + GOYA_BMON_DMA_CH_3_0, + GOYA_BMON_DMA_CH_3_1, + GOYA_BMON_DMA_CH_4_0, + GOYA_BMON_DMA_CH_4_1, + GOYA_BMON_DMA_MACRO_0, + GOYA_BMON_DMA_MACRO_1, + GOYA_BMON_DMA_MACRO_2, + GOYA_BMON_DMA_MACRO_3, + GOYA_BMON_DMA_MACRO_4, + GOYA_BMON_DMA_MACRO_5, + GOYA_BMON_DMA_MACRO_6, + GOYA_BMON_DMA_MACRO_7, + GOYA_BMON_MME1_SBA_0, + GOYA_BMON_MME1_SBA_1, + GOYA_BMON_MME3_SBB_0, + GOYA_BMON_MME3_SBB_1, + GOYA_BMON_MME4_WACS2_0, + GOYA_BMON_MME4_WACS2_1, + GOYA_BMON_MME4_WACS2_2, + GOYA_BMON_MME4_WACS_0, + GOYA_BMON_MME4_WACS_1, + GOYA_BMON_MME4_WACS_2, + GOYA_BMON_MME4_WACS_3, + GOYA_BMON_MME4_WACS_4, + GOYA_BMON_MME4_WACS_5, + GOYA_BMON_MME4_WACS_6, + GOYA_BMON_MMU_0, + GOYA_BMON_MMU_1, + GOYA_BMON_PCIE_MSTR_RD, + GOYA_BMON_PCIE_MSTR_WR, + GOYA_BMON_PCIE_SLV_RD, + GOYA_BMON_PCIE_SLV_WR, + GOYA_BMON_TPC0_EML_0, + GOYA_BMON_TPC0_EML_1, + GOYA_BMON_TPC0_EML_2, + GOYA_BMON_TPC0_EML_3, + GOYA_BMON_TPC1_EML_0, + GOYA_BMON_TPC1_EML_1, + GOYA_BMON_TPC1_EML_2, + GOYA_BMON_TPC1_EML_3, + GOYA_BMON_TPC2_EML_0, + GOYA_BMON_TPC2_EML_1, + GOYA_BMON_TPC2_EML_2, + GOYA_BMON_TPC2_EML_3, + GOYA_BMON_TPC3_EML_0, + GOYA_BMON_TPC3_EML_1, + GOYA_BMON_TPC3_EML_2, + GOYA_BMON_TPC3_EML_3, + GOYA_BMON_TPC4_EML_0, + GOYA_BMON_TPC4_EML_1, + GOYA_BMON_TPC4_EML_2, + GOYA_BMON_TPC4_EML_3, + GOYA_BMON_TPC5_EML_0, + 
GOYA_BMON_TPC5_EML_1, + GOYA_BMON_TPC5_EML_2, + GOYA_BMON_TPC5_EML_3, + GOYA_BMON_TPC6_EML_0, + GOYA_BMON_TPC6_EML_1, + GOYA_BMON_TPC6_EML_2, + GOYA_BMON_TPC6_EML_3, + GOYA_BMON_TPC7_EML_0, + GOYA_BMON_TPC7_EML_1, + GOYA_BMON_TPC7_EML_2, + GOYA_BMON_TPC7_EML_3, + GOYA_BMON_LAST = GOYA_BMON_TPC7_EML_3 +}; + +enum goya_debug_spmu_regs_index { + GOYA_SPMU_FIRST = 0, + GOYA_SPMU_DMA_CH_0_CS = GOYA_SPMU_FIRST, + GOYA_SPMU_DMA_CH_1_CS, + GOYA_SPMU_DMA_CH_2_CS, + GOYA_SPMU_DMA_CH_3_CS, + GOYA_SPMU_DMA_CH_4_CS, + GOYA_SPMU_DMA_MACRO_CS, + GOYA_SPMU_MME1_SBA, + GOYA_SPMU_MME3_SBB, + GOYA_SPMU_MME4_WACS2, + GOYA_SPMU_MME4_WACS, + GOYA_SPMU_MMU_CS, + GOYA_SPMU_PCIE, + GOYA_SPMU_TPC0_EML, + GOYA_SPMU_TPC1_EML, + GOYA_SPMU_TPC2_EML, + GOYA_SPMU_TPC3_EML, + GOYA_SPMU_TPC4_EML, + GOYA_SPMU_TPC5_EML, + GOYA_SPMU_TPC6_EML, + GOYA_SPMU_TPC7_EML, + GOYA_SPMU_LAST = GOYA_SPMU_TPC7_EML +}; + +#endif /* GOYA_CORESIGHT_H */ diff --git a/drivers/misc/habanalabs/include/goya/goya_fw_if.h b/drivers/misc/habanalabs/include/goya/goya_fw_if.h new file mode 100644 index 000000000..0fa80fe9f --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/goya_fw_if.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef GOYA_FW_IF_H +#define GOYA_FW_IF_H + +#define GOYA_EVENT_QUEUE_MSIX_IDX 5 + +#define CPU_BOOT_ADDR 0x7FF8040000ull + +#define UBOOT_FW_OFFSET 0x100000 /* 1MB in SRAM */ +#define LINUX_FW_OFFSET 0x800000 /* 8MB in DDR */ + +enum goya_pll_index { + CPU_PLL = 0, + IC_PLL, + MC_PLL, + MME_PLL, + PCI_PLL, + EMMC_PLL, + TPC_PLL +}; + +#define GOYA_PLL_FREQ_LOW 50000000 /* 50 MHz */ + +#endif /* GOYA_FW_IF_H */ diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h new file mode 100644 index 000000000..ef54bad20 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/goya_packets.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2017-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef GOYA_PACKETS_H +#define GOYA_PACKETS_H + +#include <linux/types.h> + +#define PACKET_HEADER_PACKET_ID_SHIFT 56 +#define PACKET_HEADER_PACKET_ID_MASK 0x1F00000000000000ull + +enum packet_id { + PACKET_WREG_32 = 0x1, + PACKET_WREG_BULK = 0x2, + PACKET_MSG_LONG = 0x3, + PACKET_MSG_SHORT = 0x4, + PACKET_CP_DMA = 0x5, + PACKET_MSG_PROT = 0x7, + PACKET_FENCE = 0x8, + PACKET_LIN_DMA = 0x9, + PACKET_NOP = 0xA, + PACKET_STOP = 0xB, + MAX_PACKET_ID = (PACKET_HEADER_PACKET_ID_MASK >> + PACKET_HEADER_PACKET_ID_SHIFT) + 1 +}; + +enum goya_dma_direction { + DMA_HOST_TO_DRAM, + DMA_HOST_TO_SRAM, + DMA_DRAM_TO_SRAM, + DMA_SRAM_TO_DRAM, + DMA_SRAM_TO_HOST, + DMA_DRAM_TO_HOST, + DMA_DRAM_TO_DRAM, + DMA_SRAM_TO_SRAM, + DMA_ENUM_MAX +}; + +#define GOYA_PKT_CTL_OPCODE_SHIFT 24 +#define GOYA_PKT_CTL_OPCODE_MASK 0x1F000000 + +#define GOYA_PKT_CTL_EB_SHIFT 29 +#define GOYA_PKT_CTL_EB_MASK 0x20000000 + +#define GOYA_PKT_CTL_RB_SHIFT 30 +#define GOYA_PKT_CTL_RB_MASK 0x40000000 + +#define GOYA_PKT_CTL_MB_SHIFT 31 +#define GOYA_PKT_CTL_MB_MASK 0x80000000 + +/* All packets have, at least, an 8-byte header, which contains + * the packet type. The kernel driver uses the packet header for packet + * validation and to perform any necessary required preparation before + * sending them off to the hardware. + */ +struct goya_packet { + __le64 header; + /* The rest of the packet data follows. 
Use the corresponding
+ * packet_XXX struct to dereference the data, based on packet type.
+ */
+	u8 contents[0];
+};
+
+struct packet_nop {
+	__le32 reserved;
+	__le32 ctl;
+};
+
+struct packet_stop {
+	__le32 reserved;
+	__le32 ctl;
+};
+
+#define GOYA_PKT_WREG32_CTL_REG_OFFSET_SHIFT	0
+#define GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK	0x0000FFFF
+
+struct packet_wreg32 {
+	__le32 value;
+	__le32 ctl;
+};
+
+struct packet_wreg_bulk {
+	__le32 size64;
+	__le32 ctl;
+	__le64 values[0]; /* data starts here */
+};
+
+struct packet_msg_long {
+	__le32 value;
+	__le32 ctl;
+	__le64 addr;
+};
+
+struct packet_msg_short {
+	__le32 value;
+	__le32 ctl;
+};
+
+struct packet_msg_prot {
+	__le32 value;
+	__le32 ctl;
+	__le64 addr;
+};
+
+struct packet_fence {
+	__le32 cfg;
+	__le32 ctl;
+};
+
+#define GOYA_PKT_LIN_DMA_CTL_WO_SHIFT		0
+#define GOYA_PKT_LIN_DMA_CTL_WO_MASK		0x00000001
+
+#define GOYA_PKT_LIN_DMA_CTL_RDCOMP_SHIFT	1
+#define GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK	0x00000002
+
+#define GOYA_PKT_LIN_DMA_CTL_WRCOMP_SHIFT	2
+#define GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK	0x00000004
+
+#define GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT	6
+#define GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK	0x00000040
+
+#define GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT	20
+#define GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK	0x00700000
+
+struct packet_lin_dma {
+	__le32 tsize;
+	__le32 ctl;
+	__le64 src_addr;
+	__le64 dst_addr;
+};
+
+struct packet_cp_dma {
+	__le32 tsize;
+	__le32 ctl;
+	__le64 src_addr;
+};
+
+#endif /* GOYA_PACKETS_H */
diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
new file mode 100644
index 000000000..e56124265
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ * + */ + +#ifndef GOYA_REG_MAP_H_ +#define GOYA_REG_MAP_H_ + +/* + * PSOC scratch-pad registers + */ +#define mmCPU_PQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_0 +#define mmCPU_PQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_1 +#define mmCPU_EQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_2 +#define mmCPU_EQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_3 +#define mmCPU_EQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_4 +#define mmCPU_PQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_5 +#define mmCPU_EQ_CI mmPSOC_GLOBAL_CONF_SCRATCHPAD_6 +#define mmCPU_PQ_INIT_STATUS mmPSOC_GLOBAL_CONF_SCRATCHPAD_7 +#define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8 +#define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9 +#define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10 +#define mmFUSE_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 +#define mmCPU_CMD_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 +#define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 +#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25 +#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26 +#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27 +#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28 +#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29 +#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30 +#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31 + +#define mmHW_STATE mmPSOC_GLOBAL_CONF_APP_STATUS +#define mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS mmPSOC_GLOBAL_CONF_WARM_REBOOT +#define mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU mmPSOC_GLOBAL_CONF_UBOOT_MAGIC +#define mmUPD_PENDING_STS mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_3 + +#endif /* GOYA_REG_MAP_H_ */ diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h new file mode 100644 index 000000000..dedf20e8f --- /dev/null +++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2020 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef INCLUDE_MMU_GENERAL_H_ +#define INCLUDE_MMU_GENERAL_H_ + +#define PAGE_SHIFT_4KB 12 +#define PAGE_SHIFT_2MB 21 +#define PAGE_SIZE_2MB (_AC(1, UL) << PAGE_SHIFT_2MB) +#define PAGE_SIZE_4KB (_AC(1, UL) << PAGE_SHIFT_4KB) + +#define PAGE_PRESENT_MASK 0x0000000000001ull +#define SWAP_OUT_MASK 0x0000000000004ull +#define LAST_MASK 0x0000000000800ull +#define HOP0_MASK 0x3000000000000ull +#define HOP1_MASK 0x0FF8000000000ull +#define HOP2_MASK 0x0007FC0000000ull +#define HOP3_MASK 0x000003FE00000ull +#define HOP4_MASK 0x00000001FF000ull +#define FLAGS_MASK 0x0000000000FFFull + +#define HOP0_SHIFT 48 +#define HOP1_SHIFT 39 +#define HOP2_SHIFT 30 +#define HOP3_SHIFT 21 +#define HOP4_SHIFT 12 + +#define MMU_ARCH_5_HOPS 5 + +#define HOP_PHYS_ADDR_MASK (~FLAGS_MASK) + +#define HL_PTE_SIZE sizeof(u64) +#define HOP_TABLE_SIZE PAGE_SIZE_4KB +#define PTE_ENTRIES_IN_HOP (HOP_TABLE_SIZE / HL_PTE_SIZE) +#define HOP0_TABLES_TOTAL_SIZE (HOP_TABLE_SIZE * MAX_ASID) + +#define MMU_HOP0_PA43_12_SHIFT 12 +#define MMU_HOP0_PA49_44_SHIFT (12 + 32) + +#define MMU_CONFIG_TIMEOUT_USEC 2000 /* 2 ms */ + +#endif /* INCLUDE_MMU_GENERAL_H_ */ diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h new file mode 100644 index 000000000..8539dd041 --- /dev/null +++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ *
+ */
+
+#ifndef INCLUDE_MMU_V1_0_H_
+#define INCLUDE_MMU_V1_0_H_
+
+#define MMU_HOP0_PA43_12	0x490004
+#define MMU_HOP0_PA49_44	0x490008
+#define MMU_ASID_BUSY		0x490000
+
+#endif /* INCLUDE_MMU_V1_0_H_ */
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
new file mode 100644
index 000000000..b2a957058
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef INCLUDE_MMU_V1_1_H_
+#define INCLUDE_MMU_V1_1_H_
+
+#define MMU_ASID		0xC12004
+#define MMU_HOP0_PA43_12	0xC12008
+#define MMU_HOP0_PA49_44	0xC1200C
+#define MMU_BUSY		0xC12000
+
+#endif /* INCLUDE_MMU_V1_1_H_ */
diff --git a/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h b/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h
new file mode 100644
index 000000000..d232081d4
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef INCLUDE_PCI_GENERAL_H_
+#define INCLUDE_PCI_GENERAL_H_
+
+/* PCI CONFIGURATION SPACE */
+#define mmPCI_CONFIG_ELBI_ADDR		0xFF0
+#define mmPCI_CONFIG_ELBI_DATA		0xFF4
+#define mmPCI_CONFIG_ELBI_CTRL		0xFF8
+#define PCI_CONFIG_ELBI_CTRL_WRITE	(1 << 31)
+
+#define mmPCI_CONFIG_ELBI_STS		0xFFC
+#define PCI_CONFIG_ELBI_STS_ERR		(1 << 30)
+#define PCI_CONFIG_ELBI_STS_DONE	(1 << 31)
+#define PCI_CONFIG_ELBI_STS_MASK	(PCI_CONFIG_ELBI_STS_ERR | \
+					 PCI_CONFIG_ELBI_STS_DONE)
+
+#endif /* INCLUDE_PCI_GENERAL_H_ */
diff --git a/drivers/misc/hisi_hikey_usb.c b/drivers/misc/hisi_hikey_usb.c
new file mode 100644
index 000000000..cc93569e6
--- /dev/null
+++ b/drivers/misc/hisi_hikey_usb.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for USB functionality of HiKey series boards
+ * based on Hisilicon Kirin SoC.
+ *
+ * Copyright (C) 2017-2018 Hisilicon Electronics Co., Ltd.
+ * http://www.huawei.com + * + * Authors: Yu Chen <chenyu56@huawei.com> + */ + +#include <linux/gpio/consumer.h> +#include <linux/kernel.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/of_gpio.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> +#include <linux/usb/role.h> + +#define DEVICE_DRIVER_NAME "hisi_hikey_usb" + +#define HUB_VBUS_POWER_ON 1 +#define HUB_VBUS_POWER_OFF 0 +#define USB_SWITCH_TO_HUB 1 +#define USB_SWITCH_TO_TYPEC 0 +#define TYPEC_VBUS_POWER_ON 1 +#define TYPEC_VBUS_POWER_OFF 0 + +struct hisi_hikey_usb { + struct device *dev; + struct gpio_desc *otg_switch; + struct gpio_desc *typec_vbus; + struct gpio_desc *hub_vbus; + struct gpio_desc *reset; + + struct regulator *regulator; + + struct usb_role_switch *hub_role_sw; + + struct usb_role_switch *dev_role_sw; + enum usb_role role; + + struct mutex lock; + struct work_struct work; + + struct notifier_block nb; +}; + +static void hub_power_ctrl(struct hisi_hikey_usb *hisi_hikey_usb, int value) +{ + int ret, status; + + if (hisi_hikey_usb->hub_vbus) + gpiod_set_value_cansleep(hisi_hikey_usb->hub_vbus, value); + + if (!hisi_hikey_usb->regulator) + return; + + status = regulator_is_enabled(hisi_hikey_usb->regulator); + if (status == !!value) + return; + + if (value) + ret = regulator_enable(hisi_hikey_usb->regulator); + else + ret = regulator_disable(hisi_hikey_usb->regulator); + + if (ret) + dev_err(hisi_hikey_usb->dev, + "Can't switch regulator state to %s\n", + value ? "enabled" : "disabled"); +} + +static void usb_switch_ctrl(struct hisi_hikey_usb *hisi_hikey_usb, + int switch_to) +{ + if (!hisi_hikey_usb->otg_switch) + return; + + gpiod_set_value_cansleep(hisi_hikey_usb->otg_switch, switch_to); +} + +static void usb_typec_power_ctrl(struct hisi_hikey_usb *hisi_hikey_usb, + int value) +{ + if (!hisi_hikey_usb->typec_vbus) + return; + + gpiod_set_value_cansleep(hisi_hikey_usb->typec_vbus, value); +} + +static void relay_set_role_switch(struct work_struct *work) +{ + struct hisi_hikey_usb *hisi_hikey_usb = container_of(work, + struct hisi_hikey_usb, + work); + struct usb_role_switch *sw; + enum usb_role role; + + if (!hisi_hikey_usb || !hisi_hikey_usb->dev_role_sw) + return; + + mutex_lock(&hisi_hikey_usb->lock); + switch (hisi_hikey_usb->role) { + case USB_ROLE_NONE: + usb_typec_power_ctrl(hisi_hikey_usb, TYPEC_VBUS_POWER_OFF); + usb_switch_ctrl(hisi_hikey_usb, USB_SWITCH_TO_HUB); + hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_ON); + break; + case USB_ROLE_HOST: + hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_OFF); + usb_switch_ctrl(hisi_hikey_usb, USB_SWITCH_TO_TYPEC); + usb_typec_power_ctrl(hisi_hikey_usb, TYPEC_VBUS_POWER_ON); + break; + case USB_ROLE_DEVICE: + hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_OFF); + usb_typec_power_ctrl(hisi_hikey_usb, TYPEC_VBUS_POWER_OFF); + usb_switch_ctrl(hisi_hikey_usb, USB_SWITCH_TO_TYPEC); + break; + default: + break; + } + sw = hisi_hikey_usb->dev_role_sw; + role = hisi_hikey_usb->role; + mutex_unlock(&hisi_hikey_usb->lock); + + usb_role_switch_set_role(sw, role); +} + +static int hub_usb_role_switch_set(struct usb_role_switch *sw, enum usb_role role) +{ + struct hisi_hikey_usb *hisi_hikey_usb = usb_role_switch_get_drvdata(sw); + + if (!hisi_hikey_usb || !hisi_hikey_usb->dev_role_sw) + return -EINVAL; + + mutex_lock(&hisi_hikey_usb->lock); + hisi_hikey_usb->role = role; + mutex_unlock(&hisi_hikey_usb->lock); + + 
schedule_work(&hisi_hikey_usb->work); + + return 0; +} + +static int hisi_hikey_usb_parse_kirin970(struct platform_device *pdev, + struct hisi_hikey_usb *hisi_hikey_usb) +{ + struct regulator *regulator; + + regulator = devm_regulator_get(&pdev->dev, "hub-vdd"); + if (IS_ERR(regulator)) { + if (PTR_ERR(regulator) == -EPROBE_DEFER) { + dev_info(&pdev->dev, + "waiting for hub-vdd-supply to be probed\n"); + return PTR_ERR(regulator); + } + dev_err(&pdev->dev, + "get hub-vdd-supply failed with error %ld\n", + PTR_ERR(regulator)); + return PTR_ERR(regulator); + } + hisi_hikey_usb->regulator = regulator; + + hisi_hikey_usb->reset = devm_gpiod_get(&pdev->dev, "hub_reset_en_gpio", + GPIOD_OUT_HIGH); + if (IS_ERR(hisi_hikey_usb->reset)) + return PTR_ERR(hisi_hikey_usb->reset); + + return 0; +} + +static int hisi_hikey_usb_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hisi_hikey_usb *hisi_hikey_usb; + struct usb_role_switch_desc hub_role_switch = {NULL}; + int ret; + + hisi_hikey_usb = devm_kzalloc(dev, sizeof(*hisi_hikey_usb), GFP_KERNEL); + if (!hisi_hikey_usb) + return -ENOMEM; + + hisi_hikey_usb->dev = &pdev->dev; + + hisi_hikey_usb->otg_switch = devm_gpiod_get(dev, "otg-switch", + GPIOD_OUT_HIGH); + if (IS_ERR(hisi_hikey_usb->otg_switch)) + return PTR_ERR(hisi_hikey_usb->otg_switch); + + hisi_hikey_usb->typec_vbus = devm_gpiod_get(dev, "typec-vbus", + GPIOD_OUT_LOW); + if (IS_ERR(hisi_hikey_usb->typec_vbus)) + return PTR_ERR(hisi_hikey_usb->typec_vbus); + + /* Parse Kirin 970-specific OF data */ + if (of_device_is_compatible(pdev->dev.of_node, + "hisilicon,kirin970_hikey_usbhub")) { + ret = hisi_hikey_usb_parse_kirin970(pdev, hisi_hikey_usb); + if (ret) + return ret; + } else { + /* hub-vdd33-en is optional */ + hisi_hikey_usb->hub_vbus = devm_gpiod_get_optional(dev, "hub-vdd33-en", + GPIOD_OUT_HIGH); + if (IS_ERR(hisi_hikey_usb->hub_vbus)) + return PTR_ERR(hisi_hikey_usb->hub_vbus); + } + + hisi_hikey_usb->dev_role_sw = usb_role_switch_get(dev); + if (!hisi_hikey_usb->dev_role_sw) + return -EPROBE_DEFER; + if (IS_ERR(hisi_hikey_usb->dev_role_sw)) + return PTR_ERR(hisi_hikey_usb->dev_role_sw); + + INIT_WORK(&hisi_hikey_usb->work, relay_set_role_switch); + mutex_init(&hisi_hikey_usb->lock); + + hub_role_switch.fwnode = dev_fwnode(dev); + hub_role_switch.set = hub_usb_role_switch_set; + hub_role_switch.driver_data = hisi_hikey_usb; + + hisi_hikey_usb->hub_role_sw = usb_role_switch_register(dev, + &hub_role_switch); + + if (IS_ERR(hisi_hikey_usb->hub_role_sw)) { + usb_role_switch_put(hisi_hikey_usb->dev_role_sw); + return PTR_ERR(hisi_hikey_usb->hub_role_sw); + } + + platform_set_drvdata(pdev, hisi_hikey_usb); + + return 0; +} + +static int hisi_hikey_usb_remove(struct platform_device *pdev) +{ + struct hisi_hikey_usb *hisi_hikey_usb = platform_get_drvdata(pdev); + + if (hisi_hikey_usb->hub_role_sw) + usb_role_switch_unregister(hisi_hikey_usb->hub_role_sw); + + if (hisi_hikey_usb->dev_role_sw) + usb_role_switch_put(hisi_hikey_usb->dev_role_sw); + + return 0; +} + +static const struct of_device_id id_table_hisi_hikey_usb[] = { + { .compatible = "hisilicon,gpio_hubv1" }, + { .compatible = "hisilicon,kirin970_hikey_usbhub" }, + {} +}; +MODULE_DEVICE_TABLE(of, id_table_hisi_hikey_usb); + +static struct platform_driver hisi_hikey_usb_driver = { + .probe = hisi_hikey_usb_probe, + .remove = hisi_hikey_usb_remove, + .driver = { + .name = DEVICE_DRIVER_NAME, + .of_match_table = id_table_hisi_hikey_usb, + }, +}; + +module_platform_driver(hisi_hikey_usb_driver); + 
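+/*
+ * Illustrative devicetree fragment for the gpio_hubv1 case. The property
+ * names match the lookups in hisi_hikey_usb_probe() above; the node name
+ * and GPIO specifiers are hypothetical:
+ *
+ *	usbhub: usbhub {
+ *		compatible = "hisilicon,gpio_hubv1";
+ *		otg-switch-gpios = <&gpio25 6 0>;
+ *		typec-vbus-gpios = <&gpio25 2 0>;
+ *		hub-vdd33-en-gpios = <&gpio5 6 0>;
+ *	};
+ */
+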
+MODULE_AUTHOR("Yu Chen <chenyu56@huawei.com>"); +MODULE_DESCRIPTION("Driver Support for USB functionality of Hikey"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c new file mode 100644 index 000000000..572a2ff10 --- /dev/null +++ b/drivers/misc/hmc6352.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * hmc6352.c - Honeywell Compass Driver + * + * Copyright (C) 2009 Intel Corp + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/sysfs.h> +#include <linux/nospec.h> + +static DEFINE_MUTEX(compass_mutex); + +static int compass_command(struct i2c_client *c, u8 cmd) +{ + int ret = i2c_master_send(c, &cmd, 1); + if (ret < 0) + dev_warn(&c->dev, "command '%c' failed.\n", cmd); + return ret; +} + +static int compass_store(struct device *dev, const char *buf, size_t count, + const char *map) +{ + struct i2c_client *c = to_i2c_client(dev); + int ret; + unsigned long val; + + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + if (val >= strlen(map)) + return -EINVAL; + val = array_index_nospec(val, strlen(map)); + mutex_lock(&compass_mutex); + ret = compass_command(c, map[val]); + mutex_unlock(&compass_mutex); + if (ret < 0) + return ret; + return count; +} + +static ssize_t compass_calibration_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return compass_store(dev, buf, count, "EC"); +} + +static ssize_t compass_power_mode_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return compass_store(dev, buf, count, "SW"); +} + +static ssize_t compass_heading_data_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned char i2c_data[2]; + int ret; + + mutex_lock(&compass_mutex); + ret = compass_command(client, 'A'); + if (ret != 1) { + mutex_unlock(&compass_mutex); + return ret; + } + msleep(10); /* sending 'A' cmd we need to wait for 7-10 millisecs */ + ret = i2c_master_recv(client, i2c_data, 2); + mutex_unlock(&compass_mutex); + if (ret < 0) { + dev_warn(dev, "i2c read data cmd failed\n"); + return ret; + } + ret = (i2c_data[0] << 8) | i2c_data[1]; + return sprintf(buf, "%d.%d\n", ret/10, ret%10); +} + + +static DEVICE_ATTR(heading0_input, S_IRUGO, compass_heading_data_show, NULL); +static DEVICE_ATTR(calibration, S_IWUSR, NULL, compass_calibration_store); +static DEVICE_ATTR(power_state, S_IWUSR, NULL, compass_power_mode_store); + +static struct attribute *mid_att_compass[] = { + &dev_attr_heading0_input.attr, + &dev_attr_calibration.attr, + &dev_attr_power_state.attr, + NULL +}; + +static const struct attribute_group m_compass_gr = { + .name = "hmc6352", + .attrs = mid_att_compass +}; + +static int hmc6352_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int res; + + res = sysfs_create_group(&client->dev.kobj, &m_compass_gr); + if (res) { + dev_err(&client->dev, "device_create_file failed\n"); + return res; + } + dev_info(&client->dev, "%s HMC6352 compass chip found\n", + client->name); + return 0; +} + +static int hmc6352_remove(struct i2c_client *client) +{ + sysfs_remove_group(&client->dev.kobj, &m_compass_gr); + return 0; +} + +static const struct i2c_device_id hmc6352_id[] = { + { 
"hmc6352", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, hmc6352_id); + +static struct i2c_driver hmc6352_driver = { + .driver = { + .name = "hmc6352", + }, + .probe = hmc6352_probe, + .remove = hmc6352_remove, + .id_table = hmc6352_id, +}; + +module_i2c_driver(hmc6352_driver); + +MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com"); +MODULE_DESCRIPTION("hmc6352 Compass Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c new file mode 100644 index 000000000..fea3ae9d8 --- /dev/null +++ b/drivers/misc/hpilo.c @@ -0,0 +1,925 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for the HP iLO management processor. + * + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * David Altobelli <david.altobelli@hpe.com> + */ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/device.h> +#include <linux/file.h> +#include <linux/cdev.h> +#include <linux/sched.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/wait.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include "hpilo.h" + +static struct class *ilo_class; +static unsigned int ilo_major; +static unsigned int max_ccb = 16; +static char ilo_hwdev[MAX_ILO_DEV]; +static const struct pci_device_id ilo_blacklist[] = { + /* auxiliary iLO */ + {PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP, 0x1979)}, + /* CL */ + {PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP_3PAR, 0x0289)}, + {} +}; + +static inline int get_entry_id(int entry) +{ + return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR; +} + +static inline int get_entry_len(int entry) +{ + return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3; +} + +static inline int mk_entry(int id, int len) +{ + int qlen = len & 7 ? (len >> 3) + 1 : len >> 3; + return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS; +} + +static inline int desc_mem_sz(int nr_entry) +{ + return nr_entry << L2_QENTRY_SZ; +} + +/* + * FIFO queues, shared with hardware. + * + * If a queue has empty slots, an entry is added to the queue tail, + * and that entry is marked as occupied. + * Entries can be dequeued from the head of the list, when the device + * has marked the entry as consumed. + * + * Returns true on successful queue/dequeue, false on failure. 
+ */ +static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry) +{ + struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar); + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hw->fifo_lock, flags); + if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask] + & ENTRY_MASK_O)) { + fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |= + (entry & ENTRY_MASK_NOSTATE) | fifo_q->merge; + fifo_q->tail += 1; + ret = 1; + } + spin_unlock_irqrestore(&hw->fifo_lock, flags); + + return ret; +} + +static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry) +{ + struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar); + unsigned long flags; + int ret = 0; + u64 c; + + spin_lock_irqsave(&hw->fifo_lock, flags); + c = fifo_q->fifobar[fifo_q->head & fifo_q->imask]; + if (c & ENTRY_MASK_C) { + if (entry) + *entry = c & ENTRY_MASK_NOSTATE; + + fifo_q->fifobar[fifo_q->head & fifo_q->imask] = + (c | ENTRY_MASK) + 1; + fifo_q->head += 1; + ret = 1; + } + spin_unlock_irqrestore(&hw->fifo_lock, flags); + + return ret; +} + +static int fifo_check_recv(struct ilo_hwinfo *hw, char *fifobar) +{ + struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar); + unsigned long flags; + int ret = 0; + u64 c; + + spin_lock_irqsave(&hw->fifo_lock, flags); + c = fifo_q->fifobar[fifo_q->head & fifo_q->imask]; + if (c & ENTRY_MASK_C) + ret = 1; + spin_unlock_irqrestore(&hw->fifo_lock, flags); + + return ret; +} + +static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb, + int dir, int id, int len) +{ + char *fifobar; + int entry; + + if (dir == SENDQ) + fifobar = ccb->ccb_u1.send_fifobar; + else + fifobar = ccb->ccb_u3.recv_fifobar; + + entry = mk_entry(id, len); + return fifo_enqueue(hw, fifobar, entry); +} + +static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb, + int dir, int *id, int *len, void **pkt) +{ + char *fifobar, *desc; + int entry = 0, pkt_id = 0; + int ret; + + if (dir == SENDQ) { + fifobar = ccb->ccb_u1.send_fifobar; + desc = ccb->ccb_u2.send_desc; + } else { + fifobar = ccb->ccb_u3.recv_fifobar; + desc = ccb->ccb_u4.recv_desc; + } + + ret = fifo_dequeue(hw, fifobar, &entry); + if (ret) { + pkt_id = get_entry_id(entry); + if (id) + *id = pkt_id; + if (len) + *len = get_entry_len(entry); + if (pkt) + *pkt = (void *)(desc + desc_mem_sz(pkt_id)); + } + + return ret; +} + +static int ilo_pkt_recv(struct ilo_hwinfo *hw, struct ccb *ccb) +{ + char *fifobar = ccb->ccb_u3.recv_fifobar; + + return fifo_check_recv(hw, fifobar); +} + +static inline void doorbell_set(struct ccb *ccb) +{ + iowrite8(1, ccb->ccb_u5.db_base); +} + +static inline void doorbell_clr(struct ccb *ccb) +{ + iowrite8(2, ccb->ccb_u5.db_base); +} + +static inline int ctrl_set(int l2sz, int idxmask, int desclim) +{ + int active = 0, go = 1; + return l2sz << CTRL_BITPOS_L2SZ | + idxmask << CTRL_BITPOS_FIFOINDEXMASK | + desclim << CTRL_BITPOS_DESCLIMIT | + active << CTRL_BITPOS_A | + go << CTRL_BITPOS_G; +} + +static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz) +{ + /* for simplicity, use the same parameters for send and recv ctrls */ + ccb->send_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1); + ccb->recv_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1); +} + +static inline int fifo_sz(int nr_entry) +{ + /* size of a fifo is determined by the number of entries it contains */ + return nr_entry * sizeof(u64) + FIFOHANDLESIZE; +} + +static void fifo_setup(void *base_addr, int nr_entry) +{ + struct fifo *fifo_q = base_addr; + int i; + + /* set up an empty fifo */ + fifo_q->head = 0; + fifo_q->tail = 0; + 
fifo_q->reset = 0; + fifo_q->nrents = nr_entry; + fifo_q->imask = nr_entry - 1; + fifo_q->merge = ENTRY_MASK_O; + + for (i = 0; i < nr_entry; i++) + fifo_q->fifobar[i] = 0; +} + +static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data) +{ + struct ccb *driver_ccb = &data->driver_ccb; + struct ccb __iomem *device_ccb = data->mapped_ccb; + int retries; + + /* complicated dance to tell the hw we are stopping */ + doorbell_clr(driver_ccb); + iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G), + &device_ccb->send_ctrl); + iowrite32(ioread32(&device_ccb->recv_ctrl) & ~(1 << CTRL_BITPOS_G), + &device_ccb->recv_ctrl); + + /* give iLO some time to process stop request */ + for (retries = MAX_WAIT; retries > 0; retries--) { + doorbell_set(driver_ccb); + udelay(WAIT_TIME); + if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A)) + && + !(ioread32(&device_ccb->recv_ctrl) & (1 << CTRL_BITPOS_A))) + break; + } + if (retries == 0) + dev_err(&pdev->dev, "Closing, but controller still active\n"); + + /* clear the hw ccb */ + memset_io(device_ccb, 0, sizeof(struct ccb)); + + /* free resources used to back send/recv queues */ + dma_free_coherent(&pdev->dev, data->dma_size, data->dma_va, + data->dma_pa); +} + +static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot) +{ + char *dma_va; + dma_addr_t dma_pa; + struct ccb *driver_ccb, *ilo_ccb; + + driver_ccb = &data->driver_ccb; + ilo_ccb = &data->ilo_ccb; + + data->dma_size = 2 * fifo_sz(NR_QENTRY) + + 2 * desc_mem_sz(NR_QENTRY) + + ILO_START_ALIGN + ILO_CACHE_SZ; + + data->dma_va = dma_alloc_coherent(&hw->ilo_dev->dev, data->dma_size, + &data->dma_pa, GFP_ATOMIC); + if (!data->dma_va) + return -ENOMEM; + + dma_va = (char *)data->dma_va; + dma_pa = data->dma_pa; + + dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN); + dma_pa = roundup(dma_pa, ILO_START_ALIGN); + + /* + * Create two ccb's, one with virt addrs, one with phys addrs. + * Copy the phys addr ccb to device shared mem. 
+ */ + ctrl_setup(driver_ccb, NR_QENTRY, L2_QENTRY_SZ); + ctrl_setup(ilo_ccb, NR_QENTRY, L2_QENTRY_SZ); + + fifo_setup(dma_va, NR_QENTRY); + driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE; + ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE; + dma_va += fifo_sz(NR_QENTRY); + dma_pa += fifo_sz(NR_QENTRY); + + dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ); + dma_pa = roundup(dma_pa, ILO_CACHE_SZ); + + fifo_setup(dma_va, NR_QENTRY); + driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE; + ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE; + dma_va += fifo_sz(NR_QENTRY); + dma_pa += fifo_sz(NR_QENTRY); + + driver_ccb->ccb_u2.send_desc = dma_va; + ilo_ccb->ccb_u2.send_desc_pa = dma_pa; + dma_pa += desc_mem_sz(NR_QENTRY); + dma_va += desc_mem_sz(NR_QENTRY); + + driver_ccb->ccb_u4.recv_desc = dma_va; + ilo_ccb->ccb_u4.recv_desc_pa = dma_pa; + + driver_ccb->channel = slot; + ilo_ccb->channel = slot; + + driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE); + ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */ + + return 0; +} + +static void ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot) +{ + int pkt_id, pkt_sz; + struct ccb *driver_ccb = &data->driver_ccb; + + /* copy the ccb with physical addrs to device memory */ + data->mapped_ccb = (struct ccb __iomem *) + (hw->ram_vaddr + (slot * ILOHW_CCB_SZ)); + memcpy_toio(data->mapped_ccb, &data->ilo_ccb, sizeof(struct ccb)); + + /* put packets on the send and receive queues */ + pkt_sz = 0; + for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) { + ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, pkt_sz); + doorbell_set(driver_ccb); + } + + pkt_sz = desc_mem_sz(1); + for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) + ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz); + + /* the ccb is ready to use */ + doorbell_clr(driver_ccb); +} + +static int ilo_ccb_verify(struct ilo_hwinfo *hw, struct ccb_data *data) +{ + int pkt_id, i; + struct ccb *driver_ccb = &data->driver_ccb; + + /* make sure iLO is really handling requests */ + for (i = MAX_WAIT; i > 0; i--) { + if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL)) + break; + udelay(WAIT_TIME); + } + + if (i == 0) { + dev_err(&hw->ilo_dev->dev, "Open could not dequeue a packet\n"); + return -EBUSY; + } + + ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0); + doorbell_set(driver_ccb); + return 0; +} + +static inline int is_channel_reset(struct ccb *ccb) +{ + /* check for this particular channel needing a reset */ + return FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset; +} + +static inline void set_channel_reset(struct ccb *ccb) +{ + /* set a flag indicating this channel needs a reset */ + FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1; +} + +static inline int get_device_outbound(struct ilo_hwinfo *hw) +{ + return ioread32(&hw->mmio_vaddr[DB_OUT]); +} + +static inline int is_db_reset(int db_out) +{ + return db_out & (1 << DB_RESET); +} + +static inline int is_device_reset(struct ilo_hwinfo *hw) +{ + /* check for global reset condition */ + return is_db_reset(get_device_outbound(hw)); +} + +static inline void clear_pending_db(struct ilo_hwinfo *hw, int clr) +{ + iowrite32(clr, &hw->mmio_vaddr[DB_OUT]); +} + +static inline void clear_device(struct ilo_hwinfo *hw) +{ + /* clear the device (reset bits, pending channel entries) */ + clear_pending_db(hw, -1); +} + +static inline void ilo_enable_interrupts(struct ilo_hwinfo *hw) +{ + iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) | 1, &hw->mmio_vaddr[DB_IRQ]); +} 
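+
+/*
+ * Bit 0 of the DB_IRQ register gates doorbell interrupt delivery;
+ * ilo_enable_interrupts() and ilo_disable_interrupts() toggle it, and
+ * ilo_isr() reads DB_OUT to learn which channels have pending work.
+ */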
+ +static inline void ilo_disable_interrupts(struct ilo_hwinfo *hw) +{ + iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) & ~1, + &hw->mmio_vaddr[DB_IRQ]); +} + +static void ilo_set_reset(struct ilo_hwinfo *hw) +{ + int slot; + + /* + * Mapped memory is zeroed on ilo reset, so set a per ccb flag + * to indicate that this ccb needs to be closed and reopened. + */ + for (slot = 0; slot < max_ccb; slot++) { + if (!hw->ccb_alloc[slot]) + continue; + set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb); + } +} + +static ssize_t ilo_read(struct file *fp, char __user *buf, + size_t len, loff_t *off) +{ + int err, found, cnt, pkt_id, pkt_len; + struct ccb_data *data = fp->private_data; + struct ccb *driver_ccb = &data->driver_ccb; + struct ilo_hwinfo *hw = data->ilo_hw; + void *pkt; + + if (is_channel_reset(driver_ccb)) { + /* + * If the device has been reset, applications + * need to close and reopen all ccbs. + */ + return -ENODEV; + } + + /* + * This function is to be called when data is expected + * in the channel, and will return an error if no packet is found + * during the loop below. The sleep/retry logic is to allow + * applications to call read() immediately post write(), + * and give iLO some time to process the sent packet. + */ + cnt = 20; + do { + /* look for a received packet */ + found = ilo_pkt_dequeue(hw, driver_ccb, RECVQ, &pkt_id, + &pkt_len, &pkt); + if (found) + break; + cnt--; + msleep(100); + } while (!found && cnt); + + if (!found) + return -EAGAIN; + + /* only copy the length of the received packet */ + if (pkt_len < len) + len = pkt_len; + + err = copy_to_user(buf, pkt, len); + + /* return the received packet to the queue */ + ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, desc_mem_sz(1)); + + return err ? -EFAULT : len; +} + +static ssize_t ilo_write(struct file *fp, const char __user *buf, + size_t len, loff_t *off) +{ + int err, pkt_id, pkt_len; + struct ccb_data *data = fp->private_data; + struct ccb *driver_ccb = &data->driver_ccb; + struct ilo_hwinfo *hw = data->ilo_hw; + void *pkt; + + if (is_channel_reset(driver_ccb)) + return -ENODEV; + + /* get a packet to send the user command */ + if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt)) + return -EBUSY; + + /* limit the length to the length of the packet */ + if (pkt_len < len) + len = pkt_len; + + /* on failure, set the len to 0 to return empty packet to the device */ + err = copy_from_user(pkt, buf, len); + if (err) + len = 0; + + /* send the packet */ + ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, len); + doorbell_set(driver_ccb); + + return err ? 
-EFAULT : len; +} + +static __poll_t ilo_poll(struct file *fp, poll_table *wait) +{ + struct ccb_data *data = fp->private_data; + struct ccb *driver_ccb = &data->driver_ccb; + + poll_wait(fp, &data->ccb_waitq, wait); + + if (is_channel_reset(driver_ccb)) + return EPOLLERR; + else if (ilo_pkt_recv(data->ilo_hw, driver_ccb)) + return EPOLLIN | EPOLLRDNORM; + + return 0; +} + +static int ilo_close(struct inode *ip, struct file *fp) +{ + int slot; + struct ccb_data *data; + struct ilo_hwinfo *hw; + unsigned long flags; + + slot = iminor(ip) % max_ccb; + hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev); + + spin_lock(&hw->open_lock); + + if (hw->ccb_alloc[slot]->ccb_cnt == 1) { + + data = fp->private_data; + + spin_lock_irqsave(&hw->alloc_lock, flags); + hw->ccb_alloc[slot] = NULL; + spin_unlock_irqrestore(&hw->alloc_lock, flags); + + ilo_ccb_close(hw->ilo_dev, data); + + kfree(data); + } else + hw->ccb_alloc[slot]->ccb_cnt--; + + spin_unlock(&hw->open_lock); + + return 0; +} + +static int ilo_open(struct inode *ip, struct file *fp) +{ + int slot, error; + struct ccb_data *data; + struct ilo_hwinfo *hw; + unsigned long flags; + + slot = iminor(ip) % max_ccb; + hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev); + + /* new ccb allocation */ + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + spin_lock(&hw->open_lock); + + /* each fd private_data holds sw/hw view of ccb */ + if (hw->ccb_alloc[slot] == NULL) { + /* create a channel control block for this minor */ + error = ilo_ccb_setup(hw, data, slot); + if (error) { + kfree(data); + goto out; + } + + data->ccb_cnt = 1; + data->ccb_excl = fp->f_flags & O_EXCL; + data->ilo_hw = hw; + init_waitqueue_head(&data->ccb_waitq); + + /* write the ccb to hw */ + spin_lock_irqsave(&hw->alloc_lock, flags); + ilo_ccb_open(hw, data, slot); + hw->ccb_alloc[slot] = data; + spin_unlock_irqrestore(&hw->alloc_lock, flags); + + /* make sure the channel is functional */ + error = ilo_ccb_verify(hw, data); + if (error) { + + spin_lock_irqsave(&hw->alloc_lock, flags); + hw->ccb_alloc[slot] = NULL; + spin_unlock_irqrestore(&hw->alloc_lock, flags); + + ilo_ccb_close(hw->ilo_dev, data); + + kfree(data); + goto out; + } + + } else { + kfree(data); + if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) { + /* + * The channel exists, and either this open + * or a previous open of this channel wants + * exclusive access. 
+ */ + error = -EBUSY; + } else { + hw->ccb_alloc[slot]->ccb_cnt++; + error = 0; + } + } +out: + spin_unlock(&hw->open_lock); + + if (!error) + fp->private_data = hw->ccb_alloc[slot]; + + return error; +} + +static const struct file_operations ilo_fops = { + .owner = THIS_MODULE, + .read = ilo_read, + .write = ilo_write, + .poll = ilo_poll, + .open = ilo_open, + .release = ilo_close, + .llseek = noop_llseek, +}; + +static irqreturn_t ilo_isr(int irq, void *data) +{ + struct ilo_hwinfo *hw = data; + int pending, i; + + spin_lock(&hw->alloc_lock); + + /* check for ccbs which have data */ + pending = get_device_outbound(hw); + if (!pending) { + spin_unlock(&hw->alloc_lock); + return IRQ_NONE; + } + + if (is_db_reset(pending)) { + /* wake up all ccbs if the device was reset */ + pending = -1; + ilo_set_reset(hw); + } + + for (i = 0; i < max_ccb; i++) { + if (!hw->ccb_alloc[i]) + continue; + if (pending & (1 << i)) + wake_up_interruptible(&hw->ccb_alloc[i]->ccb_waitq); + } + + /* clear the device of the channels that have been handled */ + clear_pending_db(hw, pending); + + spin_unlock(&hw->alloc_lock); + + return IRQ_HANDLED; +} + +static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw) +{ + pci_iounmap(pdev, hw->db_vaddr); + pci_iounmap(pdev, hw->ram_vaddr); + pci_iounmap(pdev, hw->mmio_vaddr); +} + +static int ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw) +{ + int bar; + unsigned long off; + + /* map the memory mapped i/o registers */ + hw->mmio_vaddr = pci_iomap(pdev, 1, 0); + if (hw->mmio_vaddr == NULL) { + dev_err(&pdev->dev, "Error mapping mmio\n"); + goto out; + } + + /* map the adapter shared memory region */ + if (pdev->subsystem_device == 0x00E4) { + bar = 5; + /* Last 8k is reserved for CCBs */ + off = pci_resource_len(pdev, bar) - 0x2000; + } else { + bar = 2; + off = 0; + } + hw->ram_vaddr = pci_iomap_range(pdev, bar, off, max_ccb * ILOHW_CCB_SZ); + if (hw->ram_vaddr == NULL) { + dev_err(&pdev->dev, "Error mapping shared mem\n"); + goto mmio_free; + } + + /* map the doorbell aperture */ + hw->db_vaddr = pci_iomap(pdev, 3, max_ccb * ONE_DB_SIZE); + if (hw->db_vaddr == NULL) { + dev_err(&pdev->dev, "Error mapping doorbell\n"); + goto ram_free; + } + + return 0; +ram_free: + pci_iounmap(pdev, hw->ram_vaddr); +mmio_free: + pci_iounmap(pdev, hw->mmio_vaddr); +out: + return -ENOMEM; +} + +static void ilo_remove(struct pci_dev *pdev) +{ + int i, minor; + struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev); + + if (!ilo_hw) + return; + + clear_device(ilo_hw); + + minor = MINOR(ilo_hw->cdev.dev); + for (i = minor; i < minor + max_ccb; i++) + device_destroy(ilo_class, MKDEV(ilo_major, i)); + + cdev_del(&ilo_hw->cdev); + ilo_disable_interrupts(ilo_hw); + free_irq(pdev->irq, ilo_hw); + ilo_unmap_device(pdev, ilo_hw); + pci_release_regions(pdev); + /* + * pci_disable_device(pdev) used to be here. But this PCI device has + * two functions with interrupt lines connected to a single pin. The + * other one is a USB host controller. So when we disable the PIN here + * e.g. by rmmod hpilo, the controller stops working. It is because + * the interrupt link is disabled in ACPI since it is not refcounted + * yet. See acpi_pci_link_free_irq called from acpi_pci_irq_disable. 
+ */ + kfree(ilo_hw); + ilo_hwdev[(minor / max_ccb)] = 0; +} + +static int ilo_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int devnum, minor, start, error = 0; + struct ilo_hwinfo *ilo_hw; + + if (pci_match_id(ilo_blacklist, pdev)) { + dev_dbg(&pdev->dev, "Not supported on this device\n"); + return -ENODEV; + } + + if (max_ccb > MAX_CCB) + max_ccb = MAX_CCB; + else if (max_ccb < MIN_CCB) + max_ccb = MIN_CCB; + + /* find a free range for device files */ + for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) { + if (ilo_hwdev[devnum] == 0) { + ilo_hwdev[devnum] = 1; + break; + } + } + + if (devnum == MAX_ILO_DEV) { + dev_err(&pdev->dev, "Error finding free device\n"); + return -ENODEV; + } + + /* track global allocations for this device */ + error = -ENOMEM; + ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL); + if (!ilo_hw) + goto out; + + ilo_hw->ilo_dev = pdev; + spin_lock_init(&ilo_hw->alloc_lock); + spin_lock_init(&ilo_hw->fifo_lock); + spin_lock_init(&ilo_hw->open_lock); + + error = pci_enable_device(pdev); + if (error) + goto free; + + pci_set_master(pdev); + + error = pci_request_regions(pdev, ILO_NAME); + if (error) + goto disable; + + error = ilo_map_device(pdev, ilo_hw); + if (error) + goto free_regions; + + pci_set_drvdata(pdev, ilo_hw); + clear_device(ilo_hw); + + error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw); + if (error) + goto unmap; + + ilo_enable_interrupts(ilo_hw); + + cdev_init(&ilo_hw->cdev, &ilo_fops); + ilo_hw->cdev.owner = THIS_MODULE; + start = devnum * max_ccb; + error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), max_ccb); + if (error) { + dev_err(&pdev->dev, "Could not add cdev\n"); + goto remove_isr; + } + + for (minor = 0 ; minor < max_ccb; minor++) { + struct device *dev; + dev = device_create(ilo_class, &pdev->dev, + MKDEV(ilo_major, minor), NULL, + "hpilo!d%dccb%d", devnum, minor); + if (IS_ERR(dev)) + dev_err(&pdev->dev, "Could not create files\n"); + } + + return 0; +remove_isr: + ilo_disable_interrupts(ilo_hw); + free_irq(pdev->irq, ilo_hw); +unmap: + ilo_unmap_device(pdev, ilo_hw); +free_regions: + pci_release_regions(pdev); +disable: +/* pci_disable_device(pdev); see comment in ilo_remove */ +free: + kfree(ilo_hw); +out: + ilo_hwdev[devnum] = 0; + return error; +} + +static const struct pci_device_id ilo_devices[] = { + { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) }, + { PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) }, + { } +}; +MODULE_DEVICE_TABLE(pci, ilo_devices); + +static struct pci_driver ilo_driver = { + .name = ILO_NAME, + .id_table = ilo_devices, + .probe = ilo_probe, + .remove = ilo_remove, +}; + +static int __init ilo_init(void) +{ + int error; + dev_t dev; + + ilo_class = class_create(THIS_MODULE, "iLO"); + if (IS_ERR(ilo_class)) { + error = PTR_ERR(ilo_class); + goto out; + } + + error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME); + if (error) + goto class_destroy; + + ilo_major = MAJOR(dev); + + error = pci_register_driver(&ilo_driver); + if (error) + goto chr_remove; + + return 0; +chr_remove: + unregister_chrdev_region(dev, MAX_OPEN); +class_destroy: + class_destroy(ilo_class); +out: + return error; +} + +static void __exit ilo_exit(void) +{ + pci_unregister_driver(&ilo_driver); + unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN); + class_destroy(ilo_class); +} + +MODULE_VERSION("1.5.0"); +MODULE_ALIAS(ILO_NAME); +MODULE_DESCRIPTION(ILO_NAME); +MODULE_AUTHOR("David Altobelli <david.altobelli@hpe.com>"); +MODULE_LICENSE("GPL v2"); + +module_param(max_ccb, uint, 0444); 
+MODULE_PARM_DESC(max_ccb, "Maximum number of HP iLO channels to attach (8-24)(default=16)"); + +module_init(ilo_init); +module_exit(ilo_exit); diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h new file mode 100644 index 000000000..f69ff645c --- /dev/null +++ b/drivers/misc/hpilo.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/drivers/char/hpilo.h + * + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * David Altobelli <david.altobelli@hp.com> + */ +#ifndef __HPILO_H +#define __HPILO_H + +#define ILO_NAME "hpilo" + +/* max number of open channel control blocks per device, hw limited to 32 */ +#define MAX_CCB 24 +/* min number of open channel control blocks per device, hw limited to 32 */ +#define MIN_CCB 8 +/* max number of supported devices */ +#define MAX_ILO_DEV 1 +/* max number of files */ +#define MAX_OPEN (MAX_CCB * MAX_ILO_DEV) +/* total wait time in usec */ +#define MAX_WAIT_TIME 10000 +/* per spin wait time in usec */ +#define WAIT_TIME 10 +/* spin counter for open/close delay */ +#define MAX_WAIT (MAX_WAIT_TIME / WAIT_TIME) + +/* + * Per device, used to track global memory allocations. + */ +struct ilo_hwinfo { + /* mmio registers on device */ + char __iomem *mmio_vaddr; + + /* doorbell registers on device */ + char __iomem *db_vaddr; + + /* shared memory on device used for channel control blocks */ + char __iomem *ram_vaddr; + + /* files corresponding to this device */ + struct ccb_data *ccb_alloc[MAX_CCB]; + + struct pci_dev *ilo_dev; + + /* + * open_lock serializes ccb_cnt during open and close + * [ irq disabled ] + * -> alloc_lock used when adding/removing/searching ccb_alloc, + * which represents all ccbs open on the device + * --> fifo_lock controls access to fifo queues shared with hw + * + * Locks must be taken in this order, but open_lock and alloc_lock + * are optional, they do not need to be held in order to take a + * lower level lock. + */ + spinlock_t open_lock; + spinlock_t alloc_lock; + spinlock_t fifo_lock; + + struct cdev cdev; +}; + +/* offset from mmio_vaddr for enabling doorbell interrupts */ +#define DB_IRQ 0xB2 +/* offset from mmio_vaddr for outbound communications */ +#define DB_OUT 0xD4 +/* DB_OUT reset bit */ +#define DB_RESET 26 + +/* + * Channel control block. Used to manage hardware queues. + * The format must match hw's version. The hw ccb is 128 bytes, + * but the context area shouldn't be touched by the driver. + */ +#define ILOSW_CCB_SZ 64 +#define ILOHW_CCB_SZ 128 +struct ccb { + union { + char *send_fifobar; + u64 send_fifobar_pa; + } ccb_u1; + union { + char *send_desc; + u64 send_desc_pa; + } ccb_u2; + u64 send_ctrl; + + union { + char *recv_fifobar; + u64 recv_fifobar_pa; + } ccb_u3; + union { + char *recv_desc; + u64 recv_desc_pa; + } ccb_u4; + u64 recv_ctrl; + + union { + char __iomem *db_base; + u64 padding5; + } ccb_u5; + + u64 channel; + + /* unused context area (64 bytes) */ +}; + +/* ccb queue parameters */ +#define SENDQ 1 +#define RECVQ 2 +#define NR_QENTRY 4 +#define L2_QENTRY_SZ 12 + +/* ccb ctrl bitfields */ +#define CTRL_BITPOS_L2SZ 0 +#define CTRL_BITPOS_FIFOINDEXMASK 4 +#define CTRL_BITPOS_DESCLIMIT 18 +#define CTRL_BITPOS_A 30 +#define CTRL_BITPOS_G 31 + +/* ccb doorbell macros */ +#define L2_DB_SIZE 14 +#define ONE_DB_SIZE (1 << L2_DB_SIZE) + +/* + * Per fd structure used to track the ccb allocated to that dev file. 
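+ * A ccb_data is allocated in ilo_open() when a channel's first fd is
+ * opened, shared by later opens of the same minor via ccb_cnt, and
+ * freed in ilo_close() when the last fd on the channel is released.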
+ */ +struct ccb_data { + /* software version of ccb, using virtual addrs */ + struct ccb driver_ccb; + + /* hardware version of ccb, using physical addrs */ + struct ccb ilo_ccb; + + /* hardware ccb is written to this shared mapped device memory */ + struct ccb __iomem *mapped_ccb; + + /* dma'able memory used for send/recv queues */ + void *dma_va; + dma_addr_t dma_pa; + size_t dma_size; + + /* pointer to hardware device info */ + struct ilo_hwinfo *ilo_hw; + + /* queue for this ccb to wait for recv data */ + wait_queue_head_t ccb_waitq; + + /* usage count, to allow for shared ccb's */ + int ccb_cnt; + + /* open wanted exclusive access to this ccb */ + int ccb_excl; +}; + +/* + * FIFO queue structure, shared with hw. + */ +#define ILO_START_ALIGN 4096 +#define ILO_CACHE_SZ 128 +struct fifo { + u64 nrents; /* user requested number of fifo entries */ + u64 imask; /* mask to extract valid fifo index */ + u64 merge; /* O/C bits to merge in during enqueue operation */ + u64 reset; /* set to non-zero when the target device resets */ + u8 pad_0[ILO_CACHE_SZ - (sizeof(u64) * 4)]; + + u64 head; + u8 pad_1[ILO_CACHE_SZ - (sizeof(u64))]; + + u64 tail; + u8 pad_2[ILO_CACHE_SZ - (sizeof(u64))]; + + u64 fifobar[]; +}; + +/* convert between struct fifo, and the fifobar, which is saved in the ccb */ +#define FIFOHANDLESIZE (sizeof(struct fifo)) +#define FIFOBARTOHANDLE(_fifo) \ + ((struct fifo *)(((char *)(_fifo)) - FIFOHANDLESIZE)) + +/* the number of qwords to consume from the entry descriptor */ +#define ENTRY_BITPOS_QWORDS 0 +/* descriptor index number (within a specified queue) */ +#define ENTRY_BITPOS_DESCRIPTOR 10 +/* state bit, fifo entry consumed by consumer */ +#define ENTRY_BITPOS_C 22 +/* state bit, fifo entry is occupied */ +#define ENTRY_BITPOS_O 23 + +#define ENTRY_BITS_QWORDS 10 +#define ENTRY_BITS_DESCRIPTOR 12 +#define ENTRY_BITS_C 1 +#define ENTRY_BITS_O 1 +#define ENTRY_BITS_TOTAL \ + (ENTRY_BITS_C + ENTRY_BITS_O + \ + ENTRY_BITS_QWORDS + ENTRY_BITS_DESCRIPTOR) + +/* extract various entry fields */ +#define ENTRY_MASK ((1 << ENTRY_BITS_TOTAL) - 1) +#define ENTRY_MASK_C (((1 << ENTRY_BITS_C) - 1) << ENTRY_BITPOS_C) +#define ENTRY_MASK_O (((1 << ENTRY_BITS_O) - 1) << ENTRY_BITPOS_O) +#define ENTRY_MASK_QWORDS \ + (((1 << ENTRY_BITS_QWORDS) - 1) << ENTRY_BITPOS_QWORDS) +#define ENTRY_MASK_DESCRIPTOR \ + (((1 << ENTRY_BITS_DESCRIPTOR) - 1) << ENTRY_BITPOS_DESCRIPTOR) + +#define ENTRY_MASK_NOSTATE (ENTRY_MASK >> (ENTRY_BITS_C + ENTRY_BITS_O)) + +#endif /* __HPILO_H */ diff --git a/drivers/misc/ibmasm/Makefile b/drivers/misc/ibmasm/Makefile new file mode 100644 index 000000000..1b9dd0f44 --- /dev/null +++ b/drivers/misc/ibmasm/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_IBM_ASM) := ibmasm.o + +ibmasm-y := module.o \ + ibmasmfs.o \ + event.o \ + command.o \ + remote.o \ + heartbeat.o \ + r_heartbeat.o \ + dot_command.o \ + lowlevel.o + +ibmasm-$(CONFIG_SERIAL_8250) += uart.o + diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c new file mode 100644 index 000000000..733dd30fb --- /dev/null +++ b/drivers/misc/ibmasm/command.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* + * IBM ASM Service Processor Device Driver + * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + */ + +#include <linux/sched.h> +#include <linux/slab.h> +#include "ibmasm.h" +#include "lowlevel.h" + +static void exec_next_command(struct service_processor *sp); + +static atomic_t command_count = ATOMIC_INIT(0); 
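+
+/*
+ * Sketch of the usual command round trip, as used by the senders in
+ * dot_command.c and heartbeat.c (error handling omitted):
+ *
+ * cmd = ibmasm_new_command(sp, size);
+ * ... fill cmd->buffer with a dot command ...
+ * ibmasm_exec_command(sp, cmd);
+ * ibmasm_wait_for_response(cmd, IBMASM_CMD_TIMEOUT_NORMAL);
+ * ... on success cmd->status == IBMASM_CMD_COMPLETE ...
+ * command_put(cmd);
+ */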
+
+struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size)
+{
+ struct command *cmd;
+
+ if (buffer_size > IBMASM_CMD_MAX_BUFFER_SIZE)
+ return NULL;
+
+ cmd = kzalloc(sizeof(struct command), GFP_KERNEL);
+ if (cmd == NULL)
+ return NULL;
+
+
+ cmd->buffer = kzalloc(buffer_size, GFP_KERNEL);
+ if (cmd->buffer == NULL) {
+ kfree(cmd);
+ return NULL;
+ }
+ cmd->buffer_size = buffer_size;
+
+ kref_init(&cmd->kref);
+ cmd->lock = &sp->lock;
+
+ cmd->status = IBMASM_CMD_PENDING;
+ init_waitqueue_head(&cmd->wait);
+ INIT_LIST_HEAD(&cmd->queue_node);
+
+ atomic_inc(&command_count);
+ dbg("command count: %d\n", atomic_read(&command_count));
+
+ return cmd;
+}
+
+void ibmasm_free_command(struct kref *kref)
+{
+ struct command *cmd = to_command(kref);
+
+ list_del(&cmd->queue_node);
+ atomic_dec(&command_count);
+ dbg("command count: %d\n", atomic_read(&command_count));
+ kfree(cmd->buffer);
+ kfree(cmd);
+}
+
+static void enqueue_command(struct service_processor *sp, struct command *cmd)
+{
+ list_add_tail(&cmd->queue_node, &sp->command_queue);
+}
+
+static struct command *dequeue_command(struct service_processor *sp)
+{
+ struct command *cmd;
+ struct list_head *next;
+
+ if (list_empty(&sp->command_queue))
+ return NULL;
+
+ next = sp->command_queue.next;
+ list_del_init(next);
+ cmd = list_entry(next, struct command, queue_node);
+
+ return cmd;
+}
+
+static inline void do_exec_command(struct service_processor *sp)
+{
+ char tsbuf[32];
+
+ dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+
+ if (ibmasm_send_i2o_message(sp)) {
+ sp->current_command->status = IBMASM_CMD_FAILED;
+ wake_up(&sp->current_command->wait);
+ command_put(sp->current_command);
+ exec_next_command(sp);
+ }
+}
+
+/*
+ * exec_command
+ * send a command to a service processor
+ * Commands are executed sequentially. One command (sp->current_command)
+ * is sent to the service processor. Once the interrupt handler gets a
+ * message of type command_response, the message is copied into
+ * the current command's buffer and the next queued command is executed.
+ */
+void ibmasm_exec_command(struct service_processor *sp, struct command *cmd)
+{
+ unsigned long flags;
+ char tsbuf[32];
+
+ dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+
+ spin_lock_irqsave(&sp->lock, flags);
+
+ if (!sp->current_command) {
+ sp->current_command = cmd;
+ command_get(sp->current_command);
+ spin_unlock_irqrestore(&sp->lock, flags);
+ do_exec_command(sp);
+ } else {
+ enqueue_command(sp, cmd);
+ spin_unlock_irqrestore(&sp->lock, flags);
+ }
+}
+
+static void exec_next_command(struct service_processor *sp)
+{
+ unsigned long flags;
+ char tsbuf[32];
+
+ dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+
+ spin_lock_irqsave(&sp->lock, flags);
+ sp->current_command = dequeue_command(sp);
+ if (sp->current_command) {
+ command_get(sp->current_command);
+ spin_unlock_irqrestore(&sp->lock, flags);
+ do_exec_command(sp);
+ } else {
+ spin_unlock_irqrestore(&sp->lock, flags);
+ }
+}
+
+/*
+ * Sleep until a command has failed or a response has been received
+ * and the command status has been updated by the interrupt handler
+ * (see ibmasm_receive_command_response).
+ */
+void ibmasm_wait_for_response(struct command *cmd, int timeout)
+{
+ wait_event_interruptible_timeout(cmd->wait,
+ cmd->status == IBMASM_CMD_COMPLETE ||
+ cmd->status == IBMASM_CMD_FAILED,
+ timeout * HZ);
+}
+
+/*
+ * receive_command_response
+ * called by the interrupt handler when a dot command of type command_response
+ * was received.
+ */
+void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size)
+{
+ struct command *cmd = sp->current_command;
+
+ if (!sp->current_command)
+ return;
+
+ memcpy_fromio(cmd->buffer, response, min(size, cmd->buffer_size));
+ cmd->status = IBMASM_CMD_COMPLETE;
+ wake_up(&sp->current_command->wait);
+ command_put(sp->current_command);
+ exec_next_command(sp);
+}
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
new file mode 100644
index 000000000..df389bd4c
--- /dev/null
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ */
+
+#include "ibmasm.h"
+#include "dot_command.h"
+
+/*
+ * Dispatch an incoming message to the specific handler for the message.
+ * Called from interrupt context.
+ */
+void ibmasm_receive_message(struct service_processor *sp, void *message, int message_size)
+{
+ u32 size;
+ struct dot_command_header *header = (struct dot_command_header *)message;
+
+ if (message_size == 0)
+ return;
+
+ size = get_dot_command_size(message);
+ if (size == 0)
+ return;
+
+ if (size > message_size)
+ size = message_size;
+
+ switch (header->type) {
+ case sp_event:
+ ibmasm_receive_event(sp, message, size);
+ break;
+ case sp_command_response:
+ ibmasm_receive_command_response(sp, message, size);
+ break;
+ case sp_heartbeat:
+ ibmasm_receive_heartbeat(sp, message, size);
+ break;
+ default:
+ dev_err(sp->dev, "Received unknown message from service processor\n");
+ }
+}
+
+
+#define INIT_BUFFER_SIZE 32
+
+
+/*
+ * send the 4.3.5.10 dot command (driver VPD) to the service processor
+ */
+int ibmasm_send_driver_vpd(struct service_processor *sp)
+{
+ struct command *command;
+ struct dot_command_header *header;
+ u8 *vpd_command;
+ u8 *vpd_data;
+ int result = 0;
+
+ command = ibmasm_new_command(sp, INIT_BUFFER_SIZE);
+ if (command == NULL)
+ return -ENOMEM;
+
+ header = (struct dot_command_header *)command->buffer;
+ header->type = sp_write;
+ header->command_size = 4;
+ header->data_size = 16;
+ header->status = 0;
+ header->reserved = 0;
+
+ vpd_command = command->buffer + sizeof(struct dot_command_header);
+ vpd_command[0] = 0x4;
+ vpd_command[1] = 0x3;
+ vpd_command[2] = 0x5;
+ vpd_command[3] = 0xa;
+
+ vpd_data = vpd_command + header->command_size;
+ vpd_data[0] = 0;
+ strcat(vpd_data, IBMASM_DRIVER_VPD);
+ vpd_data[10] = 0;
+ vpd_data[15] = 0;
+
+ ibmasm_exec_command(sp, command);
+ ibmasm_wait_for_response(command, IBMASM_CMD_TIMEOUT_NORMAL);
+
+ if (command->status != IBMASM_CMD_COMPLETE)
+ result = -ENODEV;
+
+ command_put(command);
+
+ return result;
+}
+
+struct os_state_command {
+ struct dot_command_header header;
+ unsigned char command[3];
+ unsigned char data;
+};
+
+/*
+ * send the 4.3.6 dot command (os state) to the service processor
+ * During driver init this function is called with os state "up".
+ * This causes the service processor to start sending heartbeats to the
+ * driver.
+ * During driver exit the function is called with os state "down",
+ * causing the service processor to stop the heartbeats.
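+ *
+ * On the wire this is just the dot command header followed by the
+ * command bytes and one data byte; at driver init, for example:
+ *
+ * header = { .type = sp_write, .command_size = 3, .data_size = 1 }
+ * command = { 4, 3, 6 }, data = SYSTEM_STATE_OS_UP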
+ */ +int ibmasm_send_os_state(struct service_processor *sp, int os_state) +{ + struct command *cmd; + struct os_state_command *os_state_cmd; + int result = 0; + + cmd = ibmasm_new_command(sp, sizeof(struct os_state_command)); + if (cmd == NULL) + return -ENOMEM; + + os_state_cmd = (struct os_state_command *)cmd->buffer; + os_state_cmd->header.type = sp_write; + os_state_cmd->header.command_size = 3; + os_state_cmd->header.data_size = 1; + os_state_cmd->header.status = 0; + os_state_cmd->command[0] = 4; + os_state_cmd->command[1] = 3; + os_state_cmd->command[2] = 6; + os_state_cmd->data = os_state; + + ibmasm_exec_command(sp, cmd); + ibmasm_wait_for_response(cmd, IBMASM_CMD_TIMEOUT_NORMAL); + + if (cmd->status != IBMASM_CMD_COMPLETE) + result = -ENODEV; + + command_put(cmd); + return result; +} diff --git a/drivers/misc/ibmasm/dot_command.h b/drivers/misc/ibmasm/dot_command.h new file mode 100644 index 000000000..e03399ebe --- /dev/null +++ b/drivers/misc/ibmasm/dot_command.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * IBM ASM Service Processor Device Driver + * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + */ + +#ifndef __DOT_COMMAND_H__ +#define __DOT_COMMAND_H__ + +/* + * dot commands are the protocol used to communicate with the service + * processor. + * They consist of header, a command of variable length and data of + * variable length. + */ + +/* dot command types */ +#define sp_write 0 +#define sp_write_next 1 +#define sp_read 2 +#define sp_read_next 3 +#define sp_command_response 4 +#define sp_event 5 +#define sp_heartbeat 6 + +#pragma pack(1) +struct dot_command_header { + u8 type; + u8 command_size; + u16 data_size; + u8 status; + u8 reserved; +}; +#pragma pack() + +static inline size_t get_dot_command_size(void *buffer) +{ + struct dot_command_header *cmd = (struct dot_command_header *)buffer; + return sizeof(struct dot_command_header) + cmd->command_size + cmd->data_size; +} + +static inline unsigned int get_dot_command_timeout(void *buffer) +{ + struct dot_command_header *header = (struct dot_command_header *)buffer; + unsigned char *cmd = buffer + sizeof(struct dot_command_header); + + /* dot commands 6.3.1, 7.1 and 8.x need a longer timeout */ + + if (header->command_size == 3) { + if ((cmd[0] == 6) && (cmd[1] == 3) && (cmd[2] == 1)) + return IBMASM_CMD_TIMEOUT_EXTRA; + } else if (header->command_size == 2) { + if ((cmd[0] == 7) && (cmd[1] == 1)) + return IBMASM_CMD_TIMEOUT_EXTRA; + if (cmd[0] == 8) + return IBMASM_CMD_TIMEOUT_EXTRA; + } + return IBMASM_CMD_TIMEOUT_NORMAL; +} + +#endif /* __DOT_COMMAND_H__ */ diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c new file mode 100644 index 000000000..40ce75f89 --- /dev/null +++ b/drivers/misc/ibmasm/event.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* + * IBM ASM Service Processor Device Driver + * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + */ + +#include <linux/sched.h> +#include <linux/slab.h> +#include "ibmasm.h" +#include "lowlevel.h" + +/* + * ASM service processor event handling routines. + * + * Events are signalled to the device drivers through interrupts. + * They have the format of dot commands, with the type field set to + * sp_event. + * The driver does not interpret the events, it simply stores them in a + * circular buffer. 
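+ *
+ * A reader (see event_file_read() in ibmasmfs.c) typically does:
+ *
+ * ibmasm_event_reader_register(sp, reader);
+ * in a loop: ibmasm_get_next_event(sp, reader), which sleeps until
+ * an event with a newer serial number arrives;
+ * ibmasm_event_reader_unregister(sp, reader);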
+ */ + +static void wake_up_event_readers(struct service_processor *sp) +{ + struct event_reader *reader; + + list_for_each_entry(reader, &sp->event_buffer->readers, node) + wake_up_interruptible(&reader->wait); +} + +/* + * receive_event + * Called by the interrupt handler when a dot command of type sp_event is + * received. + * Store the event in the circular event buffer, wake up any sleeping + * event readers. + * There is no reader marker in the buffer, therefore readers are + * responsible for keeping up with the writer, or they will lose events. + */ +void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size) +{ + struct event_buffer *buffer = sp->event_buffer; + struct ibmasm_event *event; + unsigned long flags; + + data_size = min(data_size, IBMASM_EVENT_MAX_SIZE); + + spin_lock_irqsave(&sp->lock, flags); + /* copy the event into the next slot in the circular buffer */ + event = &buffer->events[buffer->next_index]; + memcpy_fromio(event->data, data, data_size); + event->data_size = data_size; + event->serial_number = buffer->next_serial_number; + + /* advance indices in the buffer */ + buffer->next_index = (buffer->next_index + 1) % IBMASM_NUM_EVENTS; + buffer->next_serial_number++; + spin_unlock_irqrestore(&sp->lock, flags); + + wake_up_event_readers(sp); +} + +static inline int event_available(struct event_buffer *b, struct event_reader *r) +{ + return (r->next_serial_number < b->next_serial_number); +} + +/* + * get_next_event + * Called by event readers (initiated from user space through the file + * system). + * Sleeps until a new event is available. + */ +int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader) +{ + struct event_buffer *buffer = sp->event_buffer; + struct ibmasm_event *event; + unsigned int index; + unsigned long flags; + + reader->cancelled = 0; + + if (wait_event_interruptible(reader->wait, + event_available(buffer, reader) || reader->cancelled)) + return -ERESTARTSYS; + + if (!event_available(buffer, reader)) + return 0; + + spin_lock_irqsave(&sp->lock, flags); + + index = buffer->next_index; + event = &buffer->events[index]; + while (event->serial_number < reader->next_serial_number) { + index = (index + 1) % IBMASM_NUM_EVENTS; + event = &buffer->events[index]; + } + memcpy(reader->data, event->data, event->data_size); + reader->data_size = event->data_size; + reader->next_serial_number = event->serial_number + 1; + + spin_unlock_irqrestore(&sp->lock, flags); + + return event->data_size; +} + +void ibmasm_cancel_next_event(struct event_reader *reader) +{ + reader->cancelled = 1; + wake_up_interruptible(&reader->wait); +} + +void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader) +{ + unsigned long flags; + + reader->next_serial_number = sp->event_buffer->next_serial_number; + init_waitqueue_head(&reader->wait); + spin_lock_irqsave(&sp->lock, flags); + list_add(&reader->node, &sp->event_buffer->readers); + spin_unlock_irqrestore(&sp->lock, flags); +} + +void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader) +{ + unsigned long flags; + + spin_lock_irqsave(&sp->lock, flags); + list_del(&reader->node); + spin_unlock_irqrestore(&sp->lock, flags); +} + +int ibmasm_event_buffer_init(struct service_processor *sp) +{ + struct event_buffer *buffer; + struct ibmasm_event *event; + int i; + + buffer = kmalloc(sizeof(struct event_buffer), GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + buffer->next_index = 0; + 
buffer->next_serial_number = 1;
+
+ event = buffer->events;
+ for (i=0; i<IBMASM_NUM_EVENTS; i++, event++)
+ event->serial_number = 0;
+
+ INIT_LIST_HEAD(&buffer->readers);
+
+ sp->event_buffer = buffer;
+
+ return 0;
+}
+
+void ibmasm_event_buffer_exit(struct service_processor *sp)
+{
+ kfree(sp->event_buffer);
+}
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
new file mode 100644
index 000000000..4f5f3bdc8
--- /dev/null
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ */
+
+#include <linux/notifier.h>
+#include "ibmasm.h"
+#include "dot_command.h"
+#include "lowlevel.h"
+
+static int suspend_heartbeats = 0;
+
+/*
+ * Once the driver indicates to the service processor that it is running
+ * - see ibmasm_send_os_state() - the service processor sends periodic
+ * heartbeats to the driver. The driver must respond to the heartbeats or
+ * else the OS will be rebooted.
+ * In the case of a panic the interrupt handler continues to work and thus
+ * continues to respond to heartbeats, making the service processor believe
+ * the OS is still running and thus preventing a reboot.
+ * To prevent this from happening a callback is added to the panic_notifier_list.
+ * Before responding to a heartbeat the driver checks if a panic has happened;
+ * if so, it suspends heartbeats, causing the service processor to reboot the
+ * OS as expected.
+ */
+static int panic_happened(struct notifier_block *n, unsigned long val, void *v)
+{
+ suspend_heartbeats = 1;
+ return 0;
+}
+
+static struct notifier_block panic_notifier = { panic_happened, NULL, 1 };
+
+void ibmasm_register_panic_notifier(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier);
+}
+
+void ibmasm_unregister_panic_notifier(void)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &panic_notifier);
+}
+
+
+int ibmasm_heartbeat_init(struct service_processor *sp)
+{
+ sp->heartbeat = ibmasm_new_command(sp, HEARTBEAT_BUFFER_SIZE);
+ if (sp->heartbeat == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void ibmasm_heartbeat_exit(struct service_processor *sp)
+{
+ char tsbuf[32];
+
+ dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+ ibmasm_wait_for_response(sp->heartbeat, IBMASM_CMD_TIMEOUT_NORMAL);
+ dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+ suspend_heartbeats = 1;
+ command_put(sp->heartbeat);
+}
+
+void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size)
+{
+ struct command *cmd = sp->heartbeat;
+ struct dot_command_header *header = (struct dot_command_header *)cmd->buffer;
+ char tsbuf[32];
+
+ dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
+ if (suspend_heartbeats)
+ return;
+
+ /* return the received dot command to sender */
+ cmd->status = IBMASM_CMD_PENDING;
+ size = min(size, cmd->buffer_size);
+ memcpy_fromio(cmd->buffer, message, size);
+ header->type = sp_write;
+ ibmasm_exec_command(sp, cmd);
+}
diff --git a/drivers/misc/ibmasm/i2o.h b/drivers/misc/ibmasm/i2o.h
new file mode 100644
index 000000000..468fa84a3
--- /dev/null
+++ b/drivers/misc/ibmasm/i2o.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ */
+
+#pragma pack(1)
+struct i2o_header {
+ u8 version;
+ u8
message_flags; + u16 message_size; + u8 target; + u8 initiator_and_target; + u8 initiator; + u8 function; + u32 initiator_context; +}; +#pragma pack() + +#define I2O_HEADER_TEMPLATE \ + { .version = 0x01, \ + .message_flags = 0x00, \ + .function = 0xFF, \ + .initiator = 0x00, \ + .initiator_and_target = 0x40, \ + .target = 0x00, \ + .initiator_context = 0x0 } + +#define I2O_MESSAGE_SIZE 0x1000 +#define I2O_COMMAND_SIZE (I2O_MESSAGE_SIZE - sizeof(struct i2o_header)) + +#pragma pack(1) +struct i2o_message { + struct i2o_header header; + void *data; +}; +#pragma pack() + +static inline unsigned short outgoing_message_size(unsigned int data_size) +{ + unsigned int size; + unsigned short i2o_size; + + if (data_size > I2O_COMMAND_SIZE) + data_size = I2O_COMMAND_SIZE; + + size = sizeof(struct i2o_header) + data_size; + + i2o_size = size / sizeof(u32); + + if (size % sizeof(u32)) + i2o_size++; + + return i2o_size; +} + +static inline u32 incoming_data_size(struct i2o_message *i2o_message) +{ + return (sizeof(u32) * i2o_message->header.message_size); +} diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h new file mode 100644 index 000000000..a5ced88ca --- /dev/null +++ b/drivers/misc/ibmasm/ibmasm.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* + * IBM ASM Service Processor Device Driver + * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/wait.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/kref.h> +#include <linux/device.h> +#include <linux/input.h> +#include <linux/time64.h> + +/* Driver identification */ +#define DRIVER_NAME "ibmasm" +#define DRIVER_VERSION "1.0" +#define DRIVER_AUTHOR "Max Asbock <masbock@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>" +#define DRIVER_DESC "IBM ASM Service Processor Driver" + +#define err(msg) printk(KERN_ERR "%s: " msg "\n", DRIVER_NAME) +#define info(msg) printk(KERN_INFO "%s: " msg "\n", DRIVER_NAME) + +extern int ibmasm_debug; +#define dbg(STR, ARGS...) 
\ + do { \ + if (ibmasm_debug) \ + printk(KERN_DEBUG STR , ##ARGS); \ + } while (0) + +static inline char *get_timestamp(char *buf) +{ + struct timespec64 now; + + ktime_get_real_ts64(&now); + sprintf(buf, "%llu.%.08lu", (long long)now.tv_sec, + now.tv_nsec / NSEC_PER_USEC); + return buf; +} + +#define IBMASM_CMD_PENDING 0 +#define IBMASM_CMD_COMPLETE 1 +#define IBMASM_CMD_FAILED 2 + +#define IBMASM_CMD_TIMEOUT_NORMAL 45 +#define IBMASM_CMD_TIMEOUT_EXTRA 240 + +#define IBMASM_CMD_MAX_BUFFER_SIZE 0x8000 + +#define REVERSE_HEARTBEAT_TIMEOUT 120 + +#define HEARTBEAT_BUFFER_SIZE 0x400 + +#ifdef IA64 +#define IBMASM_DRIVER_VPD "Lin64 6.08 " +#else +#define IBMASM_DRIVER_VPD "Lin32 6.08 " +#endif + +#define SYSTEM_STATE_OS_UP 5 +#define SYSTEM_STATE_OS_DOWN 4 + +#define IBMASM_NAME_SIZE 16 + +#define IBMASM_NUM_EVENTS 10 +#define IBMASM_EVENT_MAX_SIZE 2048u + + +struct command { + struct list_head queue_node; + wait_queue_head_t wait; + unsigned char *buffer; + size_t buffer_size; + int status; + struct kref kref; + spinlock_t *lock; +}; +#define to_command(c) container_of(c, struct command, kref) + +void ibmasm_free_command(struct kref *kref); +static inline void command_put(struct command *cmd) +{ + unsigned long flags; + spinlock_t *lock = cmd->lock; + + spin_lock_irqsave(lock, flags); + kref_put(&cmd->kref, ibmasm_free_command); + spin_unlock_irqrestore(lock, flags); +} + +static inline void command_get(struct command *cmd) +{ + kref_get(&cmd->kref); +} + + +struct ibmasm_event { + unsigned int serial_number; + unsigned int data_size; + unsigned char data[IBMASM_EVENT_MAX_SIZE]; +}; + +struct event_buffer { + struct ibmasm_event events[IBMASM_NUM_EVENTS]; + unsigned int next_serial_number; + unsigned int next_index; + struct list_head readers; +}; + +struct event_reader { + int cancelled; + unsigned int next_serial_number; + wait_queue_head_t wait; + struct list_head node; + unsigned int data_size; + unsigned char data[IBMASM_EVENT_MAX_SIZE]; +}; + +struct reverse_heartbeat { + wait_queue_head_t wait; + unsigned int stopped; +}; + +struct ibmasm_remote { + struct input_dev *keybd_dev; + struct input_dev *mouse_dev; +}; + +struct service_processor { + struct list_head node; + spinlock_t lock; + void __iomem *base_address; + unsigned int irq; + struct command *current_command; + struct command *heartbeat; + struct list_head command_queue; + struct event_buffer *event_buffer; + char dirname[IBMASM_NAME_SIZE]; + char devname[IBMASM_NAME_SIZE]; + unsigned int number; + struct ibmasm_remote remote; + int serial_line; + struct device *dev; +}; + +/* command processing */ +struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size); +void ibmasm_exec_command(struct service_processor *sp, struct command *cmd); +void ibmasm_wait_for_response(struct command *cmd, int timeout); +void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size); + +/* event processing */ +int ibmasm_event_buffer_init(struct service_processor *sp); +void ibmasm_event_buffer_exit(struct service_processor *sp); +void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size); +void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader); +void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader); +int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader); +void ibmasm_cancel_next_event(struct event_reader *reader); + +/* heartbeat - from SP to OS 
*/ +void ibmasm_register_panic_notifier(void); +void ibmasm_unregister_panic_notifier(void); +int ibmasm_heartbeat_init(struct service_processor *sp); +void ibmasm_heartbeat_exit(struct service_processor *sp); +void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size); + +/* reverse heartbeat - from OS to SP */ +void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb); +int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb); +void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb); + +/* dot commands */ +void ibmasm_receive_message(struct service_processor *sp, void *data, int data_size); +int ibmasm_send_driver_vpd(struct service_processor *sp); +int ibmasm_send_os_state(struct service_processor *sp, int os_state); + +/* low level message processing */ +int ibmasm_send_i2o_message(struct service_processor *sp); +irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id); + +/* remote console */ +void ibmasm_handle_mouse_interrupt(struct service_processor *sp); +int ibmasm_init_remote_input_dev(struct service_processor *sp); +void ibmasm_free_remote_input_dev(struct service_processor *sp); + +/* file system */ +int ibmasmfs_register(void); +void ibmasmfs_unregister(void); +void ibmasmfs_add_sp(struct service_processor *sp); + +/* uart */ +#if IS_ENABLED(CONFIG_SERIAL_8250) +void ibmasm_register_uart(struct service_processor *sp); +void ibmasm_unregister_uart(struct service_processor *sp); +#else +#define ibmasm_register_uart(sp) do { } while(0) +#define ibmasm_unregister_uart(sp) do { } while(0) +#endif diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c new file mode 100644 index 000000000..35fec1bf1 --- /dev/null +++ b/drivers/misc/ibmasm/ibmasmfs.c @@ -0,0 +1,603 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * IBM ASM Service Processor Device Driver + * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + */ + +/* + * Parts of this code are based on an article by Jonathan Corbet + * that appeared in Linux Weekly News. + */ + + +/* + * The IBMASM file virtual filesystem. It creates the following hierarchy + * dynamically when mounted from user space: + * + * /ibmasm + * |-- 0 + * | |-- command + * | |-- event + * | |-- reverse_heartbeat + * | `-- remote_video + * | |-- depth + * | |-- height + * | `-- width + * . + * . + * . 
+ * `-- n
+ * |-- command
+ * |-- event
+ * |-- reverse_heartbeat
+ * `-- remote_video
+ * |-- depth
+ * |-- height
+ * `-- width
+ *
+ * For each service processor the following files are created:
+ *
+ * command: execute dot commands
+ * write: execute a dot command on the service processor
+ * read: return the result of a previously executed dot command
+ *
+ * event: listen for service processor events
+ * read: sleep (interruptible) until an event occurs
+ * write: wakeup sleeping event listener
+ *
+ * reverse_heartbeat: send a heartbeat to the service processor
+ * read: sleep (interruptible) until the reverse heartbeat fails
+ * write: wakeup sleeping heartbeat listener
+ *
+ * remote_video/depth
+ * remote_video/height
+ * remote_video/width: control remote display settings
+ * write: set value
+ * read: read value
+ */
+
+#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+#include "ibmasm.h"
+#include "remote.h"
+#include "dot_command.h"
+
+#define IBMASMFS_MAGIC 0x66726f67
+
+static LIST_HEAD(service_processors);
+
+static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode);
+static void ibmasmfs_create_files (struct super_block *sb);
+static int ibmasmfs_fill_super(struct super_block *sb, struct fs_context *fc);
+
+static int ibmasmfs_get_tree(struct fs_context *fc)
+{
+ return get_tree_single(fc, ibmasmfs_fill_super);
+}
+
+static const struct fs_context_operations ibmasmfs_context_ops = {
+ .get_tree = ibmasmfs_get_tree,
+};
+
+static int ibmasmfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &ibmasmfs_context_ops;
+ return 0;
+}
+
+static const struct super_operations ibmasmfs_s_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+};
+
+static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations;
+
+static struct file_system_type ibmasmfs_type = {
+ .owner = THIS_MODULE,
+ .name = "ibmasmfs",
+ .init_fs_context = ibmasmfs_init_fs_context,
+ .kill_sb = kill_litter_super,
+};
+MODULE_ALIAS_FS("ibmasmfs");
+
+static int ibmasmfs_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ struct inode *root;
+
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
+ sb->s_magic = IBMASMFS_MAGIC;
+ sb->s_op = &ibmasmfs_s_ops;
+ sb->s_time_gran = 1;
+
+ root = ibmasmfs_make_inode (sb, S_IFDIR | 0500);
+ if (!root)
+ return -ENOMEM;
+
+ root->i_op = &simple_dir_inode_operations;
+ root->i_fop = ibmasmfs_dir_ops;
+
+ sb->s_root = d_make_root(root);
+ if (!sb->s_root)
+ return -ENOMEM;
+
+ ibmasmfs_create_files(sb);
+ return 0;
+}
+
+static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret = new_inode(sb);
+
+ if (ret) {
+ ret->i_ino = get_next_ino();
+ ret->i_mode = mode;
+ ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
+ }
+ return ret;
+}
+
+static struct dentry *ibmasmfs_create_file(struct dentry *parent,
+ const char *name,
+ const struct file_operations *fops,
+ void *data,
+ int mode)
+{
+ struct dentry *dentry;
+ struct inode *inode;
+
+ dentry = d_alloc_name(parent, name);
+ if (!dentry)
+ return NULL;
+
+ inode = ibmasmfs_make_inode(parent->d_sb, S_IFREG | mode);
+ if (!inode) {
+ dput(dentry);
+ return NULL;
+ }
+
+ inode->i_fop = fops;
+ inode->i_private = data;
+
+ d_add(dentry, inode);
+ return dentry;
+}
+
+static struct dentry *ibmasmfs_create_dir(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+ struct
inode *inode; + + dentry = d_alloc_name(parent, name); + if (!dentry) + return NULL; + + inode = ibmasmfs_make_inode(parent->d_sb, S_IFDIR | 0500); + if (!inode) { + dput(dentry); + return NULL; + } + + inode->i_op = &simple_dir_inode_operations; + inode->i_fop = ibmasmfs_dir_ops; + + d_add(dentry, inode); + return dentry; +} + +int ibmasmfs_register(void) +{ + return register_filesystem(&ibmasmfs_type); +} + +void ibmasmfs_unregister(void) +{ + unregister_filesystem(&ibmasmfs_type); +} + +void ibmasmfs_add_sp(struct service_processor *sp) +{ + list_add(&sp->node, &service_processors); +} + +/* struct to save state between command file operations */ +struct ibmasmfs_command_data { + struct service_processor *sp; + struct command *command; +}; + +/* struct to save state between event file operations */ +struct ibmasmfs_event_data { + struct service_processor *sp; + struct event_reader reader; + int active; +}; + +/* struct to save state between reverse heartbeat file operations */ +struct ibmasmfs_heartbeat_data { + struct service_processor *sp; + struct reverse_heartbeat heartbeat; + int active; +}; + +static int command_file_open(struct inode *inode, struct file *file) +{ + struct ibmasmfs_command_data *command_data; + + if (!inode->i_private) + return -ENODEV; + + command_data = kmalloc(sizeof(struct ibmasmfs_command_data), GFP_KERNEL); + if (!command_data) + return -ENOMEM; + + command_data->command = NULL; + command_data->sp = inode->i_private; + file->private_data = command_data; + return 0; +} + +static int command_file_close(struct inode *inode, struct file *file) +{ + struct ibmasmfs_command_data *command_data = file->private_data; + + if (command_data->command) + command_put(command_data->command); + + kfree(command_data); + return 0; +} + +static ssize_t command_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) +{ + struct ibmasmfs_command_data *command_data = file->private_data; + struct command *cmd; + int len; + unsigned long flags; + + if (*offset < 0) + return -EINVAL; + if (count == 0 || count > IBMASM_CMD_MAX_BUFFER_SIZE) + return 0; + if (*offset != 0) + return 0; + + spin_lock_irqsave(&command_data->sp->lock, flags); + cmd = command_data->command; + if (cmd == NULL) { + spin_unlock_irqrestore(&command_data->sp->lock, flags); + return 0; + } + command_data->command = NULL; + spin_unlock_irqrestore(&command_data->sp->lock, flags); + + if (cmd->status != IBMASM_CMD_COMPLETE) { + command_put(cmd); + return -EIO; + } + len = min(count, cmd->buffer_size); + if (copy_to_user(buf, cmd->buffer, len)) { + command_put(cmd); + return -EFAULT; + } + command_put(cmd); + + return len; +} + +static ssize_t command_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset) +{ + struct ibmasmfs_command_data *command_data = file->private_data; + struct command *cmd; + unsigned long flags; + + if (*offset < 0) + return -EINVAL; + if (count == 0 || count > IBMASM_CMD_MAX_BUFFER_SIZE) + return 0; + if (*offset != 0) + return 0; + + /* commands are executed sequentially, only one command at a time */ + if (command_data->command) + return -EAGAIN; + + cmd = ibmasm_new_command(command_data->sp, count); + if (!cmd) + return -ENOMEM; + + if (copy_from_user(cmd->buffer, ubuff, count)) { + command_put(cmd); + return -EFAULT; + } + + spin_lock_irqsave(&command_data->sp->lock, flags); + if (command_data->command) { + spin_unlock_irqrestore(&command_data->sp->lock, flags); + command_put(cmd); + return -EAGAIN; + } + command_data->command = cmd; + 
spin_unlock_irqrestore(&command_data->sp->lock, flags); + + ibmasm_exec_command(command_data->sp, cmd); + ibmasm_wait_for_response(cmd, get_dot_command_timeout(cmd->buffer)); + + return count; +} + +static int event_file_open(struct inode *inode, struct file *file) +{ + struct ibmasmfs_event_data *event_data; + struct service_processor *sp; + + if (!inode->i_private) + return -ENODEV; + + sp = inode->i_private; + + event_data = kmalloc(sizeof(struct ibmasmfs_event_data), GFP_KERNEL); + if (!event_data) + return -ENOMEM; + + ibmasm_event_reader_register(sp, &event_data->reader); + + event_data->sp = sp; + event_data->active = 0; + file->private_data = event_data; + return 0; +} + +static int event_file_close(struct inode *inode, struct file *file) +{ + struct ibmasmfs_event_data *event_data = file->private_data; + + ibmasm_event_reader_unregister(event_data->sp, &event_data->reader); + kfree(event_data); + return 0; +} + +static ssize_t event_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) +{ + struct ibmasmfs_event_data *event_data = file->private_data; + struct event_reader *reader = &event_data->reader; + struct service_processor *sp = event_data->sp; + int ret; + unsigned long flags; + + if (*offset < 0) + return -EINVAL; + if (count == 0 || count > IBMASM_EVENT_MAX_SIZE) + return 0; + if (*offset != 0) + return 0; + + spin_lock_irqsave(&sp->lock, flags); + if (event_data->active) { + spin_unlock_irqrestore(&sp->lock, flags); + return -EBUSY; + } + event_data->active = 1; + spin_unlock_irqrestore(&sp->lock, flags); + + ret = ibmasm_get_next_event(sp, reader); + if (ret <= 0) + goto out; + + if (count < reader->data_size) { + ret = -EINVAL; + goto out; + } + + if (copy_to_user(buf, reader->data, reader->data_size)) { + ret = -EFAULT; + goto out; + } + ret = reader->data_size; + +out: + event_data->active = 0; + return ret; +} + +static ssize_t event_file_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) +{ + struct ibmasmfs_event_data *event_data = file->private_data; + + if (*offset < 0) + return -EINVAL; + if (count != 1) + return 0; + if (*offset != 0) + return 0; + + ibmasm_cancel_next_event(&event_data->reader); + return 0; +} + +static int r_heartbeat_file_open(struct inode *inode, struct file *file) +{ + struct ibmasmfs_heartbeat_data *rhbeat; + + if (!inode->i_private) + return -ENODEV; + + rhbeat = kmalloc(sizeof(struct ibmasmfs_heartbeat_data), GFP_KERNEL); + if (!rhbeat) + return -ENOMEM; + + rhbeat->sp = inode->i_private; + rhbeat->active = 0; + ibmasm_init_reverse_heartbeat(rhbeat->sp, &rhbeat->heartbeat); + file->private_data = rhbeat; + return 0; +} + +static int r_heartbeat_file_close(struct inode *inode, struct file *file) +{ + struct ibmasmfs_heartbeat_data *rhbeat = file->private_data; + + kfree(rhbeat); + return 0; +} + +static ssize_t r_heartbeat_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) +{ + struct ibmasmfs_heartbeat_data *rhbeat = file->private_data; + unsigned long flags; + int result; + + if (*offset < 0) + return -EINVAL; + if (count == 0 || count > 1024) + return 0; + if (*offset != 0) + return 0; + + /* allow only one reverse heartbeat per process */ + spin_lock_irqsave(&rhbeat->sp->lock, flags); + if (rhbeat->active) { + spin_unlock_irqrestore(&rhbeat->sp->lock, flags); + return -EBUSY; + } + rhbeat->active = 1; + spin_unlock_irqrestore(&rhbeat->sp->lock, flags); + + result = ibmasm_start_reverse_heartbeat(rhbeat->sp, &rhbeat->heartbeat); + rhbeat->active = 0; + + 
return result; +} + +static ssize_t r_heartbeat_file_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) +{ + struct ibmasmfs_heartbeat_data *rhbeat = file->private_data; + + if (*offset < 0) + return -EINVAL; + if (count != 1) + return 0; + if (*offset != 0) + return 0; + + if (rhbeat->active) + ibmasm_stop_reverse_heartbeat(&rhbeat->heartbeat); + + return 1; +} + +static int remote_settings_file_close(struct inode *inode, struct file *file) +{ + return 0; +} + +static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) +{ + void __iomem *address = (void __iomem *)file->private_data; + int len = 0; + unsigned int value; + char lbuf[20]; + + value = readl(address); + len = snprintf(lbuf, sizeof(lbuf), "%d\n", value); + + return simple_read_from_buffer(buf, count, offset, lbuf, len); +} + +static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset) +{ + void __iomem *address = (void __iomem *)file->private_data; + char *buff; + unsigned int value; + + if (*offset < 0) + return -EINVAL; + if (count == 0 || count > 1024) + return 0; + if (*offset != 0) + return 0; + + buff = kzalloc (count + 1, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + + if (copy_from_user(buff, ubuff, count)) { + kfree(buff); + return -EFAULT; + } + + value = simple_strtoul(buff, NULL, 10); + writel(value, address); + kfree(buff); + + return count; +} + +static const struct file_operations command_fops = { + .open = command_file_open, + .release = command_file_close, + .read = command_file_read, + .write = command_file_write, + .llseek = generic_file_llseek, +}; + +static const struct file_operations event_fops = { + .open = event_file_open, + .release = event_file_close, + .read = event_file_read, + .write = event_file_write, + .llseek = generic_file_llseek, +}; + +static const struct file_operations r_heartbeat_fops = { + .open = r_heartbeat_file_open, + .release = r_heartbeat_file_close, + .read = r_heartbeat_file_read, + .write = r_heartbeat_file_write, + .llseek = generic_file_llseek, +}; + +static const struct file_operations remote_settings_fops = { + .open = simple_open, + .release = remote_settings_file_close, + .read = remote_settings_file_read, + .write = remote_settings_file_write, + .llseek = generic_file_llseek, +}; + + +static void ibmasmfs_create_files (struct super_block *sb) +{ + struct list_head *entry; + struct service_processor *sp; + + list_for_each(entry, &service_processors) { + struct dentry *dir; + struct dentry *remote_dir; + sp = list_entry(entry, struct service_processor, node); + dir = ibmasmfs_create_dir(sb->s_root, sp->dirname); + if (!dir) + continue; + + ibmasmfs_create_file(dir, "command", &command_fops, sp, S_IRUSR|S_IWUSR); + ibmasmfs_create_file(dir, "event", &event_fops, sp, S_IRUSR|S_IWUSR); + ibmasmfs_create_file(dir, "reverse_heartbeat", &r_heartbeat_fops, sp, S_IRUSR|S_IWUSR); + + remote_dir = ibmasmfs_create_dir(dir, "remote_video"); + if (!remote_dir) + continue; + + ibmasmfs_create_file(remote_dir, "width", &remote_settings_fops, (void *)display_width(sp), S_IRUSR|S_IWUSR); + ibmasmfs_create_file(remote_dir, "height", &remote_settings_fops, (void *)display_height(sp), S_IRUSR|S_IWUSR); + ibmasmfs_create_file(remote_dir, "depth", &remote_settings_fops, (void *)display_depth(sp), S_IRUSR|S_IWUSR); + } +} diff --git a/drivers/misc/ibmasm/lowlevel.c b/drivers/misc/ibmasm/lowlevel.c new file mode 100644 index 000000000..6922dc6c1 --- /dev/null +++ 
b/drivers/misc/ibmasm/lowlevel.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * IBM ASM Service Processor Device Driver + * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + */ + +#include "ibmasm.h" +#include "lowlevel.h" +#include "i2o.h" +#include "dot_command.h" +#include "remote.h" + +static struct i2o_header header = I2O_HEADER_TEMPLATE; + + +int ibmasm_send_i2o_message(struct service_processor *sp) +{ + u32 mfa; + unsigned int command_size; + struct i2o_message *message; + struct command *command = sp->current_command; + + mfa = get_mfa_inbound(sp->base_address); + if (!mfa) + return 1; + + command_size = get_dot_command_size(command->buffer); + header.message_size = outgoing_message_size(command_size); + + message = get_i2o_message(sp->base_address, mfa); + + memcpy_toio(&message->header, &header, sizeof(struct i2o_header)); + memcpy_toio(&message->data, command->buffer, command_size); + + set_mfa_inbound(sp->base_address, mfa); + + return 0; +} + +irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id) +{ + u32 mfa; + struct service_processor *sp = (struct service_processor *)dev_id; + void __iomem *base_address = sp->base_address; + char tsbuf[32]; + + if (!sp_interrupt_pending(base_address)) + return IRQ_NONE; + + dbg("respond to interrupt at %s\n", get_timestamp(tsbuf)); + + if (mouse_interrupt_pending(sp)) { + ibmasm_handle_mouse_interrupt(sp); + clear_mouse_interrupt(sp); + } + + mfa = get_mfa_outbound(base_address); + if (valid_mfa(mfa)) { + struct i2o_message *msg = get_i2o_message(base_address, mfa); + ibmasm_receive_message(sp, &msg->data, incoming_data_size(msg)); + } else + dbg("didn't get a valid MFA\n"); + + set_mfa_outbound(base_address, mfa); + dbg("finished interrupt at %s\n", get_timestamp(tsbuf)); + + return IRQ_HANDLED; +} diff --git a/drivers/misc/ibmasm/lowlevel.h b/drivers/misc/ibmasm/lowlevel.h new file mode 100644 index 000000000..25f1ed07c --- /dev/null +++ b/drivers/misc/ibmasm/lowlevel.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * IBM ASM Service Processor Device Driver + * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + */ + +/* Condor service processor specific hardware definitions */ + +#ifndef __IBMASM_CONDOR_H__ +#define __IBMASM_CONDOR_H__ + +#include <asm/io.h> + +#define VENDORID_IBM 0x1014 +#define DEVICEID_RSA 0x010F + +#define GET_MFA_ADDR(x) (x & 0xFFFFFF00) + +#define MAILBOX_FULL(x) (x & 0x00000001) + +#define NO_MFAS_AVAILABLE 0xFFFFFFFF + + +#define INBOUND_QUEUE_PORT 0x40 /* contains address of next free MFA */ +#define OUTBOUND_QUEUE_PORT 0x44 /* contains address of posted MFA */ + +#define SP_INTR_MASK 0x00000008 +#define UART_INTR_MASK 0x00000010 + +#define INTR_STATUS_REGISTER 0x13A0 +#define INTR_CONTROL_REGISTER 0x13A4 + +#define SCOUT_COM_A_BASE 0x0000 +#define SCOUT_COM_B_BASE 0x0100 +#define SCOUT_COM_C_BASE 0x0200 +#define SCOUT_COM_D_BASE 0x0300 + +static inline int sp_interrupt_pending(void __iomem *base_address) +{ + return SP_INTR_MASK & readl(base_address + INTR_STATUS_REGISTER); +} + +static inline int uart_interrupt_pending(void __iomem *base_address) +{ + return UART_INTR_MASK & readl(base_address + INTR_STATUS_REGISTER); +} + +static inline void ibmasm_enable_interrupts(void __iomem *base_address, int mask) +{ + void __iomem *ctrl_reg = base_address + INTR_CONTROL_REGISTER; + writel( readl(ctrl_reg) & ~mask, ctrl_reg); +} + +static inline void ibmasm_disable_interrupts(void __iomem 
*base_address, int mask)
+{
+ void __iomem *ctrl_reg = base_address + INTR_CONTROL_REGISTER;
+ writel( readl(ctrl_reg) | mask, ctrl_reg);
+}
+
+static inline void enable_sp_interrupts(void __iomem *base_address)
+{
+ ibmasm_enable_interrupts(base_address, SP_INTR_MASK);
+}
+
+static inline void disable_sp_interrupts(void __iomem *base_address)
+{
+ ibmasm_disable_interrupts(base_address, SP_INTR_MASK);
+}
+
+static inline void enable_uart_interrupts(void __iomem *base_address)
+{
+ ibmasm_enable_interrupts(base_address, UART_INTR_MASK);
+}
+
+static inline void disable_uart_interrupts(void __iomem *base_address)
+{
+ ibmasm_disable_interrupts(base_address, UART_INTR_MASK);
+}
+
+#define valid_mfa(mfa) ( (mfa) != NO_MFAS_AVAILABLE )
+
+static inline u32 get_mfa_outbound(void __iomem *base_address)
+{
+ int retry;
+ u32 mfa;
+
+ for (retry=0; retry<=10; retry++) {
+ mfa = readl(base_address + OUTBOUND_QUEUE_PORT);
+ if (valid_mfa(mfa))
+ break;
+ }
+ return mfa;
+}
+
+static inline void set_mfa_outbound(void __iomem *base_address, u32 mfa)
+{
+ writel(mfa, base_address + OUTBOUND_QUEUE_PORT);
+}
+
+static inline u32 get_mfa_inbound(void __iomem *base_address)
+{
+ u32 mfa = readl(base_address + INBOUND_QUEUE_PORT);
+
+ if (MAILBOX_FULL(mfa))
+ return 0;
+
+ return mfa;
+}
+
+static inline void set_mfa_inbound(void __iomem *base_address, u32 mfa)
+{
+ writel(mfa, base_address + INBOUND_QUEUE_PORT);
+}
+
+static inline struct i2o_message *get_i2o_message(void __iomem *base_address, u32 mfa)
+{
+ return (struct i2o_message *)(GET_MFA_ADDR(mfa) + base_address);
+}
+
+#endif /* __IBMASM_CONDOR_H__ */
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
new file mode 100644
index 000000000..dc8a06c06
--- /dev/null
+++ b/drivers/misc/ibmasm/module.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ * This driver is based on code originally written by Pete Reynolds
+ * and others.
+ */
+
+/*
+ * The ASM device driver does the following things:
+ *
+ * 1) When loaded it sends a message to the service processor,
+ * indicating that an OS is running. This causes the service processor
+ * to send periodic heartbeats to the OS.
+ *
+ * 2) Answers the periodic heartbeats sent by the service processor.
+ * Failure to do so would result in a system reboot.
+ *
+ * 3) Acts as a pass-through for dot commands sent from user applications.
+ * The interface for this is the ibmasmfs file system.
+ *
+ * 4) Allows user applications to register for event notification. Events
+ * are sent to the driver through interrupts. They can be read from user
+ * space through the ibmasmfs file system.
+ *
+ * 5) Allows user space applications to send heartbeats to the service
+ * processor (aka reverse heartbeats). Again this happens through ibmasmfs.
+ *
+ * 6) Handles remote mouse and keyboard event interrupts and makes them
+ * available to user applications through ibmasmfs.
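+ *
+ * A rough user space session might look like this (the mount point is
+ * arbitrary; one numbered directory appears per service processor):
+ *
+ * mount -t ibmasmfs none /mnt/ibmasmfs
+ * write a dot command to /mnt/ibmasmfs/0/command
+ * read the response back from /mnt/ibmasmfs/0/command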
+ * + */ + +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/slab.h> +#include "ibmasm.h" +#include "lowlevel.h" +#include "remote.h" + +int ibmasm_debug = 0; +module_param(ibmasm_debug, int , S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(ibmasm_debug, " Set debug mode on or off"); + + +static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int result; + struct service_processor *sp; + + if ((result = pci_enable_device(pdev))) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + return result; + } + if ((result = pci_request_regions(pdev, DRIVER_NAME))) { + dev_err(&pdev->dev, "Failed to allocate PCI resources\n"); + goto error_resources; + } + /* vnc client won't work without bus-mastering */ + pci_set_master(pdev); + + sp = kzalloc(sizeof(struct service_processor), GFP_KERNEL); + if (sp == NULL) { + dev_err(&pdev->dev, "Failed to allocate memory\n"); + result = -ENOMEM; + goto error_kmalloc; + } + + spin_lock_init(&sp->lock); + INIT_LIST_HEAD(&sp->command_queue); + + pci_set_drvdata(pdev, (void *)sp); + sp->dev = &pdev->dev; + sp->number = pdev->bus->number; + snprintf(sp->dirname, IBMASM_NAME_SIZE, "%d", sp->number); + snprintf(sp->devname, IBMASM_NAME_SIZE, "%s%d", DRIVER_NAME, sp->number); + + result = ibmasm_event_buffer_init(sp); + if (result) { + dev_err(sp->dev, "Failed to allocate event buffer\n"); + goto error_eventbuffer; + } + + result = ibmasm_heartbeat_init(sp); + if (result) { + dev_err(sp->dev, "Failed to allocate heartbeat command\n"); + goto error_heartbeat; + } + + sp->irq = pdev->irq; + sp->base_address = pci_ioremap_bar(pdev, 0); + if (!sp->base_address) { + dev_err(sp->dev, "Failed to ioremap pci memory\n"); + result = -ENODEV; + goto error_ioremap; + } + + result = request_irq(sp->irq, ibmasm_interrupt_handler, IRQF_SHARED, sp->devname, (void*)sp); + if (result) { + dev_err(sp->dev, "Failed to register interrupt handler\n"); + goto error_request_irq; + } + + enable_sp_interrupts(sp->base_address); + + result = ibmasm_init_remote_input_dev(sp); + if (result) { + dev_err(sp->dev, "Failed to initialize remote queue\n"); + goto error_init_remote; + } + + result = ibmasm_send_driver_vpd(sp); + if (result) { + dev_err(sp->dev, "Failed to send driver VPD to service processor\n"); + goto error_send_message; + } + result = ibmasm_send_os_state(sp, SYSTEM_STATE_OS_UP); + if (result) { + dev_err(sp->dev, "Failed to send OS state to service processor\n"); + goto error_send_message; + } + ibmasmfs_add_sp(sp); + + ibmasm_register_uart(sp); + + return 0; + +error_send_message: + ibmasm_free_remote_input_dev(sp); +error_init_remote: + disable_sp_interrupts(sp->base_address); + free_irq(sp->irq, (void *)sp); +error_request_irq: + iounmap(sp->base_address); +error_ioremap: + ibmasm_heartbeat_exit(sp); +error_heartbeat: + ibmasm_event_buffer_exit(sp); +error_eventbuffer: + kfree(sp); +error_kmalloc: + pci_release_regions(pdev); +error_resources: + pci_disable_device(pdev); + + return result; +} + +static void ibmasm_remove_one(struct pci_dev *pdev) +{ + struct service_processor *sp = pci_get_drvdata(pdev); + + dbg("Unregistering UART\n"); + ibmasm_unregister_uart(sp); + dbg("Sending OS down message\n"); + if (ibmasm_send_os_state(sp, SYSTEM_STATE_OS_DOWN)) + err("failed to get response to 'Send OS State' command\n"); + dbg("Disabling heartbeats\n"); + ibmasm_heartbeat_exit(sp); + dbg("Disabling interrupts\n"); + disable_sp_interrupts(sp->base_address); + dbg("Freeing SP irq\n"); + free_irq(sp->irq, (void *)sp); + dbg("Cleaning up\n"); + 
ibmasm_free_remote_input_dev(sp);
+	iounmap(sp->base_address);
+	ibmasm_event_buffer_exit(sp);
+	kfree(sp);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_device_id ibmasm_pci_table[] =
+{
+	{ PCI_DEVICE(VENDORID_IBM, DEVICEID_RSA) },
+	{},
+};
+
+static struct pci_driver ibmasm_driver = {
+	.name		= DRIVER_NAME,
+	.id_table	= ibmasm_pci_table,
+	.probe		= ibmasm_init_one,
+	.remove		= ibmasm_remove_one,
+};
+
+static void __exit ibmasm_exit(void)
+{
+	ibmasm_unregister_panic_notifier();
+	ibmasmfs_unregister();
+	pci_unregister_driver(&ibmasm_driver);
+	info(DRIVER_DESC " version " DRIVER_VERSION " unloaded");
+}
+
+static int __init ibmasm_init(void)
+{
+	int result = pci_register_driver(&ibmasm_driver);
+	if (result)
+		return result;
+
+	result = ibmasmfs_register();
+	if (result) {
+		pci_unregister_driver(&ibmasm_driver);
+		err("Failed to register ibmasmfs file system");
+		return result;
+	}
+
+	ibmasm_register_panic_notifier();
+	info(DRIVER_DESC " version " DRIVER_VERSION " loaded");
+	return 0;
+}
+
+module_init(ibmasm_init);
+module_exit(ibmasm_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ibmasm_pci_table);
+
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
new file mode 100644
index 000000000..21c9b6a6f
--- /dev/null
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/*
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ */
+
+#include <linux/sched/signal.h>
+#include "ibmasm.h"
+#include "dot_command.h"
+
+/*
+ * Reverse Heartbeat, i.e. heartbeats sent from the driver to the
+ * service processor.
+ * These heartbeats are initiated by user level programs.
+ */
+
+/* the reverse heartbeat dot command */
+#pragma pack(1)
+static struct {
+	struct dot_command_header	header;
+	unsigned char			command[3];
+} rhb_dot_cmd = {
+	.header = {
+		.type =		sp_read,
+		.command_size =	3,
+		.data_size =	0,
+		.status =	0
+	},
+	.command = { 4, 3, 6 }
+};
+#pragma pack()
+
+void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb)
+{
+	init_waitqueue_head(&rhb->wait);
+	rhb->stopped = 0;
+}
+
+/*
+ * start_reverse_heartbeat
+ * Loop forever, sending a reverse heartbeat dot command to the service
+ * processor, then sleeping. The loop comes to an end if the service
+ * processor fails to respond 3 times or if we are interrupted.
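+ *
+ * (Editor's note on the #pragma pack(1) around rhb_dot_cmd above: the dot
+ * command is copied onto the wire byte for byte, so the struct must not
+ * contain compiler padding. A stand-alone illustration, assuming nothing
+ * beyond standard C:
+ *
+ *	struct padded { unsigned short len; unsigned char cmd[3]; };
+ *
+ * sizeof(struct padded) is usually 6 because of tail padding, while the
+ * same layout under #pragma pack(1) is exactly 5 -- which is why the
+ * memcpy of sizeof rhb_dot_cmd bytes below sends only real protocol
+ * bytes.)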
+ */ +int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb) +{ + struct command *cmd; + int times_failed = 0; + int result = 1; + + cmd = ibmasm_new_command(sp, sizeof rhb_dot_cmd); + if (!cmd) + return -ENOMEM; + + while (times_failed < 3) { + memcpy(cmd->buffer, (void *)&rhb_dot_cmd, sizeof rhb_dot_cmd); + cmd->status = IBMASM_CMD_PENDING; + ibmasm_exec_command(sp, cmd); + ibmasm_wait_for_response(cmd, IBMASM_CMD_TIMEOUT_NORMAL); + + if (cmd->status != IBMASM_CMD_COMPLETE) + times_failed++; + + wait_event_interruptible_timeout(rhb->wait, + rhb->stopped, + REVERSE_HEARTBEAT_TIMEOUT * HZ); + + if (signal_pending(current) || rhb->stopped) { + result = -EINTR; + break; + } + } + command_put(cmd); + rhb->stopped = 0; + + return result; +} + +void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb) +{ + rhb->stopped = 1; + wake_up_interruptible(&rhb->wait); +} diff --git a/drivers/misc/ibmasm/remote.c b/drivers/misc/ibmasm/remote.c new file mode 100644 index 000000000..ec816d3b3 --- /dev/null +++ b/drivers/misc/ibmasm/remote.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * IBM ASM Service Processor Device Driver + * + * Copyright (C) IBM Corporation, 2004 + * + * Authors: Max Asböck <amax@us.ibm.com> + * Vernon Mauery <vernux@us.ibm.com> + */ + +/* Remote mouse and keyboard event handling functions */ + +#include <linux/pci.h> +#include "ibmasm.h" +#include "remote.h" + +#define MOUSE_X_MAX 1600 +#define MOUSE_Y_MAX 1200 + +static const unsigned short xlate_high[XLATE_SIZE] = { + [KEY_SYM_ENTER & 0xff] = KEY_ENTER, + [KEY_SYM_KPSLASH & 0xff] = KEY_KPSLASH, + [KEY_SYM_KPSTAR & 0xff] = KEY_KPASTERISK, + [KEY_SYM_KPMINUS & 0xff] = KEY_KPMINUS, + [KEY_SYM_KPDOT & 0xff] = KEY_KPDOT, + [KEY_SYM_KPPLUS & 0xff] = KEY_KPPLUS, + [KEY_SYM_KP0 & 0xff] = KEY_KP0, + [KEY_SYM_KP1 & 0xff] = KEY_KP1, + [KEY_SYM_KP2 & 0xff] = KEY_KP2, [KEY_SYM_KPDOWN & 0xff] = KEY_KP2, + [KEY_SYM_KP3 & 0xff] = KEY_KP3, + [KEY_SYM_KP4 & 0xff] = KEY_KP4, [KEY_SYM_KPLEFT & 0xff] = KEY_KP4, + [KEY_SYM_KP5 & 0xff] = KEY_KP5, + [KEY_SYM_KP6 & 0xff] = KEY_KP6, [KEY_SYM_KPRIGHT & 0xff] = KEY_KP6, + [KEY_SYM_KP7 & 0xff] = KEY_KP7, + [KEY_SYM_KP8 & 0xff] = KEY_KP8, [KEY_SYM_KPUP & 0xff] = KEY_KP8, + [KEY_SYM_KP9 & 0xff] = KEY_KP9, + [KEY_SYM_BK_SPC & 0xff] = KEY_BACKSPACE, + [KEY_SYM_TAB & 0xff] = KEY_TAB, + [KEY_SYM_CTRL & 0xff] = KEY_LEFTCTRL, + [KEY_SYM_ALT & 0xff] = KEY_LEFTALT, + [KEY_SYM_INSERT & 0xff] = KEY_INSERT, + [KEY_SYM_DELETE & 0xff] = KEY_DELETE, + [KEY_SYM_SHIFT & 0xff] = KEY_LEFTSHIFT, + [KEY_SYM_UARROW & 0xff] = KEY_UP, + [KEY_SYM_DARROW & 0xff] = KEY_DOWN, + [KEY_SYM_LARROW & 0xff] = KEY_LEFT, + [KEY_SYM_RARROW & 0xff] = KEY_RIGHT, + [KEY_SYM_ESCAPE & 0xff] = KEY_ESC, + [KEY_SYM_PAGEUP & 0xff] = KEY_PAGEUP, + [KEY_SYM_PAGEDOWN & 0xff] = KEY_PAGEDOWN, + [KEY_SYM_HOME & 0xff] = KEY_HOME, + [KEY_SYM_END & 0xff] = KEY_END, + [KEY_SYM_F1 & 0xff] = KEY_F1, + [KEY_SYM_F2 & 0xff] = KEY_F2, + [KEY_SYM_F3 & 0xff] = KEY_F3, + [KEY_SYM_F4 & 0xff] = KEY_F4, + [KEY_SYM_F5 & 0xff] = KEY_F5, + [KEY_SYM_F6 & 0xff] = KEY_F6, + [KEY_SYM_F7 & 0xff] = KEY_F7, + [KEY_SYM_F8 & 0xff] = KEY_F8, + [KEY_SYM_F9 & 0xff] = KEY_F9, + [KEY_SYM_F10 & 0xff] = KEY_F10, + [KEY_SYM_F11 & 0xff] = KEY_F11, + [KEY_SYM_F12 & 0xff] = KEY_F12, + [KEY_SYM_CAP_LOCK & 0xff] = KEY_CAPSLOCK, + [KEY_SYM_NUM_LOCK & 0xff] = KEY_NUMLOCK, + [KEY_SYM_SCR_LOCK & 0xff] = KEY_SCROLLLOCK, +}; + +static const unsigned short xlate[XLATE_SIZE] = { + [NO_KEYCODE] = KEY_RESERVED, + [KEY_SYM_SPACE] = 
KEY_SPACE, + [KEY_SYM_TILDE] = KEY_GRAVE, [KEY_SYM_BKTIC] = KEY_GRAVE, + [KEY_SYM_ONE] = KEY_1, [KEY_SYM_BANG] = KEY_1, + [KEY_SYM_TWO] = KEY_2, [KEY_SYM_AT] = KEY_2, + [KEY_SYM_THREE] = KEY_3, [KEY_SYM_POUND] = KEY_3, + [KEY_SYM_FOUR] = KEY_4, [KEY_SYM_DOLLAR] = KEY_4, + [KEY_SYM_FIVE] = KEY_5, [KEY_SYM_PERCENT] = KEY_5, + [KEY_SYM_SIX] = KEY_6, [KEY_SYM_CARAT] = KEY_6, + [KEY_SYM_SEVEN] = KEY_7, [KEY_SYM_AMPER] = KEY_7, + [KEY_SYM_EIGHT] = KEY_8, [KEY_SYM_STAR] = KEY_8, + [KEY_SYM_NINE] = KEY_9, [KEY_SYM_LPAREN] = KEY_9, + [KEY_SYM_ZERO] = KEY_0, [KEY_SYM_RPAREN] = KEY_0, + [KEY_SYM_MINUS] = KEY_MINUS, [KEY_SYM_USCORE] = KEY_MINUS, + [KEY_SYM_EQUAL] = KEY_EQUAL, [KEY_SYM_PLUS] = KEY_EQUAL, + [KEY_SYM_LBRKT] = KEY_LEFTBRACE, [KEY_SYM_LCURLY] = KEY_LEFTBRACE, + [KEY_SYM_RBRKT] = KEY_RIGHTBRACE, [KEY_SYM_RCURLY] = KEY_RIGHTBRACE, + [KEY_SYM_SLASH] = KEY_BACKSLASH, [KEY_SYM_PIPE] = KEY_BACKSLASH, + [KEY_SYM_TIC] = KEY_APOSTROPHE, [KEY_SYM_QUOTE] = KEY_APOSTROPHE, + [KEY_SYM_SEMIC] = KEY_SEMICOLON, [KEY_SYM_COLON] = KEY_SEMICOLON, + [KEY_SYM_COMMA] = KEY_COMMA, [KEY_SYM_LT] = KEY_COMMA, + [KEY_SYM_PERIOD] = KEY_DOT, [KEY_SYM_GT] = KEY_DOT, + [KEY_SYM_BSLASH] = KEY_SLASH, [KEY_SYM_QMARK] = KEY_SLASH, + [KEY_SYM_A] = KEY_A, [KEY_SYM_a] = KEY_A, + [KEY_SYM_B] = KEY_B, [KEY_SYM_b] = KEY_B, + [KEY_SYM_C] = KEY_C, [KEY_SYM_c] = KEY_C, + [KEY_SYM_D] = KEY_D, [KEY_SYM_d] = KEY_D, + [KEY_SYM_E] = KEY_E, [KEY_SYM_e] = KEY_E, + [KEY_SYM_F] = KEY_F, [KEY_SYM_f] = KEY_F, + [KEY_SYM_G] = KEY_G, [KEY_SYM_g] = KEY_G, + [KEY_SYM_H] = KEY_H, [KEY_SYM_h] = KEY_H, + [KEY_SYM_I] = KEY_I, [KEY_SYM_i] = KEY_I, + [KEY_SYM_J] = KEY_J, [KEY_SYM_j] = KEY_J, + [KEY_SYM_K] = KEY_K, [KEY_SYM_k] = KEY_K, + [KEY_SYM_L] = KEY_L, [KEY_SYM_l] = KEY_L, + [KEY_SYM_M] = KEY_M, [KEY_SYM_m] = KEY_M, + [KEY_SYM_N] = KEY_N, [KEY_SYM_n] = KEY_N, + [KEY_SYM_O] = KEY_O, [KEY_SYM_o] = KEY_O, + [KEY_SYM_P] = KEY_P, [KEY_SYM_p] = KEY_P, + [KEY_SYM_Q] = KEY_Q, [KEY_SYM_q] = KEY_Q, + [KEY_SYM_R] = KEY_R, [KEY_SYM_r] = KEY_R, + [KEY_SYM_S] = KEY_S, [KEY_SYM_s] = KEY_S, + [KEY_SYM_T] = KEY_T, [KEY_SYM_t] = KEY_T, + [KEY_SYM_U] = KEY_U, [KEY_SYM_u] = KEY_U, + [KEY_SYM_V] = KEY_V, [KEY_SYM_v] = KEY_V, + [KEY_SYM_W] = KEY_W, [KEY_SYM_w] = KEY_W, + [KEY_SYM_X] = KEY_X, [KEY_SYM_x] = KEY_X, + [KEY_SYM_Y] = KEY_Y, [KEY_SYM_y] = KEY_Y, + [KEY_SYM_Z] = KEY_Z, [KEY_SYM_z] = KEY_Z, +}; + +static void print_input(struct remote_input *input) +{ + if (input->type == INPUT_TYPE_MOUSE) { + unsigned char buttons = input->mouse_buttons; + dbg("remote mouse movement: (x,y)=(%d,%d)%s%s%s%s\n", + input->data.mouse.x, input->data.mouse.y, + (buttons) ? " -- buttons:" : "", + (buttons & REMOTE_BUTTON_LEFT) ? "left " : "", + (buttons & REMOTE_BUTTON_MIDDLE) ? "middle " : "", + (buttons & REMOTE_BUTTON_RIGHT) ? 
"right" : "" + ); + } else { + dbg("remote keypress (code, flag, down):" + "%d (0x%x) [0x%x] [0x%x]\n", + input->data.keyboard.key_code, + input->data.keyboard.key_code, + input->data.keyboard.key_flag, + input->data.keyboard.key_down + ); + } +} + +static void send_mouse_event(struct input_dev *dev, struct remote_input *input) +{ + unsigned char buttons = input->mouse_buttons; + + input_report_abs(dev, ABS_X, input->data.mouse.x); + input_report_abs(dev, ABS_Y, input->data.mouse.y); + input_report_key(dev, BTN_LEFT, buttons & REMOTE_BUTTON_LEFT); + input_report_key(dev, BTN_MIDDLE, buttons & REMOTE_BUTTON_MIDDLE); + input_report_key(dev, BTN_RIGHT, buttons & REMOTE_BUTTON_RIGHT); + input_sync(dev); +} + +static void send_keyboard_event(struct input_dev *dev, + struct remote_input *input) +{ + unsigned int key; + unsigned short code = input->data.keyboard.key_code; + + if (code & 0xff00) + key = xlate_high[code & 0xff]; + else + key = xlate[code]; + input_report_key(dev, key, input->data.keyboard.key_down); + input_sync(dev); +} + +void ibmasm_handle_mouse_interrupt(struct service_processor *sp) +{ + unsigned long reader; + unsigned long writer; + struct remote_input input; + + reader = get_queue_reader(sp); + writer = get_queue_writer(sp); + + while (reader != writer) { + memcpy_fromio(&input, get_queue_entry(sp, reader), + sizeof(struct remote_input)); + + print_input(&input); + if (input.type == INPUT_TYPE_MOUSE) { + send_mouse_event(sp->remote.mouse_dev, &input); + } else if (input.type == INPUT_TYPE_KEYBOARD) { + send_keyboard_event(sp->remote.keybd_dev, &input); + } else + break; + + reader = advance_queue_reader(sp, reader); + writer = get_queue_writer(sp); + } +} + +int ibmasm_init_remote_input_dev(struct service_processor *sp) +{ + /* set up the mouse input device */ + struct input_dev *mouse_dev, *keybd_dev; + struct pci_dev *pdev = to_pci_dev(sp->dev); + int error = -ENOMEM; + int i; + + sp->remote.mouse_dev = mouse_dev = input_allocate_device(); + sp->remote.keybd_dev = keybd_dev = input_allocate_device(); + + if (!mouse_dev || !keybd_dev) + goto err_free_devices; + + mouse_dev->id.bustype = BUS_PCI; + mouse_dev->id.vendor = pdev->vendor; + mouse_dev->id.product = pdev->device; + mouse_dev->id.version = 1; + mouse_dev->dev.parent = sp->dev; + mouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); + mouse_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) | + BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE); + set_bit(BTN_TOUCH, mouse_dev->keybit); + mouse_dev->name = "ibmasm RSA I remote mouse"; + input_set_abs_params(mouse_dev, ABS_X, 0, MOUSE_X_MAX, 0, 0); + input_set_abs_params(mouse_dev, ABS_Y, 0, MOUSE_Y_MAX, 0, 0); + + keybd_dev->id.bustype = BUS_PCI; + keybd_dev->id.vendor = pdev->vendor; + keybd_dev->id.product = pdev->device; + keybd_dev->id.version = 2; + keybd_dev->dev.parent = sp->dev; + keybd_dev->evbit[0] = BIT_MASK(EV_KEY); + keybd_dev->name = "ibmasm RSA I remote keyboard"; + + for (i = 0; i < XLATE_SIZE; i++) { + if (xlate_high[i]) + set_bit(xlate_high[i], keybd_dev->keybit); + if (xlate[i]) + set_bit(xlate[i], keybd_dev->keybit); + } + + error = input_register_device(mouse_dev); + if (error) + goto err_free_devices; + + error = input_register_device(keybd_dev); + if (error) + goto err_unregister_mouse_dev; + + enable_mouse_interrupts(sp); + + printk(KERN_INFO "ibmasm remote responding to events on RSA card %d\n", sp->number); + + return 0; + + err_unregister_mouse_dev: + input_unregister_device(mouse_dev); + mouse_dev = NULL; /* so we don't try to free it 
again below */
+ err_free_devices:
+	input_free_device(mouse_dev);
+	input_free_device(keybd_dev);
+
+	return error;
+}
+
+void ibmasm_free_remote_input_dev(struct service_processor *sp)
+{
+	disable_mouse_interrupts(sp);
+	input_unregister_device(sp->remote.mouse_dev);
+	input_unregister_device(sp->remote.keybd_dev);
+}
+
diff --git a/drivers/misc/ibmasm/remote.h b/drivers/misc/ibmasm/remote.h
new file mode 100644
index 000000000..8d364462a
--- /dev/null
+++ b/drivers/misc/ibmasm/remote.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/*
+ * IBM ASM Service Processor Device Driver
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ * Author: Max Asböck <amax@us.ibm.com>
+ *
+ * Originally written by Pete Reynolds
+ */
+
+#ifndef _IBMASM_REMOTE_H_
+#define _IBMASM_REMOTE_H_
+
+#include <asm/io.h>
+
+/* PCI offsets */
+#define CONDOR_MOUSE_DATA		0x000AC000
+#define CONDOR_MOUSE_ISR_CONTROL	0x00
+#define CONDOR_MOUSE_ISR_STATUS		0x04
+#define CONDOR_MOUSE_Q_READER		0x08
+#define CONDOR_MOUSE_Q_WRITER		0x0C
+#define CONDOR_MOUSE_Q_BEGIN		0x10
+#define CONDOR_MOUSE_MAX_X		0x14
+#define CONDOR_MOUSE_MAX_Y		0x18
+
+#define CONDOR_INPUT_DESKTOP_INFO	0x1F0
+#define CONDOR_INPUT_DISPLAY_RESX	0x1F4
+#define CONDOR_INPUT_DISPLAY_RESY	0x1F8
+#define CONDOR_INPUT_DISPLAY_BITS	0x1FC
+#define CONDOR_OUTPUT_VNC_STATUS	0x200
+
+#define CONDOR_MOUSE_INTR_STATUS_MASK	0x00000001
+
+#define INPUT_TYPE_MOUSE	0x1
+#define INPUT_TYPE_KEYBOARD	0x2
+
+
+/* mouse button states received from SP */
+#define REMOTE_DOUBLE_CLICK	0xF0
+#define REMOTE_BUTTON_LEFT	0x01
+#define REMOTE_BUTTON_MIDDLE	0x02
+#define REMOTE_BUTTON_RIGHT	0x04
+
+/* size of keysym/keycode translation matrices */
+#define XLATE_SIZE		256
+
+struct mouse_input {
+	unsigned short	y;
+	unsigned short	x;
+};
+
+
+struct keyboard_input {
+	unsigned short	key_code;
+	unsigned char	key_flag;
+	unsigned char	key_down;
+};
+
+
+
+struct remote_input {
+	union {
+		struct mouse_input	mouse;
+		struct keyboard_input	keyboard;
+	} data;
+
+	unsigned char	type;
+	unsigned char	pad1;
+	unsigned char	mouse_buttons;
+	unsigned char	pad3;
+};
+
+#define mouse_addr(sp)		(sp->base_address + CONDOR_MOUSE_DATA)
+#define display_width(sp)	(mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESX)
+#define display_height(sp)	(mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESY)
+#define display_depth(sp)	(mouse_addr(sp) + CONDOR_INPUT_DISPLAY_BITS)
+#define desktop_info(sp)	(mouse_addr(sp) + CONDOR_INPUT_DESKTOP_INFO)
+#define vnc_status(sp)		(mouse_addr(sp) + CONDOR_OUTPUT_VNC_STATUS)
+#define isr_control(sp)		(mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
+
+#define mouse_interrupt_pending(sp)	readl(mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
+#define clear_mouse_interrupt(sp)	writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
+#define enable_mouse_interrupts(sp)	writel(1, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
+#define disable_mouse_interrupts(sp)	writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
+
+/* remote input queue operations */
+#define REMOTE_QUEUE_SIZE	60
+
+#define get_queue_writer(sp)	readl(mouse_addr(sp) + CONDOR_MOUSE_Q_WRITER)
+#define get_queue_reader(sp)	readl(mouse_addr(sp) + CONDOR_MOUSE_Q_READER)
+#define set_queue_reader(sp, reader)	writel(reader, mouse_addr(sp) + CONDOR_MOUSE_Q_READER)
+
+#define queue_begin	(mouse_addr(sp) + CONDOR_MOUSE_Q_BEGIN)
+
+#define get_queue_entry(sp, read_index) \
+	((void *)(queue_begin + read_index * sizeof(struct remote_input)))
+
+static inline int advance_queue_reader(struct service_processor *sp, unsigned long reader)
+{
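+	/*
+	 * The service processor advances the writer index; the driver owns
+	 * the reader index. Consume one entry, wrap at REMOTE_QUEUE_SIZE,
+	 * and publish the new reader index back to the card.
+	 */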
reader++; + if (reader == REMOTE_QUEUE_SIZE) + reader = 0; + + set_queue_reader(sp, reader); + return reader; +} + +#define NO_KEYCODE 0 +#define KEY_SYM_BK_SPC 0xFF08 +#define KEY_SYM_TAB 0xFF09 +#define KEY_SYM_ENTER 0xFF0D +#define KEY_SYM_SCR_LOCK 0xFF14 +#define KEY_SYM_ESCAPE 0xFF1B +#define KEY_SYM_HOME 0xFF50 +#define KEY_SYM_LARROW 0xFF51 +#define KEY_SYM_UARROW 0xFF52 +#define KEY_SYM_RARROW 0xFF53 +#define KEY_SYM_DARROW 0xFF54 +#define KEY_SYM_PAGEUP 0xFF55 +#define KEY_SYM_PAGEDOWN 0xFF56 +#define KEY_SYM_END 0xFF57 +#define KEY_SYM_INSERT 0xFF63 +#define KEY_SYM_NUM_LOCK 0xFF7F +#define KEY_SYM_KPSTAR 0xFFAA +#define KEY_SYM_KPPLUS 0xFFAB +#define KEY_SYM_KPMINUS 0xFFAD +#define KEY_SYM_KPDOT 0xFFAE +#define KEY_SYM_KPSLASH 0xFFAF +#define KEY_SYM_KPRIGHT 0xFF96 +#define KEY_SYM_KPUP 0xFF97 +#define KEY_SYM_KPLEFT 0xFF98 +#define KEY_SYM_KPDOWN 0xFF99 +#define KEY_SYM_KP0 0xFFB0 +#define KEY_SYM_KP1 0xFFB1 +#define KEY_SYM_KP2 0xFFB2 +#define KEY_SYM_KP3 0xFFB3 +#define KEY_SYM_KP4 0xFFB4 +#define KEY_SYM_KP5 0xFFB5 +#define KEY_SYM_KP6 0xFFB6 +#define KEY_SYM_KP7 0xFFB7 +#define KEY_SYM_KP8 0xFFB8 +#define KEY_SYM_KP9 0xFFB9 +#define KEY_SYM_F1 0xFFBE // 1B 5B 5B 41 +#define KEY_SYM_F2 0xFFBF // 1B 5B 5B 42 +#define KEY_SYM_F3 0xFFC0 // 1B 5B 5B 43 +#define KEY_SYM_F4 0xFFC1 // 1B 5B 5B 44 +#define KEY_SYM_F5 0xFFC2 // 1B 5B 5B 45 +#define KEY_SYM_F6 0xFFC3 // 1B 5B 31 37 7E +#define KEY_SYM_F7 0xFFC4 // 1B 5B 31 38 7E +#define KEY_SYM_F8 0xFFC5 // 1B 5B 31 39 7E +#define KEY_SYM_F9 0xFFC6 // 1B 5B 32 30 7E +#define KEY_SYM_F10 0xFFC7 // 1B 5B 32 31 7E +#define KEY_SYM_F11 0xFFC8 // 1B 5B 32 33 7E +#define KEY_SYM_F12 0xFFC9 // 1B 5B 32 34 7E +#define KEY_SYM_SHIFT 0xFFE1 +#define KEY_SYM_CTRL 0xFFE3 +#define KEY_SYM_ALT 0xFFE9 +#define KEY_SYM_CAP_LOCK 0xFFE5 +#define KEY_SYM_DELETE 0xFFFF +#define KEY_SYM_TILDE 0x60 +#define KEY_SYM_BKTIC 0x7E +#define KEY_SYM_ONE 0x31 +#define KEY_SYM_BANG 0x21 +#define KEY_SYM_TWO 0x32 +#define KEY_SYM_AT 0x40 +#define KEY_SYM_THREE 0x33 +#define KEY_SYM_POUND 0x23 +#define KEY_SYM_FOUR 0x34 +#define KEY_SYM_DOLLAR 0x24 +#define KEY_SYM_FIVE 0x35 +#define KEY_SYM_PERCENT 0x25 +#define KEY_SYM_SIX 0x36 +#define KEY_SYM_CARAT 0x5E +#define KEY_SYM_SEVEN 0x37 +#define KEY_SYM_AMPER 0x26 +#define KEY_SYM_EIGHT 0x38 +#define KEY_SYM_STAR 0x2A +#define KEY_SYM_NINE 0x39 +#define KEY_SYM_LPAREN 0x28 +#define KEY_SYM_ZERO 0x30 +#define KEY_SYM_RPAREN 0x29 +#define KEY_SYM_MINUS 0x2D +#define KEY_SYM_USCORE 0x5F +#define KEY_SYM_EQUAL 0x2B +#define KEY_SYM_PLUS 0x3D +#define KEY_SYM_LBRKT 0x5B +#define KEY_SYM_LCURLY 0x7B +#define KEY_SYM_RBRKT 0x5D +#define KEY_SYM_RCURLY 0x7D +#define KEY_SYM_SLASH 0x5C +#define KEY_SYM_PIPE 0x7C +#define KEY_SYM_TIC 0x27 +#define KEY_SYM_QUOTE 0x22 +#define KEY_SYM_SEMIC 0x3B +#define KEY_SYM_COLON 0x3A +#define KEY_SYM_COMMA 0x2C +#define KEY_SYM_LT 0x3C +#define KEY_SYM_PERIOD 0x2E +#define KEY_SYM_GT 0x3E +#define KEY_SYM_BSLASH 0x2F +#define KEY_SYM_QMARK 0x3F +#define KEY_SYM_A 0x41 +#define KEY_SYM_B 0x42 +#define KEY_SYM_C 0x43 +#define KEY_SYM_D 0x44 +#define KEY_SYM_E 0x45 +#define KEY_SYM_F 0x46 +#define KEY_SYM_G 0x47 +#define KEY_SYM_H 0x48 +#define KEY_SYM_I 0x49 +#define KEY_SYM_J 0x4A +#define KEY_SYM_K 0x4B +#define KEY_SYM_L 0x4C +#define KEY_SYM_M 0x4D +#define KEY_SYM_N 0x4E +#define KEY_SYM_O 0x4F +#define KEY_SYM_P 0x50 +#define KEY_SYM_Q 0x51 +#define KEY_SYM_R 0x52 +#define KEY_SYM_S 0x53 +#define KEY_SYM_T 0x54 +#define KEY_SYM_U 0x55 +#define KEY_SYM_V 0x56 +#define KEY_SYM_W 0x57 
+#define KEY_SYM_X 0x58 +#define KEY_SYM_Y 0x59 +#define KEY_SYM_Z 0x5A +#define KEY_SYM_a 0x61 +#define KEY_SYM_b 0x62 +#define KEY_SYM_c 0x63 +#define KEY_SYM_d 0x64 +#define KEY_SYM_e 0x65 +#define KEY_SYM_f 0x66 +#define KEY_SYM_g 0x67 +#define KEY_SYM_h 0x68 +#define KEY_SYM_i 0x69 +#define KEY_SYM_j 0x6A +#define KEY_SYM_k 0x6B +#define KEY_SYM_l 0x6C +#define KEY_SYM_m 0x6D +#define KEY_SYM_n 0x6E +#define KEY_SYM_o 0x6F +#define KEY_SYM_p 0x70 +#define KEY_SYM_q 0x71 +#define KEY_SYM_r 0x72 +#define KEY_SYM_s 0x73 +#define KEY_SYM_t 0x74 +#define KEY_SYM_u 0x75 +#define KEY_SYM_v 0x76 +#define KEY_SYM_w 0x77 +#define KEY_SYM_x 0x78 +#define KEY_SYM_y 0x79 +#define KEY_SYM_z 0x7A +#define KEY_SYM_SPACE 0x20 +#endif /* _IBMASM_REMOTE_H_ */ diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c new file mode 100644 index 000000000..a5d4c8e01 --- /dev/null +++ b/drivers/misc/ibmasm/uart.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* + * IBM ASM Service Processor Device Driver + * + * Copyright (C) IBM Corporation, 2004 + * + * Author: Max Asböck <amax@us.ibm.com> + */ + +#include <linux/termios.h> +#include <linux/tty.h> +#include <linux/serial_core.h> +#include <linux/serial_reg.h> +#include <linux/serial_8250.h> +#include "ibmasm.h" +#include "lowlevel.h" + + +void ibmasm_register_uart(struct service_processor *sp) +{ + struct uart_8250_port uart; + void __iomem *iomem_base; + + iomem_base = sp->base_address + SCOUT_COM_B_BASE; + + /* read the uart scratch register to determine if the UART + * is dedicated to the service processor or if the OS can use it + */ + if (0 == readl(iomem_base + UART_SCR)) { + dev_info(sp->dev, "IBM SP UART not registered, owned by service processor\n"); + sp->serial_line = -1; + return; + } + + memset(&uart, 0, sizeof(uart)); + uart.port.irq = sp->irq; + uart.port.uartclk = 3686400; + uart.port.flags = UPF_SHARE_IRQ; + uart.port.iotype = UPIO_MEM; + uart.port.membase = iomem_base; + + sp->serial_line = serial8250_register_8250_port(&uart); + if (sp->serial_line < 0) { + dev_err(sp->dev, "Failed to register serial port\n"); + return; + } + enable_uart_interrupts(sp->base_address); +} + +void ibmasm_unregister_uart(struct service_processor *sp) +{ + if (sp->serial_line < 0) + return; + + disable_uart_interrupts(sp->base_address); + serial8250_unregister_port(sp->serial_line); +} diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c new file mode 100644 index 000000000..2d778d0f0 --- /dev/null +++ b/drivers/misc/ibmvmc.c @@ -0,0 +1,2421 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * IBM Power Systems Virtual Management Channel Support. + * + * Copyright (c) 2004, 2018 IBM Corp. + * Dave Engebretsen engebret@us.ibm.com + * Steven Royer seroyer@linux.vnet.ibm.com + * Adam Reznechek adreznec@linux.vnet.ibm.com + * Bryant G. 
Ly <bryantly@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/sched/signal.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/vio.h>
+
+#include "ibmvmc.h"
+
+#define IBMVMC_DRIVER_VERSION "1.0"
+
+/*
+ * Static global variables
+ */
+static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait);
+
+static const char ibmvmc_driver_name[] = "ibmvmc";
+
+static struct ibmvmc_struct ibmvmc;
+static struct ibmvmc_hmc hmcs[MAX_HMCS];
+static struct crq_server_adapter ibmvmc_adapter;
+
+static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE;
+static int ibmvmc_max_hmcs = DEFAULT_HMCS;
+static int ibmvmc_max_mtu = DEFAULT_MTU;
+
+static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba,
+			       u64 dliobn, u64 dlioba)
+{
+	long rc = 0;
+
+	/* Ensure all writes to source memory are visible before hcall */
+	dma_wmb();
+	pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx)\n",
+		 length, sliobn, slioba, dliobn, dlioba);
+	rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba,
+				dliobn, dlioba);
+	pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc);
+
+	return rc;
+}
+
+static inline void h_free_crq(uint32_t unit_address)
+{
+	long rc = 0;
+
+	do {
+		if (H_IS_LONG_BUSY(rc))
+			msleep(get_longbusy_msecs(rc));
+
+		rc = plpar_hcall_norets(H_FREE_CRQ, unit_address);
+	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+}
+
+/**
+ * h_request_vmc: - request a hypervisor virtual management channel device
+ * @vmc_index:	drc index of the vmc device created
+ *
+ * Requests that the hypervisor create a new virtual management channel
+ * device, allowing this partition to send hypervisor virtualization control
+ * commands.
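+ *
+ * (Editor's note: h_free_crq() above and the loop below share the PAPR
+ * busy-retry idiom. A generic sketch of the pattern -- not upstream code;
+ * "opcode" and "arg" are placeholders:
+ *
+ *	long rc = 0;
+ *
+ *	do {
+ *		if (H_IS_LONG_BUSY(rc))
+ *			msleep(get_longbusy_msecs(rc));
+ *		rc = plpar_hcall_norets(opcode, arg);
+ *	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+ *
+ * H_BUSY means retry right away, while the long-busy return codes encode a
+ * suggested delay that get_longbusy_msecs() converts to milliseconds.)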
+ *
+ * Return:
+ *	0 - Success
+ *	Non-zero - Failure
+ */
+static inline long h_request_vmc(u32 *vmc_index)
+{
+	long rc = 0;
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+	do {
+		if (H_IS_LONG_BUSY(rc))
+			msleep(get_longbusy_msecs(rc));

+		/* Call to request the VMC device from phyp */
+		rc = plpar_hcall(H_REQUEST_VMC, retbuf);
+		pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc);
+		*vmc_index = retbuf[0];
+	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+
+	return rc;
+}
+
+/* routines for managing a command/response queue */
+/**
+ * ibmvmc_handle_event: - Interrupt handler for crq events
+ * @irq:	number of irq to handle, not used
+ * @dev_instance: crq_server_adapter that received interrupt
+ *
+ * Disables interrupts and schedules ibmvmc_task
+ *
+ * Always returns IRQ_HANDLED
+ */
+static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance)
+{
+	struct crq_server_adapter *adapter =
+		(struct crq_server_adapter *)dev_instance;
+
+	vio_disable_interrupts(to_vio_dev(adapter->dev));
+	tasklet_schedule(&adapter->work_task);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ibmvmc_release_crq_queue - Release CRQ Queue
+ *
+ * @adapter:	crq_server_adapter struct
+ *
+ * Frees the IRQ, stops the tasklet and the reset thread, closes the CRQ
+ * with the hypervisor, and releases the queue memory. Returns nothing.
+ */
+static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter)
+{
+	struct vio_dev *vdev = to_vio_dev(adapter->dev);
+	struct crq_queue *queue = &adapter->queue;
+
+	free_irq(vdev->irq, (void *)adapter);
+	tasklet_kill(&adapter->work_task);
+
+	if (adapter->reset_task)
+		kthread_stop(adapter->reset_task);
+
+	h_free_crq(vdev->unit_address);
+	dma_unmap_single(adapter->dev,
+			 queue->msg_token,
+			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+	free_page((unsigned long)queue->msgs);
+}
+
+/**
+ * ibmvmc_reset_crq_queue - Reset CRQ Queue
+ *
+ * @adapter:	crq_server_adapter struct
+ *
+ * This function calls h_free_crq and then calls H_REG_CRQ and does all the
+ * bookkeeping to get us back to where we can communicate.
+ *
+ * Return:
+ *	0 - Success
+ *	Non-Zero - Failure
+ */
+static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
+{
+	struct vio_dev *vdev = to_vio_dev(adapter->dev);
+	struct crq_queue *queue = &adapter->queue;
+	int rc = 0;
+
+	/* Close the CRQ */
+	h_free_crq(vdev->unit_address);
+
+	/* Clean out the queue */
+	memset(queue->msgs, 0x00, PAGE_SIZE);
+	queue->cur = 0;
+
+	/* And re-open it again */
+	rc = plpar_hcall_norets(H_REG_CRQ,
+				vdev->unit_address,
+				queue->msg_token, PAGE_SIZE);
+	if (rc == 2)
+		/* Adapter is good, but other end is not ready */
+		dev_warn(adapter->dev, "Partner adapter not ready\n");
+	else if (rc != 0)
+		dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);
+
+	return rc;
+}
+
+/**
+ * crq_queue_next_crq: - Returns the next entry in message queue
+ * @queue:	crq_queue to use
+ *
+ * Returns pointer to next entry in queue, or NULL if there are no new
+ * entries in the CRQ.
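+ *
+ * The hypervisor sets the top bit (0x80) of an entry's first byte when the
+ * entry becomes valid; the dma_rmb() in the reader below keeps the check of
+ * that bit ordered before the loads of the rest of the entry.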
+ */ +static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue) +{ + struct ibmvmc_crq_msg *crq; + unsigned long flags; + + spin_lock_irqsave(&queue->lock, flags); + crq = &queue->msgs[queue->cur]; + if (crq->valid & 0x80) { + if (++queue->cur == queue->size) + queue->cur = 0; + + /* Ensure the read of the valid bit occurs before reading any + * other bits of the CRQ entry + */ + dma_rmb(); + } else { + crq = NULL; + } + + spin_unlock_irqrestore(&queue->lock, flags); + + return crq; +} + +/** + * ibmvmc_send_crq - Send CRQ + * + * @adapter: crq_server_adapter struct + * @word1: Word1 Data field + * @word2: Word2 Data field + * + * Return: + * 0 - Success + * Non-Zero - Failure + */ +static long ibmvmc_send_crq(struct crq_server_adapter *adapter, + u64 word1, u64 word2) +{ + struct vio_dev *vdev = to_vio_dev(adapter->dev); + long rc = 0; + + dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n", + vdev->unit_address, word1, word2); + + /* + * Ensure the command buffer is flushed to memory before handing it + * over to the other side to prevent it from fetching any stale data. + */ + dma_wmb(); + rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); + dev_dbg(adapter->dev, "rc = 0x%lx\n", rc); + + return rc; +} + +/** + * alloc_dma_buffer - Create DMA Buffer + * + * @vdev: vio_dev struct + * @size: Size field + * @dma_handle: DMA address field + * + * Allocates memory for the command queue and maps remote memory into an + * ioba. + * + * Returns a pointer to the buffer + */ +static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size, + dma_addr_t *dma_handle) +{ + /* allocate memory */ + void *buffer = kzalloc(size, GFP_ATOMIC); + + if (!buffer) { + *dma_handle = 0; + return NULL; + } + + /* DMA map */ + *dma_handle = dma_map_single(&vdev->dev, buffer, size, + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(&vdev->dev, *dma_handle)) { + *dma_handle = 0; + kfree_sensitive(buffer); + return NULL; + } + + return buffer; +} + +/** + * free_dma_buffer - Free DMA Buffer + * + * @vdev: vio_dev struct + * @size: Size field + * @vaddr: Address field + * @dma_handle: DMA address field + * + * Releases memory for a command queue and unmaps mapped remote memory. 
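+ *
+ * (Editor's sketch of the intended pairing with alloc_dma_buffer() above --
+ * illustrative, not upstream code:
+ *
+ *	dma_addr_t dma_handle;
+ *	void *buf = alloc_dma_buffer(vdev, ibmvmc.max_mtu, &dma_handle);
+ *
+ *	if (!buf)
+ *		return -ENOMEM;
+ *	...
+ *	free_dma_buffer(vdev, ibmvmc.max_mtu, buf, dma_handle);
+ *
+ * The size passed here must match the allocation so that dma_unmap_single()
+ * below covers the whole mapping.)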
+ */ +static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr, + dma_addr_t dma_handle) +{ + /* DMA unmap */ + dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL); + + /* deallocate memory */ + kfree_sensitive(vaddr); +} + +/** + * ibmvmc_get_valid_hmc_buffer - Retrieve Valid HMC Buffer + * + * @hmc_index: HMC Index Field + * + * Return: + * Pointer to ibmvmc_buffer + */ +static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index) +{ + struct ibmvmc_buffer *buffer; + struct ibmvmc_buffer *ret_buf = NULL; + unsigned long i; + + if (hmc_index > ibmvmc.max_hmc_index) + return NULL; + + buffer = hmcs[hmc_index].buffer; + + for (i = 0; i < ibmvmc_max_buf_pool_size; i++) { + if (buffer[i].valid && buffer[i].free && + buffer[i].owner == VMC_BUF_OWNER_ALPHA) { + buffer[i].free = 0; + ret_buf = &buffer[i]; + break; + } + } + + return ret_buf; +} + +/** + * ibmvmc_get_free_hmc_buffer - Get Free HMC Buffer + * + * @adapter: crq_server_adapter struct + * @hmc_index: Hmc Index field + * + * Return: + * Pointer to ibmvmc_buffer + */ +static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter, + u8 hmc_index) +{ + struct ibmvmc_buffer *buffer; + struct ibmvmc_buffer *ret_buf = NULL; + unsigned long i; + + if (hmc_index > ibmvmc.max_hmc_index) { + dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n", + hmc_index); + return NULL; + } + + buffer = hmcs[hmc_index].buffer; + + for (i = 0; i < ibmvmc_max_buf_pool_size; i++) { + if (buffer[i].free && + buffer[i].owner == VMC_BUF_OWNER_ALPHA) { + buffer[i].free = 0; + ret_buf = &buffer[i]; + break; + } + } + + return ret_buf; +} + +/** + * ibmvmc_free_hmc_buffer - Free an HMC Buffer + * + * @hmc: ibmvmc_hmc struct + * @buffer: ibmvmc_buffer struct + * + */ +static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc, + struct ibmvmc_buffer *buffer) +{ + unsigned long flags; + + spin_lock_irqsave(&hmc->lock, flags); + buffer->free = 1; + spin_unlock_irqrestore(&hmc->lock, flags); +} + +/** + * ibmvmc_count_hmc_buffers - Count HMC Buffers + * + * @hmc_index: HMC Index field + * @valid: Valid number of buffers field + * @free: Free number of buffers field + * + */ +static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid, + unsigned int *free) +{ + struct ibmvmc_buffer *buffer; + unsigned long i; + unsigned long flags; + + if (hmc_index > ibmvmc.max_hmc_index) + return; + + if (!valid || !free) + return; + + *valid = 0; *free = 0; + + buffer = hmcs[hmc_index].buffer; + spin_lock_irqsave(&hmcs[hmc_index].lock, flags); + + for (i = 0; i < ibmvmc_max_buf_pool_size; i++) { + if (buffer[i].valid) { + *valid = *valid + 1; + if (buffer[i].free) + *free = *free + 1; + } + } + + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); +} + +/** + * ibmvmc_get_free_hmc - Get Free HMC + * + * Return: + * Pointer to an available HMC Connection + * Null otherwise + */ +static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void) +{ + unsigned long i; + unsigned long flags; + + /* + * Find an available HMC connection. 
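+	 * Each connection has its own lock, held across the test of
+	 * ibmhmc_state_free and the transition to ibmhmc_state_initial, so
+	 * two concurrent openers cannot claim the same slot.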
+	 */
+	for (i = 0; i <= ibmvmc.max_hmc_index; i++) {
+		spin_lock_irqsave(&hmcs[i].lock, flags);
+		if (hmcs[i].state == ibmhmc_state_free) {
+			hmcs[i].index = i;
+			hmcs[i].state = ibmhmc_state_initial;
+			spin_unlock_irqrestore(&hmcs[i].lock, flags);
+			return &hmcs[i];
+		}
+		spin_unlock_irqrestore(&hmcs[i].lock, flags);
+	}
+
+	return NULL;
+}
+
+/**
+ * ibmvmc_return_hmc - Return an HMC Connection
+ *
+ * @hmc:		ibmvmc_hmc struct
+ * @release_readers:	whether to wake up and invalidate readers of the session
+ *
+ * This function releases the HMC connections back into the pool.
+ *
+ * Return:
+ *	0 - Success
+ *	Non-zero - Failure
+ */
+static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers)
+{
+	struct ibmvmc_buffer *buffer;
+	struct crq_server_adapter *adapter;
+	struct vio_dev *vdev;
+	unsigned long i;
+	unsigned long flags;
+
+	if (!hmc || !hmc->adapter)
+		return -EIO;
+
+	if (release_readers) {
+		if (hmc->file_session) {
+			struct ibmvmc_file_session *session = hmc->file_session;
+
+			session->valid = 0;
+			wake_up_interruptible(&ibmvmc_read_wait);
+		}
+	}
+
+	adapter = hmc->adapter;
+	vdev = to_vio_dev(adapter->dev);
+
+	spin_lock_irqsave(&hmc->lock, flags);
+	hmc->index = 0;
+	hmc->state = ibmhmc_state_free;
+	hmc->queue_head = 0;
+	hmc->queue_tail = 0;
+	buffer = hmc->buffer;
+	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
+		if (buffer[i].valid) {
+			free_dma_buffer(vdev,
+					ibmvmc.max_mtu,
+					buffer[i].real_addr_local,
+					buffer[i].dma_addr_local);
+			dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i);
+		}
+		memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer));
+
+		hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID;
+	}
+
+	spin_unlock_irqrestore(&hmc->lock, flags);
+
+	return 0;
+}
+
+/**
+ * ibmvmc_send_open - Interface Open
+ * @buffer:	Pointer to ibmvmc_buffer struct
+ * @hmc:	Pointer to ibmvmc_hmc struct
+ *
+ * This command is sent by the management partition as the result of a
+ * management partition device request. It causes the hypervisor to
+ * prepare a set of data buffers for the management application connection
+ * indicated by the HMC index. A unique HMC index would be used if multiple
+ * management applications were to run concurrently. Before responding to
+ * this command, the hypervisor must provide the management partition with
+ * at least one of these new buffers via the Add Buffer message. This
+ * indicates whether the messages are inbound or outbound from the
+ * hypervisor.
+ *
+ * Return:
+ *	0 - Success
+ *	Non-zero - Failure
+ */
+static int ibmvmc_send_open(struct ibmvmc_buffer *buffer,
+			    struct ibmvmc_hmc *hmc)
+{
+	struct ibmvmc_crq_msg crq_msg;
+	struct crq_server_adapter *adapter;
+	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
+	int rc = 0;
+
+	if (!hmc || !hmc->adapter)
+		return -EIO;
+
+	adapter = hmc->adapter;
+
+	dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
+		(unsigned long)buffer->size, (unsigned long)adapter->liobn,
+		(unsigned long)buffer->dma_addr_local,
+		(unsigned long)adapter->riobn,
+		(unsigned long)buffer->dma_addr_remote);
+
+	rc = h_copy_rdma(buffer->size,
+			 adapter->liobn,
+			 buffer->dma_addr_local,
+			 adapter->riobn,
+			 buffer->dma_addr_remote);
+	if (rc) {
+		dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n",
+			rc);
+		return -EIO;
+	}
+
+	hmc->state = ibmhmc_state_opening;
+
+	crq_msg.valid = 0x80;
+	crq_msg.type = VMC_MSG_OPEN;
+	crq_msg.status = 0;
+	crq_msg.var1.rsvd = 0;
+	crq_msg.hmc_session = hmc->session;
+	crq_msg.hmc_index = hmc->index;
+	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
+	crq_msg.rsvd = 0;
+	crq_msg.var3.rsvd = 0;
+
+	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
+			be64_to_cpu(crq_as_u64[1]));
+
+	return rc;
+}
+
+/**
+ * ibmvmc_send_close - Interface Close
+ * @hmc:	Pointer to ibmvmc_hmc struct
+ *
+ * This command is sent by the management partition to terminate a
+ * management application to hypervisor connection. When this command is
+ * sent, the management partition has quiesced all I/O operations to all
+ * buffers associated with this management application connection, and
+ * has freed any storage for these buffers.
+ *
+ * Return:
+ *	0 - Success
+ *	Non-zero - Failure
+ */
+static int ibmvmc_send_close(struct ibmvmc_hmc *hmc)
+{
+	struct ibmvmc_crq_msg crq_msg;
+	struct crq_server_adapter *adapter;
+	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
+	int rc = 0;
+
+	if (!hmc || !hmc->adapter)
+		return -EIO;
+
+	adapter = hmc->adapter;
+
+	dev_info(adapter->dev, "CRQ send: close\n");
+
+	crq_msg.valid = 0x80;
+	crq_msg.type = VMC_MSG_CLOSE;
+	crq_msg.status = 0;
+	crq_msg.var1.rsvd = 0;
+	crq_msg.hmc_session = hmc->session;
+	crq_msg.hmc_index = hmc->index;
+	crq_msg.var2.rsvd = 0;
+	crq_msg.rsvd = 0;
+	crq_msg.var3.rsvd = 0;
+
+	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
+			be64_to_cpu(crq_as_u64[1]));
+
+	return rc;
+}
+
+/**
+ * ibmvmc_send_capabilities - Send VMC Capabilities
+ *
+ * @adapter:	crq_server_adapter struct
+ *
+ * The capabilities message is an administrative message sent after the CRQ
+ * initialization sequence of messages and is used to exchange VMC
+ * capabilities between the management partition and the hypervisor. The
+ * management partition must send this message and the hypervisor must
+ * respond with the VMC capabilities response message before HMC interface
+ * messages can begin. Any HMC interface messages received before the
+ * exchange of capabilities has completed are dropped.
+ * + * Return: + * 0 - Success + */ +static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter) +{ + struct ibmvmc_admin_crq_msg crq_msg; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + + dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n"); + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_CAP; + crq_msg.status = 0; + crq_msg.rsvd[0] = 0; + crq_msg.rsvd[1] = 0; + crq_msg.max_hmc = ibmvmc_max_hmcs; + crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu); + crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size); + crq_msg.crq_size = cpu_to_be16(adapter->queue.size); + crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION); + + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + ibmvmc.state = ibmvmc_state_capabilities; + + return 0; +} + +/** + * ibmvmc_send_add_buffer_resp - Add Buffer Response + * + * @adapter: crq_server_adapter struct + * @status: Status field + * @hmc_session: HMC Session field + * @hmc_index: HMC Index field + * @buffer_id: Buffer Id field + * + * This command is sent by the management partition to the hypervisor in + * response to the Add Buffer message. The Status field indicates the result of + * the command. + * + * Return: + * 0 - Success + */ +static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter, + u8 status, u8 hmc_session, + u8 hmc_index, u16 buffer_id) +{ + struct ibmvmc_crq_msg crq_msg; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + + dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n"); + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_ADD_BUF_RESP; + crq_msg.status = status; + crq_msg.var1.rsvd = 0; + crq_msg.hmc_session = hmc_session; + crq_msg.hmc_index = hmc_index; + crq_msg.var2.buffer_id = cpu_to_be16(buffer_id); + crq_msg.rsvd = 0; + crq_msg.var3.rsvd = 0; + + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + return 0; +} + +/** + * ibmvmc_send_rem_buffer_resp - Remove Buffer Response + * + * @adapter: crq_server_adapter struct + * @status: Status field + * @hmc_session: HMC Session field + * @hmc_index: HMC Index field + * @buffer_id: Buffer Id field + * + * This command is sent by the management partition to the hypervisor in + * response to the Remove Buffer message. The Buffer ID field indicates + * which buffer the management partition selected to remove. The Status + * field indicates the result of the command. + * + * Return: + * 0 - Success + */ +static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter, + u8 status, u8 hmc_session, + u8 hmc_index, u16 buffer_id) +{ + struct ibmvmc_crq_msg crq_msg; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + + dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n"); + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_REM_BUF_RESP; + crq_msg.status = status; + crq_msg.var1.rsvd = 0; + crq_msg.hmc_session = hmc_session; + crq_msg.hmc_index = hmc_index; + crq_msg.var2.buffer_id = cpu_to_be16(buffer_id); + crq_msg.rsvd = 0; + crq_msg.var3.rsvd = 0; + + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + return 0; +} + +/** + * ibmvmc_send_msg - Signal Message + * + * @adapter: crq_server_adapter struct + * @buffer: ibmvmc_buffer struct + * @hmc: ibmvmc_hmc struct + * @msg_len: message length field + * + * This command is sent between the management partition and the hypervisor + * in order to signal the arrival of an HMC protocol message. The command + * can be sent by both the management partition and the hypervisor. 
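+ *
+ * (Editor's sketch of the management-application side of this traffic --
+ * illustrative only; it assumes the misc device node is /dev/ibmvmc and
+ * that ibmvmc.h supplies the ioctl definitions:
+ *
+ *	unsigned char hmc_id[HMC_ID_LEN];	// padded console id
+ *	struct ibmvmc_query_struct q;
+ *	int fd = open("/dev/ibmvmc", O_RDWR);
+ *
+ *	ioctl(fd, VMC_IOCTL_QUERY, &q);		// VMC state and DRC index
+ *	ioctl(fd, VMC_IOCTL_SETHMCID, hmc_id);	// reserve an HMC, send open
+ *	write(fd, cmd, cmd_len);		// one HMC protocol command
+ *	read(fd, resp, resp_len);		// blocks until a reply arrives
+ *
+ * Each open file descriptor maps to at most one HMC connection, and the
+ * write path below rejects traffic until the open response has moved that
+ * connection to the ready state.)
+ *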
It is + * used for all traffic between the management application and the hypervisor, + * regardless of who initiated the communication. + * + * There is no response to this message. + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_send_msg(struct crq_server_adapter *adapter, + struct ibmvmc_buffer *buffer, + struct ibmvmc_hmc *hmc, int msg_len) +{ + struct ibmvmc_crq_msg crq_msg; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + int rc = 0; + + dev_dbg(adapter->dev, "CRQ send: rdma to HV\n"); + rc = h_copy_rdma(msg_len, + adapter->liobn, + buffer->dma_addr_local, + adapter->riobn, + buffer->dma_addr_remote); + if (rc) { + dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n", + rc); + return rc; + } + + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_SIGNAL; + crq_msg.status = 0; + crq_msg.var1.rsvd = 0; + crq_msg.hmc_session = hmc->session; + crq_msg.hmc_index = hmc->index; + crq_msg.var2.buffer_id = cpu_to_be16(buffer->id); + crq_msg.var3.msg_len = cpu_to_be32(msg_len); + dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n", + be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1])); + + buffer->owner = VMC_BUF_OWNER_HV; + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + return rc; +} + +/** + * ibmvmc_open - Open Session + * + * @inode: inode struct + * @file: file struct + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_open(struct inode *inode, struct file *file) +{ + struct ibmvmc_file_session *session; + + pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__, + (unsigned long)inode, (unsigned long)file, + ibmvmc.state); + + session = kzalloc(sizeof(*session), GFP_KERNEL); + if (!session) + return -ENOMEM; + + session->file = file; + file->private_data = session; + + return 0; +} + +/** + * ibmvmc_close - Close Session + * + * @inode: inode struct + * @file: file struct + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_close(struct inode *inode, struct file *file) +{ + struct ibmvmc_file_session *session; + struct ibmvmc_hmc *hmc; + int rc = 0; + unsigned long flags; + + pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__, + (unsigned long)file, ibmvmc.state); + + session = file->private_data; + if (!session) + return -EIO; + + hmc = session->hmc; + if (hmc) { + if (!hmc->adapter) + return -EIO; + + if (ibmvmc.state == ibmvmc_state_failed) { + dev_warn(hmc->adapter->dev, "close: state_failed\n"); + return -EIO; + } + + spin_lock_irqsave(&hmc->lock, flags); + if (hmc->state >= ibmhmc_state_opening) { + rc = ibmvmc_send_close(hmc); + if (rc) + dev_warn(hmc->adapter->dev, "close: send_close failed.\n"); + } + spin_unlock_irqrestore(&hmc->lock, flags); + } + + kfree_sensitive(session); + + return rc; +} + +/** + * ibmvmc_read - Read + * + * @file: file struct + * @buf: Character buffer + * @nbytes: Size in bytes + * @ppos: Offset + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static ssize_t ibmvmc_read(struct file *file, char *buf, size_t nbytes, + loff_t *ppos) +{ + struct ibmvmc_file_session *session; + struct ibmvmc_hmc *hmc; + struct crq_server_adapter *adapter; + struct ibmvmc_buffer *buffer; + ssize_t n; + ssize_t retval = 0; + unsigned long flags; + DEFINE_WAIT(wait); + + pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n", + (unsigned long)file, (unsigned long)buf, + (unsigned long)nbytes); + + if (nbytes == 0) + return 0; + + if (nbytes > ibmvmc.max_mtu) { + pr_warn("ibmvmc: read: nbytes 
invalid 0x%x\n", + (unsigned int)nbytes); + return -EINVAL; + } + + session = file->private_data; + if (!session) { + pr_warn("ibmvmc: read: no session\n"); + return -EIO; + } + + hmc = session->hmc; + if (!hmc) { + pr_warn("ibmvmc: read: no hmc\n"); + return -EIO; + } + + adapter = hmc->adapter; + if (!adapter) { + pr_warn("ibmvmc: read: no adapter\n"); + return -EIO; + } + + do { + prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE); + + spin_lock_irqsave(&hmc->lock, flags); + if (hmc->queue_tail != hmc->queue_head) + /* Data is available */ + break; + + spin_unlock_irqrestore(&hmc->lock, flags); + + if (!session->valid) { + retval = -EBADFD; + goto out; + } + if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + goto out; + } + + schedule(); + + if (signal_pending(current)) { + retval = -ERESTARTSYS; + goto out; + } + } while (1); + + buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]); + hmc->queue_tail++; + if (hmc->queue_tail == ibmvmc_max_buf_pool_size) + hmc->queue_tail = 0; + spin_unlock_irqrestore(&hmc->lock, flags); + + nbytes = min_t(size_t, nbytes, buffer->msg_len); + n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes); + dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes); + ibmvmc_free_hmc_buffer(hmc, buffer); + retval = nbytes; + + if (n) { + dev_warn(adapter->dev, "read: copy to user failed.\n"); + retval = -EFAULT; + } + + out: + finish_wait(&ibmvmc_read_wait, &wait); + dev_dbg(adapter->dev, "read: out %ld\n", retval); + return retval; +} + +/** + * ibmvmc_poll - Poll + * + * @file: file struct + * @wait: Poll Table + * + * Return: + * poll.h return values + */ +static unsigned int ibmvmc_poll(struct file *file, poll_table *wait) +{ + struct ibmvmc_file_session *session; + struct ibmvmc_hmc *hmc; + unsigned int mask = 0; + + session = file->private_data; + if (!session) + return 0; + + hmc = session->hmc; + if (!hmc) + return 0; + + poll_wait(file, &ibmvmc_read_wait, wait); + + if (hmc->queue_head != hmc->queue_tail) + mask |= POLLIN | POLLRDNORM; + + return mask; +} + +/** + * ibmvmc_write - Write + * + * @file: file struct + * @buffer: Character buffer + * @count: Count field + * @ppos: Offset + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static ssize_t ibmvmc_write(struct file *file, const char *buffer, + size_t count, loff_t *ppos) +{ + struct ibmvmc_buffer *vmc_buffer; + struct ibmvmc_file_session *session; + struct crq_server_adapter *adapter; + struct ibmvmc_hmc *hmc; + unsigned char *buf; + unsigned long flags; + size_t bytes; + const char *p = buffer; + size_t c = count; + int ret = 0; + + session = file->private_data; + if (!session) + return -EIO; + + hmc = session->hmc; + if (!hmc) + return -EIO; + + spin_lock_irqsave(&hmc->lock, flags); + if (hmc->state == ibmhmc_state_free) { + /* HMC connection is not valid (possibly was reset under us). */ + ret = -EIO; + goto out; + } + + adapter = hmc->adapter; + if (!adapter) { + ret = -EIO; + goto out; + } + + if (count > ibmvmc.max_mtu) { + dev_warn(adapter->dev, "invalid buffer size 0x%lx\n", + (unsigned long)count); + ret = -EIO; + goto out; + } + + /* Waiting for the open resp message to the ioctl(1) - retry */ + if (hmc->state == ibmhmc_state_opening) { + ret = -EBUSY; + goto out; + } + + /* Make sure the ioctl() was called & the open msg sent, and that + * the HMC connection has not failed. 
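+	 * (Editor's note: the connection state advances free -> initial
+	 * (slot reserved by the sethmcid ioctl) -> opening (open message
+	 * sent) -> ready, entered once the open response arrives; writes
+	 * are only accepted in the ready state.)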
+ */ + if (hmc->state != ibmhmc_state_ready) { + ret = -EIO; + goto out; + } + + vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index); + if (!vmc_buffer) { + /* No buffer available for the msg send, or we have not yet + * completed the open/open_resp sequence. Retry until this is + * complete. + */ + ret = -EBUSY; + goto out; + } + if (!vmc_buffer->real_addr_local) { + dev_err(adapter->dev, "no buffer storage assigned\n"); + ret = -EIO; + goto out; + } + buf = vmc_buffer->real_addr_local; + + while (c > 0) { + bytes = min_t(size_t, c, vmc_buffer->size); + + bytes -= copy_from_user(buf, p, bytes); + if (!bytes) { + ret = -EFAULT; + goto out; + } + c -= bytes; + p += bytes; + } + if (p == buffer) + goto out; + + file->f_path.dentry->d_inode->i_mtime = current_time(file_inode(file)); + mark_inode_dirty(file->f_path.dentry->d_inode); + + dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n", + (unsigned long)file, (unsigned long)count); + + ibmvmc_send_msg(adapter, vmc_buffer, hmc, count); + ret = p - buffer; + out: + spin_unlock_irqrestore(&hmc->lock, flags); + return (ssize_t)(ret); +} + +/** + * ibmvmc_setup_hmc - Setup the HMC + * + * @session: ibmvmc_file_session struct + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session) +{ + struct ibmvmc_hmc *hmc; + unsigned int valid, free, index; + + if (ibmvmc.state == ibmvmc_state_failed) { + pr_warn("ibmvmc: Reserve HMC: state_failed\n"); + return -EIO; + } + + if (ibmvmc.state < ibmvmc_state_ready) { + pr_warn("ibmvmc: Reserve HMC: not state_ready\n"); + return -EAGAIN; + } + + /* Device is busy until capabilities have been exchanged and we + * have a generic buffer for each possible HMC connection. + */ + for (index = 0; index <= ibmvmc.max_hmc_index; index++) { + valid = 0; + ibmvmc_count_hmc_buffers(index, &valid, &free); + if (valid == 0) { + pr_warn("ibmvmc: buffers not ready for index %d\n", + index); + return -ENOBUFS; + } + } + + /* Get an hmc object, and transition to ibmhmc_state_initial */ + hmc = ibmvmc_get_free_hmc(); + if (!hmc) { + pr_warn("%s: free hmc not found\n", __func__); + return -EBUSY; + } + + hmc->session = hmc->session + 1; + if (hmc->session == 0xff) + hmc->session = 1; + + session->hmc = hmc; + hmc->adapter = &ibmvmc_adapter; + hmc->file_session = session; + session->valid = 1; + + return 0; +} + +/** + * ibmvmc_ioctl_sethmcid - IOCTL Set HMC ID + * + * @session: ibmvmc_file_session struct + * @new_hmc_id: HMC id field + * + * IOCTL command to setup the hmc id + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session, + unsigned char __user *new_hmc_id) +{ + struct ibmvmc_hmc *hmc; + struct ibmvmc_buffer *buffer; + size_t bytes; + char print_buffer[HMC_ID_LEN + 1]; + unsigned long flags; + long rc = 0; + + /* Reserve HMC session */ + hmc = session->hmc; + if (!hmc) { + rc = ibmvmc_setup_hmc(session); + if (rc) + return rc; + + hmc = session->hmc; + if (!hmc) { + pr_err("ibmvmc: setup_hmc success but no hmc\n"); + return -EIO; + } + } + + if (hmc->state != ibmhmc_state_initial) { + pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n", + hmc->state); + return -EIO; + } + + bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN); + if (bytes) + return -EFAULT; + + /* Send Open Session command */ + spin_lock_irqsave(&hmc->lock, flags); + buffer = ibmvmc_get_valid_hmc_buffer(hmc->index); + spin_unlock_irqrestore(&hmc->lock, flags); + + if (!buffer || 
!buffer->real_addr_local) { + pr_warn("ibmvmc: sethmcid: no buffer available\n"); + return -EIO; + } + + /* Make sure buffer is NULL terminated before trying to print it */ + memset(print_buffer, 0, HMC_ID_LEN + 1); + strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN); + pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer); + + memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN); + /* RDMA over ID, send open msg, change state to ibmhmc_state_opening */ + rc = ibmvmc_send_open(buffer, hmc); + + return rc; +} + +/** + * ibmvmc_ioctl_query - IOCTL Query + * + * @session: ibmvmc_file_session struct + * @ret_struct: ibmvmc_query_struct + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session, + struct ibmvmc_query_struct __user *ret_struct) +{ + struct ibmvmc_query_struct query_struct; + size_t bytes; + + memset(&query_struct, 0, sizeof(query_struct)); + query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial); + query_struct.state = ibmvmc.state; + query_struct.vmc_drc_index = ibmvmc.vmc_drc_index; + + bytes = copy_to_user(ret_struct, &query_struct, + sizeof(query_struct)); + if (bytes) + return -EFAULT; + + return 0; +} + +/** + * ibmvmc_ioctl_requestvmc - IOCTL Request VMC + * + * @session: ibmvmc_file_session struct + * @ret_vmc_index: VMC Index + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session, + u32 __user *ret_vmc_index) +{ + /* TODO: (adreznec) Add locking to control multiple process access */ + size_t bytes; + long rc; + u32 vmc_drc_index; + + /* Call to request the VMC device from phyp*/ + rc = h_request_vmc(&vmc_drc_index); + pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc); + + if (rc == H_SUCCESS) { + rc = 0; + } else if (rc == H_FUNCTION) { + pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n"); + return -EPERM; + } else if (rc == H_AUTHORITY) { + pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n"); + return -EPERM; + } else if (rc == H_HARDWARE) { + pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n"); + return -EIO; + } else if (rc == H_RESOURCE) { + pr_err("ibmvmc: requestvmc: vmc resource unavailable\n"); + return -ENODEV; + } else if (rc == H_NOT_AVAILABLE) { + pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n"); + return -EPERM; + } else if (rc == H_PARAMETER) { + pr_err("ibmvmc: requestvmc: invalid parameter\n"); + return -EINVAL; + } + + /* Success, set the vmc index in global struct */ + ibmvmc.vmc_drc_index = vmc_drc_index; + + bytes = copy_to_user(ret_vmc_index, &vmc_drc_index, + sizeof(*ret_vmc_index)); + if (bytes) { + pr_warn("ibmvmc: requestvmc: copy to user failed.\n"); + return -EFAULT; + } + return rc; +} + +/** + * ibmvmc_ioctl - IOCTL + * + * @file: file information + * @cmd: cmd field + * @arg: Argument field + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static long ibmvmc_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct ibmvmc_file_session *session = file->private_data; + + pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n", + (unsigned long)file, cmd, arg, + (unsigned long)session); + + if (!session) { + pr_warn("ibmvmc: ioctl: no session\n"); + return -EIO; + } + + switch (cmd) { + case VMC_IOCTL_SETHMCID: + return ibmvmc_ioctl_sethmcid(session, + (unsigned char __user *)arg); + case VMC_IOCTL_QUERY: + return ibmvmc_ioctl_query(session, + (struct ibmvmc_query_struct __user *)arg); + 
case VMC_IOCTL_REQUESTVMC:
+		return ibmvmc_ioctl_requestvmc(session,
+				(unsigned int __user *)arg);
+	default:
+		pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd);
+		return -EINVAL;
+	}
+}
+
+static const struct file_operations ibmvmc_fops = {
+	.owner		= THIS_MODULE,
+	.read		= ibmvmc_read,
+	.write		= ibmvmc_write,
+	.poll		= ibmvmc_poll,
+	.unlocked_ioctl	= ibmvmc_ioctl,
+	.open		= ibmvmc_open,
+	.release	= ibmvmc_close,
+};
+
+/**
+ * ibmvmc_add_buffer - Add Buffer
+ *
+ * @adapter:	crq_server_adapter struct
+ * @crq:	ibmvmc_crq_msg struct
+ *
+ * This message transfers a buffer from hypervisor ownership to management
+ * partition ownership. The LIOBA is obtained from the virtual TCE table
+ * associated with the hypervisor side of the VMC device, and points to a
+ * buffer of size MTU (as established in the capabilities exchange).
+ *
+ * Typical flow for adding buffers:
+ * 1. A new management application connection is opened by the management
+ *	partition.
+ * 2. The hypervisor assigns new buffers for the traffic associated with
+ *	that connection.
+ * 3. The hypervisor sends VMC Add Buffer messages to the management
+ *	partition, informing it of the new buffers.
+ * 4. The hypervisor sends an HMC protocol message (to the management
+ *	application) notifying it of the new buffers. This informs the
+ *	application that it has buffers available for sending HMC
+ *	commands.
+ *
+ * Return:
+ *	0 - Success
+ *	Non-zero - Failure
+ */
+static int ibmvmc_add_buffer(struct crq_server_adapter *adapter,
+			     struct ibmvmc_crq_msg *crq)
+{
+	struct ibmvmc_buffer *buffer;
+	u8 hmc_index;
+	u8 hmc_session;
+	u16 buffer_id;
+	unsigned long flags;
+	int rc = 0;
+
+	if (!crq)
+		return -1;
+
+	hmc_session = crq->hmc_session;
+	hmc_index = crq->hmc_index;
+	buffer_id = be16_to_cpu(crq->var2.buffer_id);
+
+	if (hmc_index > ibmvmc.max_hmc_index) {
+		dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n",
+			hmc_index);
+		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
+					    hmc_session, hmc_index, buffer_id);
+		return -1;
+	}
+
+	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
+		dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n",
+			buffer_id);
+		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
+					    hmc_session, hmc_index, buffer_id);
+		return -1;
+	}
+
+	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
+	buffer = &hmcs[hmc_index].buffer[buffer_id];
+
+	if (buffer->real_addr_local || buffer->dma_addr_local) {
+		dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n",
+			 (unsigned long)buffer_id);
+		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
+		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
+					    hmc_session, hmc_index, buffer_id);
+		return -1;
+	}
+
+	buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev),
+						   ibmvmc.max_mtu,
+						   &buffer->dma_addr_local);
+
+	if (!buffer->real_addr_local) {
+		dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n");
+		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
+		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE,
+					    hmc_session, hmc_index, buffer_id);
+		return -1;
+	}
+
+	buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba);
+	buffer->size = ibmvmc.max_mtu;
+	buffer->owner = crq->var1.owner;
+	buffer->free = 1;
+	/* Must ensure valid==1 is observable only after all other fields are */
+	dma_wmb();
+	buffer->valid = 1;
+	buffer->id = buffer_id;
+
+	dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n");
+	dev_dbg(adapter->dev, "   index: %d, session: 
%d, buffer: 0x%x, owner: %d\n", + hmc_index, hmc_session, buffer_id, buffer->owner); + dev_dbg(adapter->dev, " local: 0x%x, remote: 0x%x\n", + (u32)buffer->dma_addr_local, + (u32)buffer->dma_addr_remote); + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); + + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session, + hmc_index, buffer_id); + + return rc; +} + +/** + * ibmvmc_rem_buffer - Remove Buffer + * + * @adapter: crq_server_adapter struct + * @crq: ibmvmc_crq_msg struct + * + * This message requests an HMC buffer to be transferred from management + * partition ownership to hypervisor ownership. The management partition may + * not be able to satisfy the request at a particular point in time if all its + * buffers are in use. The management partition requires a depth of at least + * one inbound buffer to allow management application commands to flow to the + * hypervisor. It is, therefore, an interface error for the hypervisor to + * attempt to remove the management partition's last buffer. + * + * The hypervisor is expected to manage buffer usage with the management + * application directly and inform the management partition when buffers may be + * removed. The typical flow for removing buffers: + * + * 1. The management application no longer needs a communication path to a + * particular hypervisor function. That function is closed. + * 2. The hypervisor and the management application quiesce all traffic to that + * function. The hypervisor requests a reduction in buffer pool size. + * 3. The management application acknowledges the reduction in buffer pool size. + * 4. The hypervisor sends a Remove Buffer message to the management partition, + * informing it of the reduction in buffers. + * 5. The management partition verifies it can remove the buffer. This is + * possible if buffers have been quiesced. + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +/* + * The hypervisor requested that we pick an unused buffer, and return it. + * Before sending the buffer back, we free any storage associated with the + * buffer. 
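+ * The HMC lock is held across the lookup, the DMA free, and the
+ * clearing of the slot, so a concurrent add_buffer cannot repopulate
+ * the entry while it is being torn down.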
+ */ +static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crq) +{ + struct ibmvmc_buffer *buffer; + u8 hmc_index; + u8 hmc_session; + u16 buffer_id = 0; + unsigned long flags; + int rc = 0; + + if (!crq) + return -1; + + hmc_session = crq->hmc_session; + hmc_index = crq->hmc_index; + + if (hmc_index > ibmvmc.max_hmc_index) { + dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n", + hmc_index); + ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX, + hmc_session, hmc_index, buffer_id); + return -1; + } + + spin_lock_irqsave(&hmcs[hmc_index].lock, flags); + buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index); + if (!buffer) { + dev_info(adapter->dev, "rem_buffer: no buffer to remove\n"); + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); + ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER, + hmc_session, hmc_index, + VMC_INVALID_BUFFER_ID); + return -1; + } + + buffer_id = buffer->id; + + if (buffer->valid) + free_dma_buffer(to_vio_dev(adapter->dev), + ibmvmc.max_mtu, + buffer->real_addr_local, + buffer->dma_addr_local); + + memset(buffer, 0, sizeof(struct ibmvmc_buffer)); + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); + + dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id); + ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session, + hmc_index, buffer_id); + + return rc; +} + +static int ibmvmc_recv_msg(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crq) +{ + struct ibmvmc_buffer *buffer; + struct ibmvmc_hmc *hmc; + unsigned long msg_len; + u8 hmc_index; + u8 hmc_session; + u16 buffer_id; + unsigned long flags; + int rc = 0; + + if (!crq) + return -1; + + /* Hypervisor writes CRQs directly into our memory in big endian */ + dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n", + be64_to_cpu(*((unsigned long *)crq)), + be64_to_cpu(*(((unsigned long *)crq) + 1))); + + hmc_session = crq->hmc_session; + hmc_index = crq->hmc_index; + buffer_id = be16_to_cpu(crq->var2.buffer_id); + msg_len = be32_to_cpu(crq->var3.msg_len); + + if (hmc_index > ibmvmc.max_hmc_index) { + dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n", + hmc_index); + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX, + hmc_session, hmc_index, buffer_id); + return -1; + } + + if (buffer_id >= ibmvmc.max_buffer_pool_size) { + dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n", + buffer_id); + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID, + hmc_session, hmc_index, buffer_id); + return -1; + } + + hmc = &hmcs[hmc_index]; + spin_lock_irqsave(&hmc->lock, flags); + + if (hmc->state == ibmhmc_state_free) { + dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n", + hmc->state); + /* HMC connection is not valid (possibly was reset under us). */ + spin_unlock_irqrestore(&hmc->lock, flags); + return -1; + } + + buffer = &hmc->buffer[buffer_id]; + + if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) { + dev_err(adapter->dev, "Recv_msg: not valid, or not HV. 0x%x 0x%x\n", + buffer->valid, buffer->owner); + spin_unlock_irqrestore(&hmc->lock, flags); + return -1; + } + + /* RDMA the data into the partition. 
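+ * h_copy_rdma pulls msg_len bytes from the hypervisor-side buffer
+ * (riobn / dma_addr_remote) into the local DMA buffer
+ * (liobn / dma_addr_local) before the buffer is queued for read().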
*/ + rc = h_copy_rdma(msg_len, + adapter->riobn, + buffer->dma_addr_remote, + adapter->liobn, + buffer->dma_addr_local); + + dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n", + (unsigned int)msg_len, (unsigned int)buffer_id, + (unsigned int)hmc->queue_head, (unsigned int)hmc_index); + buffer->msg_len = msg_len; + buffer->free = 0; + buffer->owner = VMC_BUF_OWNER_ALPHA; + + if (rc) { + dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n", + rc); + spin_unlock_irqrestore(&hmc->lock, flags); + return -1; + } + + /* Must be locked because read operates on the same data */ + hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id; + hmc->queue_head++; + if (hmc->queue_head == ibmvmc_max_buf_pool_size) + hmc->queue_head = 0; + + if (hmc->queue_head == hmc->queue_tail) + dev_err(adapter->dev, "outbound buffer queue wrapped.\n"); + + spin_unlock_irqrestore(&hmc->lock, flags); + + wake_up_interruptible(&ibmvmc_read_wait); + + return 0; +} + +/** + * ibmvmc_process_capabilities - Process Capabilities + * + * @adapter: crq_server_adapter struct + * @crqp: ibmvmc_crq_msg struct + * + */ +static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crqp) +{ + struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp; + + if ((be16_to_cpu(crq->version) >> 8) != + (IBMVMC_PROTOCOL_VERSION >> 8)) { + dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n", + be16_to_cpu(crq->version), + IBMVMC_PROTOCOL_VERSION); + ibmvmc.state = ibmvmc_state_failed; + return; + } + + ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu)); + ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size, + be16_to_cpu(crq->pool_size)); + ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1; + ibmvmc.state = ibmvmc_state_ready; + + dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n", + ibmvmc.max_mtu, ibmvmc.max_buffer_pool_size, + ibmvmc.max_hmc_index); +} + +/** + * ibmvmc_validate_hmc_session - Validate HMC Session + * + * @adapter: crq_server_adapter struct + * @crq: ibmvmc_crq_msg struct + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crq) +{ + unsigned char hmc_index; + + hmc_index = crq->hmc_index; + + if (crq->hmc_session == 0) + return 0; + + if (hmc_index > ibmvmc.max_hmc_index) + return -1; + + if (hmcs[hmc_index].session != crq->hmc_session) { + dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n", + hmcs[hmc_index].session, crq->hmc_session); + return -1; + } + + return 0; +} + +/** + * ibmvmc_reset - Reset + * + * @adapter: crq_server_adapter struct + * @xport_event: export_event field + * + * Closes all HMC sessions and conditionally schedules a CRQ reset. + * @xport_event: If true, the partner closed their CRQ; we don't need to reset. + * If false, we need to schedule a CRQ reset. + */ +static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event) +{ + int i; + + if (ibmvmc.state != ibmvmc_state_sched_reset) { + dev_info(adapter->dev, "*** Reset to initial state.\n"); + for (i = 0; i < ibmvmc_max_hmcs; i++) + ibmvmc_return_hmc(&hmcs[i], xport_event); + + if (xport_event) { + /* CRQ was closed by the partner. We don't need to do + * anything except set ourself to the correct state to + * handle init msgs. 
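+ * A fresh CRQ init exchange will arrive once the partner
+ * reopens its side of the queue.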
+ */ + ibmvmc.state = ibmvmc_state_crqinit; + } else { + /* The partner did not close their CRQ - instead, we're + * closing the CRQ on our end. Need to schedule this + * for process context, because CRQ reset may require a + * sleep. + * + * Setting ibmvmc.state here immediately prevents + * ibmvmc_open from completing until the reset + * completes in process context. + */ + ibmvmc.state = ibmvmc_state_sched_reset; + dev_dbg(adapter->dev, "Device reset scheduled"); + wake_up_interruptible(&adapter->reset_wait_queue); + } + } +} + +/** + * ibmvmc_reset_task - Reset Task + * + * @data: Data field + * + * Performs a CRQ reset of the VMC device in process context. + * NOTE: This function should not be called directly, use ibmvmc_reset. + */ +static int ibmvmc_reset_task(void *data) +{ + struct crq_server_adapter *adapter = data; + int rc; + + set_user_nice(current, -20); + + while (!kthread_should_stop()) { + wait_event_interruptible(adapter->reset_wait_queue, + (ibmvmc.state == ibmvmc_state_sched_reset) || + kthread_should_stop()); + + if (kthread_should_stop()) + break; + + dev_dbg(adapter->dev, "CRQ resetting in process context"); + tasklet_disable(&adapter->work_task); + + rc = ibmvmc_reset_crq_queue(adapter); + + if (rc != H_SUCCESS && rc != H_RESOURCE) { + dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n", + rc); + ibmvmc.state = ibmvmc_state_failed; + } else { + ibmvmc.state = ibmvmc_state_crqinit; + + if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) + != 0 && rc != H_RESOURCE) + dev_warn(adapter->dev, "Failed to send initialize CRQ message\n"); + } + + vio_enable_interrupts(to_vio_dev(adapter->dev)); + tasklet_enable(&adapter->work_task); + } + + return 0; +} + +/** + * ibmvmc_process_open_resp - Process Open Response + * + * @crq: ibmvmc_crq_msg struct + * @adapter: crq_server_adapter struct + * + * This command is sent by the hypervisor in response to the Interface + * Open message. When this message is received, the indicated buffer is + * again available for management partition use. + */ +static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq, + struct crq_server_adapter *adapter) +{ + unsigned char hmc_index; + unsigned short buffer_id; + + hmc_index = crq->hmc_index; + if (hmc_index > ibmvmc.max_hmc_index) { + /* Why would PHYP give an index > max negotiated? */ + ibmvmc_reset(adapter, false); + return; + } + + if (crq->status) { + dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n", + crq->status); + ibmvmc_return_hmc(&hmcs[hmc_index], false); + return; + } + + if (hmcs[hmc_index].state == ibmhmc_state_opening) { + buffer_id = be16_to_cpu(crq->var2.buffer_id); + if (buffer_id >= ibmvmc.max_buffer_pool_size) { + dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n", + buffer_id); + hmcs[hmc_index].state = ibmhmc_state_failed; + } else { + ibmvmc_free_hmc_buffer(&hmcs[hmc_index], + &hmcs[hmc_index].buffer[buffer_id]); + hmcs[hmc_index].state = ibmhmc_state_ready; + dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n"); + } + } else { + dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n", + hmcs[hmc_index].state); + } +} + +/** + * ibmvmc_process_close_resp - Process Close Response + * + * @crq: ibmvmc_crq_msg struct + * @adapter: crq_server_adapter struct + * + * This command is sent by the hypervisor in response to the managemant + * application Interface Close message. + * + * If the close fails, simply reset the entire driver as the state of the VMC + * must be in tough shape. 
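+ * The reset tears down every HMC session and renegotiates
+ * capabilities from the crqinit state.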
+ */ +static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq, + struct crq_server_adapter *adapter) +{ + unsigned char hmc_index; + + hmc_index = crq->hmc_index; + if (hmc_index > ibmvmc.max_hmc_index) { + ibmvmc_reset(adapter, false); + return; + } + + if (crq->status) { + dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n", + crq->status); + ibmvmc_reset(adapter, false); + return; + } + + ibmvmc_return_hmc(&hmcs[hmc_index], false); +} + +/** + * ibmvmc_crq_process - Process CRQ + * + * @adapter: crq_server_adapter struct + * @crq: ibmvmc_crq_msg struct + * + * Process the CRQ message based upon the type of message received. + * + */ +static void ibmvmc_crq_process(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crq) +{ + switch (crq->type) { + case VMC_MSG_CAP_RESP: + dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n", + crq->type); + if (ibmvmc.state == ibmvmc_state_capabilities) + ibmvmc_process_capabilities(adapter, crq); + else + dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n", + ibmvmc.state); + break; + case VMC_MSG_OPEN_RESP: + dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_process_open_resp(crq, adapter); + break; + case VMC_MSG_ADD_BUF: + dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_add_buffer(adapter, crq); + break; + case VMC_MSG_REM_BUF: + dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_rem_buffer(adapter, crq); + break; + case VMC_MSG_SIGNAL: + dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_recv_msg(adapter, crq); + break; + case VMC_MSG_CLOSE_RESP: + dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_process_close_resp(crq, adapter); + break; + case VMC_MSG_CAP: + case VMC_MSG_OPEN: + case VMC_MSG_CLOSE: + case VMC_MSG_ADD_BUF_RESP: + case VMC_MSG_REM_BUF_RESP: + dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n", + crq->type); + break; + default: + dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n", + crq->type); + break; + } +} + +/** + * ibmvmc_handle_crq_init - Handle CRQ Init + * + * @crq: ibmvmc_crq_msg struct + * @adapter: crq_server_adapter struct + * + * Handle the type of crq initialization based on whether + * it is a message or a response. 
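+ * An incoming init message (0x01) is answered with an init response
+ * (leading CRQ word 0xC002...) followed by our capabilities; an
+ * incoming init response (0x02) means our own 0xC001... init message
+ * was accepted, so we likewise proceed to the capabilities exchange.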
+ * + */ +static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq, + struct crq_server_adapter *adapter) +{ + switch (crq->type) { + case 0x01: /* Initialization message */ + dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n", + ibmvmc.state); + if (ibmvmc.state == ibmvmc_state_crqinit) { + /* Send back a response */ + if (ibmvmc_send_crq(adapter, 0xC002000000000000, + 0) == 0) + ibmvmc_send_capabilities(adapter); + else + dev_err(adapter->dev, " Unable to send init rsp\n"); + } else { + dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n", + ibmvmc.state, ibmvmc.max_mtu); + } + + break; + case 0x02: /* Initialization response */ + dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n", + ibmvmc.state); + if (ibmvmc.state == ibmvmc_state_crqinit) + ibmvmc_send_capabilities(adapter); + break; + default: + dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n", + (unsigned long)crq->type); + } +} + +/** + * ibmvmc_handle_crq - Handle CRQ + * + * @crq: ibmvmc_crq_msg struct + * @adapter: crq_server_adapter struct + * + * Read the command elements from the command queue and execute the + * requests based upon the type of crq message. + * + */ +static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq, + struct crq_server_adapter *adapter) +{ + switch (crq->valid) { + case 0xC0: /* initialization */ + ibmvmc_handle_crq_init(crq, adapter); + break; + case 0xFF: /* Hypervisor telling us the connection is closed */ + dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n"); + ibmvmc_reset(adapter, true); + break; + case 0x80: /* real payload */ + ibmvmc_crq_process(adapter, crq); + break; + default: + dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n", + crq->valid); + break; + } +} + +static void ibmvmc_task(unsigned long data) +{ + struct crq_server_adapter *adapter = + (struct crq_server_adapter *)data; + struct vio_dev *vdev = to_vio_dev(adapter->dev); + struct ibmvmc_crq_msg *crq; + int done = 0; + + while (!done) { + /* Pull all the valid messages off the CRQ */ + while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) { + ibmvmc_handle_crq(crq, adapter); + crq->valid = 0x00; + /* CRQ reset was requested, stop processing CRQs. + * Interrupts will be re-enabled by the reset task. + */ + if (ibmvmc.state == ibmvmc_state_sched_reset) + return; + } + + vio_enable_interrupts(vdev); + crq = crq_queue_next_crq(&adapter->queue); + if (crq) { + vio_disable_interrupts(vdev); + ibmvmc_handle_crq(crq, adapter); + crq->valid = 0x00; + /* CRQ reset was requested, stop processing CRQs. + * Interrupts will be re-enabled by the reset task. 
+ */ + if (ibmvmc.state == ibmvmc_state_sched_reset) + return; + } else { + done = 1; + } + } +} + +/** + * ibmvmc_init_crq_queue - Init CRQ Queue + * + * @adapter: crq_server_adapter struct + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter) +{ + struct vio_dev *vdev = to_vio_dev(adapter->dev); + struct crq_queue *queue = &adapter->queue; + int rc = 0; + int retrc = 0; + + queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL); + + if (!queue->msgs) + goto malloc_failed; + + queue->size = PAGE_SIZE / sizeof(*queue->msgs); + + queue->msg_token = dma_map_single(adapter->dev, queue->msgs, + queue->size * sizeof(*queue->msgs), + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(adapter->dev, queue->msg_token)) + goto map_failed; + + retrc = plpar_hcall_norets(H_REG_CRQ, + vdev->unit_address, + queue->msg_token, PAGE_SIZE); + rc = retrc; + + if (rc == H_RESOURCE) + rc = ibmvmc_reset_crq_queue(adapter); + + if (rc == 2) { + dev_warn(adapter->dev, "Partner adapter not ready\n"); + retrc = 0; + } else if (rc != 0) { + dev_err(adapter->dev, "Error %d opening adapter\n", rc); + goto reg_crq_failed; + } + + queue->cur = 0; + spin_lock_init(&queue->lock); + + tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter); + + if (request_irq(vdev->irq, + ibmvmc_handle_event, + 0, "ibmvmc", (void *)adapter) != 0) { + dev_err(adapter->dev, "couldn't register irq 0x%x\n", + vdev->irq); + goto req_irq_failed; + } + + rc = vio_enable_interrupts(vdev); + if (rc != 0) { + dev_err(adapter->dev, "Error %d enabling interrupts!!!\n", rc); + goto req_irq_failed; + } + + return retrc; + +req_irq_failed: + /* Cannot have any work since we either never got our IRQ registered, + * or never got interrupts enabled + */ + tasklet_kill(&adapter->work_task); + h_free_crq(vdev->unit_address); +reg_crq_failed: + dma_unmap_single(adapter->dev, + queue->msg_token, + queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); +map_failed: + free_page((unsigned long)queue->msgs); +malloc_failed: + return -ENOMEM; +} + +/* Fill in the liobn and riobn fields on the adapter */ +static int read_dma_window(struct vio_dev *vdev, + struct crq_server_adapter *adapter) +{ + const __be32 *dma_window; + const __be32 *prop; + + /* TODO Using of_parse_dma_window would be better, but it doesn't give + * a way to read multiple windows without already knowing the size of + * a window or the number of windows + */ + dma_window = + (const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window", + NULL); + if (!dma_window) { + dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n"); + return -1; + } + + adapter->liobn = be32_to_cpu(*dma_window); + dma_window++; + + prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells", + NULL); + if (!prop) { + dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n"); + dma_window++; + } else { + dma_window += be32_to_cpu(*prop); + } + + prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells", + NULL); + if (!prop) { + dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n"); + dma_window++; + } else { + dma_window += be32_to_cpu(*prop); + } + + /* dma_window should point to the second window now */ + adapter->riobn = be32_to_cpu(*dma_window); + + return 0; +} + +static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id) +{ + struct crq_server_adapter *adapter = &ibmvmc_adapter; + int rc; + + dev_set_drvdata(&vdev->dev, NULL); + 
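+ /*
+  * drvdata stays NULL until the end of a successful probe, so a
+  * partially initialized adapter is never published through it.
+  */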
memset(adapter, 0, sizeof(*adapter)); + adapter->dev = &vdev->dev; + + dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address); + + rc = read_dma_window(vdev, adapter); + if (rc != 0) { + ibmvmc.state = ibmvmc_state_failed; + return -1; + } + + dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n", + adapter->liobn, adapter->riobn); + + init_waitqueue_head(&adapter->reset_wait_queue); + adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc"); + if (IS_ERR(adapter->reset_task)) { + dev_err(adapter->dev, "Failed to start reset thread\n"); + ibmvmc.state = ibmvmc_state_failed; + rc = PTR_ERR(adapter->reset_task); + adapter->reset_task = NULL; + return rc; + } + + rc = ibmvmc_init_crq_queue(adapter); + if (rc != 0 && rc != H_RESOURCE) { + dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n", + rc); + ibmvmc.state = ibmvmc_state_failed; + goto crq_failed; + } + + ibmvmc.state = ibmvmc_state_crqinit; + + /* Try to send an initialization message. Note that this is allowed + * to fail if the other end is not acive. In that case we just wait + * for the other side to initialize. + */ + if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 && + rc != H_RESOURCE) + dev_warn(adapter->dev, "Failed to send initialize CRQ message\n"); + + dev_set_drvdata(&vdev->dev, adapter); + + return 0; + +crq_failed: + kthread_stop(adapter->reset_task); + adapter->reset_task = NULL; + return -EPERM; +} + +static int ibmvmc_remove(struct vio_dev *vdev) +{ + struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev); + + dev_info(adapter->dev, "Entering remove for UA 0x%x\n", + vdev->unit_address); + ibmvmc_release_crq_queue(adapter); + + return 0; +} + +static struct vio_device_id ibmvmc_device_table[] = { + { "ibm,vmc", "IBM,vmc" }, + { "", "" } +}; +MODULE_DEVICE_TABLE(vio, ibmvmc_device_table); + +static struct vio_driver ibmvmc_driver = { + .name = ibmvmc_driver_name, + .id_table = ibmvmc_device_table, + .probe = ibmvmc_probe, + .remove = ibmvmc_remove, +}; + +static void __init ibmvmc_scrub_module_parms(void) +{ + if (ibmvmc_max_mtu > MAX_MTU) { + pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU); + ibmvmc_max_mtu = MAX_MTU; + } else if (ibmvmc_max_mtu < MIN_MTU) { + pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU); + ibmvmc_max_mtu = MIN_MTU; + } + + if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) { + pr_warn("ibmvmc: Max buffer pool size reduced to %d\n", + MAX_BUF_POOL_SIZE); + ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE; + } else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) { + pr_warn("ibmvmc: Max buffer pool size increased to %d\n", + MIN_BUF_POOL_SIZE); + ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE; + } + + if (ibmvmc_max_hmcs > MAX_HMCS) { + pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS); + ibmvmc_max_hmcs = MAX_HMCS; + } else if (ibmvmc_max_hmcs < MIN_HMCS) { + pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS); + ibmvmc_max_hmcs = MIN_HMCS; + } +} + +static struct miscdevice ibmvmc_miscdev = { + .name = ibmvmc_driver_name, + .minor = MISC_DYNAMIC_MINOR, + .fops = &ibmvmc_fops, +}; + +static int __init ibmvmc_module_init(void) +{ + int rc, i, j; + + ibmvmc.state = ibmvmc_state_initial; + pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION); + + rc = misc_register(&ibmvmc_miscdev); + if (rc) { + pr_err("ibmvmc: misc registration failed\n"); + goto misc_register_failed; + } + pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR, + ibmvmc_miscdev.minor); + + /* Initialize data structures */ + memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * 
MAX_HMCS); + for (i = 0; i < MAX_HMCS; i++) { + spin_lock_init(&hmcs[i].lock); + hmcs[i].state = ibmhmc_state_free; + for (j = 0; j < MAX_BUF_POOL_SIZE; j++) + hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID; + } + + /* Sanity check module parms */ + ibmvmc_scrub_module_parms(); + + /* + * Initialize some reasonable values. Might be negotiated smaller + * values during the capabilities exchange. + */ + ibmvmc.max_mtu = ibmvmc_max_mtu; + ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size; + ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1; + + rc = vio_register_driver(&ibmvmc_driver); + + if (rc) { + pr_err("ibmvmc: rc %d from vio_register_driver\n", rc); + goto vio_reg_failed; + } + + return 0; + +vio_reg_failed: + misc_deregister(&ibmvmc_miscdev); +misc_register_failed: + return rc; +} + +static void __exit ibmvmc_module_exit(void) +{ + pr_info("ibmvmc: module exit\n"); + vio_unregister_driver(&ibmvmc_driver); + misc_deregister(&ibmvmc_miscdev); +} + +module_init(ibmvmc_module_init); +module_exit(ibmvmc_module_exit); + +module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size, + int, 0644); +MODULE_PARM_DESC(buf_pool_size, "Buffer pool size"); +module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644); +MODULE_PARM_DESC(max_hmcs, "Max HMCs"); +module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644); +MODULE_PARM_DESC(max_mtu, "Max MTU"); + +MODULE_AUTHOR("Steven Royer <seroyer@linux.vnet.ibm.com>"); +MODULE_DESCRIPTION("IBM VMC"); +MODULE_VERSION(IBMVMC_DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/ibmvmc.h b/drivers/misc/ibmvmc.h new file mode 100644 index 000000000..0e1756fff --- /dev/null +++ b/drivers/misc/ibmvmc.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * linux/drivers/misc/ibmvmc.h + * + * IBM Power Systems Virtual Management Channel Support. + * + * Copyright (c) 2004, 2018 IBM Corp. + * Dave Engebretsen engebret@us.ibm.com + * Steven Royer seroyer@linux.vnet.ibm.com + * Adam Reznechek adreznec@linux.vnet.ibm.com + * Bryant G. 
Ly <bryantly@linux.vnet.ibm.com> + */ +#ifndef IBMVMC_H +#define IBMVMC_H + +#include <linux/types.h> +#include <linux/cdev.h> + +#include <asm/vio.h> + +#define IBMVMC_PROTOCOL_VERSION 0x0101 + +#define MIN_BUF_POOL_SIZE 16 +#define MIN_HMCS 1 +#define MIN_MTU 4096 +#define MAX_BUF_POOL_SIZE 64 +#define MAX_HMCS 2 +#define MAX_MTU (4 * 4096) +#define DEFAULT_BUF_POOL_SIZE 32 +#define DEFAULT_HMCS 1 +#define DEFAULT_MTU 4096 +#define HMC_ID_LEN 32 + +#define VMC_INVALID_BUFFER_ID 0xFFFF + +/* ioctl numbers */ +#define VMC_BASE 0xCC +#define VMC_IOCTL_SETHMCID _IOW(VMC_BASE, 0x00, unsigned char *) +#define VMC_IOCTL_QUERY _IOR(VMC_BASE, 0x01, struct ibmvmc_query_struct) +#define VMC_IOCTL_REQUESTVMC _IOR(VMC_BASE, 0x02, u32) + +#define VMC_MSG_CAP 0x01 +#define VMC_MSG_CAP_RESP 0x81 +#define VMC_MSG_OPEN 0x02 +#define VMC_MSG_OPEN_RESP 0x82 +#define VMC_MSG_CLOSE 0x03 +#define VMC_MSG_CLOSE_RESP 0x83 +#define VMC_MSG_ADD_BUF 0x04 +#define VMC_MSG_ADD_BUF_RESP 0x84 +#define VMC_MSG_REM_BUF 0x05 +#define VMC_MSG_REM_BUF_RESP 0x85 +#define VMC_MSG_SIGNAL 0x06 + +#define VMC_MSG_SUCCESS 0 +#define VMC_MSG_INVALID_HMC_INDEX 1 +#define VMC_MSG_INVALID_BUFFER_ID 2 +#define VMC_MSG_CLOSED_HMC 3 +#define VMC_MSG_INTERFACE_FAILURE 4 +#define VMC_MSG_NO_BUFFER 5 + +#define VMC_BUF_OWNER_ALPHA 0 +#define VMC_BUF_OWNER_HV 1 + +enum ibmvmc_states { + ibmvmc_state_sched_reset = -1, + ibmvmc_state_initial = 0, + ibmvmc_state_crqinit = 1, + ibmvmc_state_capabilities = 2, + ibmvmc_state_ready = 3, + ibmvmc_state_failed = 4, +}; + +enum ibmhmc_states { + /* HMC connection not established */ + ibmhmc_state_free = 0, + + /* HMC connection established (open called) */ + ibmhmc_state_initial = 1, + + /* open msg sent to HV, due to ioctl(1) call */ + ibmhmc_state_opening = 2, + + /* HMC connection ready, open resp msg from HV */ + ibmhmc_state_ready = 3, + + /* HMC connection failure */ + ibmhmc_state_failed = 4, +}; + +struct ibmvmc_buffer { + u8 valid; /* 1 when DMA storage allocated to buffer */ + u8 free; /* 1 when buffer available for the Alpha Partition */ + u8 owner; + u16 id; + u32 size; + u32 msg_len; + dma_addr_t dma_addr_local; + dma_addr_t dma_addr_remote; + void *real_addr_local; +}; + +struct ibmvmc_admin_crq_msg { + u8 valid; /* RPA Defined */ + u8 type; /* ibmvmc msg type */ + u8 status; /* Response msg status. Zero is success and on failure, + * either 1 - General Failure, or 2 - Invalid Version is + * returned. + */ + u8 rsvd[2]; + u8 max_hmc; /* Max # of independent HMC connections supported */ + __be16 pool_size; /* Maximum number of buffers supported per HMC + * connection + */ + __be32 max_mtu; /* Maximum message size supported (bytes) */ + __be16 crq_size; /* # of entries available in the CRQ for the + * source partition. The target partition must + * limit the number of outstanding messages to + * one half or less. + */ + __be16 version; /* Indicates the code level of the management partition + * or the hypervisor with the high-order byte + * indicating a major version and the low-order byte + * indicating a minor version. 
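+ * Only the major (high-order) byte has to match
+ * for the capabilities exchange to succeed.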
+ */ +}; + +struct ibmvmc_crq_msg { + u8 valid; /* RPA Defined */ + u8 type; /* ibmvmc msg type */ + u8 status; /* Response msg status */ + union { + u8 rsvd; /* Reserved */ + u8 owner; + } var1; + u8 hmc_session; /* Session Identifier for the current VMC connection */ + u8 hmc_index; /* A unique HMC Idx would be used if multiple management + * applications running concurrently were desired + */ + union { + __be16 rsvd; + __be16 buffer_id; + } var2; + __be32 rsvd; + union { + __be32 rsvd; + __be32 lioba; + __be32 msg_len; + } var3; +}; + +/* an RPA command/response transport queue */ +struct crq_queue { + struct ibmvmc_crq_msg *msgs; + int size, cur; + dma_addr_t msg_token; + spinlock_t lock; +}; + +/* VMC server adapter settings */ +struct crq_server_adapter { + struct device *dev; + struct crq_queue queue; + u32 liobn; + u32 riobn; + struct tasklet_struct work_task; + wait_queue_head_t reset_wait_queue; + struct task_struct *reset_task; +}; + +/* Driver wide settings */ +struct ibmvmc_struct { + u32 state; + u32 max_mtu; + u32 max_buffer_pool_size; + u32 max_hmc_index; + struct crq_server_adapter *adapter; + struct cdev cdev; + u32 vmc_drc_index; +}; + +struct ibmvmc_file_session; + +/* Connection specific settings */ +struct ibmvmc_hmc { + u8 session; + u8 index; + u32 state; + struct crq_server_adapter *adapter; + spinlock_t lock; + unsigned char hmc_id[HMC_ID_LEN]; + struct ibmvmc_buffer buffer[MAX_BUF_POOL_SIZE]; + unsigned short queue_outbound_msgs[MAX_BUF_POOL_SIZE]; + int queue_head, queue_tail; + struct ibmvmc_file_session *file_session; +}; + +struct ibmvmc_file_session { + struct file *file; + struct ibmvmc_hmc *hmc; + bool valid; +}; + +struct ibmvmc_query_struct { + int have_vmc; + int state; + int vmc_drc_index; +}; + +#endif /* __IBMVMC_H */ diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c new file mode 100644 index 000000000..0f9ea75b0 --- /dev/null +++ b/drivers/misc/ics932s401.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * A driver for the Integrated Circuits ICS932S401 + * Copyright (C) 2008 IBM + * + * Author: Darrick J. 
Wong <darrick.wong@oracle.com> + */ + +#include <linux/module.h> +#include <linux/jiffies.h> +#include <linux/i2c.h> +#include <linux/err.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <linux/log2.h> +#include <linux/slab.h> + +/* Addresses to scan */ +static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END }; + +/* ICS932S401 registers */ +#define ICS932S401_REG_CFG2 0x01 +#define ICS932S401_CFG1_SPREAD 0x01 +#define ICS932S401_REG_CFG7 0x06 +#define ICS932S401_FS_MASK 0x07 +#define ICS932S401_REG_VENDOR_REV 0x07 +#define ICS932S401_VENDOR 1 +#define ICS932S401_VENDOR_MASK 0x0F +#define ICS932S401_REV 4 +#define ICS932S401_REV_SHIFT 4 +#define ICS932S401_REG_DEVICE 0x09 +#define ICS932S401_DEVICE 11 +#define ICS932S401_REG_CTRL 0x0A +#define ICS932S401_MN_ENABLED 0x80 +#define ICS932S401_CPU_ALT 0x04 +#define ICS932S401_SRC_ALT 0x08 +#define ICS932S401_REG_CPU_M_CTRL 0x0B +#define ICS932S401_M_MASK 0x3F +#define ICS932S401_REG_CPU_N_CTRL 0x0C +#define ICS932S401_REG_CPU_SPREAD1 0x0D +#define ICS932S401_REG_CPU_SPREAD2 0x0E +#define ICS932S401_SPREAD_MASK 0x7FFF +#define ICS932S401_REG_SRC_M_CTRL 0x0F +#define ICS932S401_REG_SRC_N_CTRL 0x10 +#define ICS932S401_REG_SRC_SPREAD1 0x11 +#define ICS932S401_REG_SRC_SPREAD2 0x12 +#define ICS932S401_REG_CPU_DIVISOR 0x13 +#define ICS932S401_CPU_DIVISOR_SHIFT 4 +#define ICS932S401_REG_PCISRC_DIVISOR 0x14 +#define ICS932S401_SRC_DIVISOR_MASK 0x0F +#define ICS932S401_PCI_DIVISOR_SHIFT 4 + +/* Base clock is 14.318MHz */ +#define BASE_CLOCK 14318 + +#define NUM_REGS 21 +#define NUM_MIRRORED_REGS 15 + +static int regs_to_copy[NUM_MIRRORED_REGS] = { + ICS932S401_REG_CFG2, + ICS932S401_REG_CFG7, + ICS932S401_REG_VENDOR_REV, + ICS932S401_REG_DEVICE, + ICS932S401_REG_CTRL, + ICS932S401_REG_CPU_M_CTRL, + ICS932S401_REG_CPU_N_CTRL, + ICS932S401_REG_CPU_SPREAD1, + ICS932S401_REG_CPU_SPREAD2, + ICS932S401_REG_SRC_M_CTRL, + ICS932S401_REG_SRC_N_CTRL, + ICS932S401_REG_SRC_SPREAD1, + ICS932S401_REG_SRC_SPREAD2, + ICS932S401_REG_CPU_DIVISOR, + ICS932S401_REG_PCISRC_DIVISOR, +}; + +/* How often do we reread sensors values? (In jiffies) */ +#define SENSOR_REFRESH_INTERVAL (2 * HZ) + +/* How often do we reread sensor limit values? 
(In jiffies) */ +#define LIMIT_REFRESH_INTERVAL (60 * HZ) + +struct ics932s401_data { + struct attribute_group attrs; + struct mutex lock; + char sensors_valid; + unsigned long sensors_last_updated; /* In jiffies */ + + u8 regs[NUM_REGS]; +}; + +static int ics932s401_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int ics932s401_detect(struct i2c_client *client, + struct i2c_board_info *info); +static int ics932s401_remove(struct i2c_client *client); + +static const struct i2c_device_id ics932s401_id[] = { + { "ics932s401", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ics932s401_id); + +static struct i2c_driver ics932s401_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "ics932s401", + }, + .probe = ics932s401_probe, + .remove = ics932s401_remove, + .id_table = ics932s401_id, + .detect = ics932s401_detect, + .address_list = normal_i2c, +}; + +static struct ics932s401_data *ics932s401_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ics932s401_data *data = i2c_get_clientdata(client); + unsigned long local_jiffies = jiffies; + int i, temp; + + mutex_lock(&data->lock); + if (time_before(local_jiffies, data->sensors_last_updated + + SENSOR_REFRESH_INTERVAL) + && data->sensors_valid) + goto out; + + /* + * Each register must be read as a word and then right shifted 8 bits. + * Not really sure why this is; setting the "byte count programming" + * register to 1 does not fix this problem. + */ + for (i = 0; i < NUM_MIRRORED_REGS; i++) { + temp = i2c_smbus_read_word_data(client, regs_to_copy[i]); + if (temp < 0) + temp = 0; + data->regs[regs_to_copy[i]] = temp >> 8; + } + + data->sensors_last_updated = local_jiffies; + data->sensors_valid = 1; + +out: + mutex_unlock(&data->lock); + return data; +} + +static ssize_t show_spread_enabled(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + + if (data->regs[ICS932S401_REG_CFG2] & ICS932S401_CFG1_SPREAD) + return sprintf(buf, "1\n"); + + return sprintf(buf, "0\n"); +} + +/* bit to cpu khz map */ +static const int fs_speeds[] = { + 266666, + 133333, + 200000, + 166666, + 333333, + 100000, + 400000, + 0, +}; + +/* clock divisor map */ +static const int divisors[] = {2, 3, 5, 15, 4, 6, 10, 30, 8, 12, 20, 60, 16, + 24, 40, 120}; + +/* Calculate CPU frequency from the M/N registers. 
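+ * The synthesizer runs at BASE_CLOCK * (N + 8) / (M + 2), with N's
+ * bits 8..9 stored in the top two bits of the M register; the result
+ * is divided down by the divisor selected in the upper nibble of the
+ * CPU divisor register.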
*/ +static int calculate_cpu_freq(struct ics932s401_data *data) +{ + int m, n, freq; + + m = data->regs[ICS932S401_REG_CPU_M_CTRL] & ICS932S401_M_MASK; + n = data->regs[ICS932S401_REG_CPU_N_CTRL]; + + /* Pull in bits 8 & 9 from the M register */ + n |= ((int)data->regs[ICS932S401_REG_CPU_M_CTRL] & 0x80) << 1; + n |= ((int)data->regs[ICS932S401_REG_CPU_M_CTRL] & 0x40) << 3; + + freq = BASE_CLOCK * (n + 8) / (m + 2); + freq /= divisors[data->regs[ICS932S401_REG_CPU_DIVISOR] >> + ICS932S401_CPU_DIVISOR_SHIFT]; + + return freq; +} + +static ssize_t show_cpu_clock(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + + return sprintf(buf, "%d\n", calculate_cpu_freq(data)); +} + +static ssize_t show_cpu_clock_sel(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + int freq; + + if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED) + freq = calculate_cpu_freq(data); + else { + /* Freq is neatly wrapped up for us */ + int fid = data->regs[ICS932S401_REG_CFG7] & ICS932S401_FS_MASK; + + freq = fs_speeds[fid]; + if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_CPU_ALT) { + switch (freq) { + case 166666: + freq = 160000; + break; + case 333333: + freq = 320000; + break; + } + } + } + + return sprintf(buf, "%d\n", freq); +} + +/* Calculate SRC frequency from the M/N registers. */ +static int calculate_src_freq(struct ics932s401_data *data) +{ + int m, n, freq; + + m = data->regs[ICS932S401_REG_SRC_M_CTRL] & ICS932S401_M_MASK; + n = data->regs[ICS932S401_REG_SRC_N_CTRL]; + + /* Pull in bits 8 & 9 from the M register */ + n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x80) << 1; + n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x40) << 3; + + freq = BASE_CLOCK * (n + 8) / (m + 2); + freq /= divisors[data->regs[ICS932S401_REG_PCISRC_DIVISOR] & + ICS932S401_SRC_DIVISOR_MASK]; + + return freq; +} + +static ssize_t show_src_clock(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + + return sprintf(buf, "%d\n", calculate_src_freq(data)); +} + +static ssize_t show_src_clock_sel(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + int freq; + + if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED) + freq = calculate_src_freq(data); + else + /* Freq is neatly wrapped up for us */ + if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_CPU_ALT && + data->regs[ICS932S401_REG_CTRL] & ICS932S401_SRC_ALT) + freq = 96000; + else + freq = 100000; + + return sprintf(buf, "%d\n", freq); +} + +/* Calculate PCI frequency from the SRC M/N registers. 
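+ * PCI shares the SRC M/N synthesizer; only the post-divisor differs,
+ * coming from the upper nibble of the PCI/SRC divisor register rather
+ * than the low nibble used for the SRC output.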
*/ +static int calculate_pci_freq(struct ics932s401_data *data) +{ + int m, n, freq; + + m = data->regs[ICS932S401_REG_SRC_M_CTRL] & ICS932S401_M_MASK; + n = data->regs[ICS932S401_REG_SRC_N_CTRL]; + + /* Pull in bits 8 & 9 from the M register */ + n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x80) << 1; + n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x40) << 3; + + freq = BASE_CLOCK * (n + 8) / (m + 2); + freq /= divisors[data->regs[ICS932S401_REG_PCISRC_DIVISOR] >> + ICS932S401_PCI_DIVISOR_SHIFT]; + + return freq; +} + +static ssize_t show_pci_clock(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + + return sprintf(buf, "%d\n", calculate_pci_freq(data)); +} + +static ssize_t show_pci_clock_sel(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + int freq; + + if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED) + freq = calculate_pci_freq(data); + else + freq = 33333; + + return sprintf(buf, "%d\n", freq); +} + +static ssize_t show_value(struct device *dev, + struct device_attribute *devattr, + char *buf); + +static ssize_t show_spread(struct device *dev, + struct device_attribute *devattr, + char *buf); + +static DEVICE_ATTR(spread_enabled, S_IRUGO, show_spread_enabled, NULL); +static DEVICE_ATTR(cpu_clock_selection, S_IRUGO, show_cpu_clock_sel, NULL); +static DEVICE_ATTR(cpu_clock, S_IRUGO, show_cpu_clock, NULL); +static DEVICE_ATTR(src_clock_selection, S_IRUGO, show_src_clock_sel, NULL); +static DEVICE_ATTR(src_clock, S_IRUGO, show_src_clock, NULL); +static DEVICE_ATTR(pci_clock_selection, S_IRUGO, show_pci_clock_sel, NULL); +static DEVICE_ATTR(pci_clock, S_IRUGO, show_pci_clock, NULL); +static DEVICE_ATTR(usb_clock, S_IRUGO, show_value, NULL); +static DEVICE_ATTR(ref_clock, S_IRUGO, show_value, NULL); +static DEVICE_ATTR(cpu_spread, S_IRUGO, show_spread, NULL); +static DEVICE_ATTR(src_spread, S_IRUGO, show_spread, NULL); + +static struct attribute *ics932s401_attr[] = { + &dev_attr_spread_enabled.attr, + &dev_attr_cpu_clock_selection.attr, + &dev_attr_cpu_clock.attr, + &dev_attr_src_clock_selection.attr, + &dev_attr_src_clock.attr, + &dev_attr_pci_clock_selection.attr, + &dev_attr_pci_clock.attr, + &dev_attr_usb_clock.attr, + &dev_attr_ref_clock.attr, + &dev_attr_cpu_spread.attr, + &dev_attr_src_spread.attr, + NULL +}; + +static ssize_t show_value(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + int x; + + if (devattr == &dev_attr_usb_clock) + x = 48000; + else if (devattr == &dev_attr_ref_clock) + x = BASE_CLOCK; + else + BUG(); + + return sprintf(buf, "%d\n", x); +} + +static ssize_t show_spread(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct ics932s401_data *data = ics932s401_update_device(dev); + int reg; + unsigned long val; + + if (!(data->regs[ICS932S401_REG_CFG2] & ICS932S401_CFG1_SPREAD)) + return sprintf(buf, "0%%\n"); + + if (devattr == &dev_attr_src_spread) + reg = ICS932S401_REG_SRC_SPREAD1; + else if (devattr == &dev_attr_cpu_spread) + reg = ICS932S401_REG_CPU_SPREAD1; + else + BUG(); + + val = data->regs[reg] | (data->regs[reg + 1] << 8); + val &= ICS932S401_SPREAD_MASK; + + /* Scale 0..2^14 to -0.5. 
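+ * The 14-bit spread value maps linearly onto a 0..-0.5% down-spread;
+ * 500000 * val / 16384 yields the fractional digits printed after
+ * the "-0." prefix.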
*/ + val = 500000 * val / 16384; + return sprintf(buf, "-0.%lu%%\n", val); +} + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int ics932s401_detect(struct i2c_client *client, + struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = client->adapter; + int vendor, device, revision; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + + vendor = i2c_smbus_read_word_data(client, ICS932S401_REG_VENDOR_REV); + vendor >>= 8; + revision = vendor >> ICS932S401_REV_SHIFT; + vendor &= ICS932S401_VENDOR_MASK; + if (vendor != ICS932S401_VENDOR) + return -ENODEV; + + device = i2c_smbus_read_word_data(client, ICS932S401_REG_DEVICE); + device >>= 8; + if (device != ICS932S401_DEVICE) + return -ENODEV; + + if (revision != ICS932S401_REV) + dev_info(&adapter->dev, "Unknown revision %d\n", revision); + + strlcpy(info->type, "ics932s401", I2C_NAME_SIZE); + + return 0; +} + +static int ics932s401_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct ics932s401_data *data; + int err; + + data = kzalloc(sizeof(struct ics932s401_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->lock); + + dev_info(&client->dev, "%s chip found\n", client->name); + + /* Register sysfs hooks */ + data->attrs.attrs = ics932s401_attr; + err = sysfs_create_group(&client->dev.kobj, &data->attrs); + if (err) + goto exit_free; + + return 0; + +exit_free: + kfree(data); +exit: + return err; +} + +static int ics932s401_remove(struct i2c_client *client) +{ + struct ics932s401_data *data = i2c_get_clientdata(client); + + sysfs_remove_group(&client->dev.kobj, &data->attrs); + kfree(data); + return 0; +} + +module_i2c_driver(ics932s401_driver); + +MODULE_AUTHOR("Darrick J. Wong <darrick.wong@oracle.com>"); +MODULE_DESCRIPTION("ICS932S401 driver"); +MODULE_LICENSE("GPL"); + +/* IBM IntelliStation Z30 */ +MODULE_ALIAS("dmi:bvnIBM:*:rn9228:*"); +MODULE_ALIAS("dmi:bvnIBM:*:rn9232:*"); + +/* IBM x3650/x3550 */ +MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3650*"); +MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3550*"); diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c new file mode 100644 index 000000000..c12406f61 --- /dev/null +++ b/drivers/misc/isl29003.c @@ -0,0 +1,474 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * isl29003.c - Linux kernel module for + * Intersil ISL29003 ambient light sensor + * + * See file:Documentation/misc-devices/isl29003.rst + * + * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> + * + * Based on code written by + * Rodolfo Giometti <giometti@linux.it> + * Eurotech S.p.A. 
<info@eurotech.it> + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/mutex.h> +#include <linux/delay.h> + +#define ISL29003_DRV_NAME "isl29003" +#define DRIVER_VERSION "1.0" + +#define ISL29003_REG_COMMAND 0x00 +#define ISL29003_ADC_ENABLED (1 << 7) +#define ISL29003_ADC_PD (1 << 6) +#define ISL29003_TIMING_INT (1 << 5) +#define ISL29003_MODE_SHIFT (2) +#define ISL29003_MODE_MASK (0x3 << ISL29003_MODE_SHIFT) +#define ISL29003_RES_SHIFT (0) +#define ISL29003_RES_MASK (0x3 << ISL29003_RES_SHIFT) + +#define ISL29003_REG_CONTROL 0x01 +#define ISL29003_INT_FLG (1 << 5) +#define ISL29003_RANGE_SHIFT (2) +#define ISL29003_RANGE_MASK (0x3 << ISL29003_RANGE_SHIFT) +#define ISL29003_INT_PERSISTS_SHIFT (0) +#define ISL29003_INT_PERSISTS_MASK (0xf << ISL29003_INT_PERSISTS_SHIFT) + +#define ISL29003_REG_IRQ_THRESH_HI 0x02 +#define ISL29003_REG_IRQ_THRESH_LO 0x03 +#define ISL29003_REG_LSB_SENSOR 0x04 +#define ISL29003_REG_MSB_SENSOR 0x05 +#define ISL29003_REG_LSB_TIMER 0x06 +#define ISL29003_REG_MSB_TIMER 0x07 + +#define ISL29003_NUM_CACHABLE_REGS 4 + +struct isl29003_data { + struct i2c_client *client; + struct mutex lock; + u8 reg_cache[ISL29003_NUM_CACHABLE_REGS]; + u8 power_state_before_suspend; +}; + +static int gain_range[] = { + 1000, 4000, 16000, 64000 +}; + +/* + * register access helpers + */ + +static int __isl29003_read_reg(struct i2c_client *client, + u32 reg, u8 mask, u8 shift) +{ + struct isl29003_data *data = i2c_get_clientdata(client); + + return (data->reg_cache[reg] & mask) >> shift; +} + +static int __isl29003_write_reg(struct i2c_client *client, + u32 reg, u8 mask, u8 shift, u8 val) +{ + struct isl29003_data *data = i2c_get_clientdata(client); + int ret = 0; + u8 tmp; + + if (reg >= ISL29003_NUM_CACHABLE_REGS) + return -EINVAL; + + mutex_lock(&data->lock); + + tmp = data->reg_cache[reg]; + tmp &= ~mask; + tmp |= val << shift; + + ret = i2c_smbus_write_byte_data(client, reg, tmp); + if (!ret) + data->reg_cache[reg] = tmp; + + mutex_unlock(&data->lock); + return ret; +} + +/* + * internally used functions + */ + +/* range */ +static int isl29003_get_range(struct i2c_client *client) +{ + return __isl29003_read_reg(client, ISL29003_REG_CONTROL, + ISL29003_RANGE_MASK, ISL29003_RANGE_SHIFT); +} + +static int isl29003_set_range(struct i2c_client *client, int range) +{ + return __isl29003_write_reg(client, ISL29003_REG_CONTROL, + ISL29003_RANGE_MASK, ISL29003_RANGE_SHIFT, range); +} + +/* resolution */ +static int isl29003_get_resolution(struct i2c_client *client) +{ + return __isl29003_read_reg(client, ISL29003_REG_COMMAND, + ISL29003_RES_MASK, ISL29003_RES_SHIFT); +} + +static int isl29003_set_resolution(struct i2c_client *client, int res) +{ + return __isl29003_write_reg(client, ISL29003_REG_COMMAND, + ISL29003_RES_MASK, ISL29003_RES_SHIFT, res); +} + +/* mode */ +static int isl29003_get_mode(struct i2c_client *client) +{ + return __isl29003_read_reg(client, ISL29003_REG_COMMAND, + ISL29003_RES_MASK, ISL29003_RES_SHIFT); +} + +static int isl29003_set_mode(struct i2c_client *client, int mode) +{ + return __isl29003_write_reg(client, ISL29003_REG_COMMAND, + ISL29003_RES_MASK, ISL29003_RES_SHIFT, mode); +} + +/* power_state */ +static int isl29003_set_power_state(struct i2c_client *client, int state) +{ + return __isl29003_write_reg(client, ISL29003_REG_COMMAND, + ISL29003_ADC_ENABLED | ISL29003_ADC_PD, 0, + state ? 
ISL29003_ADC_ENABLED : ISL29003_ADC_PD); +} + +static int isl29003_get_power_state(struct i2c_client *client) +{ + struct isl29003_data *data = i2c_get_clientdata(client); + u8 cmdreg = data->reg_cache[ISL29003_REG_COMMAND]; + + return ~cmdreg & ISL29003_ADC_PD; +} + +static int isl29003_get_adc_value(struct i2c_client *client) +{ + struct isl29003_data *data = i2c_get_clientdata(client); + int lsb, msb, range, bitdepth; + + mutex_lock(&data->lock); + lsb = i2c_smbus_read_byte_data(client, ISL29003_REG_LSB_SENSOR); + + if (lsb < 0) { + mutex_unlock(&data->lock); + return lsb; + } + + msb = i2c_smbus_read_byte_data(client, ISL29003_REG_MSB_SENSOR); + mutex_unlock(&data->lock); + + if (msb < 0) + return msb; + + range = isl29003_get_range(client); + bitdepth = (4 - isl29003_get_resolution(client)) * 4; + return (((msb << 8) | lsb) * gain_range[range]) >> bitdepth; +} + +/* + * sysfs layer + */ + +/* range */ +static ssize_t isl29003_show_range(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + + return sprintf(buf, "%i\n", isl29003_get_range(client)); +} + +static ssize_t isl29003_store_range(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned long val; + int ret; + + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + + if (val > 3) + return -EINVAL; + + ret = isl29003_set_range(client, val); + if (ret < 0) + return ret; + + return count; +} + +static DEVICE_ATTR(range, S_IWUSR | S_IRUGO, + isl29003_show_range, isl29003_store_range); + + +/* resolution */ +static ssize_t isl29003_show_resolution(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + + return sprintf(buf, "%d\n", isl29003_get_resolution(client)); +} + +static ssize_t isl29003_store_resolution(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned long val; + int ret; + + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + + if (val > 3) + return -EINVAL; + + ret = isl29003_set_resolution(client, val); + if (ret < 0) + return ret; + + return count; +} + +static DEVICE_ATTR(resolution, S_IWUSR | S_IRUGO, + isl29003_show_resolution, isl29003_store_resolution); + +/* mode */ +static ssize_t isl29003_show_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + + return sprintf(buf, "%d\n", isl29003_get_mode(client)); +} + +static ssize_t isl29003_store_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned long val; + int ret; + + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + + if (val > 2) + return -EINVAL; + + ret = isl29003_set_mode(client, val); + if (ret < 0) + return ret; + + return count; +} + +static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, + isl29003_show_mode, isl29003_store_mode); + + +/* power state */ +static ssize_t isl29003_show_power_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + + return sprintf(buf, "%d\n", isl29003_get_power_state(client)); +} + +static ssize_t isl29003_store_power_state(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + 
unsigned long val; + int ret; + + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + + if (val > 1) + return -EINVAL; + + ret = isl29003_set_power_state(client, val); + return ret ? ret : count; +} + +static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO, + isl29003_show_power_state, isl29003_store_power_state); + + +/* lux */ +static ssize_t isl29003_show_lux(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + + /* No LUX data if not operational */ + if (!isl29003_get_power_state(client)) + return -EBUSY; + + return sprintf(buf, "%d\n", isl29003_get_adc_value(client)); +} + +static DEVICE_ATTR(lux, S_IRUGO, isl29003_show_lux, NULL); + +static struct attribute *isl29003_attributes[] = { + &dev_attr_range.attr, + &dev_attr_resolution.attr, + &dev_attr_mode.attr, + &dev_attr_power_state.attr, + &dev_attr_lux.attr, + NULL +}; + +static const struct attribute_group isl29003_attr_group = { + .attrs = isl29003_attributes, +}; + +static int isl29003_init_client(struct i2c_client *client) +{ + struct isl29003_data *data = i2c_get_clientdata(client); + int i; + + /* read all the registers once to fill the cache. + * if one of the reads fails, we consider the init failed */ + for (i = 0; i < ARRAY_SIZE(data->reg_cache); i++) { + int v = i2c_smbus_read_byte_data(client, i); + + if (v < 0) + return -ENODEV; + + data->reg_cache[i] = v; + } + + /* set defaults */ + isl29003_set_range(client, 0); + isl29003_set_resolution(client, 0); + isl29003_set_mode(client, 0); + isl29003_set_power_state(client, 0); + + return 0; +} + +/* + * I2C layer + */ + +static int isl29003_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = client->adapter; + struct isl29003_data *data; + int err = 0; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) + return -EIO; + + data = kzalloc(sizeof(struct isl29003_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->client = client; + i2c_set_clientdata(client, data); + mutex_init(&data->lock); + + /* initialize the ISL29003 chip */ + err = isl29003_init_client(client); + if (err) + goto exit_kfree; + + /* register sysfs hooks */ + err = sysfs_create_group(&client->dev.kobj, &isl29003_attr_group); + if (err) + goto exit_kfree; + + dev_info(&client->dev, "driver version %s enabled\n", DRIVER_VERSION); + return 0; + +exit_kfree: + kfree(data); + return err; +} + +static int isl29003_remove(struct i2c_client *client) +{ + sysfs_remove_group(&client->dev.kobj, &isl29003_attr_group); + isl29003_set_power_state(client, 0); + kfree(i2c_get_clientdata(client)); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int isl29003_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct isl29003_data *data = i2c_get_clientdata(client); + + data->power_state_before_suspend = isl29003_get_power_state(client); + return isl29003_set_power_state(client, 0); +} + +static int isl29003_resume(struct device *dev) +{ + int i; + struct i2c_client *client = to_i2c_client(dev); + struct isl29003_data *data = i2c_get_clientdata(client); + + /* restore registers from cache */ + for (i = 0; i < ARRAY_SIZE(data->reg_cache); i++) + if (i2c_smbus_write_byte_data(client, i, data->reg_cache[i])) + return -EIO; + + return isl29003_set_power_state(client, + data->power_state_before_suspend); +} + +static SIMPLE_DEV_PM_OPS(isl29003_pm_ops, isl29003_suspend, isl29003_resume); +#define ISL29003_PM_OPS (&isl29003_pm_ops) + +#else +#define ISL29003_PM_OPS 
NULL +#endif /* CONFIG_PM_SLEEP */ + +static const struct i2c_device_id isl29003_id[] = { + { "isl29003", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, isl29003_id); + +static struct i2c_driver isl29003_driver = { + .driver = { + .name = ISL29003_DRV_NAME, + .pm = ISL29003_PM_OPS, + }, + .probe = isl29003_probe, + .remove = isl29003_remove, + .id_table = isl29003_id, +}; + +module_i2c_driver(isl29003_driver); + +MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); +MODULE_DESCRIPTION("ISL29003 ambient light sensor driver"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRIVER_VERSION); diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c new file mode 100644 index 000000000..fc5ff2805 --- /dev/null +++ b/drivers/misc/isl29020.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * isl29020.c - Intersil ALS Driver + * + * Copyright (C) 2008 Intel Corp + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * Data sheet at: http://www.intersil.com/data/fn/fn6505.pdf + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/sysfs.h> +#include <linux/pm_runtime.h> + +static DEFINE_MUTEX(mutex); + +static ssize_t als_sensing_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + int val; + + val = i2c_smbus_read_byte_data(client, 0x00); + + if (val < 0) + return val; + return sprintf(buf, "%d000\n", 1 << (2 * (val & 3))); + +} + +static ssize_t als_lux_input_data_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + int ret_val, val; + unsigned long int lux; + int temp; + + pm_runtime_get_sync(dev); + msleep(100); + + mutex_lock(&mutex); + temp = i2c_smbus_read_byte_data(client, 0x02); /* MSB data */ + if (temp < 0) { + pm_runtime_put_sync(dev); + mutex_unlock(&mutex); + return temp; + } + + ret_val = i2c_smbus_read_byte_data(client, 0x01); /* LSB data */ + mutex_unlock(&mutex); + + if (ret_val < 0) { + pm_runtime_put_sync(dev); + return ret_val; + } + + ret_val |= temp << 8; + val = i2c_smbus_read_byte_data(client, 0x00); + pm_runtime_put_sync(dev); + if (val < 0) + return val; + lux = ((((1 << (2 * (val & 3))))*1000) * ret_val) / 65536; + return sprintf(buf, "%ld\n", lux); +} + +static ssize_t als_sensing_range_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + int ret_val; + unsigned long val; + + ret_val = kstrtoul(buf, 10, &val); + if (ret_val) + return ret_val; + + if (val < 1 || val > 64000) + return -EINVAL; + + /* Pick the smallest sensor range that will meet our requirements */ + if (val <= 1000) + val = 1; + else if (val <= 4000) + val = 2; + else if (val <= 16000) + val = 3; + else + val = 4; + + ret_val = i2c_smbus_read_byte_data(client, 0x00); + if (ret_val < 0) + return ret_val; + + ret_val &= 0xFC; /*reset the bit before setting them */ + ret_val |= val - 1; + ret_val = i2c_smbus_write_byte_data(client, 0x00, ret_val); + + if (ret_val < 0) + return ret_val; + return count; +} + +static void als_set_power_state(struct i2c_client *client, int enable) +{ + int ret_val; + + ret_val = i2c_smbus_read_byte_data(client, 0x00); + if (ret_val < 0) + return; + + if (enable) + ret_val |= 0x80; + else + ret_val &= 0x7F; + + 
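+ /* Bit 7 of command register 0x00 gates the ALS core. */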
i2c_smbus_write_byte_data(client, 0x00, ret_val); +} + +static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR, + als_sensing_range_show, als_sensing_range_store); +static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux_input_data_show, NULL); + +static struct attribute *mid_att_als[] = { + &dev_attr_lux0_sensor_range.attr, + &dev_attr_lux0_input.attr, + NULL +}; + +static const struct attribute_group m_als_gr = { + .name = "isl29020", + .attrs = mid_att_als +}; + +static int als_set_default_config(struct i2c_client *client) +{ + int retval; + + retval = i2c_smbus_write_byte_data(client, 0x00, 0xc0); + if (retval < 0) { + dev_err(&client->dev, "default write failed."); + return retval; + } + return 0; +} + +static int isl29020_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int res; + + res = als_set_default_config(client); + if (res < 0) + return res; + + res = sysfs_create_group(&client->dev.kobj, &m_als_gr); + if (res) { + dev_err(&client->dev, "isl29020: device create file failed\n"); + return res; + } + dev_info(&client->dev, "%s isl29020: ALS chip found\n", client->name); + als_set_power_state(client, 0); + pm_runtime_enable(&client->dev); + return res; +} + +static int isl29020_remove(struct i2c_client *client) +{ + pm_runtime_disable(&client->dev); + sysfs_remove_group(&client->dev.kobj, &m_als_gr); + return 0; +} + +static const struct i2c_device_id isl29020_id[] = { + { "isl29020", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, isl29020_id); + +#ifdef CONFIG_PM + +static int isl29020_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + als_set_power_state(client, 0); + return 0; +} + +static int isl29020_runtime_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + als_set_power_state(client, 1); + return 0; +} + +static const struct dev_pm_ops isl29020_pm_ops = { + .runtime_suspend = isl29020_runtime_suspend, + .runtime_resume = isl29020_runtime_resume, +}; + +#define ISL29020_PM_OPS (&isl29020_pm_ops) +#else /* CONFIG_PM */ +#define ISL29020_PM_OPS NULL +#endif /* CONFIG_PM */ + +static struct i2c_driver isl29020_driver = { + .driver = { + .name = "isl29020", + .pm = ISL29020_PM_OPS, + }, + .probe = isl29020_probe, + .remove = isl29020_remove, + .id_table = isl29020_id, +}; + +module_i2c_driver(isl29020_driver); + +MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>"); +MODULE_DESCRIPTION("Intersil isl29020 ALS Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c new file mode 100644 index 000000000..3e4d89471 --- /dev/null +++ b/drivers/misc/kgdbts.c @@ -0,0 +1,1181 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * kgdbts is a test suite for kgdb for the sole purpose of validating + * that key pieces of the kgdb internals are working properly such as + * HW/SW breakpoints, single stepping, and NMI. + * + * Created by: Jason Wessel <jason.wessel@windriver.com> + * + * Copyright (c) 2008 Wind River Systems, Inc. + */ +/* Information about the kgdb test suite. + * ------------------------------------- + * + * The kgdb test suite is designed as a KGDB I/O module which + * simulates the communications that a debugger would have with kgdb. + * The tests are broken up in to a line by line and referenced here as + * a "get" which is kgdb requesting input and "put" which is kgdb + * sending a response. + * + * The kgdb suite can be invoked from the kernel command line + * arguments system or executed dynamically at run time. 
The test + * suite uses the variable "kgdbts" to obtain the information about + * which tests to run and to configure the verbosity level. The + * following are the various characters you can use with the kgdbts= + * line: + * + * When using the "kgdbts=" you only choose one of the following core + * test types: + * A = Run all the core tests silently + * V1 = Run all the core tests with minimal output + * V2 = Run all the core tests in debug mode + * + * You can also specify optional tests: + * N## = Go to sleep with interrupts of for ## seconds + * to test the HW NMI watchdog + * F## = Break at kernel_clone for ## iterations + * S## = Break at sys_open for ## iterations + * I## = Run the single step test ## iterations + * + * NOTE: that the kernel_clone and sys_open tests are mutually exclusive. + * + * To invoke the kgdb test suite from boot you use a kernel start + * argument as follows: + * kgdbts=V1 kgdbwait + * Or if you wanted to perform the NMI test for 6 seconds and kernel_clone + * test for 100 forks, you could use: + * kgdbts=V1N6F100 kgdbwait + * + * The test suite can also be invoked at run time with: + * echo kgdbts=V1N6F100 > /sys/module/kgdbts/parameters/kgdbts + * Or as another example: + * echo kgdbts=V2 > /sys/module/kgdbts/parameters/kgdbts + * + * When developing a new kgdb arch specific implementation or + * using these tests for the purpose of regression testing, + * several invocations are required. + * + * 1) Boot with the test suite enabled by using the kernel arguments + * "kgdbts=V1F100 kgdbwait" + * ## If kgdb arch specific implementation has NMI use + * "kgdbts=V1N6F100 + * + * 2) After the system boot run the basic test. + * echo kgdbts=V1 > /sys/module/kgdbts/parameters/kgdbts + * + * 3) Run the concurrency tests. It is best to use n+1 + * while loops where n is the number of cpus you have + * in your system. The example below uses only two + * loops. + * + * ## This tests break points on sys_open + * while [ 1 ] ; do find / > /dev/null 2>&1 ; done & + * while [ 1 ] ; do find / > /dev/null 2>&1 ; done & + * echo kgdbts=V1S10000 > /sys/module/kgdbts/parameters/kgdbts + * fg # and hit control-c + * fg # and hit control-c + * ## This tests break points on kernel_clone + * while [ 1 ] ; do date > /dev/null ; done & + * while [ 1 ] ; do date > /dev/null ; done & + * echo kgdbts=V1F1000 > /sys/module/kgdbts/parameters/kgdbts + * fg # and hit control-c + * + */ + +#include <linux/kernel.h> +#include <linux/kgdb.h> +#include <linux/ctype.h> +#include <linux/uaccess.h> +#include <linux/syscalls.h> +#include <linux/nmi.h> +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/sched/task.h> + +#include <asm/sections.h> + +#define v1printk(a...) do { \ + if (verbose) \ + printk(KERN_INFO a); \ +} while (0) +#define v2printk(a...) do { \ + if (verbose > 1) { \ + printk(KERN_INFO a); \ + } \ + touch_nmi_watchdog(); \ +} while (0) +#define eprintk(a...) 
do { \ + printk(KERN_ERR a); \ + WARN_ON(1); \ +} while (0) +#define MAX_CONFIG_LEN 40 + +static struct kgdb_io kgdbts_io_ops; +static char get_buf[BUFMAX]; +static int get_buf_cnt; +static char put_buf[BUFMAX]; +static int put_buf_cnt; +static char scratch_buf[BUFMAX]; +static int verbose; +static int repeat_test; +static int test_complete; +static int send_ack; +static int final_ack; +static int force_hwbrks; +static int hwbreaks_ok; +static int hw_break_val; +static int hw_break_val2; +static int cont_instead_of_sstep; +static unsigned long cont_thread_id; +static unsigned long sstep_thread_id; +#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC) +static int arch_needs_sstep_emulation = 1; +#else +static int arch_needs_sstep_emulation; +#endif +static unsigned long cont_addr; +static unsigned long sstep_addr; +static int restart_from_top_after_write; +static int sstep_state; + +/* Storage for the registers, in GDB format. */ +static unsigned long kgdbts_gdb_regs[(NUMREGBYTES + + sizeof(unsigned long) - 1) / + sizeof(unsigned long)]; +static struct pt_regs kgdbts_regs; + +/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */ +static int configured = -1; + +#ifdef CONFIG_KGDB_TESTS_BOOT_STRING +static char config[MAX_CONFIG_LEN] = CONFIG_KGDB_TESTS_BOOT_STRING; +#else +static char config[MAX_CONFIG_LEN]; +#endif +static struct kparam_string kps = { + .string = config, + .maxlen = MAX_CONFIG_LEN, +}; + +static void fill_get_buf(char *buf); + +struct test_struct { + char *get; + char *put; + void (*get_handler)(char *); + int (*put_handler)(char *, char *); +}; + +struct test_state { + char *name; + struct test_struct *tst; + int idx; + int (*run_test) (int, int); + int (*validate_put) (char *); +}; + +static struct test_state ts; + +static int kgdbts_unreg_thread(void *ptr) +{ + /* Wait until the tests are complete and then ungresiter the I/O + * driver. + */ + while (!final_ack) + msleep_interruptible(1500); + /* Pause for any other threads to exit after final ack. */ + msleep_interruptible(1000); + if (configured) + kgdb_unregister_io_module(&kgdbts_io_ops); + configured = 0; + + return 0; +} + +/* This is noinline such that it can be used for a single location to + * place a breakpoint + */ +static noinline void kgdbts_break_test(void) +{ + v2printk("kgdbts: breakpoint complete\n"); +} + +/* Lookup symbol info in the kernel */ +static unsigned long lookup_addr(char *arg) +{ + unsigned long addr = 0; + + if (!strcmp(arg, "kgdbts_break_test")) + addr = (unsigned long)kgdbts_break_test; + else if (!strcmp(arg, "sys_open")) + addr = (unsigned long)do_sys_open; + else if (!strcmp(arg, "kernel_clone")) + addr = (unsigned long)kernel_clone; + else if (!strcmp(arg, "hw_break_val")) + addr = (unsigned long)&hw_break_val; + addr = (unsigned long) dereference_function_descriptor((void *)addr); + return addr; +} + +static void break_helper(char *bp_type, char *arg, unsigned long vaddr) +{ + unsigned long addr; + + if (arg) + addr = lookup_addr(arg); + else + addr = vaddr; + + sprintf(scratch_buf, "%s,%lx,%i", bp_type, addr, + BREAK_INSTR_SIZE); + fill_get_buf(scratch_buf); +} + +static void sw_break(char *arg) +{ + break_helper(force_hwbrks ? "Z1" : "Z0", arg, 0); +} + +static void sw_rem_break(char *arg) +{ + break_helper(force_hwbrks ? 
"z1" : "z0", arg, 0); +} + +static void hw_break(char *arg) +{ + break_helper("Z1", arg, 0); +} + +static void hw_rem_break(char *arg) +{ + break_helper("z1", arg, 0); +} + +static void hw_write_break(char *arg) +{ + break_helper("Z2", arg, 0); +} + +static void hw_rem_write_break(char *arg) +{ + break_helper("z2", arg, 0); +} + +static void hw_access_break(char *arg) +{ + break_helper("Z4", arg, 0); +} + +static void hw_rem_access_break(char *arg) +{ + break_helper("z4", arg, 0); +} + +static void hw_break_val_access(void) +{ + hw_break_val2 = hw_break_val; +} + +static void hw_break_val_write(void) +{ + hw_break_val++; +} + +static int get_thread_id_continue(char *put_str, char *arg) +{ + char *ptr = &put_str[11]; + + if (put_str[1] != 'T' || put_str[2] != '0') + return 1; + kgdb_hex2long(&ptr, &cont_thread_id); + return 0; +} + +static int check_and_rewind_pc(char *put_str, char *arg) +{ + unsigned long addr = lookup_addr(arg); + unsigned long ip; + int offset = 0; + + kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, + NUMREGBYTES); + gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); + ip = instruction_pointer(&kgdbts_regs); + v2printk("Stopped at IP: %lx\n", ip); +#ifdef GDB_ADJUSTS_BREAK_OFFSET + /* On some arches, a breakpoint stop requires it to be decremented */ + if (addr + BREAK_INSTR_SIZE == ip) + offset = -BREAK_INSTR_SIZE; +#endif + + if (arch_needs_sstep_emulation && sstep_addr && + ip + offset == sstep_addr && + ((!strcmp(arg, "sys_open") || !strcmp(arg, "kernel_clone")))) { + /* This is special case for emulated single step */ + v2printk("Emul: rewind hit single step bp\n"); + restart_from_top_after_write = 1; + } else if (strcmp(arg, "silent") && ip + offset != addr) { + eprintk("kgdbts: BP mismatch %lx expected %lx\n", + ip + offset, addr); + return 1; + } + /* Readjust the instruction pointer if needed */ + ip += offset; + cont_addr = ip; +#ifdef GDB_ADJUSTS_BREAK_OFFSET + instruction_pointer_set(&kgdbts_regs, ip); +#endif + return 0; +} + +static int check_single_step(char *put_str, char *arg) +{ + unsigned long addr = lookup_addr(arg); + static int matched_id; + + /* + * From an arch indepent point of view the instruction pointer + * should be on a different instruction + */ + kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, + NUMREGBYTES); + gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); + v2printk("Singlestep stopped at IP: %lx\n", + instruction_pointer(&kgdbts_regs)); + + if (sstep_thread_id != cont_thread_id) { + /* + * Ensure we stopped in the same thread id as before, else the + * debugger should continue until the original thread that was + * single stepped is scheduled again, emulating gdb's behavior. 
+ */ + v2printk("ThrID does not match: %lx\n", cont_thread_id); + if (arch_needs_sstep_emulation) { + if (matched_id && + instruction_pointer(&kgdbts_regs) != addr) + goto continue_test; + matched_id++; + ts.idx -= 2; + sstep_state = 0; + return 0; + } + cont_instead_of_sstep = 1; + ts.idx -= 4; + return 0; + } +continue_test: + matched_id = 0; + if (instruction_pointer(&kgdbts_regs) == addr) { + eprintk("kgdbts: SingleStep failed at %lx\n", + instruction_pointer(&kgdbts_regs)); + return 1; + } + + return 0; +} + +static void write_regs(char *arg) +{ + memset(scratch_buf, 0, sizeof(scratch_buf)); + scratch_buf[0] = 'G'; + pt_regs_to_gdb_regs(kgdbts_gdb_regs, &kgdbts_regs); + kgdb_mem2hex((char *)kgdbts_gdb_regs, &scratch_buf[1], NUMREGBYTES); + fill_get_buf(scratch_buf); +} + +static void skip_back_repeat_test(char *arg) +{ + int go_back = simple_strtol(arg, NULL, 10); + + repeat_test--; + if (repeat_test <= 0) { + ts.idx++; + } else { + if (repeat_test % 100 == 0) + v1printk("kgdbts:RUN ... %d remaining\n", repeat_test); + + ts.idx -= go_back; + } + fill_get_buf(ts.tst[ts.idx].get); +} + +static int got_break(char *put_str, char *arg) +{ + test_complete = 1; + if (!strncmp(put_str+1, arg, 2)) { + if (!strncmp(arg, "T0", 2)) + test_complete = 2; + return 0; + } + return 1; +} + +static void get_cont_catch(char *arg) +{ + /* Always send detach because the test is completed at this point */ + fill_get_buf("D"); +} + +static int put_cont_catch(char *put_str, char *arg) +{ + /* This is at the end of the test and we catch any and all input */ + v2printk("kgdbts: cleanup task: %lx\n", sstep_thread_id); + ts.idx--; + return 0; +} + +static int emul_reset(char *put_str, char *arg) +{ + if (strncmp(put_str, "$OK", 3)) + return 1; + if (restart_from_top_after_write) { + restart_from_top_after_write = 0; + ts.idx = -1; + } + return 0; +} + +static void emul_sstep_get(char *arg) +{ + if (!arch_needs_sstep_emulation) { + if (cont_instead_of_sstep) { + cont_instead_of_sstep = 0; + fill_get_buf("c"); + } else { + fill_get_buf(arg); + } + return; + } + switch (sstep_state) { + case 0: + v2printk("Emulate single step\n"); + /* Start by looking at the current PC */ + fill_get_buf("g"); + break; + case 1: + /* set breakpoint */ + break_helper("Z0", NULL, sstep_addr); + break; + case 2: + /* Continue */ + fill_get_buf("c"); + break; + case 3: + /* Clear breakpoint */ + break_helper("z0", NULL, sstep_addr); + break; + default: + eprintk("kgdbts: ERROR failed sstep get emulation\n"); + } + sstep_state++; +} + +static int emul_sstep_put(char *put_str, char *arg) +{ + if (!arch_needs_sstep_emulation) { + char *ptr = &put_str[11]; + if (put_str[1] != 'T' || put_str[2] != '0') + return 1; + kgdb_hex2long(&ptr, &sstep_thread_id); + return 0; + } + switch (sstep_state) { + case 1: + /* validate the "g" packet to get the IP */ + kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, + NUMREGBYTES); + gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); + v2printk("Stopped at IP: %lx\n", + instruction_pointer(&kgdbts_regs)); + /* Want to stop at IP + break instruction size by default */ + sstep_addr = cont_addr + BREAK_INSTR_SIZE; + break; + case 2: + if (strncmp(put_str, "$OK", 3)) { + eprintk("kgdbts: failed sstep break set\n"); + return 1; + } + break; + case 3: + if (strncmp(put_str, "$T0", 3)) { + eprintk("kgdbts: failed continue sstep\n"); + return 1; + } else { + char *ptr = &put_str[11]; + kgdb_hex2long(&ptr, &sstep_thread_id); + } + break; + case 4: + if (strncmp(put_str, "$OK", 3)) { + eprintk("kgdbts: failed 
sstep break unset\n"); + return 1; + } + /* Single step is complete so continue on! */ + sstep_state = 0; + return 0; + default: + eprintk("kgdbts: ERROR failed sstep put emulation\n"); + } + + /* Continue on the same test line until emulation is complete */ + ts.idx--; + return 0; +} + +static int final_ack_set(char *put_str, char *arg) +{ + if (strncmp(put_str+1, arg, 2)) + return 1; + final_ack = 1; + return 0; +} +/* + * Test to plant a breakpoint and detach, which should clear out the + * breakpoint and restore the original instruction. + */ +static struct test_struct plant_and_detach_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ + { "D", "OK" }, /* Detach */ + { "", "" }, +}; + +/* + * Simple test to write in a software breakpoint, check for the + * correct stop location and detach. + */ +static struct test_struct sw_breakpoint_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ + { "c", "T0*", }, /* Continue */ + { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, + { "write", "OK", write_regs }, + { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */ + { "D", "OK" }, /* Detach */ + { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "", "" }, +}; + +/* + * Test a known bad memory read location to test the fault handler and + * read bytes 1-8 at the bad address + */ +static struct test_struct bad_read_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "m0,1", "E*" }, /* read 1 byte at address 1 */ + { "m0,2", "E*" }, /* read 1 byte at address 2 */ + { "m0,3", "E*" }, /* read 1 byte at address 3 */ + { "m0,4", "E*" }, /* read 1 byte at address 4 */ + { "m0,5", "E*" }, /* read 1 byte at address 5 */ + { "m0,6", "E*" }, /* read 1 byte at address 6 */ + { "m0,7", "E*" }, /* read 1 byte at address 7 */ + { "m0,8", "E*" }, /* read 1 byte at address 8 */ + { "D", "OK" }, /* Detach which removes all breakpoints and continues */ + { "", "" }, +}; + +/* + * Test for hitting a breakpoint, remove it, single step, plant it + * again and detach. + */ +static struct test_struct singlestep_break_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ + { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */ + { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */ + { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, + { "write", "OK", write_regs }, /* Write registers */ + { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ + { "g", "kgdbts_break_test", NULL, check_single_step }, + { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ + { "c", "T0*", }, /* Continue */ + { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, + { "write", "OK", write_regs }, /* Write registers */ + { "D", "OK" }, /* Remove all breakpoints and continues */ + { "", "" }, +}; + +/* + * Test for hitting a breakpoint at kernel_clone for what ever the number + * of iterations required by the variable repeat_test. 
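+ * Each iteration re-arms the breakpoint and loops back via
+ * skip_back_repeat_test until repeat_test reaches zero.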
+ */ +static struct test_struct do_kernel_clone_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "kernel_clone", "OK", sw_break, }, /* set sw breakpoint */ + { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */ + { "kernel_clone", "OK", sw_rem_break }, /*remove breakpoint */ + { "g", "kernel_clone", NULL, check_and_rewind_pc }, /* check location */ + { "write", "OK", write_regs, emul_reset }, /* Write registers */ + { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ + { "g", "kernel_clone", NULL, check_single_step }, + { "kernel_clone", "OK", sw_break, }, /* set sw breakpoint */ + { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */ + { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */ + { "", "", get_cont_catch, put_cont_catch }, +}; + +/* Test for hitting a breakpoint at sys_open for what ever the number + * of iterations required by the variable repeat_test. + */ +static struct test_struct sys_open_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "sys_open", "OK", sw_break, }, /* set sw breakpoint */ + { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */ + { "sys_open", "OK", sw_rem_break }, /*remove breakpoint */ + { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */ + { "write", "OK", write_regs, emul_reset }, /* Write registers */ + { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ + { "g", "sys_open", NULL, check_single_step }, + { "sys_open", "OK", sw_break, }, /* set sw breakpoint */ + { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */ + { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */ + { "", "", get_cont_catch, put_cont_catch }, +}; + +/* + * Test for hitting a simple hw breakpoint + */ +static struct test_struct hw_breakpoint_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "kgdbts_break_test", "OK", hw_break, }, /* set hw breakpoint */ + { "c", "T0*", }, /* Continue */ + { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, + { "write", "OK", write_regs }, + { "kgdbts_break_test", "OK", hw_rem_break }, /*remove breakpoint */ + { "D", "OK" }, /* Detach */ + { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "", "" }, +}; + +/* + * Test for hitting a hw write breakpoint + */ +static struct test_struct hw_write_break_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "hw_break_val", "OK", hw_write_break, }, /* set hw breakpoint */ + { "c", "T0*", NULL, got_break }, /* Continue */ + { "g", "silent", NULL, check_and_rewind_pc }, + { "write", "OK", write_regs }, + { "hw_break_val", "OK", hw_rem_write_break }, /*remove breakpoint */ + { "D", "OK" }, /* Detach */ + { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "", "" }, +}; + +/* + * Test for hitting a hw access breakpoint + */ +static struct test_struct hw_access_break_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "hw_break_val", "OK", hw_access_break, }, /* set hw breakpoint */ + { "c", "T0*", NULL, got_break }, /* Continue */ + { "g", "silent", NULL, check_and_rewind_pc }, + { "write", "OK", write_regs }, + { "hw_break_val", "OK", hw_rem_access_break }, /*remove breakpoint */ + { "D", "OK" }, /* Detach */ + { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "", "" }, +}; + +/* + * Test for hitting a hw access breakpoint + */ +static struct test_struct nmi_sleep_test[] = { + { "?", "S0*" }, /* Clear break points */ + { "c", "T0*", NULL, got_break }, /* Continue */ + { "D", "OK" }, /* Detach */ 
+ { "D", "OK", NULL, got_break }, /* On success we made it here */ + { "", "" }, +}; + +static void fill_get_buf(char *buf) +{ + unsigned char checksum = 0; + int count = 0; + char ch; + + strcpy(get_buf, "$"); + strcat(get_buf, buf); + while ((ch = buf[count])) { + checksum += ch; + count++; + } + strcat(get_buf, "#"); + get_buf[count + 2] = hex_asc_hi(checksum); + get_buf[count + 3] = hex_asc_lo(checksum); + get_buf[count + 4] = '\0'; + v2printk("get%i: %s\n", ts.idx, get_buf); +} + +static int validate_simple_test(char *put_str) +{ + char *chk_str; + + if (ts.tst[ts.idx].put_handler) + return ts.tst[ts.idx].put_handler(put_str, + ts.tst[ts.idx].put); + + chk_str = ts.tst[ts.idx].put; + if (*put_str == '$') + put_str++; + + while (*chk_str != '\0' && *put_str != '\0') { + /* If someone does a * to match the rest of the string, allow + * it, or stop if the received string is complete. + */ + if (*put_str == '#' || *chk_str == '*') + return 0; + if (*put_str != *chk_str) + return 1; + + chk_str++; + put_str++; + } + if (*chk_str == '\0' && (*put_str == '\0' || *put_str == '#')) + return 0; + + return 1; +} + +static int run_simple_test(int is_get_char, int chr) +{ + int ret = 0; + if (is_get_char) { + /* Send an ACK on the get if a prior put completed and set the + * send ack variable + */ + if (send_ack) { + send_ack = 0; + return '+'; + } + /* On the first get char, fill the transmit buffer and then + * take from the get_string. + */ + if (get_buf_cnt == 0) { + if (ts.tst[ts.idx].get_handler) + ts.tst[ts.idx].get_handler(ts.tst[ts.idx].get); + else + fill_get_buf(ts.tst[ts.idx].get); + } + + if (get_buf[get_buf_cnt] == '\0') { + eprintk("kgdbts: ERROR GET: EOB on '%s' at %i\n", + ts.name, ts.idx); + get_buf_cnt = 0; + fill_get_buf("D"); + } + ret = get_buf[get_buf_cnt]; + get_buf_cnt++; + return ret; + } + + /* This callback is a put char which is when kgdb sends data to + * this I/O module. 
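+	 * Characters are accumulated in put_buf until the '#' checksum
+	 * trailer is seen, then the whole packet is validated below.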
+ */ + if (ts.tst[ts.idx].get[0] == '\0' && ts.tst[ts.idx].put[0] == '\0' && + !ts.tst[ts.idx].get_handler) { + eprintk("kgdbts: ERROR: beyond end of test on" + " '%s' line %i\n", ts.name, ts.idx); + return 0; + } + + if (put_buf_cnt >= BUFMAX) { + eprintk("kgdbts: ERROR: put buffer overflow on" + " '%s' line %i\n", ts.name, ts.idx); + put_buf_cnt = 0; + return 0; + } + /* Ignore everything until the first valid packet start '$' */ + if (put_buf_cnt == 0 && chr != '$') + return 0; + + put_buf[put_buf_cnt] = chr; + put_buf_cnt++; + + /* End of packet == #XX so look for the '#' */ + if (put_buf_cnt > 3 && put_buf[put_buf_cnt - 3] == '#') { + if (put_buf_cnt >= BUFMAX) { + eprintk("kgdbts: ERROR: put buffer overflow on" + " '%s' line %i\n", ts.name, ts.idx); + put_buf_cnt = 0; + return 0; + } + put_buf[put_buf_cnt] = '\0'; + v2printk("put%i: %s\n", ts.idx, put_buf); + /* Trigger check here */ + if (ts.validate_put && ts.validate_put(put_buf)) { + eprintk("kgdbts: ERROR PUT: end of test " + "buffer on '%s' line %i expected %s got %s\n", + ts.name, ts.idx, ts.tst[ts.idx].put, put_buf); + } + ts.idx++; + put_buf_cnt = 0; + get_buf_cnt = 0; + send_ack = 1; + } + return 0; +} + +static void init_simple_test(void) +{ + memset(&ts, 0, sizeof(ts)); + ts.run_test = run_simple_test; + ts.validate_put = validate_simple_test; +} + +static void run_plant_and_detach_test(int is_early) +{ + char before[BREAK_INSTR_SIZE]; + char after[BREAK_INSTR_SIZE]; + + copy_from_kernel_nofault(before, (char *)kgdbts_break_test, + BREAK_INSTR_SIZE); + init_simple_test(); + ts.tst = plant_and_detach_test; + ts.name = "plant_and_detach_test"; + /* Activate test with initial breakpoint */ + if (!is_early) + kgdb_breakpoint(); + copy_from_kernel_nofault(after, (char *)kgdbts_break_test, + BREAK_INSTR_SIZE); + if (memcmp(before, after, BREAK_INSTR_SIZE)) { + printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n"); + panic("kgdb memory corruption"); + } + + /* complete the detach test */ + if (!is_early) + kgdbts_break_test(); +} + +static void run_breakpoint_test(int is_hw_breakpoint) +{ + test_complete = 0; + init_simple_test(); + if (is_hw_breakpoint) { + ts.tst = hw_breakpoint_test; + ts.name = "hw_breakpoint_test"; + } else { + ts.tst = sw_breakpoint_test; + ts.name = "sw_breakpoint_test"; + } + /* Activate test with initial breakpoint */ + kgdb_breakpoint(); + /* run code with the break point in it */ + kgdbts_break_test(); + kgdb_breakpoint(); + + if (test_complete) + return; + + eprintk("kgdbts: ERROR %s test failed\n", ts.name); + if (is_hw_breakpoint) + hwbreaks_ok = 0; +} + +static void run_hw_break_test(int is_write_test) +{ + test_complete = 0; + init_simple_test(); + if (is_write_test) { + ts.tst = hw_write_break_test; + ts.name = "hw_write_break_test"; + } else { + ts.tst = hw_access_break_test; + ts.name = "hw_access_break_test"; + } + /* Activate test with initial breakpoint */ + kgdb_breakpoint(); + hw_break_val_access(); + if (is_write_test) { + if (test_complete == 2) { + eprintk("kgdbts: ERROR %s broke on access\n", + ts.name); + hwbreaks_ok = 0; + } + hw_break_val_write(); + } + kgdb_breakpoint(); + + if (test_complete == 1) + return; + + eprintk("kgdbts: ERROR %s test failed\n", ts.name); + hwbreaks_ok = 0; +} + +static void run_nmi_sleep_test(int nmi_sleep) +{ + unsigned long flags; + + init_simple_test(); + ts.tst = nmi_sleep_test; + ts.name = "nmi_sleep_test"; + /* Activate test with initial breakpoint */ + kgdb_breakpoint(); + local_irq_save(flags); + mdelay(nmi_sleep*1000); + touch_nmi_watchdog(); 
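+	/* Interrupts stayed disabled for the whole delay, so only an NMI
+	 * can have dropped us into the debugger here. */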
+ local_irq_restore(flags); + if (test_complete != 2) + eprintk("kgdbts: ERROR nmi_test did not hit nmi\n"); + kgdb_breakpoint(); + if (test_complete == 1) + return; + + eprintk("kgdbts: ERROR %s test failed\n", ts.name); +} + +static void run_bad_read_test(void) +{ + init_simple_test(); + ts.tst = bad_read_test; + ts.name = "bad_read_test"; + /* Activate test with initial breakpoint */ + kgdb_breakpoint(); +} + +static void run_kernel_clone_test(void) +{ + init_simple_test(); + ts.tst = do_kernel_clone_test; + ts.name = "do_kernel_clone_test"; + /* Activate test with initial breakpoint */ + kgdb_breakpoint(); +} + +static void run_sys_open_test(void) +{ + init_simple_test(); + ts.tst = sys_open_test; + ts.name = "sys_open_test"; + /* Activate test with initial breakpoint */ + kgdb_breakpoint(); +} + +static void run_singlestep_break_test(void) +{ + init_simple_test(); + ts.tst = singlestep_break_test; + ts.name = "singlestep_breakpoint_test"; + /* Activate test with initial breakpoint */ + kgdb_breakpoint(); + kgdbts_break_test(); + kgdbts_break_test(); +} + +static void kgdbts_run_tests(void) +{ + char *ptr; + int clone_test = 0; + int do_sys_open_test = 0; + int sstep_test = 1000; + int nmi_sleep = 0; + int i; + + verbose = 0; + if (strstr(config, "V1")) + verbose = 1; + if (strstr(config, "V2")) + verbose = 2; + + ptr = strchr(config, 'F'); + if (ptr) + clone_test = simple_strtol(ptr + 1, NULL, 10); + ptr = strchr(config, 'S'); + if (ptr) + do_sys_open_test = simple_strtol(ptr + 1, NULL, 10); + ptr = strchr(config, 'N'); + if (ptr) + nmi_sleep = simple_strtol(ptr+1, NULL, 10); + ptr = strchr(config, 'I'); + if (ptr) + sstep_test = simple_strtol(ptr+1, NULL, 10); + + /* All HW break point tests */ + if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) { + hwbreaks_ok = 1; + v1printk("kgdbts:RUN hw breakpoint test\n"); + run_breakpoint_test(1); + v1printk("kgdbts:RUN hw write breakpoint test\n"); + run_hw_break_test(1); + v1printk("kgdbts:RUN access write breakpoint test\n"); + run_hw_break_test(0); + } + + /* required internal KGDB tests */ + v1printk("kgdbts:RUN plant and detach test\n"); + run_plant_and_detach_test(0); + v1printk("kgdbts:RUN sw breakpoint test\n"); + run_breakpoint_test(0); + v1printk("kgdbts:RUN bad memory access test\n"); + run_bad_read_test(); + v1printk("kgdbts:RUN singlestep test %i iterations\n", sstep_test); + for (i = 0; i < sstep_test; i++) { + run_singlestep_break_test(); + if (i % 100 == 0) + v1printk("kgdbts:RUN singlestep [%i/%i]\n", + i, sstep_test); + } + + /* ===Optional tests=== */ + + if (nmi_sleep) { + v1printk("kgdbts:RUN NMI sleep %i seconds test\n", nmi_sleep); + run_nmi_sleep_test(nmi_sleep); + } + + /* If the kernel_clone test is run it will be the last test that is + * executed because a kernel thread will be spawned at the very + * end to unregister the debug hooks. + */ + if (clone_test) { + repeat_test = clone_test; + printk(KERN_INFO "kgdbts:RUN kernel_clone for %i breakpoints\n", + repeat_test); + kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg"); + run_kernel_clone_test(); + return; + } + + /* If the sys_open test is run it will be the last test that is + * executed because a kernel thread will be spawned at the very + * end to unregister the debug hooks. 
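+	 * (kgdbts_unreg_thread waits for final_ack before unregistering
+	 * the I/O module.)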
+ */ + if (do_sys_open_test) { + repeat_test = do_sys_open_test; + printk(KERN_INFO "kgdbts:RUN sys_open for %i breakpoints\n", + repeat_test); + kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg"); + run_sys_open_test(); + return; + } + /* Shutdown and unregister */ + kgdb_unregister_io_module(&kgdbts_io_ops); + configured = 0; +} + +static int kgdbts_option_setup(char *opt) +{ + if (strlen(opt) >= MAX_CONFIG_LEN) { + printk(KERN_ERR "kgdbts: config string too long\n"); + return 1; + } + strcpy(config, opt); + return 1; +} + +__setup("kgdbts=", kgdbts_option_setup); + +static int configure_kgdbts(void) +{ + int err = 0; + + if (!strlen(config) || isspace(config[0])) + goto noconfig; + + final_ack = 0; + run_plant_and_detach_test(1); + + err = kgdb_register_io_module(&kgdbts_io_ops); + if (err) { + configured = 0; + return err; + } + configured = 1; + kgdbts_run_tests(); + + return err; + +noconfig: + config[0] = 0; + configured = 0; + + return err; +} + +static int __init init_kgdbts(void) +{ + /* Already configured? */ + if (configured == 1) + return 0; + + return configure_kgdbts(); +} +device_initcall(init_kgdbts); + +static int kgdbts_get_char(void) +{ + int val = 0; + + if (ts.run_test) + val = ts.run_test(1, 0); + + return val; +} + +static void kgdbts_put_char(u8 chr) +{ + if (ts.run_test) + ts.run_test(0, chr); +} + +static int param_set_kgdbts_var(const char *kmessage, + const struct kernel_param *kp) +{ + size_t len = strlen(kmessage); + + if (len >= MAX_CONFIG_LEN) { + printk(KERN_ERR "kgdbts: config string too long\n"); + return -ENOSPC; + } + + /* Only copy in the string if the init function has not run yet */ + if (configured < 0) { + strcpy(config, kmessage); + return 0; + } + + if (configured == 1) { + printk(KERN_ERR "kgdbts: ERROR: Already configured and running.\n"); + return -EBUSY; + } + + strcpy(config, kmessage); + /* Chop out \n char as a result of echo */ + if (len && config[len - 1] == '\n') + config[len - 1] = '\0'; + + /* Go and configure with the new params. */ + return configure_kgdbts(); +} + +static void kgdbts_pre_exp_handler(void) +{ + /* Increment the module count when the debugger is active */ + if (!kgdb_connected) + try_module_get(THIS_MODULE); +} + +static void kgdbts_post_exp_handler(void) +{ + /* decrement the module count when the debugger detaches */ + if (!kgdb_connected) + module_put(THIS_MODULE); +} + +static struct kgdb_io kgdbts_io_ops = { + .name = "kgdbts", + .read_char = kgdbts_get_char, + .write_char = kgdbts_put_char, + .pre_exception = kgdbts_pre_exp_handler, + .post_exception = kgdbts_post_exp_handler, +}; + +/* + * not really modular, but the easiest way to keep compat with existing + * bootargs behaviour is to continue using module_param here. + */ +module_param_call(kgdbts, param_set_kgdbts_var, param_get_string, &kps, 0644); +MODULE_PARM_DESC(kgdbts, "<A|V1|V2>[F#|S#][N#]"); diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c new file mode 100644 index 000000000..556bb7d70 --- /dev/null +++ b/drivers/misc/lattice-ecp3-config.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2012 Stefan Roese <sr@denx.de> + */ + +#include <linux/device.h> +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/spi/spi.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <asm/unaligned.h> + +#define FIRMWARE_NAME "lattice-ecp3.bit" + +/* + * The JTAG ID's of the supported FPGA's. 
The ID is 32bit wide + * reversed as noted in the manual. + */ +#define ID_ECP3_17 0xc2088080 +#define ID_ECP3_35 0xc2048080 + +/* FPGA commands */ +#define FPGA_CMD_READ_ID 0x07 /* plus 24 bits */ +#define FPGA_CMD_READ_STATUS 0x09 /* plus 24 bits */ +#define FPGA_CMD_CLEAR 0x70 +#define FPGA_CMD_REFRESH 0x71 +#define FPGA_CMD_WRITE_EN 0x4a /* plus 2 bits */ +#define FPGA_CMD_WRITE_DIS 0x4f /* plus 8 bits */ +#define FPGA_CMD_WRITE_INC 0x41 /* plus 0 bits */ + +/* + * The status register is 32bit revered, DONE is bit 17 from the TN1222.pdf + * (LatticeECP3 Slave SPI Port User's Guide) + */ +#define FPGA_STATUS_DONE 0x00004000 +#define FPGA_STATUS_CLEARED 0x00010000 + +#define FPGA_CLEAR_TIMEOUT 5000 /* max. 5000ms for FPGA clear */ +#define FPGA_CLEAR_MSLEEP 10 +#define FPGA_CLEAR_LOOP_COUNT (FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP) + +struct fpga_data { + struct completion fw_loaded; +}; + +struct ecp3_dev { + u32 jedec_id; + char *name; +}; + +static const struct ecp3_dev ecp3_dev[] = { + { + .jedec_id = ID_ECP3_17, + .name = "Lattice ECP3-17", + }, + { + .jedec_id = ID_ECP3_35, + .name = "Lattice ECP3-35", + }, +}; + +static void firmware_load(const struct firmware *fw, void *context) +{ + struct spi_device *spi = (struct spi_device *)context; + struct fpga_data *data = spi_get_drvdata(spi); + u8 *buffer; + u8 txbuf[8]; + u8 rxbuf[8]; + int rx_len = 8; + int i; + u32 jedec_id; + u32 status; + + if (fw == NULL) { + dev_err(&spi->dev, "Cannot load firmware, aborting\n"); + goto out; + } + + if (fw->size == 0) { + dev_err(&spi->dev, "Error: Firmware size is 0!\n"); + goto out; + } + + /* Fill dummy data (24 stuffing bits for commands) */ + txbuf[1] = 0x00; + txbuf[2] = 0x00; + txbuf[3] = 0x00; + + /* Trying to speak with the FPGA via SPI... */ + txbuf[0] = FPGA_CMD_READ_ID; + spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); + jedec_id = get_unaligned_be32(&rxbuf[4]); + dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", jedec_id); + + for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) { + if (jedec_id == ecp3_dev[i].jedec_id) + break; + } + if (i == ARRAY_SIZE(ecp3_dev)) { + dev_err(&spi->dev, + "Error: No supported FPGA detected (JEDEC_ID=%08x)!\n", + jedec_id); + goto out; + } + + dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name); + + txbuf[0] = FPGA_CMD_READ_STATUS; + spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); + status = get_unaligned_be32(&rxbuf[4]); + dev_dbg(&spi->dev, "FPGA Status=%08x\n", status); + + buffer = kzalloc(fw->size + 8, GFP_KERNEL); + if (!buffer) { + dev_err(&spi->dev, "Error: Can't allocate memory!\n"); + goto out; + } + + /* + * Insert WRITE_INC command into stream (one SPI frame) + */ + buffer[0] = FPGA_CMD_WRITE_INC; + buffer[1] = 0xff; + buffer[2] = 0xff; + buffer[3] = 0xff; + memcpy(buffer + 4, fw->data, fw->size); + + txbuf[0] = FPGA_CMD_REFRESH; + spi_write(spi, txbuf, 4); + + txbuf[0] = FPGA_CMD_WRITE_EN; + spi_write(spi, txbuf, 4); + + txbuf[0] = FPGA_CMD_CLEAR; + spi_write(spi, txbuf, 4); + + /* + * Wait for FPGA memory to become cleared + */ + for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) { + txbuf[0] = FPGA_CMD_READ_STATUS; + spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); + status = get_unaligned_be32(&rxbuf[4]); + if (status == FPGA_STATUS_CLEARED) + break; + + msleep(FPGA_CLEAR_MSLEEP); + } + + if (i == FPGA_CLEAR_LOOP_COUNT) { + dev_err(&spi->dev, + "Error: Timeout waiting for FPGA to clear (status=%08x)!\n", + status); + kfree(buffer); + goto out; + } + + dev_info(&spi->dev, "Configuring the FPGA...\n"); + spi_write(spi, buffer, fw->size + 8); + + 
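+	/* The frame just sent: the WRITE_INC command, three 0xff dummy
+	 * bytes, then the raw bitstream. */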
txbuf[0] = FPGA_CMD_WRITE_DIS; + spi_write(spi, txbuf, 4); + + txbuf[0] = FPGA_CMD_READ_STATUS; + spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); + status = get_unaligned_be32(&rxbuf[4]); + dev_dbg(&spi->dev, "FPGA Status=%08x\n", status); + + /* Check result */ + if (status & FPGA_STATUS_DONE) + dev_info(&spi->dev, "FPGA successfully configured!\n"); + else + dev_info(&spi->dev, "FPGA not configured (DONE not set)\n"); + + /* + * Don't forget to release the firmware again + */ + release_firmware(fw); + + kfree(buffer); +out: + complete(&data->fw_loaded); +} + +static int lattice_ecp3_probe(struct spi_device *spi) +{ + struct fpga_data *data; + int err; + + data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL); + if (!data) { + dev_err(&spi->dev, "Memory allocation for fpga_data failed\n"); + return -ENOMEM; + } + spi_set_drvdata(spi, data); + + init_completion(&data->fw_loaded); + err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, + FIRMWARE_NAME, &spi->dev, + GFP_KERNEL, spi, firmware_load); + if (err) { + dev_err(&spi->dev, "Firmware loading failed with %d!\n", err); + return err; + } + + dev_info(&spi->dev, "FPGA bitstream configuration driver registered\n"); + + return 0; +} + +static int lattice_ecp3_remove(struct spi_device *spi) +{ + struct fpga_data *data = spi_get_drvdata(spi); + + wait_for_completion(&data->fw_loaded); + + return 0; +} + +static const struct spi_device_id lattice_ecp3_id[] = { + { "ecp3-17", 0 }, + { "ecp3-35", 0 }, + { } +}; +MODULE_DEVICE_TABLE(spi, lattice_ecp3_id); + +static struct spi_driver lattice_ecp3_driver = { + .driver = { + .name = "lattice-ecp3", + }, + .probe = lattice_ecp3_probe, + .remove = lattice_ecp3_remove, + .id_table = lattice_ecp3_id, +}; + +module_spi_driver(lattice_ecp3_driver); + +MODULE_AUTHOR("Stefan Roese <sr@denx.de>"); +MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_NAME); diff --git a/drivers/misc/lis3lv02d/Kconfig b/drivers/misc/lis3lv02d/Kconfig new file mode 100644 index 000000000..bb2fec4b5 --- /dev/null +++ b/drivers/misc/lis3lv02d/Kconfig @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# STMicroelectonics LIS3LV02D and similar accelerometers +# + +config SENSORS_LIS3_SPI + tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)" + depends on !ACPI && SPI_MASTER && INPUT + select SENSORS_LIS3LV02D + help + This driver provides support for the LIS3LV02Dx accelerometer connected + via SPI. The accelerometer data is readable via + /sys/devices/platform/lis3lv02d. + + This driver also provides an absolute input class device, allowing + the laptop to act as a pinball machine-esque joystick. + + This driver can also be built as modules. If so, the core module + will be called lis3lv02d and a specific module for the SPI transport + is called lis3lv02d_spi. + +config SENSORS_LIS3_I2C + tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (I2C)" + depends on I2C && INPUT + select SENSORS_LIS3LV02D + help + This driver provides support for the LIS3LV02Dx accelerometer connected + via I2C. The accelerometer data is readable via + /sys/devices/platform/lis3lv02d. + + This driver also provides an absolute input class device, allowing + the device to act as a pinball machine-esque joystick. + + This driver can also be built as modules. If so, the core module + will be called lis3lv02d and a specific module for the I2C transport + is called lis3lv02d_i2c. 
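For illustration only (this is not part of the patch): the help text above points at the sysfs interface under /sys/devices/platform/lis3lv02d, and the position attribute, formatted by lis3lv02d_position_show() further down as "(x,y,z)", can be read by a minimal userspace sketch like the following. The path is taken from the Kconfig help text and may differ if the platform device is registered elsewhere.

#include <stdio.h>

/* Read one accelerometer sample from the lis3lv02d sysfs interface. */
int main(void)
{
	FILE *f = fopen("/sys/devices/platform/lis3lv02d/position", "r");
	int x, y, z;

	if (!f) {
		perror("lis3lv02d position");
		return 1;
	}
	/* Matches the "(%d,%d,%d)" format used by the position attribute. */
	if (fscanf(f, "(%d,%d,%d)", &x, &y, &z) == 3)
		printf("x=%d y=%d z=%d\n", x, y, z);
	fclose(f);
	return 0;
}

Given the scaling done in the driver (values are multiplied by the per-chip sensitivity and divided by LIS3_ACCURACY), the numbers read back are roughly in milli-g.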
diff --git a/drivers/misc/lis3lv02d/Makefile b/drivers/misc/lis3lv02d/Makefile new file mode 100644 index 000000000..137e7020c --- /dev/null +++ b/drivers/misc/lis3lv02d/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# STMicroelectonics LIS3LV02D and similar accelerometers +# + +obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o +obj-$(CONFIG_SENSORS_LIS3_SPI) += lis3lv02d_spi.o +obj-$(CONFIG_SENSORS_LIS3_I2C) += lis3lv02d_i2c.o diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c new file mode 100644 index 000000000..9d14bf444 --- /dev/null +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -0,0 +1,1269 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * lis3lv02d.c - ST LIS3LV02DL accelerometer driver + * + * Copyright (C) 2007-2008 Yan Burman + * Copyright (C) 2008 Eric Piel + * Copyright (C) 2008-2009 Pavel Machek + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/sched/signal.h> +#include <linux/dmi.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/input.h> +#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/freezer.h> +#include <linux/uaccess.h> +#include <linux/miscdevice.h> +#include <linux/pm_runtime.h> +#include <linux/atomic.h> +#include <linux/of_device.h> +#include "lis3lv02d.h" + +#define DRIVER_NAME "lis3lv02d" + +/* joystick device poll interval in milliseconds */ +#define MDPS_POLL_INTERVAL 50 +#define MDPS_POLL_MIN 0 +#define MDPS_POLL_MAX 2000 + +#define LIS3_SYSFS_POWERDOWN_DELAY 5000 /* In milliseconds */ + +#define SELFTEST_OK 0 +#define SELFTEST_FAIL -1 +#define SELFTEST_IRQ -2 + +#define IRQ_LINE0 0 +#define IRQ_LINE1 1 + +/* + * The sensor can also generate interrupts (DRDY) but it's pretty pointless + * because they are generated even if the data do not change. So it's better + * to keep the interrupt for the free-fall event. The values are updated at + * 40Hz (at the lowest frequency), but as it can be pretty time consuming on + * some low processor, we poll the sensor only at 20Hz... enough for the + * joystick. + */ + +#define LIS3_PWRON_DELAY_WAI_12B (5000) +#define LIS3_PWRON_DELAY_WAI_8B (3000) + +/* + * LIS3LV02D spec says 1024 LSBs corresponds 1 G -> 1LSB is 1000/1024 mG + * LIS302D spec says: 18 mG / digit + * LIS3_ACCURACY is used to increase accuracy of the intermediate + * calculation results. + */ +#define LIS3_ACCURACY 1024 +/* Sensitivity values for -2G +2G scale */ +#define LIS3_SENSITIVITY_12B ((LIS3_ACCURACY * 1000) / 1024) +#define LIS3_SENSITIVITY_8B (18 * LIS3_ACCURACY) + +/* + * LIS331DLH spec says 1LSBs corresponds 4G/4096 -> 1LSB is 1000/1024 mG. + * Below macros defines sensitivity values for +/-2G. Dataout bits for + * +/-2G range is 12 bits so 4 bits adjustment must be done to get 12bit + * data from 16bit value. Currently this driver supports only 2G range. 
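+ * Example: a raw reading of 0x0800 (2048) >> SHIFT_ADJ_2G gives
+ * 128 counts, i.e. 128 * 1000/1024 = 125 mG.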
+ */ +#define LIS3DLH_SENSITIVITY_2G ((LIS3_ACCURACY * 1000) / 1024) +#define SHIFT_ADJ_2G 4 + +#define LIS3_DEFAULT_FUZZ_12B 3 +#define LIS3_DEFAULT_FLAT_12B 3 +#define LIS3_DEFAULT_FUZZ_8B 1 +#define LIS3_DEFAULT_FLAT_8B 1 + +struct lis3lv02d lis3_dev = { + .misc_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lis3_dev.misc_wait), +}; +EXPORT_SYMBOL_GPL(lis3_dev); + +/* just like param_set_int() but does sanity-check so that it won't point + * over the axis array size + */ +static int param_set_axis(const char *val, const struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + if (!ret) { + int val = *(int *)kp->arg; + if (val < 0) + val = -val; + if (!val || val > 3) + return -EINVAL; + } + return ret; +} + +static const struct kernel_param_ops param_ops_axis = { + .set = param_set_axis, + .get = param_get_int, +}; + +#define param_check_axis(name, p) param_check_int(name, p) + +module_param_array_named(axes, lis3_dev.ac.as_array, axis, NULL, 0644); +MODULE_PARM_DESC(axes, "Axis-mapping for x,y,z directions"); + +static s16 lis3lv02d_read_8(struct lis3lv02d *lis3, int reg) +{ + s8 lo; + if (lis3->read(lis3, reg, &lo) < 0) + return 0; + + return lo; +} + +static s16 lis3lv02d_read_12(struct lis3lv02d *lis3, int reg) +{ + u8 lo, hi; + + lis3->read(lis3, reg - 1, &lo); + lis3->read(lis3, reg, &hi); + /* In "12 bit right justified" mode, bit 6, bit 7, bit 8 = bit 5 */ + return (s16)((hi << 8) | lo); +} + +/* 12bits for 2G range, 13 bits for 4G range and 14 bits for 8G range */ +static s16 lis331dlh_read_data(struct lis3lv02d *lis3, int reg) +{ + u8 lo, hi; + int v; + + lis3->read(lis3, reg - 1, &lo); + lis3->read(lis3, reg, &hi); + v = (int) ((hi << 8) | lo); + + return (s16) v >> lis3->shift_adj; +} + +/** + * lis3lv02d_get_axis - For the given axis, give the value converted + * @axis: 1,2,3 - can also be negative + * @hw_values: raw values returned by the hardware + * + * Returns the converted value. 
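+ * For example, axis == -2 returns -hw_values[1], i.e. the Y channel
+ * negated.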
+ */ +static inline int lis3lv02d_get_axis(s8 axis, int hw_values[3]) +{ + if (axis > 0) + return hw_values[axis - 1]; + else + return -hw_values[-axis - 1]; +} + +/** + * lis3lv02d_get_xyz - Get X, Y and Z axis values from the accelerometer + * @lis3: pointer to the device struct + * @x: where to store the X axis value + * @y: where to store the Y axis value + * @z: where to store the Z axis value + * + * Note that 40Hz input device can eat up about 10% CPU at 800MHZ + */ +static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z) +{ + int position[3]; + int i; + + if (lis3->blkread) { + if (lis3->whoami == WAI_12B) { + u16 data[3]; + lis3->blkread(lis3, OUTX_L, 6, (u8 *)data); + for (i = 0; i < 3; i++) + position[i] = (s16)le16_to_cpu(data[i]); + } else { + u8 data[5]; + /* Data: x, dummy, y, dummy, z */ + lis3->blkread(lis3, OUTX, 5, data); + for (i = 0; i < 3; i++) + position[i] = (s8)data[i * 2]; + } + } else { + position[0] = lis3->read_data(lis3, OUTX); + position[1] = lis3->read_data(lis3, OUTY); + position[2] = lis3->read_data(lis3, OUTZ); + } + + for (i = 0; i < 3; i++) + position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY; + + *x = lis3lv02d_get_axis(lis3->ac.x, position); + *y = lis3lv02d_get_axis(lis3->ac.y, position); + *z = lis3lv02d_get_axis(lis3->ac.z, position); +} + +/* conversion btw sampling rate and the register values */ +static int lis3_12_rates[4] = {40, 160, 640, 2560}; +static int lis3_8_rates[2] = {100, 400}; +static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000}; +static int lis3_3dlh_rates[4] = {50, 100, 400, 1000}; + +/* ODR is Output Data Rate */ +static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3) +{ + u8 ctrl; + int shift; + + lis3->read(lis3, CTRL_REG1, &ctrl); + ctrl &= lis3->odr_mask; + shift = ffs(lis3->odr_mask) - 1; + return (ctrl >> shift); +} + +static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3) +{ + int odr_idx = lis3lv02d_get_odr_index(lis3); + int div = lis3->odrs[odr_idx]; + + if (div == 0) { + if (odr_idx == 0) { + /* Power-down mode, not sampling no need to sleep */ + return 0; + } + + dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx); + return -ENXIO; + } + + /* LIS3 power on delay is quite long */ + msleep(lis3->pwron_delay / div); + return 0; +} + +static int lis3lv02d_set_odr(struct lis3lv02d *lis3, int rate) +{ + u8 ctrl; + int i, len, shift; + + if (!rate) + return -EINVAL; + + lis3->read(lis3, CTRL_REG1, &ctrl); + ctrl &= ~lis3->odr_mask; + len = 1 << hweight_long(lis3->odr_mask); /* # of possible values */ + shift = ffs(lis3->odr_mask) - 1; + + for (i = 0; i < len; i++) + if (lis3->odrs[i] == rate) { + lis3->write(lis3, CTRL_REG1, + ctrl | (i << shift)); + return 0; + } + return -EINVAL; +} + +static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3]) +{ + u8 ctlreg, reg; + s16 x, y, z; + u8 selftest; + int ret; + u8 ctrl_reg_data; + unsigned char irq_cfg; + + mutex_lock(&lis3->mutex); + + irq_cfg = lis3->irq_cfg; + if (lis3->whoami == WAI_8B) { + lis3->data_ready_count[IRQ_LINE0] = 0; + lis3->data_ready_count[IRQ_LINE1] = 0; + + /* Change interrupt cfg to data ready for selftest */ + atomic_inc(&lis3->wake_thread); + lis3->irq_cfg = LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY; + lis3->read(lis3, CTRL_REG3, &ctrl_reg_data); + lis3->write(lis3, CTRL_REG3, (ctrl_reg_data & + ~(LIS3_IRQ1_MASK | LIS3_IRQ2_MASK)) | + (LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY)); + } + + if ((lis3->whoami == WAI_3DC) || (lis3->whoami == WAI_3DLH)) { + ctlreg = 
CTRL_REG4; + selftest = CTRL4_ST0; + } else { + ctlreg = CTRL_REG1; + if (lis3->whoami == WAI_12B) + selftest = CTRL1_ST; + else + selftest = CTRL1_STP; + } + + lis3->read(lis3, ctlreg, ®); + lis3->write(lis3, ctlreg, (reg | selftest)); + ret = lis3lv02d_get_pwron_wait(lis3); + if (ret) + goto fail; + + /* Read directly to avoid axis remap */ + x = lis3->read_data(lis3, OUTX); + y = lis3->read_data(lis3, OUTY); + z = lis3->read_data(lis3, OUTZ); + + /* back to normal settings */ + lis3->write(lis3, ctlreg, reg); + ret = lis3lv02d_get_pwron_wait(lis3); + if (ret) + goto fail; + + results[0] = x - lis3->read_data(lis3, OUTX); + results[1] = y - lis3->read_data(lis3, OUTY); + results[2] = z - lis3->read_data(lis3, OUTZ); + + ret = 0; + + if (lis3->whoami == WAI_8B) { + /* Restore original interrupt configuration */ + atomic_dec(&lis3->wake_thread); + lis3->write(lis3, CTRL_REG3, ctrl_reg_data); + lis3->irq_cfg = irq_cfg; + + if ((irq_cfg & LIS3_IRQ1_MASK) && + lis3->data_ready_count[IRQ_LINE0] < 2) { + ret = SELFTEST_IRQ; + goto fail; + } + + if ((irq_cfg & LIS3_IRQ2_MASK) && + lis3->data_ready_count[IRQ_LINE1] < 2) { + ret = SELFTEST_IRQ; + goto fail; + } + } + + if (lis3->pdata) { + int i; + for (i = 0; i < 3; i++) { + /* Check against selftest acceptance limits */ + if ((results[i] < lis3->pdata->st_min_limits[i]) || + (results[i] > lis3->pdata->st_max_limits[i])) { + ret = SELFTEST_FAIL; + goto fail; + } + } + } + + /* test passed */ +fail: + mutex_unlock(&lis3->mutex); + return ret; +} + +/* + * Order of registers in the list affects to order of the restore process. + * Perhaps it is a good idea to set interrupt enable register as a last one + * after all other configurations + */ +static u8 lis3_wai8_regs[] = { FF_WU_CFG_1, FF_WU_THS_1, FF_WU_DURATION_1, + FF_WU_CFG_2, FF_WU_THS_2, FF_WU_DURATION_2, + CLICK_CFG, CLICK_SRC, CLICK_THSY_X, CLICK_THSZ, + CLICK_TIMELIMIT, CLICK_LATENCY, CLICK_WINDOW, + CTRL_REG1, CTRL_REG2, CTRL_REG3}; + +static u8 lis3_wai12_regs[] = {FF_WU_CFG, FF_WU_THS_L, FF_WU_THS_H, + FF_WU_DURATION, DD_CFG, DD_THSI_L, DD_THSI_H, + DD_THSE_L, DD_THSE_H, + CTRL_REG1, CTRL_REG3, CTRL_REG2}; + +static inline void lis3_context_save(struct lis3lv02d *lis3) +{ + int i; + for (i = 0; i < lis3->regs_size; i++) + lis3->read(lis3, lis3->regs[i], &lis3->reg_cache[i]); + lis3->regs_stored = true; +} + +static inline void lis3_context_restore(struct lis3lv02d *lis3) +{ + int i; + if (lis3->regs_stored) + for (i = 0; i < lis3->regs_size; i++) + lis3->write(lis3, lis3->regs[i], lis3->reg_cache[i]); +} + +void lis3lv02d_poweroff(struct lis3lv02d *lis3) +{ + if (lis3->reg_ctrl) + lis3_context_save(lis3); + /* disable X,Y,Z axis and power down */ + lis3->write(lis3, CTRL_REG1, 0x00); + if (lis3->reg_ctrl) + lis3->reg_ctrl(lis3, LIS3_REG_OFF); +} +EXPORT_SYMBOL_GPL(lis3lv02d_poweroff); + +int lis3lv02d_poweron(struct lis3lv02d *lis3) +{ + int err; + u8 reg; + + lis3->init(lis3); + + /* + * Common configuration + * BDU: (12 bits sensors only) LSB and MSB values are not updated until + * both have been read. So the value read will always be correct. + * Set BOOT bit to refresh factory tuning values. 
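+	 * (Which BOOT bit applies depends on the chip variant; see the
+	 * CTRL2_BOOT* selection below.)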
+ */ + if (lis3->pdata) { + lis3->read(lis3, CTRL_REG2, ®); + if (lis3->whoami == WAI_12B) + reg |= CTRL2_BDU | CTRL2_BOOT; + else if (lis3->whoami == WAI_3DLH) + reg |= CTRL2_BOOT_3DLH; + else + reg |= CTRL2_BOOT_8B; + lis3->write(lis3, CTRL_REG2, reg); + + if (lis3->whoami == WAI_3DLH) { + lis3->read(lis3, CTRL_REG4, ®); + reg |= CTRL4_BDU; + lis3->write(lis3, CTRL_REG4, reg); + } + } + + err = lis3lv02d_get_pwron_wait(lis3); + if (err) + return err; + + if (lis3->reg_ctrl) + lis3_context_restore(lis3); + + return 0; +} +EXPORT_SYMBOL_GPL(lis3lv02d_poweron); + + +static void lis3lv02d_joystick_poll(struct input_dev *input) +{ + struct lis3lv02d *lis3 = input_get_drvdata(input); + int x, y, z; + + mutex_lock(&lis3->mutex); + lis3lv02d_get_xyz(lis3, &x, &y, &z); + input_report_abs(input, ABS_X, x); + input_report_abs(input, ABS_Y, y); + input_report_abs(input, ABS_Z, z); + input_sync(input); + mutex_unlock(&lis3->mutex); +} + +static int lis3lv02d_joystick_open(struct input_dev *input) +{ + struct lis3lv02d *lis3 = input_get_drvdata(input); + + if (lis3->pm_dev) + pm_runtime_get_sync(lis3->pm_dev); + + if (lis3->pdata && lis3->whoami == WAI_8B && lis3->idev) + atomic_set(&lis3->wake_thread, 1); + /* + * Update coordinates for the case where poll interval is 0 and + * the chip in running purely under interrupt control + */ + lis3lv02d_joystick_poll(input); + + return 0; +} + +static void lis3lv02d_joystick_close(struct input_dev *input) +{ + struct lis3lv02d *lis3 = input_get_drvdata(input); + + atomic_set(&lis3->wake_thread, 0); + if (lis3->pm_dev) + pm_runtime_put(lis3->pm_dev); +} + +static irqreturn_t lis302dl_interrupt(int irq, void *data) +{ + struct lis3lv02d *lis3 = data; + + if (!test_bit(0, &lis3->misc_opened)) + goto out; + + /* + * Be careful: on some HP laptops the bios force DD when on battery and + * the lid is closed. This leads to interrupts as soon as a little move + * is done. 
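+	 * (Hence the misc_opened check above: nothing is queued unless
+	 * somebody actually has the misc device open.)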
+ */ + atomic_inc(&lis3->count); + + wake_up_interruptible(&lis3->misc_wait); + kill_fasync(&lis3->async_queue, SIGIO, POLL_IN); +out: + if (atomic_read(&lis3->wake_thread)) + return IRQ_WAKE_THREAD; + return IRQ_HANDLED; +} + +static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3) +{ + struct input_dev *dev = lis3->idev; + u8 click_src; + + mutex_lock(&lis3->mutex); + lis3->read(lis3, CLICK_SRC, &click_src); + + if (click_src & CLICK_SINGLE_X) { + input_report_key(dev, lis3->mapped_btns[0], 1); + input_report_key(dev, lis3->mapped_btns[0], 0); + } + + if (click_src & CLICK_SINGLE_Y) { + input_report_key(dev, lis3->mapped_btns[1], 1); + input_report_key(dev, lis3->mapped_btns[1], 0); + } + + if (click_src & CLICK_SINGLE_Z) { + input_report_key(dev, lis3->mapped_btns[2], 1); + input_report_key(dev, lis3->mapped_btns[2], 0); + } + input_sync(dev); + mutex_unlock(&lis3->mutex); +} + +static inline void lis302dl_data_ready(struct lis3lv02d *lis3, int index) +{ + int dummy; + + /* Dummy read to ack interrupt */ + lis3lv02d_get_xyz(lis3, &dummy, &dummy, &dummy); + lis3->data_ready_count[index]++; +} + +static irqreturn_t lis302dl_interrupt_thread1_8b(int irq, void *data) +{ + struct lis3lv02d *lis3 = data; + u8 irq_cfg = lis3->irq_cfg & LIS3_IRQ1_MASK; + + if (irq_cfg == LIS3_IRQ1_CLICK) + lis302dl_interrupt_handle_click(lis3); + else if (unlikely(irq_cfg == LIS3_IRQ1_DATA_READY)) + lis302dl_data_ready(lis3, IRQ_LINE0); + else + lis3lv02d_joystick_poll(lis3->idev); + + return IRQ_HANDLED; +} + +static irqreturn_t lis302dl_interrupt_thread2_8b(int irq, void *data) +{ + struct lis3lv02d *lis3 = data; + u8 irq_cfg = lis3->irq_cfg & LIS3_IRQ2_MASK; + + if (irq_cfg == LIS3_IRQ2_CLICK) + lis302dl_interrupt_handle_click(lis3); + else if (unlikely(irq_cfg == LIS3_IRQ2_DATA_READY)) + lis302dl_data_ready(lis3, IRQ_LINE1); + else + lis3lv02d_joystick_poll(lis3->idev); + + return IRQ_HANDLED; +} + +static int lis3lv02d_misc_open(struct inode *inode, struct file *file) +{ + struct lis3lv02d *lis3 = container_of(file->private_data, + struct lis3lv02d, miscdev); + + if (test_and_set_bit(0, &lis3->misc_opened)) + return -EBUSY; /* already open */ + + if (lis3->pm_dev) + pm_runtime_get_sync(lis3->pm_dev); + + atomic_set(&lis3->count, 0); + return 0; +} + +static int lis3lv02d_misc_release(struct inode *inode, struct file *file) +{ + struct lis3lv02d *lis3 = container_of(file->private_data, + struct lis3lv02d, miscdev); + + clear_bit(0, &lis3->misc_opened); /* release the device */ + if (lis3->pm_dev) + pm_runtime_put(lis3->pm_dev); + return 0; +} + +static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + struct lis3lv02d *lis3 = container_of(file->private_data, + struct lis3lv02d, miscdev); + + DECLARE_WAITQUEUE(wait, current); + u32 data; + unsigned char byte_data; + ssize_t retval = 1; + + if (count < 1) + return -EINVAL; + + add_wait_queue(&lis3->misc_wait, &wait); + while (true) { + set_current_state(TASK_INTERRUPTIBLE); + data = atomic_xchg(&lis3->count, 0); + if (data) + break; + + if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + goto out; + } + + if (signal_pending(current)) { + retval = -ERESTARTSYS; + goto out; + } + + schedule(); + } + + if (data < 255) + byte_data = data; + else + byte_data = 255; + + /* make sure we are not going into copy_to_user() with + * TASK_INTERRUPTIBLE state */ + set_current_state(TASK_RUNNING); + if (copy_to_user(buf, &byte_data, sizeof(byte_data))) + retval = -EFAULT; + +out: + 
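+	/* Make sure we leave TASK_INTERRUPTIBLE on every exit path. */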
__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&lis3->misc_wait, &wait);
+
+	return retval;
+}
+
+static __poll_t lis3lv02d_misc_poll(struct file *file, poll_table *wait)
+{
+	struct lis3lv02d *lis3 = container_of(file->private_data,
+					      struct lis3lv02d, miscdev);
+
+	poll_wait(file, &lis3->misc_wait, wait);
+	if (atomic_read(&lis3->count))
+		return EPOLLIN | EPOLLRDNORM;
+	return 0;
+}
+
+static int lis3lv02d_misc_fasync(int fd, struct file *file, int on)
+{
+	struct lis3lv02d *lis3 = container_of(file->private_data,
+					      struct lis3lv02d, miscdev);
+
+	return fasync_helper(fd, file, on, &lis3->async_queue);
+}
+
+static const struct file_operations lis3lv02d_misc_fops = {
+	.owner   = THIS_MODULE,
+	.llseek  = no_llseek,
+	.read    = lis3lv02d_misc_read,
+	.open    = lis3lv02d_misc_open,
+	.release = lis3lv02d_misc_release,
+	.poll    = lis3lv02d_misc_poll,
+	.fasync  = lis3lv02d_misc_fasync,
+};
+
+int lis3lv02d_joystick_enable(struct lis3lv02d *lis3)
+{
+	struct input_dev *input_dev;
+	int err;
+	int max_val, fuzz, flat;
+	int btns[] = {BTN_X, BTN_Y, BTN_Z};
+
+	if (lis3->idev)
+		return -EINVAL;
+
+	input_dev = input_allocate_device();
+	if (!input_dev)
+		return -ENOMEM;
+
+	input_dev->name       = "ST LIS3LV02DL Accelerometer";
+	input_dev->phys       = DRIVER_NAME "/input0";
+	input_dev->id.bustype = BUS_HOST;
+	input_dev->id.vendor  = 0;
+	input_dev->dev.parent = &lis3->pdev->dev;
+
+	input_dev->open = lis3lv02d_joystick_open;
+	input_dev->close = lis3lv02d_joystick_close;
+
+	max_val = (lis3->mdps_max_val * lis3->scale) / LIS3_ACCURACY;
+	if (lis3->whoami == WAI_12B) {
+		fuzz = LIS3_DEFAULT_FUZZ_12B;
+		flat = LIS3_DEFAULT_FLAT_12B;
+	} else {
+		fuzz = LIS3_DEFAULT_FUZZ_8B;
+		flat = LIS3_DEFAULT_FLAT_8B;
+	}
+	fuzz = (fuzz * lis3->scale) / LIS3_ACCURACY;
+	flat = (flat * lis3->scale) / LIS3_ACCURACY;
+
+	input_set_abs_params(input_dev, ABS_X, -max_val, max_val, fuzz, flat);
+	input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
+	input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
+
+	input_set_drvdata(input_dev, lis3);
+	lis3->idev = input_dev;
+
+	err = input_setup_polling(input_dev, lis3lv02d_joystick_poll);
+	if (err)
+		goto err_free_input;
+
+	input_set_poll_interval(input_dev, MDPS_POLL_INTERVAL);
+	input_set_min_poll_interval(input_dev, MDPS_POLL_MIN);
+	input_set_max_poll_interval(input_dev, MDPS_POLL_MAX);
+
+	lis3->mapped_btns[0] = lis3lv02d_get_axis(abs(lis3->ac.x), btns);
+	lis3->mapped_btns[1] = lis3lv02d_get_axis(abs(lis3->ac.y), btns);
+	lis3->mapped_btns[2] = lis3lv02d_get_axis(abs(lis3->ac.z), btns);
+
+	err = input_register_device(lis3->idev);
+	if (err)
+		goto err_free_input;
+
+	return 0;
+
+err_free_input:
+	input_free_device(input_dev);
+	lis3->idev = NULL;
+	return err;
+}
+EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable);
+
+void lis3lv02d_joystick_disable(struct lis3lv02d *lis3)
+{
+	if (lis3->irq)
+		free_irq(lis3->irq, lis3);
+	if (lis3->pdata && lis3->pdata->irq2)
+		free_irq(lis3->pdata->irq2, lis3);
+
+	if (!lis3->idev)
+		return;
+
+	if (lis3->irq)
+		misc_deregister(&lis3->miscdev);
+	input_unregister_device(lis3->idev);
+	lis3->idev = NULL;
+}
+EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
+
+/* Sysfs stuff */
+static void lis3lv02d_sysfs_poweron(struct lis3lv02d *lis3)
+{
+	/*
+	 * SYSFS functions are fast visitors, so issue the put-call
+	 * immediately after the get-call. However, keep the chip running
+	 * for a while and schedule a delayed suspend. This way periodic
+	 * sysfs calls don't suffer from the relatively long power-up time.
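+	 *
+	 * The attributes defined below are then cheap to poke from a shell;
+	 * for instance (device path as registered by lis3lv02d_add_fs()
+	 * below; the sample output is made up):
+	 *
+	 *	# cat /sys/devices/platform/lis3lv02d/position
+	 *	(12,-54,1021)
+	 *	# echo 100 > /sys/devices/platform/lis3lv02d/rate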
+ */ + + if (lis3->pm_dev) { + pm_runtime_get_sync(lis3->pm_dev); + pm_runtime_put_noidle(lis3->pm_dev); + pm_schedule_suspend(lis3->pm_dev, LIS3_SYSFS_POWERDOWN_DELAY); + } +} + +static ssize_t lis3lv02d_selftest_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct lis3lv02d *lis3 = dev_get_drvdata(dev); + s16 values[3]; + + static const char ok[] = "OK"; + static const char fail[] = "FAIL"; + static const char irq[] = "FAIL_IRQ"; + const char *res; + + lis3lv02d_sysfs_poweron(lis3); + switch (lis3lv02d_selftest(lis3, values)) { + case SELFTEST_FAIL: + res = fail; + break; + case SELFTEST_IRQ: + res = irq; + break; + case SELFTEST_OK: + default: + res = ok; + break; + } + return sprintf(buf, "%s %d %d %d\n", res, + values[0], values[1], values[2]); +} + +static ssize_t lis3lv02d_position_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct lis3lv02d *lis3 = dev_get_drvdata(dev); + int x, y, z; + + lis3lv02d_sysfs_poweron(lis3); + mutex_lock(&lis3->mutex); + lis3lv02d_get_xyz(lis3, &x, &y, &z); + mutex_unlock(&lis3->mutex); + return sprintf(buf, "(%d,%d,%d)\n", x, y, z); +} + +static ssize_t lis3lv02d_rate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct lis3lv02d *lis3 = dev_get_drvdata(dev); + int odr_idx; + + lis3lv02d_sysfs_poweron(lis3); + + odr_idx = lis3lv02d_get_odr_index(lis3); + return sprintf(buf, "%d\n", lis3->odrs[odr_idx]); +} + +static ssize_t lis3lv02d_rate_set(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct lis3lv02d *lis3 = dev_get_drvdata(dev); + unsigned long rate; + int ret; + + ret = kstrtoul(buf, 0, &rate); + if (ret) + return ret; + + lis3lv02d_sysfs_poweron(lis3); + if (lis3lv02d_set_odr(lis3, rate)) + return -EINVAL; + + return count; +} + +static DEVICE_ATTR(selftest, S_IRUSR, lis3lv02d_selftest_show, NULL); +static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL); +static DEVICE_ATTR(rate, S_IRUGO | S_IWUSR, lis3lv02d_rate_show, + lis3lv02d_rate_set); + +static struct attribute *lis3lv02d_attributes[] = { + &dev_attr_selftest.attr, + &dev_attr_position.attr, + &dev_attr_rate.attr, + NULL +}; + +static const struct attribute_group lis3lv02d_attribute_group = { + .attrs = lis3lv02d_attributes +}; + + +static int lis3lv02d_add_fs(struct lis3lv02d *lis3) +{ + lis3->pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0); + if (IS_ERR(lis3->pdev)) + return PTR_ERR(lis3->pdev); + + platform_set_drvdata(lis3->pdev, lis3); + return sysfs_create_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group); +} + +int lis3lv02d_remove_fs(struct lis3lv02d *lis3) +{ + sysfs_remove_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group); + platform_device_unregister(lis3->pdev); + if (lis3->pm_dev) { + /* Barrier after the sysfs remove */ + pm_runtime_barrier(lis3->pm_dev); + + /* SYSFS may have left chip running. 
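+		 * (a sysfs access may have scheduled a delayed suspend that
+		 * pm_runtime_barrier() above has just cancelled, so nothing
+		 * else is on the way to power the chip down).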
Turn off if necessary */ + if (!pm_runtime_suspended(lis3->pm_dev)) + lis3lv02d_poweroff(lis3); + + pm_runtime_disable(lis3->pm_dev); + pm_runtime_set_suspended(lis3->pm_dev); + } + kfree(lis3->reg_cache); + return 0; +} +EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs); + +static void lis3lv02d_8b_configure(struct lis3lv02d *lis3, + struct lis3lv02d_platform_data *p) +{ + int err; + int ctrl2 = p->hipass_ctrl; + + if (p->click_flags) { + lis3->write(lis3, CLICK_CFG, p->click_flags); + lis3->write(lis3, CLICK_TIMELIMIT, p->click_time_limit); + lis3->write(lis3, CLICK_LATENCY, p->click_latency); + lis3->write(lis3, CLICK_WINDOW, p->click_window); + lis3->write(lis3, CLICK_THSZ, p->click_thresh_z & 0xf); + lis3->write(lis3, CLICK_THSY_X, + (p->click_thresh_x & 0xf) | + (p->click_thresh_y << 4)); + + if (lis3->idev) { + input_set_capability(lis3->idev, EV_KEY, BTN_X); + input_set_capability(lis3->idev, EV_KEY, BTN_Y); + input_set_capability(lis3->idev, EV_KEY, BTN_Z); + } + } + + if (p->wakeup_flags) { + lis3->write(lis3, FF_WU_CFG_1, p->wakeup_flags); + lis3->write(lis3, FF_WU_THS_1, p->wakeup_thresh & 0x7f); + /* pdata value + 1 to keep this backward compatible*/ + lis3->write(lis3, FF_WU_DURATION_1, p->duration1 + 1); + ctrl2 ^= HP_FF_WU1; /* Xor to keep compatible with old pdata*/ + } + + if (p->wakeup_flags2) { + lis3->write(lis3, FF_WU_CFG_2, p->wakeup_flags2); + lis3->write(lis3, FF_WU_THS_2, p->wakeup_thresh2 & 0x7f); + /* pdata value + 1 to keep this backward compatible*/ + lis3->write(lis3, FF_WU_DURATION_2, p->duration2 + 1); + ctrl2 ^= HP_FF_WU2; /* Xor to keep compatible with old pdata*/ + } + /* Configure hipass filters */ + lis3->write(lis3, CTRL_REG2, ctrl2); + + if (p->irq2) { + err = request_threaded_irq(p->irq2, + NULL, + lis302dl_interrupt_thread2_8b, + IRQF_TRIGGER_RISING | IRQF_ONESHOT | + (p->irq_flags2 & IRQF_TRIGGER_MASK), + DRIVER_NAME, lis3); + if (err < 0) + pr_err("No second IRQ. 
Limited functionality\n");
+	}
+}
+
+#ifdef CONFIG_OF
+int lis3lv02d_init_dt(struct lis3lv02d *lis3)
+{
+	struct lis3lv02d_platform_data *pdata;
+	struct device_node *np = lis3->of_node;
+	u32 val;
+	s32 sval;
+
+	if (!lis3->of_node)
+		return 0;
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	if (of_get_property(np, "st,click-single-x", NULL))
+		pdata->click_flags |= LIS3_CLICK_SINGLE_X;
+	if (of_get_property(np, "st,click-double-x", NULL))
+		pdata->click_flags |= LIS3_CLICK_DOUBLE_X;
+
+	if (of_get_property(np, "st,click-single-y", NULL))
+		pdata->click_flags |= LIS3_CLICK_SINGLE_Y;
+	if (of_get_property(np, "st,click-double-y", NULL))
+		pdata->click_flags |= LIS3_CLICK_DOUBLE_Y;
+
+	if (of_get_property(np, "st,click-single-z", NULL))
+		pdata->click_flags |= LIS3_CLICK_SINGLE_Z;
+	if (of_get_property(np, "st,click-double-z", NULL))
+		pdata->click_flags |= LIS3_CLICK_DOUBLE_Z;
+
+	if (!of_property_read_u32(np, "st,click-threshold-x", &val))
+		pdata->click_thresh_x = val;
+	if (!of_property_read_u32(np, "st,click-threshold-y", &val))
+		pdata->click_thresh_y = val;
+	if (!of_property_read_u32(np, "st,click-threshold-z", &val))
+		pdata->click_thresh_z = val;
+
+	if (!of_property_read_u32(np, "st,click-time-limit", &val))
+		pdata->click_time_limit = val;
+	if (!of_property_read_u32(np, "st,click-latency", &val))
+		pdata->click_latency = val;
+	if (!of_property_read_u32(np, "st,click-window", &val))
+		pdata->click_window = val;
+
+	if (of_get_property(np, "st,irq1-disable", NULL))
+		pdata->irq_cfg |= LIS3_IRQ1_DISABLE;
+	if (of_get_property(np, "st,irq1-ff-wu-1", NULL))
+		pdata->irq_cfg |= LIS3_IRQ1_FF_WU_1;
+	if (of_get_property(np, "st,irq1-ff-wu-2", NULL))
+		pdata->irq_cfg |= LIS3_IRQ1_FF_WU_2;
+	if (of_get_property(np, "st,irq1-data-ready", NULL))
+		pdata->irq_cfg |= LIS3_IRQ1_DATA_READY;
+	if (of_get_property(np, "st,irq1-click", NULL))
+		pdata->irq_cfg |= LIS3_IRQ1_CLICK;
+
+	if (of_get_property(np, "st,irq2-disable", NULL))
+		pdata->irq_cfg |= LIS3_IRQ2_DISABLE;
+	if (of_get_property(np, "st,irq2-ff-wu-1", NULL))
+		pdata->irq_cfg |= LIS3_IRQ2_FF_WU_1;
+	if (of_get_property(np, "st,irq2-ff-wu-2", NULL))
+		pdata->irq_cfg |= LIS3_IRQ2_FF_WU_2;
+	if (of_get_property(np, "st,irq2-data-ready", NULL))
+		pdata->irq_cfg |= LIS3_IRQ2_DATA_READY;
+	if (of_get_property(np, "st,irq2-click", NULL))
+		pdata->irq_cfg |= LIS3_IRQ2_CLICK;
+
+	if (of_get_property(np, "st,irq-open-drain", NULL))
+		pdata->irq_cfg |= LIS3_IRQ_OPEN_DRAIN;
+	if (of_get_property(np, "st,irq-active-low", NULL))
+		pdata->irq_cfg |= LIS3_IRQ_ACTIVE_LOW;
+
+	if (!of_property_read_u32(np, "st,wu-duration-1", &val))
+		pdata->duration1 = val;
+	if (!of_property_read_u32(np, "st,wu-duration-2", &val))
+		pdata->duration2 = val;
+
+	if (of_get_property(np, "st,wakeup-x-lo", NULL))
+		pdata->wakeup_flags |= LIS3_WAKEUP_X_LO;
+	if (of_get_property(np, "st,wakeup-x-hi", NULL))
+		pdata->wakeup_flags |= LIS3_WAKEUP_X_HI;
+	if (of_get_property(np, "st,wakeup-y-lo", NULL))
+		pdata->wakeup_flags |= LIS3_WAKEUP_Y_LO;
+	if (of_get_property(np, "st,wakeup-y-hi", NULL))
+		pdata->wakeup_flags |= LIS3_WAKEUP_Y_HI;
+	if (of_get_property(np, "st,wakeup-z-lo", NULL))
+		pdata->wakeup_flags |= LIS3_WAKEUP_Z_LO;
+	if (of_get_property(np, "st,wakeup-z-hi", NULL))
+		pdata->wakeup_flags |= LIS3_WAKEUP_Z_HI;
+	if (!of_property_read_u32(np, "st,wakeup-threshold", &val))
+		pdata->wakeup_thresh = val;
+
+	if (of_get_property(np, "st,wakeup2-x-lo", NULL))
+		pdata->wakeup_flags2 |= LIS3_WAKEUP_X_LO;
+	if (of_get_property(np,
"st,wakeup2-x-hi", NULL)) + pdata->wakeup_flags2 |= LIS3_WAKEUP_X_HI; + if (of_get_property(np, "st,wakeup2-y-lo", NULL)) + pdata->wakeup_flags2 |= LIS3_WAKEUP_Y_LO; + if (of_get_property(np, "st,wakeup2-y-hi", NULL)) + pdata->wakeup_flags2 |= LIS3_WAKEUP_Y_HI; + if (of_get_property(np, "st,wakeup2-z-lo", NULL)) + pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_LO; + if (of_get_property(np, "st,wakeup2-z-hi", NULL)) + pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_HI; + if (of_get_property(np, "st,wakeup2-threshold", &val)) + pdata->wakeup_thresh2 = val; + + if (!of_property_read_u32(np, "st,highpass-cutoff-hz", &val)) { + switch (val) { + case 1: + pdata->hipass_ctrl = LIS3_HIPASS_CUTFF_1HZ; + break; + case 2: + pdata->hipass_ctrl = LIS3_HIPASS_CUTFF_2HZ; + break; + case 4: + pdata->hipass_ctrl = LIS3_HIPASS_CUTFF_4HZ; + break; + case 8: + pdata->hipass_ctrl = LIS3_HIPASS_CUTFF_8HZ; + break; + } + } + + if (of_get_property(np, "st,hipass1-disable", NULL)) + pdata->hipass_ctrl |= LIS3_HIPASS1_DISABLE; + if (of_get_property(np, "st,hipass2-disable", NULL)) + pdata->hipass_ctrl |= LIS3_HIPASS2_DISABLE; + + if (of_property_read_s32(np, "st,axis-x", &sval) == 0) + pdata->axis_x = sval; + if (of_property_read_s32(np, "st,axis-y", &sval) == 0) + pdata->axis_y = sval; + if (of_property_read_s32(np, "st,axis-z", &sval) == 0) + pdata->axis_z = sval; + + if (of_get_property(np, "st,default-rate", NULL)) + pdata->default_rate = val; + + if (of_property_read_s32(np, "st,min-limit-x", &sval) == 0) + pdata->st_min_limits[0] = sval; + if (of_property_read_s32(np, "st,min-limit-y", &sval) == 0) + pdata->st_min_limits[1] = sval; + if (of_property_read_s32(np, "st,min-limit-z", &sval) == 0) + pdata->st_min_limits[2] = sval; + + if (of_property_read_s32(np, "st,max-limit-x", &sval) == 0) + pdata->st_max_limits[0] = sval; + if (of_property_read_s32(np, "st,max-limit-y", &sval) == 0) + pdata->st_max_limits[1] = sval; + if (of_property_read_s32(np, "st,max-limit-z", &sval) == 0) + pdata->st_max_limits[2] = sval; + + + lis3->pdata = pdata; + + return 0; +} + +#else +int lis3lv02d_init_dt(struct lis3lv02d *lis3) +{ + return 0; +} +#endif +EXPORT_SYMBOL_GPL(lis3lv02d_init_dt); + +/* + * Initialise the accelerometer and the various subsystems. + * Should be rather independent of the bus system. 
+ */ +int lis3lv02d_init_device(struct lis3lv02d *lis3) +{ + int err; + irq_handler_t thread_fn; + int irq_flags = 0; + + lis3->whoami = lis3lv02d_read_8(lis3, WHO_AM_I); + + switch (lis3->whoami) { + case WAI_12B: + pr_info("12 bits sensor found\n"); + lis3->read_data = lis3lv02d_read_12; + lis3->mdps_max_val = 2048; + lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_12B; + lis3->odrs = lis3_12_rates; + lis3->odr_mask = CTRL1_DF0 | CTRL1_DF1; + lis3->scale = LIS3_SENSITIVITY_12B; + lis3->regs = lis3_wai12_regs; + lis3->regs_size = ARRAY_SIZE(lis3_wai12_regs); + break; + case WAI_8B: + pr_info("8 bits sensor found\n"); + lis3->read_data = lis3lv02d_read_8; + lis3->mdps_max_val = 128; + lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_8B; + lis3->odrs = lis3_8_rates; + lis3->odr_mask = CTRL1_DR; + lis3->scale = LIS3_SENSITIVITY_8B; + lis3->regs = lis3_wai8_regs; + lis3->regs_size = ARRAY_SIZE(lis3_wai8_regs); + break; + case WAI_3DC: + pr_info("8 bits 3DC sensor found\n"); + lis3->read_data = lis3lv02d_read_8; + lis3->mdps_max_val = 128; + lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_8B; + lis3->odrs = lis3_3dc_rates; + lis3->odr_mask = CTRL1_ODR0|CTRL1_ODR1|CTRL1_ODR2|CTRL1_ODR3; + lis3->scale = LIS3_SENSITIVITY_8B; + break; + case WAI_3DLH: + pr_info("16 bits lis331dlh sensor found\n"); + lis3->read_data = lis331dlh_read_data; + lis3->mdps_max_val = 2048; /* 12 bits for 2G */ + lis3->shift_adj = SHIFT_ADJ_2G; + lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_8B; + lis3->odrs = lis3_3dlh_rates; + lis3->odr_mask = CTRL1_DR0 | CTRL1_DR1; + lis3->scale = LIS3DLH_SENSITIVITY_2G; + break; + default: + pr_err("unknown sensor type 0x%X\n", lis3->whoami); + return -EINVAL; + } + + lis3->reg_cache = kzalloc(max(sizeof(lis3_wai8_regs), + sizeof(lis3_wai12_regs)), GFP_KERNEL); + + if (lis3->reg_cache == NULL) { + printk(KERN_ERR DRIVER_NAME "out of memory\n"); + return -ENOMEM; + } + + mutex_init(&lis3->mutex); + atomic_set(&lis3->wake_thread, 0); + + lis3lv02d_add_fs(lis3); + err = lis3lv02d_poweron(lis3); + if (err) { + lis3lv02d_remove_fs(lis3); + return err; + } + + if (lis3->pm_dev) { + pm_runtime_set_active(lis3->pm_dev); + pm_runtime_enable(lis3->pm_dev); + } + + if (lis3lv02d_joystick_enable(lis3)) + pr_err("joystick initialization failed\n"); + + /* passing in platform specific data is purely optional and only + * used by the SPI transport layer at the moment */ + if (lis3->pdata) { + struct lis3lv02d_platform_data *p = lis3->pdata; + + if (lis3->whoami == WAI_8B) + lis3lv02d_8b_configure(lis3, p); + + irq_flags = p->irq_flags1 & IRQF_TRIGGER_MASK; + + lis3->irq_cfg = p->irq_cfg; + if (p->irq_cfg) + lis3->write(lis3, CTRL_REG3, p->irq_cfg); + + if (p->default_rate) + lis3lv02d_set_odr(lis3, p->default_rate); + } + + /* bail if we did not get an IRQ from the bus layer */ + if (!lis3->irq) { + pr_debug("No IRQ. Disabling /dev/freefall\n"); + goto out; + } + + /* + * The sensor can generate interrupts for free-fall and direction + * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep + * the things simple and _fast_ we activate it only for free-fall, so + * no need to read register (very slow with ACPI). For the same reason, + * we forbid shared interrupts. + * + * IRQF_TRIGGER_RISING seems pointless on HP laptops because the + * io-apic is not configurable (and generates a warning) but I keep it + * in case of support for other hardware. 
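+	 *
+	 * The "freefall" misc device registered just below is consumed from
+	 * user space with a plain blocking read, one event-count byte per
+	 * call; a minimal sketch (error handling omitted):
+	 *
+	 *	int fd = open("/dev/freefall", O_RDONLY);
+	 *	unsigned char count;
+	 *
+	 *	read(fd, &count, 1);	- blocks until an interrupt arrives,
+	 *				  then returns the number of events
+	 *				  (capped at 255) since the last read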
+ */ + if (lis3->pdata && lis3->whoami == WAI_8B) + thread_fn = lis302dl_interrupt_thread1_8b; + else + thread_fn = NULL; + + err = request_threaded_irq(lis3->irq, lis302dl_interrupt, + thread_fn, + IRQF_TRIGGER_RISING | IRQF_ONESHOT | + irq_flags, + DRIVER_NAME, lis3); + + if (err < 0) { + pr_err("Cannot get IRQ\n"); + goto out; + } + + lis3->miscdev.minor = MISC_DYNAMIC_MINOR; + lis3->miscdev.name = "freefall"; + lis3->miscdev.fops = &lis3lv02d_misc_fops; + + if (misc_register(&lis3->miscdev)) + pr_err("misc_register failed\n"); +out: + return 0; +} +EXPORT_SYMBOL_GPL(lis3lv02d_init_device); + +MODULE_DESCRIPTION("ST LIS3LV02Dx three-axis digital accelerometer driver"); +MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h new file mode 100644 index 000000000..7ac788fae --- /dev/null +++ b/drivers/misc/lis3lv02d/lis3lv02d.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * lis3lv02d.h - ST LIS3LV02DL accelerometer driver + * + * Copyright (C) 2007-2008 Yan Burman + * Copyright (C) 2008-2009 Eric Piel + */ +#include <linux/platform_device.h> +#include <linux/input.h> +#include <linux/regulator/consumer.h> +#include <linux/miscdevice.h> + +/* + * This driver tries to support the "digital" accelerometer chips from + * STMicroelectronics such as LIS3LV02DL, LIS302DL, LIS3L02DQ, LIS331DL, + * LIS331DLH, LIS35DE, or LIS202DL. They are very similar in terms of + * programming, with almost the same registers. In addition to differing + * on physical properties, they differ on the number of axes (2/3), + * precision (8/12 bits), and special features (freefall detection, + * click...). Unfortunately, not all the differences can be probed via + * a register. They can be connected either via I²C or SPI. + */ + +#include <linux/lis3lv02d.h> + +enum lis3_reg { + WHO_AM_I = 0x0F, + OFFSET_X = 0x16, + OFFSET_Y = 0x17, + OFFSET_Z = 0x18, + GAIN_X = 0x19, + GAIN_Y = 0x1A, + GAIN_Z = 0x1B, + CTRL_REG1 = 0x20, + CTRL_REG2 = 0x21, + CTRL_REG3 = 0x22, + CTRL_REG4 = 0x23, + HP_FILTER_RESET = 0x23, + STATUS_REG = 0x27, + OUTX_L = 0x28, + OUTX_H = 0x29, + OUTX = 0x29, + OUTY_L = 0x2A, + OUTY_H = 0x2B, + OUTY = 0x2B, + OUTZ_L = 0x2C, + OUTZ_H = 0x2D, + OUTZ = 0x2D, +}; + +enum lis302d_reg { + FF_WU_CFG_1 = 0x30, + FF_WU_SRC_1 = 0x31, + FF_WU_THS_1 = 0x32, + FF_WU_DURATION_1 = 0x33, + FF_WU_CFG_2 = 0x34, + FF_WU_SRC_2 = 0x35, + FF_WU_THS_2 = 0x36, + FF_WU_DURATION_2 = 0x37, + CLICK_CFG = 0x38, + CLICK_SRC = 0x39, + CLICK_THSY_X = 0x3B, + CLICK_THSZ = 0x3C, + CLICK_TIMELIMIT = 0x3D, + CLICK_LATENCY = 0x3E, + CLICK_WINDOW = 0x3F, +}; + +enum lis3lv02d_reg { + FF_WU_CFG = 0x30, + FF_WU_SRC = 0x31, + FF_WU_ACK = 0x32, + FF_WU_THS_L = 0x34, + FF_WU_THS_H = 0x35, + FF_WU_DURATION = 0x36, + DD_CFG = 0x38, + DD_SRC = 0x39, + DD_ACK = 0x3A, + DD_THSI_L = 0x3C, + DD_THSI_H = 0x3D, + DD_THSE_L = 0x3E, + DD_THSE_H = 0x3F, +}; + +enum lis3_who_am_i { + WAI_3DLH = 0x32, /* 16 bits: LIS331DLH */ + WAI_3DC = 0x33, /* 8 bits: LIS3DC, HP3DC */ + WAI_12B = 0x3A, /* 12 bits: LIS3LV02D[LQ]... */ + WAI_8B = 0x3B, /* 8 bits: LIS[23]02D[LQ]... 
*/ + WAI_6B = 0x52, /* 6 bits: LIS331DLF - not supported */ +}; + +enum lis3_type { + LIS3LV02D, + LIS3DC, + HP3DC, + LIS2302D, + LIS331DLF, + LIS331DLH, +}; + +enum lis3lv02d_ctrl1_12b { + CTRL1_Xen = 0x01, + CTRL1_Yen = 0x02, + CTRL1_Zen = 0x04, + CTRL1_ST = 0x08, + CTRL1_DF0 = 0x10, + CTRL1_DF1 = 0x20, + CTRL1_PD0 = 0x40, + CTRL1_PD1 = 0x80, +}; + +/* Delta to ctrl1_12b version */ +enum lis3lv02d_ctrl1_8b { + CTRL1_STM = 0x08, + CTRL1_STP = 0x10, + CTRL1_FS = 0x20, + CTRL1_PD = 0x40, + CTRL1_DR = 0x80, +}; + +enum lis3lv02d_ctrl1_3dc { + CTRL1_ODR0 = 0x10, + CTRL1_ODR1 = 0x20, + CTRL1_ODR2 = 0x40, + CTRL1_ODR3 = 0x80, +}; + +enum lis331dlh_ctrl1 { + CTRL1_DR0 = 0x08, + CTRL1_DR1 = 0x10, + CTRL1_PM0 = 0x20, + CTRL1_PM1 = 0x40, + CTRL1_PM2 = 0x80, +}; + +enum lis331dlh_ctrl2 { + CTRL2_HPEN1 = 0x04, + CTRL2_HPEN2 = 0x08, + CTRL2_FDS_3DLH = 0x10, + CTRL2_BOOT_3DLH = 0x80, +}; + +enum lis331dlh_ctrl4 { + CTRL4_STSIGN = 0x08, + CTRL4_BLE = 0x40, + CTRL4_BDU = 0x80, +}; + +enum lis3lv02d_ctrl2 { + CTRL2_DAS = 0x01, + CTRL2_SIM = 0x02, + CTRL2_DRDY = 0x04, + CTRL2_IEN = 0x08, + CTRL2_BOOT = 0x10, + CTRL2_BLE = 0x20, + CTRL2_BDU = 0x40, /* Block Data Update */ + CTRL2_FS = 0x80, /* Full Scale selection */ +}; + +enum lis3lv02d_ctrl4_3dc { + CTRL4_SIM = 0x01, + CTRL4_ST0 = 0x02, + CTRL4_ST1 = 0x04, + CTRL4_FS0 = 0x10, + CTRL4_FS1 = 0x20, +}; + +enum lis302d_ctrl2 { + HP_FF_WU2 = 0x08, + HP_FF_WU1 = 0x04, + CTRL2_BOOT_8B = 0x40, +}; + +enum lis3lv02d_ctrl3 { + CTRL3_CFS0 = 0x01, + CTRL3_CFS1 = 0x02, + CTRL3_FDS = 0x10, + CTRL3_HPFF = 0x20, + CTRL3_HPDD = 0x40, + CTRL3_ECK = 0x80, +}; + +enum lis3lv02d_status_reg { + STATUS_XDA = 0x01, + STATUS_YDA = 0x02, + STATUS_ZDA = 0x04, + STATUS_XYZDA = 0x08, + STATUS_XOR = 0x10, + STATUS_YOR = 0x20, + STATUS_ZOR = 0x40, + STATUS_XYZOR = 0x80, +}; + +enum lis3lv02d_ff_wu_cfg { + FF_WU_CFG_XLIE = 0x01, + FF_WU_CFG_XHIE = 0x02, + FF_WU_CFG_YLIE = 0x04, + FF_WU_CFG_YHIE = 0x08, + FF_WU_CFG_ZLIE = 0x10, + FF_WU_CFG_ZHIE = 0x20, + FF_WU_CFG_LIR = 0x40, + FF_WU_CFG_AOI = 0x80, +}; + +enum lis3lv02d_ff_wu_src { + FF_WU_SRC_XL = 0x01, + FF_WU_SRC_XH = 0x02, + FF_WU_SRC_YL = 0x04, + FF_WU_SRC_YH = 0x08, + FF_WU_SRC_ZL = 0x10, + FF_WU_SRC_ZH = 0x20, + FF_WU_SRC_IA = 0x40, +}; + +enum lis3lv02d_dd_cfg { + DD_CFG_XLIE = 0x01, + DD_CFG_XHIE = 0x02, + DD_CFG_YLIE = 0x04, + DD_CFG_YHIE = 0x08, + DD_CFG_ZLIE = 0x10, + DD_CFG_ZHIE = 0x20, + DD_CFG_LIR = 0x40, + DD_CFG_IEND = 0x80, +}; + +enum lis3lv02d_dd_src { + DD_SRC_XL = 0x01, + DD_SRC_XH = 0x02, + DD_SRC_YL = 0x04, + DD_SRC_YH = 0x08, + DD_SRC_ZL = 0x10, + DD_SRC_ZH = 0x20, + DD_SRC_IA = 0x40, +}; + +enum lis3lv02d_click_src_8b { + CLICK_SINGLE_X = 0x01, + CLICK_DOUBLE_X = 0x02, + CLICK_SINGLE_Y = 0x04, + CLICK_DOUBLE_Y = 0x08, + CLICK_SINGLE_Z = 0x10, + CLICK_DOUBLE_Z = 0x20, + CLICK_IA = 0x40, +}; + +enum lis3lv02d_reg_state { + LIS3_REG_OFF = 0x00, + LIS3_REG_ON = 0x01, +}; + +union axis_conversion { + struct { + int x, y, z; + }; + int as_array[3]; + +}; + +struct lis3lv02d { + void *bus_priv; /* used by the bus layer only */ + struct device *pm_dev; /* for pm_runtime purposes */ + int (*init) (struct lis3lv02d *lis3); + int (*write) (struct lis3lv02d *lis3, int reg, u8 val); + int (*read) (struct lis3lv02d *lis3, int reg, u8 *ret); + int (*blkread) (struct lis3lv02d *lis3, int reg, int len, u8 *ret); + int (*reg_ctrl) (struct lis3lv02d *lis3, bool state); + + int *odrs; /* Supported output data rates */ + u8 *regs; /* Regs to store / restore */ + int regs_size; + u8 *reg_cache; + bool regs_stored; + bool 
init_required; + u8 odr_mask; /* ODR bit mask */ + u8 whoami; /* indicates measurement precision */ + s16 (*read_data) (struct lis3lv02d *lis3, int reg); + int mdps_max_val; + int pwron_delay; + int scale; /* + * relationship between 1 LBS and mG + * (1/1000th of earth gravity) + */ + + struct input_dev *idev; /* input device */ + struct platform_device *pdev; /* platform device */ + struct regulator_bulk_data regulators[2]; + atomic_t count; /* interrupt count after last read */ + union axis_conversion ac; /* hw -> logical axis */ + int mapped_btns[3]; + + u32 irq; /* IRQ number */ + struct fasync_struct *async_queue; /* queue for the misc device */ + wait_queue_head_t misc_wait; /* Wait queue for the misc device */ + unsigned long misc_opened; /* bit0: whether the device is open */ + struct miscdevice miscdev; + + int data_ready_count[2]; + atomic_t wake_thread; + unsigned char irq_cfg; + unsigned int shift_adj; + + struct lis3lv02d_platform_data *pdata; /* for passing board config */ + struct mutex mutex; /* Serialize poll and selftest */ + +#ifdef CONFIG_OF + struct device_node *of_node; +#endif +}; + +int lis3lv02d_init_device(struct lis3lv02d *lis3); +int lis3lv02d_joystick_enable(struct lis3lv02d *lis3); +void lis3lv02d_joystick_disable(struct lis3lv02d *lis3); +void lis3lv02d_poweroff(struct lis3lv02d *lis3); +int lis3lv02d_poweron(struct lis3lv02d *lis3); +int lis3lv02d_remove_fs(struct lis3lv02d *lis3); +int lis3lv02d_init_dt(struct lis3lv02d *lis3); + +extern struct lis3lv02d lis3_dev; diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c new file mode 100644 index 000000000..52555d2e8 --- /dev/null +++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * drivers/hwmon/lis3lv02d_i2c.c + * + * Implements I2C interface for lis3lv02d (STMicroelectronics) accelerometer. + * Driver is based on corresponding SPI driver written by Daniel Mack + * (lis3lv02d_spi.c (C) 2009 Daniel Mack <daniel@caiaq.de> ). + * + * Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies). + * + * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/pm_runtime.h> +#include <linux/delay.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/of_device.h> + +#include "lis3lv02d.h" + +#define DRV_NAME "lis3lv02d_i2c" + +static const char reg_vdd[] = "Vdd"; +static const char reg_vdd_io[] = "Vdd_IO"; + +static int lis3_reg_ctrl(struct lis3lv02d *lis3, bool state) +{ + int ret; + if (state == LIS3_REG_OFF) { + ret = regulator_bulk_disable(ARRAY_SIZE(lis3->regulators), + lis3->regulators); + } else { + ret = regulator_bulk_enable(ARRAY_SIZE(lis3->regulators), + lis3->regulators); + /* Chip needs time to wakeup. 
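+		 * The 10-20 ms usleep_range() below is an empirical safety
+		 * margin after regulator_bulk_enable() returns.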
Not mentioned in datasheet */
+		usleep_range(10000, 20000);
+	}
+	return ret;
+}
+
+static inline s32 lis3_i2c_write(struct lis3lv02d *lis3, int reg, u8 value)
+{
+	struct i2c_client *c = lis3->bus_priv;
+	return i2c_smbus_write_byte_data(c, reg, value);
+}
+
+static inline s32 lis3_i2c_read(struct lis3lv02d *lis3, int reg, u8 *v)
+{
+	struct i2c_client *c = lis3->bus_priv;
+	*v = i2c_smbus_read_byte_data(c, reg);
+	return 0;
+}
+
+static inline s32 lis3_i2c_blockread(struct lis3lv02d *lis3, int reg, int len,
+				     u8 *v)
+{
+	struct i2c_client *c = lis3->bus_priv;
+	reg |= (1 << 7); /* 7th bit enables address auto incrementation */
+	return i2c_smbus_read_i2c_block_data(c, reg, len, v);
+}
+
+static int lis3_i2c_init(struct lis3lv02d *lis3)
+{
+	u8 reg;
+	int ret;
+
+	lis3_reg_ctrl(lis3, LIS3_REG_ON);
+
+	lis3->read(lis3, WHO_AM_I, &reg);
+	if (reg != lis3->whoami)
+		printk(KERN_ERR "lis3: power on failure\n");
+
+	/* power up the device */
+	ret = lis3->read(lis3, CTRL_REG1, &reg);
+	if (ret < 0)
+		return ret;
+
+	if (lis3->whoami == WAI_3DLH)
+		reg |= CTRL1_PM0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
+	else
+		reg |= CTRL1_PD0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
+
+	return lis3->write(lis3, CTRL_REG1, reg);
+}
+
+/* Default axis mapping but it can be overwritten by platform data */
+static union axis_conversion lis3lv02d_axis_map =
+	{ .as_array = { LIS3_DEV_X, LIS3_DEV_Y, LIS3_DEV_Z } };
+
+#ifdef CONFIG_OF
+static const struct of_device_id lis3lv02d_i2c_dt_ids[] = {
+	{ .compatible = "st,lis3lv02d" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, lis3lv02d_i2c_dt_ids);
+#endif
+
+static int lis3lv02d_i2c_probe(struct i2c_client *client,
+			       const struct i2c_device_id *id)
+{
+	int ret = 0;
+	struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
+
+#ifdef CONFIG_OF
+	if (of_match_device(lis3lv02d_i2c_dt_ids, &client->dev)) {
+		lis3_dev.of_node = client->dev.of_node;
+		ret = lis3lv02d_init_dt(&lis3_dev);
+		if (ret)
+			return ret;
+		pdata = lis3_dev.pdata;
+	}
+#endif
+
+	if (pdata) {
+		if ((pdata->driver_features & LIS3_USE_BLOCK_READ) &&
+		    (i2c_check_functionality(client->adapter,
+					     I2C_FUNC_SMBUS_I2C_BLOCK)))
+			lis3_dev.blkread = lis3_i2c_blockread;
+
+		if (pdata->axis_x)
+			lis3lv02d_axis_map.x = pdata->axis_x;
+
+		if (pdata->axis_y)
+			lis3lv02d_axis_map.y = pdata->axis_y;
+
+		if (pdata->axis_z)
+			lis3lv02d_axis_map.z = pdata->axis_z;
+
+		if (pdata->setup_resources)
+			ret = pdata->setup_resources();
+
+		if (ret)
+			goto fail;
+	}
+
+	lis3_dev.regulators[0].supply = reg_vdd;
+	lis3_dev.regulators[1].supply = reg_vdd_io;
+	ret = regulator_bulk_get(&client->dev,
+				 ARRAY_SIZE(lis3_dev.regulators),
+				 lis3_dev.regulators);
+	if (ret < 0)
+		goto fail;
+
+	lis3_dev.pdata	  = pdata;
+	lis3_dev.bus_priv = client;
+	lis3_dev.init	  = lis3_i2c_init;
+	lis3_dev.read	  = lis3_i2c_read;
+	lis3_dev.write	  = lis3_i2c_write;
+	lis3_dev.irq	  = client->irq;
+	lis3_dev.ac	  = lis3lv02d_axis_map;
+	lis3_dev.pm_dev	  = &client->dev;
+
+	i2c_set_clientdata(client, &lis3_dev);
+
+	/* Provide power over the init call */
+	lis3_reg_ctrl(&lis3_dev, LIS3_REG_ON);
+
+	ret = lis3lv02d_init_device(&lis3_dev);
+
+	lis3_reg_ctrl(&lis3_dev, LIS3_REG_OFF);
+
+	if (ret)
+		goto fail2;
+	return 0;
+
+fail2:
+	regulator_bulk_free(ARRAY_SIZE(lis3_dev.regulators),
+			    lis3_dev.regulators);
+fail:
+	if (pdata && pdata->release_resources)
+		pdata->release_resources();
+	return ret;
+}
+
+static int lis3lv02d_i2c_remove(struct i2c_client *client)
+{
+	struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+	struct lis3lv02d_platform_data *pdata =
client->dev.platform_data; + + if (pdata && pdata->release_resources) + pdata->release_resources(); + + lis3lv02d_joystick_disable(lis3); + lis3lv02d_remove_fs(&lis3_dev); + + regulator_bulk_free(ARRAY_SIZE(lis3->regulators), + lis3_dev.regulators); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int lis3lv02d_i2c_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lis3lv02d *lis3 = i2c_get_clientdata(client); + + if (!lis3->pdata || !lis3->pdata->wakeup_flags) + lis3lv02d_poweroff(lis3); + return 0; +} + +static int lis3lv02d_i2c_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lis3lv02d *lis3 = i2c_get_clientdata(client); + + /* + * pm_runtime documentation says that devices should always + * be powered on at resume. Pm_runtime turns them off after system + * wide resume is complete. + */ + if (!lis3->pdata || !lis3->pdata->wakeup_flags || + pm_runtime_suspended(dev)) + lis3lv02d_poweron(lis3); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static int lis3_i2c_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lis3lv02d *lis3 = i2c_get_clientdata(client); + + lis3lv02d_poweroff(lis3); + return 0; +} + +static int lis3_i2c_runtime_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lis3lv02d *lis3 = i2c_get_clientdata(client); + + lis3lv02d_poweron(lis3); + return 0; +} +#endif /* CONFIG_PM */ + +static const struct i2c_device_id lis3lv02d_id[] = { + {"lis3lv02d", LIS3LV02D}, + {"lis331dlh", LIS331DLH}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, lis3lv02d_id); + +static const struct dev_pm_ops lis3_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(lis3lv02d_i2c_suspend, + lis3lv02d_i2c_resume) + SET_RUNTIME_PM_OPS(lis3_i2c_runtime_suspend, + lis3_i2c_runtime_resume, + NULL) +}; + +static struct i2c_driver lis3lv02d_i2c_driver = { + .driver = { + .name = DRV_NAME, + .pm = &lis3_pm_ops, + .of_match_table = of_match_ptr(lis3lv02d_i2c_dt_ids), + }, + .probe = lis3lv02d_i2c_probe, + .remove = lis3lv02d_i2c_remove, + .id_table = lis3lv02d_id, +}; + +module_i2c_driver(lis3lv02d_i2c_driver); + +MODULE_AUTHOR("Nokia Corporation"); +MODULE_DESCRIPTION("lis3lv02d I2C interface"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c new file mode 100644 index 000000000..f664ed123 --- /dev/null +++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * lis3lv02d_spi - SPI glue layer for lis3lv02d + * + * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/input.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/spi/spi.h> +#include <linux/pm.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/of_device.h> + +#include "lis3lv02d.h" + +#define DRV_NAME "lis3lv02d_spi" +#define LIS3_SPI_READ 0x80 + +static int lis3_spi_read(struct lis3lv02d *lis3, int reg, u8 *v) +{ + struct spi_device *spi = lis3->bus_priv; + int ret = spi_w8r8(spi, reg | LIS3_SPI_READ); + if (ret < 0) + return -EINVAL; + + *v = (u8) ret; + return 0; +} + +static int lis3_spi_write(struct lis3lv02d *lis3, int reg, u8 val) +{ + u8 tmp[2] = { reg, val }; + struct spi_device *spi = lis3->bus_priv; + return spi_write(spi, tmp, sizeof(tmp)); +} + +static int lis3_spi_init(struct lis3lv02d *lis3) +{ + u8 reg; + int 
ret;
+
+	/* power up the device */
+	ret = lis3->read(lis3, CTRL_REG1, &reg);
+	if (ret < 0)
+		return ret;
+
+	reg |= CTRL1_PD0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
+	return lis3->write(lis3, CTRL_REG1, reg);
+}
+
+static union axis_conversion lis3lv02d_axis_normal =
+	{ .as_array = { 1, 2, 3 } };
+
+#ifdef CONFIG_OF
+static const struct of_device_id lis302dl_spi_dt_ids[] = {
+	{ .compatible = "st,lis302dl-spi" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, lis302dl_spi_dt_ids);
+#endif
+
+static int lis302dl_spi_probe(struct spi_device *spi)
+{
+	int ret;
+
+	spi->bits_per_word = 8;
+	spi->mode = SPI_MODE_0;
+	ret = spi_setup(spi);
+	if (ret < 0)
+		return ret;
+
+	lis3_dev.bus_priv	= spi;
+	lis3_dev.init		= lis3_spi_init;
+	lis3_dev.read		= lis3_spi_read;
+	lis3_dev.write		= lis3_spi_write;
+	lis3_dev.irq		= spi->irq;
+	lis3_dev.ac		= lis3lv02d_axis_normal;
+	lis3_dev.pdata		= spi->dev.platform_data;
+
+#ifdef CONFIG_OF
+	if (of_match_device(lis302dl_spi_dt_ids, &spi->dev)) {
+		lis3_dev.of_node = spi->dev.of_node;
+		ret = lis3lv02d_init_dt(&lis3_dev);
+		if (ret)
+			return ret;
+	}
+#endif
+	spi_set_drvdata(spi, &lis3_dev);
+
+	return lis3lv02d_init_device(&lis3_dev);
+}
+
+static int lis302dl_spi_remove(struct spi_device *spi)
+{
+	struct lis3lv02d *lis3 = spi_get_drvdata(spi);
+	lis3lv02d_joystick_disable(lis3);
+	lis3lv02d_poweroff(lis3);
+
+	return lis3lv02d_remove_fs(&lis3_dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int lis3lv02d_spi_suspend(struct device *dev)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct lis3lv02d *lis3 = spi_get_drvdata(spi);
+
+	if (!lis3->pdata || !lis3->pdata->wakeup_flags)
+		lis3lv02d_poweroff(&lis3_dev);
+
+	return 0;
+}
+
+static int lis3lv02d_spi_resume(struct device *dev)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct lis3lv02d *lis3 = spi_get_drvdata(spi);
+
+	if (!lis3->pdata || !lis3->pdata->wakeup_flags)
+		lis3lv02d_poweron(lis3);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(lis3lv02d_spi_pm, lis3lv02d_spi_suspend,
+			 lis3lv02d_spi_resume);
+
+static struct spi_driver lis302dl_spi_driver = {
+	.driver	 = {
+		.name   = DRV_NAME,
+		.pm	= &lis3lv02d_spi_pm,
+		.of_match_table = of_match_ptr(lis302dl_spi_dt_ids),
+	},
+	.probe	= lis302dl_spi_probe,
+	.remove	= lis302dl_spi_remove,
+};
+
+module_spi_driver(lis302dl_spi_driver);
+
+MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_DESCRIPTION("lis3lv02d SPI glue layer");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
new file mode 100644
index 000000000..4405fb2bc
--- /dev/null
+++ b/drivers/misc/lkdtm/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LKDTM)		+= lkdtm.o
+
+lkdtm-$(CONFIG_LKDTM)		+= core.o
+lkdtm-$(CONFIG_LKDTM)		+= bugs.o
+lkdtm-$(CONFIG_LKDTM)		+= heap.o
+lkdtm-$(CONFIG_LKDTM)		+= perms.o
+lkdtm-$(CONFIG_LKDTM)		+= refcount.o
+lkdtm-$(CONFIG_LKDTM)		+= rodata_objcopy.o
+lkdtm-$(CONFIG_LKDTM)		+= usercopy.o
+lkdtm-$(CONFIG_LKDTM)		+= stackleak.o
+lkdtm-$(CONFIG_LKDTM)		+= cfi.o
+
+KASAN_SANITIZE_stackleak.o	:= n
+KCOV_INSTRUMENT_rodata.o	:= n
+
+OBJCOPYFLAGS :=
+OBJCOPYFLAGS_rodata_objcopy.o	:= \
+		--rename-section .noinstr.text=.rodata,alloc,readonly,load,contents
+targets += rodata.o rodata_objcopy.o
+$(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE
+	$(call if_changed,objcopy)
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
new file mode 100644
index 000000000..d39b8139b
--- /dev/null
+++ b/drivers/misc/lkdtm/bugs.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier:
GPL-2.0 +/* + * This is for all the tests related to logic bugs (e.g. bad dereferences, + * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and + * lockups) along with other things that don't fit well into existing LKDTM + * test source files. + */ +#include "lkdtm.h" +#include <linux/list.h> +#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/task_stack.h> +#include <linux/uaccess.h> +#include <linux/slab.h> + +#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML) +#include <asm/desc.h> +#endif + +struct lkdtm_list { + struct list_head node; +}; + +/* + * Make sure our attempts to over run the kernel stack doesn't trigger + * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we + * recurse past the end of THREAD_SIZE by default. + */ +#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0) +#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2) +#else +#define REC_STACK_SIZE (THREAD_SIZE / 8) +#endif +#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2) + +static int recur_count = REC_NUM_DEFAULT; + +static DEFINE_SPINLOCK(lock_me_up); + +/* + * Make sure compiler does not optimize this function or stack frame away: + * - function marked noinline + * - stack variables are marked volatile + * - stack variables are written (memset()) and read (pr_info()) + * - function has external effects (pr_info()) + * */ +static int noinline recursive_loop(int remaining) +{ + volatile char buf[REC_STACK_SIZE]; + + memset((void *)buf, remaining & 0xFF, sizeof(buf)); + pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)], + recur_count); + if (!remaining) + return 0; + else + return recursive_loop(remaining - 1); +} + +/* If the depth is negative, use the default, otherwise keep parameter. */ +void __init lkdtm_bugs_init(int *recur_param) +{ + if (*recur_param < 0) + *recur_param = recur_count; + else + recur_count = *recur_param; +} + +void lkdtm_PANIC(void) +{ + panic("dumptest"); +} + +void lkdtm_BUG(void) +{ + BUG(); +} + +static int warn_counter; + +void lkdtm_WARNING(void) +{ + WARN_ON(++warn_counter); +} + +void lkdtm_WARNING_MESSAGE(void) +{ + WARN(1, "Warning message trigger count: %d\n", ++warn_counter); +} + +void lkdtm_EXCEPTION(void) +{ + *((volatile int *) 0) = 0; +} + +void lkdtm_LOOP(void) +{ + for (;;) + ; +} + +void lkdtm_EXHAUST_STACK(void) +{ + pr_info("Calling function with %lu frame size to depth %d ...\n", + REC_STACK_SIZE, recur_count); + recursive_loop(recur_count); + pr_info("FAIL: survived without exhausting stack?!\n"); +} + +static noinline void __lkdtm_CORRUPT_STACK(void *stack) +{ + memset(stack, '\xff', 64); +} + +/* This should trip the stack canary, not corrupt the return address. */ +noinline void lkdtm_CORRUPT_STACK(void) +{ + /* Use default char array length that triggers stack protection. 
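+	 * With CONFIG_STACKPROTECTOR the compiler places a canary between
+	 * this buffer and the saved return address, so the 64-byte memset()
+	 * in the helper above should be caught by the canary check when
+	 * this function returns, not by a corrupted caller.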
*/ + char data[8] __aligned(sizeof(void *)); + + pr_info("Corrupting stack containing char array ...\n"); + __lkdtm_CORRUPT_STACK((void *)&data); +} + +/* Same as above but will only get a canary with -fstack-protector-strong */ +noinline void lkdtm_CORRUPT_STACK_STRONG(void) +{ + union { + unsigned short shorts[4]; + unsigned long *ptr; + } data __aligned(sizeof(void *)); + + pr_info("Corrupting stack containing union ...\n"); + __lkdtm_CORRUPT_STACK((void *)&data); +} + +void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void) +{ + static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5}; + u32 *p; + u32 val = 0x12345678; + + p = (u32 *)(data + 1); + if (*p == 0) + val = 0x87654321; + *p = val; + + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) + pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n"); +} + +void lkdtm_SOFTLOCKUP(void) +{ + preempt_disable(); + for (;;) + cpu_relax(); +} + +void lkdtm_HARDLOCKUP(void) +{ + local_irq_disable(); + for (;;) + cpu_relax(); +} + +void lkdtm_SPINLOCKUP(void) +{ + /* Must be called twice to trigger. */ + spin_lock(&lock_me_up); + /* Let sparse know we intended to exit holding the lock. */ + __release(&lock_me_up); +} + +void lkdtm_HUNG_TASK(void) +{ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule(); +} + +volatile unsigned int huge = INT_MAX - 2; +volatile unsigned int ignored; + +void lkdtm_OVERFLOW_SIGNED(void) +{ + int value; + + value = huge; + pr_info("Normal signed addition ...\n"); + value += 1; + ignored = value; + + pr_info("Overflowing signed addition ...\n"); + value += 4; + ignored = value; +} + + +void lkdtm_OVERFLOW_UNSIGNED(void) +{ + unsigned int value; + + value = huge; + pr_info("Normal unsigned addition ...\n"); + value += 1; + ignored = value; + + pr_info("Overflowing unsigned addition ...\n"); + value += 4; + ignored = value; +} + +/* Intentionally using old-style flex array definition of 1 byte. */ +struct array_bounds_flex_array { + int one; + int two; + char data[1]; +}; + +struct array_bounds { + int one; + int two; + char data[8]; + int three; +}; + +void lkdtm_ARRAY_BOUNDS(void) +{ + struct array_bounds_flex_array *not_checked; + struct array_bounds *checked; + volatile int i; + + not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL); + checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL); + if (!not_checked || !checked) { + kfree(not_checked); + kfree(checked); + return; + } + + pr_info("Array access within bounds ...\n"); + /* For both, touch all bytes in the actual member size. */ + for (i = 0; i < sizeof(checked->data); i++) + checked->data[i] = 'A'; + /* + * For the uninstrumented flex array member, also touch 1 byte + * beyond to verify it is correctly uninstrumented. 
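+	 * An old-style one-element trailing array is treated as a flexible
+	 * array, so the compiler's array-bounds instrumentation (e.g.
+	 * CONFIG_UBSAN_BOUNDS) is expected to stay quiet here and trap only
+	 * on the fixed-size member below.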
+	 */
+	for (i = 0; i < sizeof(not_checked->data) + 1; i++)
+		not_checked->data[i] = 'A';
+
+	pr_info("Array access beyond bounds ...\n");
+	for (i = 0; i < sizeof(checked->data) + 1; i++)
+		checked->data[i] = 'B';
+
+	kfree(not_checked);
+	kfree(checked);
+	pr_err("FAIL: survived array bounds overflow!\n");
+}
+
+void lkdtm_CORRUPT_LIST_ADD(void)
+{
+	/*
+	 * Initially, an empty list via LIST_HEAD:
+	 *	test_head.next = &test_head
+	 *	test_head.prev = &test_head
+	 */
+	LIST_HEAD(test_head);
+	struct lkdtm_list good, bad;
+	void *target[2] = { };
+	void *redirection = &target;
+
+	pr_info("attempting good list addition\n");
+
+	/*
+	 * Adding to the list performs these actions:
+	 *	test_head.next->prev = &good.node
+	 *	good.node.next = test_head.next
+	 *	good.node.prev = test_head
+	 *	test_head.next = good.node
+	 */
+	list_add(&good.node, &test_head);
+
+	pr_info("attempting corrupted list addition\n");
+	/*
+	 * In simulating this "write what where" primitive, the "what" is
+	 * the address of &bad.node, and the "where" is the address held
+	 * by "redirection".
+	 */
+	test_head.next = redirection;
+	list_add(&bad.node, &test_head);
+
+	if (target[0] == NULL && target[1] == NULL)
+		pr_err("Overwrite did not happen, but no BUG?!\n");
+	else
+		pr_err("list_add() corruption not detected!\n");
+}
+
+void lkdtm_CORRUPT_LIST_DEL(void)
+{
+	LIST_HEAD(test_head);
+	struct lkdtm_list item;
+	void *target[2] = { };
+	void *redirection = &target;
+
+	list_add(&item.node, &test_head);
+
+	pr_info("attempting good list removal\n");
+	list_del(&item.node);
+
+	pr_info("attempting corrupted list removal\n");
+	list_add(&item.node, &test_head);
+
+	/* As with the list_add() test above, this corrupts "next". */
+	item.node.next = redirection;
+	list_del(&item.node);
+
+	if (target[0] == NULL && target[1] == NULL)
+		pr_err("Overwrite did not happen, but no BUG?!\n");
+	else
+		pr_err("list_del() corruption not detected!\n");
+}
+
+/* Test that VMAP_STACK is actually allocating with a leading guard page */
+void lkdtm_STACK_GUARD_PAGE_LEADING(void)
+{
+	const unsigned char *stack = task_stack_page(current);
+	const unsigned char *ptr = stack - 1;
+	volatile unsigned char byte;
+
+	pr_info("attempting bad read from page below current stack\n");
+
+	byte = *ptr;
+
+	pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
+}
+
+/* Test that VMAP_STACK is actually allocating with a trailing guard page */
+void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
+{
+	const unsigned char *stack = task_stack_page(current);
+	const unsigned char *ptr = stack + THREAD_SIZE;
+	volatile unsigned char byte;
+
+	pr_info("attempting bad read from page above current stack\n");
+
+	byte = *ptr;
+
+	pr_err("FAIL: accessed page after stack!
(byte: %x)\n", byte); +} + +void lkdtm_UNSET_SMEP(void) +{ +#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML) +#define MOV_CR4_DEPTH 64 + void (*direct_write_cr4)(unsigned long val); + unsigned char *insn; + unsigned long cr4; + int i; + + cr4 = native_read_cr4(); + + if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) { + pr_err("FAIL: SMEP not in use\n"); + return; + } + cr4 &= ~(X86_CR4_SMEP); + + pr_info("trying to clear SMEP normally\n"); + native_write_cr4(cr4); + if (cr4 == native_read_cr4()) { + pr_err("FAIL: pinning SMEP failed!\n"); + cr4 |= X86_CR4_SMEP; + pr_info("restoring SMEP\n"); + native_write_cr4(cr4); + return; + } + pr_info("ok: SMEP did not get cleared\n"); + + /* + * To test the post-write pinning verification we need to call + * directly into the middle of native_write_cr4() where the + * cr4 write happens, skipping any pinning. This searches for + * the cr4 writing instruction. + */ + insn = (unsigned char *)native_write_cr4; + for (i = 0; i < MOV_CR4_DEPTH; i++) { + /* mov %rdi, %cr4 */ + if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7) + break; + /* mov %rdi,%rax; mov %rax, %cr4 */ + if (insn[i] == 0x48 && insn[i+1] == 0x89 && + insn[i+2] == 0xf8 && insn[i+3] == 0x0f && + insn[i+4] == 0x22 && insn[i+5] == 0xe0) + break; + } + if (i >= MOV_CR4_DEPTH) { + pr_info("ok: cannot locate cr4 writing call gadget\n"); + return; + } + direct_write_cr4 = (void *)(insn + i); + + pr_info("trying to clear SMEP with call gadget\n"); + direct_write_cr4(cr4); + if (native_read_cr4() & X86_CR4_SMEP) { + pr_info("ok: SMEP removal was reverted\n"); + } else { + pr_err("FAIL: cleared SMEP not detected!\n"); + cr4 |= X86_CR4_SMEP; + pr_info("restoring SMEP\n"); + native_write_cr4(cr4); + } +#else + pr_err("XFAIL: this test is x86_64-only\n"); +#endif +} + +void lkdtm_DOUBLE_FAULT(void) +{ +#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML) + /* + * Trigger #DF by setting the stack limit to zero. This clobbers + * a GDT TLS slot, which is okay because the current task will die + * anyway due to the double fault. + */ + struct desc_struct d = { + .type = 3, /* expand-up, writable, accessed data */ + .p = 1, /* present */ + .d = 1, /* 32-bit */ + .g = 0, /* limit in bytes */ + .s = 1, /* not system */ + }; + + local_irq_disable(); + write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()), + GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S); + + /* + * Put our zero-limit segment in SS and then trigger a fault. The + * 4-byte access to (%esp) will fault with #SS, and the attempt to + * deliver the fault will recursively cause #SS and result in #DF. + * This whole process happens while NMIs and MCEs are blocked by the + * MOV SS window. This is nice because an NMI with an invalid SS + * would also double-fault, resulting in the NMI or MCE being lost. 
+ */ + asm volatile ("movw %0, %%ss; addl $0, (%%esp)" :: + "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3))); + + pr_err("FAIL: tried to double fault but didn't die\n"); +#else + pr_err("XFAIL: this test is ia32-only\n"); +#endif +} + +#ifdef CONFIG_ARM64 +static noinline void change_pac_parameters(void) +{ + if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) { + /* Reset the keys of current task */ + ptrauth_thread_init_kernel(current); + ptrauth_thread_switch_kernel(current); + } +} +#endif + +noinline void lkdtm_CORRUPT_PAC(void) +{ +#ifdef CONFIG_ARM64 +#define CORRUPT_PAC_ITERATE 10 + int i; + + if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) + pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH\n"); + + if (!system_supports_address_auth()) { + pr_err("FAIL: CPU lacks pointer authentication feature\n"); + return; + } + + pr_info("changing PAC parameters to force function return failure...\n"); + /* + * PAC is a hash value computed from input keys, return address and + * stack pointer. As pac has fewer bits so there is a chance of + * collision, so iterate few times to reduce the collision probability. + */ + for (i = 0; i < CORRUPT_PAC_ITERATE; i++) + change_pac_parameters(); + + pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n"); +#else + pr_err("XFAIL: this test is arm64-only\n"); +#endif +} diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c new file mode 100644 index 000000000..e73ebdbfa --- /dev/null +++ b/drivers/misc/lkdtm/cfi.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This is for all the tests relating directly to Control Flow Integrity. + */ +#include "lkdtm.h" + +static int called_count; + +/* Function taking one argument, without a return value. */ +static noinline void lkdtm_increment_void(int *counter) +{ + (*counter)++; +} + +/* Function taking one argument, returning int. */ +static noinline int lkdtm_increment_int(int *counter) +{ + (*counter)++; + + return *counter; +} +/* + * This tries to call an indirect function with a mismatched prototype. + */ +void lkdtm_CFI_FORWARD_PROTO(void) +{ + /* + * Matches lkdtm_increment_void()'s prototype, but not + * lkdtm_increment_int()'s prototype. + */ + void (*func)(int *); + + pr_info("Calling matched prototype ...\n"); + func = lkdtm_increment_void; + func(&called_count); + + pr_info("Calling mismatched prototype ...\n"); + func = (void *)lkdtm_increment_int; + func(&called_count); + + pr_info("Fail: survived mismatched prototype function call!\n"); +} diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c new file mode 100644 index 000000000..32b3d7736 --- /dev/null +++ b/drivers/misc/lkdtm/core.c @@ -0,0 +1,489 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Linux Kernel Dump Test Module for testing kernel crashes conditions: + * induces system failures at predefined crashpoints and under predefined + * operational conditions in order to evaluate the reliability of kernel + * sanity checking and crash dumps obtained using different dumping + * solutions. 
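+ *
+ * Typical use once the module is loaded is to write a crash type into one
+ * of the debugfs files created under /sys/kernel/debug/provoke-crash/
+ * (one file per crash point defined below), e.g.:
+ *
+ *	# echo EXCEPTION > /sys/kernel/debug/provoke-crash/DIRECT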
+ * + * Copyright (C) IBM Corporation, 2006 + * + * Author: Ankita Garg <ankita@in.ibm.com> + * + * It is adapted from the Linux Kernel Dump Test Tool by + * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net> + * + * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net> + * + * See Documentation/fault-injection/provoke-crashes.rst for instructions + */ +#include "lkdtm.h" +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/buffer_head.h> +#include <linux/kprobes.h> +#include <linux/list.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/debugfs.h> + +#define DEFAULT_COUNT 10 + +static int lkdtm_debugfs_open(struct inode *inode, struct file *file); +static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf, + size_t count, loff_t *off); +static ssize_t direct_entry(struct file *f, const char __user *user_buf, + size_t count, loff_t *off); + +#ifdef CONFIG_KPROBES +static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs); +static ssize_t lkdtm_debugfs_entry(struct file *f, + const char __user *user_buf, + size_t count, loff_t *off); +# define CRASHPOINT_KPROBE(_symbol) \ + .kprobe = { \ + .symbol_name = (_symbol), \ + .pre_handler = lkdtm_kprobe_handler, \ + }, +# define CRASHPOINT_WRITE(_symbol) \ + (_symbol) ? lkdtm_debugfs_entry : direct_entry +#else +# define CRASHPOINT_KPROBE(_symbol) +# define CRASHPOINT_WRITE(_symbol) direct_entry +#endif + +/* Crash points */ +struct crashpoint { + const char *name; + const struct file_operations fops; + struct kprobe kprobe; +}; + +#define CRASHPOINT(_name, _symbol) \ + { \ + .name = _name, \ + .fops = { \ + .read = lkdtm_debugfs_read, \ + .llseek = generic_file_llseek, \ + .open = lkdtm_debugfs_open, \ + .write = CRASHPOINT_WRITE(_symbol) \ + }, \ + CRASHPOINT_KPROBE(_symbol) \ + } + +/* Define the possible places where we can trigger a crash point. */ +static struct crashpoint crashpoints[] = { + CRASHPOINT("DIRECT", NULL), +#ifdef CONFIG_KPROBES + CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ"), + CRASHPOINT("INT_HW_IRQ_EN", "handle_irq_event"), + CRASHPOINT("INT_TASKLET_ENTRY", "tasklet_action"), + CRASHPOINT("FS_DEVRW", "ll_rw_block"), + CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"), + CRASHPOINT("TIMERADD", "hrtimer_start"), + CRASHPOINT("SCSI_QUEUE_RQ", "scsi_queue_rq"), + CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"), +#endif +}; + + +/* Crash types. */ +struct crashtype { + const char *name; + void (*func)(void); +}; + +#define CRASHTYPE(_name) \ + { \ + .name = __stringify(_name), \ + .func = lkdtm_ ## _name, \ + } + +/* Define the possible types of crashes that can be triggered. 
*/ +static const struct crashtype crashtypes[] = { + CRASHTYPE(PANIC), + CRASHTYPE(BUG), + CRASHTYPE(WARNING), + CRASHTYPE(WARNING_MESSAGE), + CRASHTYPE(EXCEPTION), + CRASHTYPE(LOOP), + CRASHTYPE(EXHAUST_STACK), + CRASHTYPE(CORRUPT_STACK), + CRASHTYPE(CORRUPT_STACK_STRONG), + CRASHTYPE(CORRUPT_LIST_ADD), + CRASHTYPE(CORRUPT_LIST_DEL), + CRASHTYPE(STACK_GUARD_PAGE_LEADING), + CRASHTYPE(STACK_GUARD_PAGE_TRAILING), + CRASHTYPE(UNSET_SMEP), + CRASHTYPE(CORRUPT_PAC), + CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE), + CRASHTYPE(OVERWRITE_ALLOCATION), + CRASHTYPE(WRITE_AFTER_FREE), + CRASHTYPE(READ_AFTER_FREE), + CRASHTYPE(WRITE_BUDDY_AFTER_FREE), + CRASHTYPE(READ_BUDDY_AFTER_FREE), + CRASHTYPE(SLAB_FREE_DOUBLE), + CRASHTYPE(SLAB_FREE_CROSS), + CRASHTYPE(SLAB_FREE_PAGE), + CRASHTYPE(SOFTLOCKUP), + CRASHTYPE(HARDLOCKUP), + CRASHTYPE(SPINLOCKUP), + CRASHTYPE(HUNG_TASK), + CRASHTYPE(OVERFLOW_SIGNED), + CRASHTYPE(OVERFLOW_UNSIGNED), + CRASHTYPE(ARRAY_BOUNDS), + CRASHTYPE(EXEC_DATA), + CRASHTYPE(EXEC_STACK), + CRASHTYPE(EXEC_KMALLOC), + CRASHTYPE(EXEC_VMALLOC), + CRASHTYPE(EXEC_RODATA), + CRASHTYPE(EXEC_USERSPACE), + CRASHTYPE(EXEC_NULL), + CRASHTYPE(ACCESS_USERSPACE), + CRASHTYPE(ACCESS_NULL), + CRASHTYPE(WRITE_RO), + CRASHTYPE(WRITE_RO_AFTER_INIT), + CRASHTYPE(WRITE_KERN), + CRASHTYPE(REFCOUNT_INC_OVERFLOW), + CRASHTYPE(REFCOUNT_ADD_OVERFLOW), + CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW), + CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW), + CRASHTYPE(REFCOUNT_DEC_ZERO), + CRASHTYPE(REFCOUNT_DEC_NEGATIVE), + CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE), + CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE), + CRASHTYPE(REFCOUNT_INC_ZERO), + CRASHTYPE(REFCOUNT_ADD_ZERO), + CRASHTYPE(REFCOUNT_INC_SATURATED), + CRASHTYPE(REFCOUNT_DEC_SATURATED), + CRASHTYPE(REFCOUNT_ADD_SATURATED), + CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED), + CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED), + CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED), + CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED), + CRASHTYPE(REFCOUNT_TIMING), + CRASHTYPE(ATOMIC_TIMING), + CRASHTYPE(USERCOPY_HEAP_SIZE_TO), + CRASHTYPE(USERCOPY_HEAP_SIZE_FROM), + CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO), + CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM), + CRASHTYPE(USERCOPY_STACK_FRAME_TO), + CRASHTYPE(USERCOPY_STACK_FRAME_FROM), + CRASHTYPE(USERCOPY_STACK_BEYOND), + CRASHTYPE(USERCOPY_KERNEL), + CRASHTYPE(STACKLEAK_ERASING), + CRASHTYPE(CFI_FORWARD_PROTO), + CRASHTYPE(DOUBLE_FAULT), +}; + + +/* Global kprobe entry and crashtype. 
*/
+static struct kprobe *lkdtm_kprobe;
+static struct crashpoint *lkdtm_crashpoint;
+static const struct crashtype *lkdtm_crashtype;
+
+/* Module parameters */
+static int recur_count = -1;
+module_param(recur_count, int, 0644);
+MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
+
+static char* cpoint_name;
+module_param(cpoint_name, charp, 0444);
+MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
+
+static char* cpoint_type;
+module_param(cpoint_type, charp, 0444);
+MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
+				"hitting the crash point");
+
+static int cpoint_count = DEFAULT_COUNT;
+module_param(cpoint_count, int, 0644);
+MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
+				"crash point is to be hit to trigger action");
+
+
+/* Return the matching crashtype or NULL if the name is invalid */
+static const struct crashtype *find_crashtype(const char *name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
+		if (!strcmp(name, crashtypes[i].name))
+			return &crashtypes[i];
+	}
+
+	return NULL;
+}
+
+/*
+ * This is forced noinline just so it distinctly shows up in the stackdump
+ * which makes validation of expected lkdtm crashes easier.
+ */
+static noinline void lkdtm_do_action(const struct crashtype *crashtype)
+{
+	if (WARN_ON(!crashtype || !crashtype->func))
+		return;
+	crashtype->func();
+}
+
+static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
+				 const struct crashtype *crashtype)
+{
+	int ret;
+
+	/* If this doesn't have a symbol, just call immediately. */
+	if (!crashpoint->kprobe.symbol_name) {
+		lkdtm_do_action(crashtype);
+		return 0;
+	}
+
+	if (lkdtm_kprobe != NULL)
+		unregister_kprobe(lkdtm_kprobe);
+
+	lkdtm_crashpoint = crashpoint;
+	lkdtm_crashtype = crashtype;
+	lkdtm_kprobe = &crashpoint->kprobe;
+	ret = register_kprobe(lkdtm_kprobe);
+	if (ret < 0) {
+		pr_info("Couldn't register kprobe %s\n",
+			crashpoint->kprobe.symbol_name);
+		lkdtm_kprobe = NULL;
+		lkdtm_crashpoint = NULL;
+		lkdtm_crashtype = NULL;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_KPROBES
+/* Global crash counter and spinlock. */
+static int crash_count = DEFAULT_COUNT;
+static DEFINE_SPINLOCK(crash_count_lock);
+
+/* Called by kprobe entry points. 
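+ * Decrements crash_count under crash_count_lock each time the probed
+ * symbol is hit; once the count reaches zero, the armed crashtype runs
+ * and the counter is re-armed from cpoint_count.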
*/ +static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs) +{ + unsigned long flags; + bool do_it = false; + + if (WARN_ON(!lkdtm_crashpoint || !lkdtm_crashtype)) + return 0; + + spin_lock_irqsave(&crash_count_lock, flags); + crash_count--; + pr_info("Crash point %s of type %s hit, trigger in %d rounds\n", + lkdtm_crashpoint->name, lkdtm_crashtype->name, crash_count); + + if (crash_count == 0) { + do_it = true; + crash_count = cpoint_count; + } + spin_unlock_irqrestore(&crash_count_lock, flags); + + if (do_it) + lkdtm_do_action(lkdtm_crashtype); + + return 0; +} + +static ssize_t lkdtm_debugfs_entry(struct file *f, + const char __user *user_buf, + size_t count, loff_t *off) +{ + struct crashpoint *crashpoint = file_inode(f)->i_private; + const struct crashtype *crashtype = NULL; + char *buf; + int err; + + if (count >= PAGE_SIZE) + return -EINVAL; + + buf = (char *)__get_free_page(GFP_KERNEL); + if (!buf) + return -ENOMEM; + if (copy_from_user(buf, user_buf, count)) { + free_page((unsigned long) buf); + return -EFAULT; + } + /* NULL-terminate and remove enter */ + buf[count] = '\0'; + strim(buf); + + crashtype = find_crashtype(buf); + free_page((unsigned long)buf); + + if (!crashtype) + return -EINVAL; + + err = lkdtm_register_cpoint(crashpoint, crashtype); + if (err < 0) + return err; + + *off += count; + + return count; +} +#endif + +/* Generic read callback that just prints out the available crash types */ +static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf, + size_t count, loff_t *off) +{ + char *buf; + int i, n, out; + + buf = (char *)__get_free_page(GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n"); + for (i = 0; i < ARRAY_SIZE(crashtypes); i++) { + n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n", + crashtypes[i].name); + } + buf[n] = '\0'; + + out = simple_read_from_buffer(user_buf, count, off, + buf, n); + free_page((unsigned long) buf); + + return out; +} + +static int lkdtm_debugfs_open(struct inode *inode, struct file *file) +{ + return 0; +} + +/* Special entry to just crash directly. 
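+ * For example (assuming debugfs is mounted at /sys/kernel/debug):
+ *	echo PANIC > /sys/kernel/debug/provoke-crash/DIRECT
+ * runs lkdtm_PANIC() in the context of the writing process.
+ *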
Available without KPROBEs */ +static ssize_t direct_entry(struct file *f, const char __user *user_buf, + size_t count, loff_t *off) +{ + const struct crashtype *crashtype; + char *buf; + + if (count >= PAGE_SIZE) + return -EINVAL; + if (count < 1) + return -EINVAL; + + buf = (char *)__get_free_page(GFP_KERNEL); + if (!buf) + return -ENOMEM; + if (copy_from_user(buf, user_buf, count)) { + free_page((unsigned long) buf); + return -EFAULT; + } + /* NULL-terminate and remove enter */ + buf[count] = '\0'; + strim(buf); + + crashtype = find_crashtype(buf); + free_page((unsigned long) buf); + if (!crashtype) + return -EINVAL; + + pr_info("Performing direct entry %s\n", crashtype->name); + lkdtm_do_action(crashtype); + *off += count; + + return count; +} + +static struct dentry *lkdtm_debugfs_root; + +static int __init lkdtm_module_init(void) +{ + struct crashpoint *crashpoint = NULL; + const struct crashtype *crashtype = NULL; + int ret; + int i; + + /* Neither or both of these need to be set */ + if ((cpoint_type || cpoint_name) && !(cpoint_type && cpoint_name)) { + pr_err("Need both cpoint_type and cpoint_name or neither\n"); + return -EINVAL; + } + + if (cpoint_type) { + crashtype = find_crashtype(cpoint_type); + if (!crashtype) { + pr_err("Unknown crashtype '%s'\n", cpoint_type); + return -EINVAL; + } + } + + if (cpoint_name) { + for (i = 0; i < ARRAY_SIZE(crashpoints); i++) { + if (!strcmp(cpoint_name, crashpoints[i].name)) + crashpoint = &crashpoints[i]; + } + + /* Refuse unknown crashpoints. */ + if (!crashpoint) { + pr_err("Invalid crashpoint %s\n", cpoint_name); + return -EINVAL; + } + } + +#ifdef CONFIG_KPROBES + /* Set crash count. */ + crash_count = cpoint_count; +#endif + + /* Handle test-specific initialization. */ + lkdtm_bugs_init(&recur_count); + lkdtm_perms_init(); + lkdtm_usercopy_init(); + lkdtm_heap_init(); + + /* Register debugfs interface */ + lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL); + + /* Install debugfs trigger files. */ + for (i = 0; i < ARRAY_SIZE(crashpoints); i++) { + struct crashpoint *cur = &crashpoints[i]; + + debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root, cur, + &cur->fops); + } + + /* Install crashpoint if one was selected. */ + if (crashpoint) { + ret = lkdtm_register_cpoint(crashpoint, crashtype); + if (ret < 0) { + pr_info("Invalid crashpoint %s\n", crashpoint->name); + goto out_err; + } + pr_info("Crash point %s of type %s registered\n", + crashpoint->name, cpoint_type); + } else { + pr_info("No crash points registered, enable through debugfs\n"); + } + + return 0; + +out_err: + debugfs_remove_recursive(lkdtm_debugfs_root); + return ret; +} + +static void __exit lkdtm_module_exit(void) +{ + debugfs_remove_recursive(lkdtm_debugfs_root); + + /* Handle test-specific clean-up. */ + lkdtm_heap_exit(); + lkdtm_usercopy_exit(); + + if (lkdtm_kprobe != NULL) + unregister_kprobe(lkdtm_kprobe); + + pr_info("Crash point unregistered\n"); +} + +module_init(lkdtm_module_init); +module_exit(lkdtm_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Kernel crash testing module"); diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c new file mode 100644 index 000000000..1323bc16f --- /dev/null +++ b/drivers/misc/lkdtm/heap.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This is for all the tests relating directly to heap memory, including + * page allocation and slab allocations. 
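+ *
+ * Note that most of these tests only report what they attempt; whether
+ * the bad access is actually caught depends on the kernel's slab/page
+ * debugging (e.g. poisoning or KASAN) being enabled.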
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+static struct kmem_cache *double_free_cache;
+static struct kmem_cache *a_cache;
+static struct kmem_cache *b_cache;
+
+/*
+ * This tries to stay within the next largest power-of-2 kmalloc cache
+ * to avoid actually overwriting anything important if it's not detected
+ * correctly.
+ */
+void lkdtm_OVERWRITE_ALLOCATION(void)
+{
+	size_t len = 1020;
+	u32 *data = kmalloc(len, GFP_KERNEL);
+	if (!data)
+		return;
+
+	data[1024 / sizeof(u32)] = 0x12345678;
+	kfree(data);
+}
+
+void lkdtm_WRITE_AFTER_FREE(void)
+{
+	int *base, *again;
+	size_t len = 1024;
+	/*
+	 * The slub allocator uses the first word to store the free
+	 * pointer in some configurations. Use the middle of the
+	 * allocation to avoid running into the freelist
+	 */
+	size_t offset = (len / sizeof(*base)) / 2;
+
+	base = kmalloc(len, GFP_KERNEL);
+	if (!base)
+		return;
+	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
+	pr_info("Attempting bad write to freed memory at %p\n",
+		&base[offset]);
+	kfree(base);
+	base[offset] = 0x0abcdef0;
+	/* Attempt to notice the overwrite. */
+	again = kmalloc(len, GFP_KERNEL);
+	kfree(again);
+	if (again != base)
+		pr_info("Hmm, didn't get the same memory range.\n");
+}
+
+void lkdtm_READ_AFTER_FREE(void)
+{
+	int *base, *val, saw;
+	size_t len = 1024;
+	/*
+	 * The slub allocator will use either the first word or
+	 * the middle of the allocation to store the free pointer,
+	 * depending on configurations. Store in the second word to
+	 * avoid running into the freelist.
+	 */
+	size_t offset = sizeof(*base);
+
+	base = kmalloc(len, GFP_KERNEL);
+	if (!base) {
+		pr_info("Unable to allocate base memory.\n");
+		return;
+	}
+
+	val = kmalloc(len, GFP_KERNEL);
+	if (!val) {
+		pr_info("Unable to allocate val memory.\n");
+		kfree(base);
+		return;
+	}
+
+	*val = 0x12345678;
+	base[offset] = *val;
+	pr_info("Value in memory before free: %x\n", base[offset]);
+
+	kfree(base);
+
+	pr_info("Attempting bad read from freed memory\n");
+	saw = base[offset];
+	if (saw != *val) {
+		/* Good! Poisoning happened, so declare a win. */
+		pr_info("Memory correctly poisoned (%x)\n", saw);
+		BUG();
+	}
+	pr_info("Memory was not poisoned\n");
+
+	kfree(val);
+}
+
+void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
+{
+	unsigned long p = __get_free_page(GFP_KERNEL);
+	if (!p) {
+		pr_info("Unable to allocate free page\n");
+		return;
+	}
+
+	pr_info("Writing to the buddy page before free\n");
+	memset((void *)p, 0x3, PAGE_SIZE);
+	free_page(p);
+	schedule();
+	pr_info("Attempting bad write to the buddy page after free\n");
+	memset((void *)p, 0x78, PAGE_SIZE);
+	/* Attempt to notice the overwrite. */
+	p = __get_free_page(GFP_KERNEL);
+	free_page(p);
+	schedule();
+}
+
+void lkdtm_READ_BUDDY_AFTER_FREE(void)
+{
+	unsigned long p = __get_free_page(GFP_KERNEL);
+	int saw, *val;
+	int *base;
+
+	if (!p) {
+		pr_info("Unable to allocate free page\n");
+		return;
+	}
+
+	val = kmalloc(1024, GFP_KERNEL);
+	if (!val) {
+		pr_info("Unable to allocate val memory.\n");
+		free_page(p);
+		return;
+	}
+
+	base = (int *)p;
+
+	*val = 0x12345678;
+	base[0] = *val;
+	pr_info("Value in memory before free: %x\n", base[0]);
+	free_page(p);
+	pr_info("Attempting to read from freed memory\n");
+	saw = base[0];
+	if (saw != *val) {
+		/* Good! Poisoning happened, so declare a win. 
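+		 * The BUG() below is the expected, "passing" outcome of
+		 * this test.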
*/ + pr_info("Memory correctly poisoned (%x)\n", saw); + BUG(); + } + pr_info("Buddy page was not poisoned\n"); + + kfree(val); +} + +void lkdtm_SLAB_FREE_DOUBLE(void) +{ + int *val; + + val = kmem_cache_alloc(double_free_cache, GFP_KERNEL); + if (!val) { + pr_info("Unable to allocate double_free_cache memory.\n"); + return; + } + + /* Just make sure we got real memory. */ + *val = 0x12345678; + pr_info("Attempting double slab free ...\n"); + kmem_cache_free(double_free_cache, val); + kmem_cache_free(double_free_cache, val); +} + +void lkdtm_SLAB_FREE_CROSS(void) +{ + int *val; + + val = kmem_cache_alloc(a_cache, GFP_KERNEL); + if (!val) { + pr_info("Unable to allocate a_cache memory.\n"); + return; + } + + /* Just make sure we got real memory. */ + *val = 0x12345679; + pr_info("Attempting cross-cache slab free ...\n"); + kmem_cache_free(b_cache, val); +} + +void lkdtm_SLAB_FREE_PAGE(void) +{ + unsigned long p = __get_free_page(GFP_KERNEL); + + pr_info("Attempting non-Slab slab free ...\n"); + kmem_cache_free(NULL, (void *)p); + free_page(p); +} + +/* + * We have constructors to keep the caches distinctly separated without + * needing to boot with "slab_nomerge". + */ +static void ctor_double_free(void *region) +{ } +static void ctor_a(void *region) +{ } +static void ctor_b(void *region) +{ } + +void __init lkdtm_heap_init(void) +{ + double_free_cache = kmem_cache_create("lkdtm-heap-double_free", + 64, 0, 0, ctor_double_free); + a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a); + b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b); +} + +void __exit lkdtm_heap_exit(void) +{ + kmem_cache_destroy(double_free_cache); + kmem_cache_destroy(a_cache); + kmem_cache_destroy(b_cache); +} diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h new file mode 100644 index 000000000..6dec4c9b4 --- /dev/null +++ b/drivers/misc/lkdtm/lkdtm.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LKDTM_H +#define __LKDTM_H + +#define pr_fmt(fmt) "lkdtm: " fmt + +#include <linux/kernel.h> + +/* lkdtm_bugs.c */ +void __init lkdtm_bugs_init(int *recur_param); +void lkdtm_PANIC(void); +void lkdtm_BUG(void); +void lkdtm_WARNING(void); +void lkdtm_WARNING_MESSAGE(void); +void lkdtm_EXCEPTION(void); +void lkdtm_LOOP(void); +void lkdtm_EXHAUST_STACK(void); +void lkdtm_CORRUPT_STACK(void); +void lkdtm_CORRUPT_STACK_STRONG(void); +void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void); +void lkdtm_SOFTLOCKUP(void); +void lkdtm_HARDLOCKUP(void); +void lkdtm_SPINLOCKUP(void); +void lkdtm_HUNG_TASK(void); +void lkdtm_OVERFLOW_SIGNED(void); +void lkdtm_OVERFLOW_UNSIGNED(void); +void lkdtm_ARRAY_BOUNDS(void); +void lkdtm_CORRUPT_LIST_ADD(void); +void lkdtm_CORRUPT_LIST_DEL(void); +void lkdtm_STACK_GUARD_PAGE_LEADING(void); +void lkdtm_STACK_GUARD_PAGE_TRAILING(void); +void lkdtm_UNSET_SMEP(void); +void lkdtm_DOUBLE_FAULT(void); +void lkdtm_CORRUPT_PAC(void); + +/* lkdtm_heap.c */ +void __init lkdtm_heap_init(void); +void __exit lkdtm_heap_exit(void); +void lkdtm_OVERWRITE_ALLOCATION(void); +void lkdtm_WRITE_AFTER_FREE(void); +void lkdtm_READ_AFTER_FREE(void); +void lkdtm_WRITE_BUDDY_AFTER_FREE(void); +void lkdtm_READ_BUDDY_AFTER_FREE(void); +void lkdtm_SLAB_FREE_DOUBLE(void); +void lkdtm_SLAB_FREE_CROSS(void); +void lkdtm_SLAB_FREE_PAGE(void); + +/* lkdtm_perms.c */ +void __init lkdtm_perms_init(void); +void lkdtm_WRITE_RO(void); +void lkdtm_WRITE_RO_AFTER_INIT(void); +void lkdtm_WRITE_KERN(void); +void lkdtm_EXEC_DATA(void); +void lkdtm_EXEC_STACK(void); +void 
lkdtm_EXEC_KMALLOC(void);
+void lkdtm_EXEC_VMALLOC(void);
+void lkdtm_EXEC_RODATA(void);
+void lkdtm_EXEC_USERSPACE(void);
+void lkdtm_EXEC_NULL(void);
+void lkdtm_ACCESS_USERSPACE(void);
+void lkdtm_ACCESS_NULL(void);
+
+/* lkdtm_refcount.c */
+void lkdtm_REFCOUNT_INC_OVERFLOW(void);
+void lkdtm_REFCOUNT_ADD_OVERFLOW(void);
+void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void);
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void);
+void lkdtm_REFCOUNT_DEC_ZERO(void);
+void lkdtm_REFCOUNT_DEC_NEGATIVE(void);
+void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void);
+void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void);
+void lkdtm_REFCOUNT_INC_ZERO(void);
+void lkdtm_REFCOUNT_ADD_ZERO(void);
+void lkdtm_REFCOUNT_INC_SATURATED(void);
+void lkdtm_REFCOUNT_DEC_SATURATED(void);
+void lkdtm_REFCOUNT_ADD_SATURATED(void);
+void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void);
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void);
+void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void);
+void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void);
+void lkdtm_REFCOUNT_TIMING(void);
+void lkdtm_ATOMIC_TIMING(void);
+
+/* lkdtm_rodata.c */
+void lkdtm_rodata_do_nothing(void);
+
+/* lkdtm_usercopy.c */
+void __init lkdtm_usercopy_init(void);
+void __exit lkdtm_usercopy_exit(void);
+void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
+void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void);
+void lkdtm_USERCOPY_STACK_FRAME_TO(void);
+void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
+void lkdtm_USERCOPY_STACK_BEYOND(void);
+void lkdtm_USERCOPY_KERNEL(void);
+
+/* lkdtm_stackleak.c */
+void lkdtm_STACKLEAK_ERASING(void);
+
+/* cfi.c */
+void lkdtm_CFI_FORWARD_PROTO(void);
+
+#endif
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
new file mode 100644
index 000000000..2dede2ef6
--- /dev/null
+++ b/drivers/misc/lkdtm/perms.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests related to validating kernel memory
+ * permissions: non-executable regions, non-writable regions, and
+ * even non-readable regions.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+/* Whether or not to fill the target memory area with do_nothing(). */
+#define CODE_WRITE true
+#define CODE_AS_IS false
+
+/* How many bytes to copy to be sure we've copied enough of do_nothing(). */
+#define EXEC_SIZE 64
+
+/* This is non-const, so it will end up in the .data section. */
+static u8 data_area[EXEC_SIZE];
+
+/* This is const, so it will end up in the .rodata section. */
+static const unsigned long rodata = 0xAA55AA55;
+
+/* This is marked __ro_after_init, so it should ultimately be .rodata. */
+static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
+
+/*
+ * This just returns to the caller. It is designed to be copied into
+ * non-executable memory regions.
+ */
+static void do_nothing(void)
+{
+	return;
+}
+
+/* Must immediately follow do_nothing for size calculations to work out. 
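+ * (lkdtm_WRITE_KERN() below computes its copy size as the distance
+ * between do_overwritten and do_nothing.)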
*/ +static void do_overwritten(void) +{ + pr_info("do_overwritten wasn't overwritten!\n"); + return; +} + +static noinline void execute_location(void *dst, bool write) +{ + void (*func)(void) = dst; + + pr_info("attempting ok execution at %px\n", do_nothing); + do_nothing(); + + if (write == CODE_WRITE) { + memcpy(dst, do_nothing, EXEC_SIZE); + flush_icache_range((unsigned long)dst, + (unsigned long)dst + EXEC_SIZE); + } + pr_info("attempting bad execution at %px\n", func); + func(); + pr_err("FAIL: func returned\n"); +} + +static void execute_user_location(void *dst) +{ + int copied; + + /* Intentionally crossing kernel/user memory boundary. */ + void (*func)(void) = dst; + + pr_info("attempting ok execution at %px\n", do_nothing); + do_nothing(); + + copied = access_process_vm(current, (unsigned long)dst, do_nothing, + EXEC_SIZE, FOLL_WRITE); + if (copied < EXEC_SIZE) + return; + pr_info("attempting bad execution at %px\n", func); + func(); + pr_err("FAIL: func returned\n"); +} + +void lkdtm_WRITE_RO(void) +{ + /* Explicitly cast away "const" for the test and make volatile. */ + volatile unsigned long *ptr = (unsigned long *)&rodata; + + pr_info("attempting bad rodata write at %px\n", ptr); + *ptr ^= 0xabcd1234; + pr_err("FAIL: survived bad write\n"); +} + +void lkdtm_WRITE_RO_AFTER_INIT(void) +{ + volatile unsigned long *ptr = &ro_after_init; + + /* + * Verify we were written to during init. Since an Oops + * is considered a "success", a failure is to just skip the + * real test. + */ + if ((*ptr & 0xAA) != 0xAA) { + pr_info("%p was NOT written during init!?\n", ptr); + return; + } + + pr_info("attempting bad ro_after_init write at %px\n", ptr); + *ptr ^= 0xabcd1234; + pr_err("FAIL: survived bad write\n"); +} + +void lkdtm_WRITE_KERN(void) +{ + size_t size; + volatile unsigned char *ptr; + + size = (unsigned long)do_overwritten - (unsigned long)do_nothing; + ptr = (unsigned char *)do_overwritten; + + pr_info("attempting bad %zu byte write at %px\n", size, ptr); + memcpy((void *)ptr, (unsigned char *)do_nothing, size); + flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size)); + pr_err("FAIL: survived bad write\n"); + + do_overwritten(); +} + +void lkdtm_EXEC_DATA(void) +{ + execute_location(data_area, CODE_WRITE); +} + +void lkdtm_EXEC_STACK(void) +{ + u8 stack_area[EXEC_SIZE]; + execute_location(stack_area, CODE_WRITE); +} + +void lkdtm_EXEC_KMALLOC(void) +{ + u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL); + execute_location(kmalloc_area, CODE_WRITE); + kfree(kmalloc_area); +} + +void lkdtm_EXEC_VMALLOC(void) +{ + u32 *vmalloc_area = vmalloc(EXEC_SIZE); + execute_location(vmalloc_area, CODE_WRITE); + vfree(vmalloc_area); +} + +void lkdtm_EXEC_RODATA(void) +{ + execute_location(lkdtm_rodata_do_nothing, CODE_AS_IS); +} + +void lkdtm_EXEC_USERSPACE(void) +{ + unsigned long user_addr; + + user_addr = vm_mmap(NULL, 0, PAGE_SIZE, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, 0); + if (user_addr >= TASK_SIZE) { + pr_warn("Failed to allocate user memory\n"); + return; + } + execute_user_location((void *)user_addr); + vm_munmap(user_addr, PAGE_SIZE); +} + +void lkdtm_EXEC_NULL(void) +{ + execute_location(NULL, CODE_AS_IS); +} + +void lkdtm_ACCESS_USERSPACE(void) +{ + unsigned long user_addr, tmp = 0; + unsigned long *ptr; + + user_addr = vm_mmap(NULL, 0, PAGE_SIZE, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, 0); + if (user_addr >= TASK_SIZE) { + pr_warn("Failed to allocate user memory\n"); + return; + } + + if (copy_to_user((void 
__user *)user_addr, &tmp, sizeof(tmp))) { + pr_warn("copy_to_user failed\n"); + vm_munmap(user_addr, PAGE_SIZE); + return; + } + + ptr = (unsigned long *)user_addr; + + pr_info("attempting bad read at %px\n", ptr); + tmp = *ptr; + tmp += 0xc0dec0de; + pr_err("FAIL: survived bad read\n"); + + pr_info("attempting bad write at %px\n", ptr); + *ptr = tmp; + pr_err("FAIL: survived bad write\n"); + + vm_munmap(user_addr, PAGE_SIZE); +} + +void lkdtm_ACCESS_NULL(void) +{ + unsigned long tmp; + volatile unsigned long *ptr = (unsigned long *)NULL; + + pr_info("attempting bad read at %px\n", ptr); + tmp = *ptr; + tmp += 0xc0dec0de; + pr_err("FAIL: survived bad read\n"); + + pr_info("attempting bad write at %px\n", ptr); + *ptr = tmp; + pr_err("FAIL: survived bad write\n"); +} + +void __init lkdtm_perms_init(void) +{ + /* Make sure we can write to __ro_after_init values during __init */ + ro_after_init |= 0xAA; +} diff --git a/drivers/misc/lkdtm/refcount.c b/drivers/misc/lkdtm/refcount.c new file mode 100644 index 000000000..de7c5ab52 --- /dev/null +++ b/drivers/misc/lkdtm/refcount.c @@ -0,0 +1,392 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This is for all the tests related to refcount bugs (e.g. overflow, + * underflow, reaching zero untested, etc). + */ +#include "lkdtm.h" +#include <linux/refcount.h> + +static void overflow_check(refcount_t *ref) +{ + switch (refcount_read(ref)) { + case REFCOUNT_SATURATED: + pr_info("Overflow detected: saturated\n"); + break; + case REFCOUNT_MAX: + pr_warn("Overflow detected: unsafely reset to max\n"); + break; + default: + pr_err("Fail: refcount wrapped to %d\n", refcount_read(ref)); + } +} + +/* + * A refcount_inc() above the maximum value of the refcount implementation, + * should at least saturate, and at most also WARN. + */ +void lkdtm_REFCOUNT_INC_OVERFLOW(void) +{ + refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1); + + pr_info("attempting good refcount_inc() without overflow\n"); + refcount_dec(&over); + refcount_inc(&over); + + pr_info("attempting bad refcount_inc() overflow\n"); + refcount_inc(&over); + refcount_inc(&over); + + overflow_check(&over); +} + +/* refcount_add() should behave just like refcount_inc() above. */ +void lkdtm_REFCOUNT_ADD_OVERFLOW(void) +{ + refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1); + + pr_info("attempting good refcount_add() without overflow\n"); + refcount_dec(&over); + refcount_dec(&over); + refcount_dec(&over); + refcount_dec(&over); + refcount_add(4, &over); + + pr_info("attempting bad refcount_add() overflow\n"); + refcount_add(4, &over); + + overflow_check(&over); +} + +/* refcount_inc_not_zero() should behave just like refcount_inc() above. */ +void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void) +{ + refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX); + + pr_info("attempting bad refcount_inc_not_zero() overflow\n"); + if (!refcount_inc_not_zero(&over)) + pr_warn("Weird: refcount_inc_not_zero() reported zero\n"); + + overflow_check(&over); +} + +/* refcount_add_not_zero() should behave just like refcount_inc() above. 
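+ * That is, it should saturate (and at most WARN) rather than wrap past
+ * REFCOUNT_MAX.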
*/ +void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void) +{ + refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX); + + pr_info("attempting bad refcount_add_not_zero() overflow\n"); + if (!refcount_add_not_zero(6, &over)) + pr_warn("Weird: refcount_add_not_zero() reported zero\n"); + + overflow_check(&over); +} + +static void check_zero(refcount_t *ref) +{ + switch (refcount_read(ref)) { + case REFCOUNT_SATURATED: + pr_info("Zero detected: saturated\n"); + break; + case REFCOUNT_MAX: + pr_warn("Zero detected: unsafely reset to max\n"); + break; + case 0: + pr_warn("Still at zero: refcount_inc/add() must not inc-from-0\n"); + break; + default: + pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref)); + } +} + +/* + * A refcount_dec(), as opposed to a refcount_dec_and_test(), when it hits + * zero it should either saturate (when inc-from-zero isn't protected) + * or stay at zero (when inc-from-zero is protected) and should WARN for both. + */ +void lkdtm_REFCOUNT_DEC_ZERO(void) +{ + refcount_t zero = REFCOUNT_INIT(2); + + pr_info("attempting good refcount_dec()\n"); + refcount_dec(&zero); + + pr_info("attempting bad refcount_dec() to zero\n"); + refcount_dec(&zero); + + check_zero(&zero); +} + +static void check_negative(refcount_t *ref, int start) +{ + /* + * refcount_t refuses to move a refcount at all on an + * over-sub, so we have to track our starting position instead of + * looking only at zero-pinning. + */ + if (refcount_read(ref) == start) { + pr_warn("Still at %d: refcount_inc/add() must not inc-from-0\n", + start); + return; + } + + switch (refcount_read(ref)) { + case REFCOUNT_SATURATED: + pr_info("Negative detected: saturated\n"); + break; + case REFCOUNT_MAX: + pr_warn("Negative detected: unsafely reset to max\n"); + break; + default: + pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref)); + } +} + +/* A refcount_dec() going negative should saturate and may WARN. */ +void lkdtm_REFCOUNT_DEC_NEGATIVE(void) +{ + refcount_t neg = REFCOUNT_INIT(0); + + pr_info("attempting bad refcount_dec() below zero\n"); + refcount_dec(&neg); + + check_negative(&neg, 0); +} + +/* + * A refcount_dec_and_test() should act like refcount_dec() above when + * going negative. + */ +void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void) +{ + refcount_t neg = REFCOUNT_INIT(0); + + pr_info("attempting bad refcount_dec_and_test() below zero\n"); + if (refcount_dec_and_test(&neg)) + pr_warn("Weird: refcount_dec_and_test() reported zero\n"); + + check_negative(&neg, 0); +} + +/* + * A refcount_sub_and_test() should act like refcount_dec_and_test() + * above when going negative. + */ +void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void) +{ + refcount_t neg = REFCOUNT_INIT(3); + + pr_info("attempting bad refcount_sub_and_test() below zero\n"); + if (refcount_sub_and_test(5, &neg)) + pr_warn("Weird: refcount_sub_and_test() reported zero\n"); + + check_negative(&neg, 3); +} + +static void check_from_zero(refcount_t *ref) +{ + switch (refcount_read(ref)) { + case 0: + pr_info("Zero detected: stayed at zero\n"); + break; + case REFCOUNT_SATURATED: + pr_info("Zero detected: saturated\n"); + break; + case REFCOUNT_MAX: + pr_warn("Zero detected: unsafely reset to max\n"); + break; + default: + pr_info("Fail: zero not detected, incremented to %d\n", + refcount_read(ref)); + } +} + +/* + * A refcount_inc() from zero should pin to zero or saturate and may WARN. 
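+ * Incrementing a refcount that has already dropped to zero is the
+ * classic use-after-free resurrection pattern these protections target.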
+ */
+void lkdtm_REFCOUNT_INC_ZERO(void)
+{
+	refcount_t zero = REFCOUNT_INIT(0);
+
+	pr_info("attempting safe refcount_inc_not_zero() from zero\n");
+	if (!refcount_inc_not_zero(&zero)) {
+		pr_info("Good: zero detected\n");
+		if (refcount_read(&zero) == 0)
+			pr_info("Correctly stayed at zero\n");
+		else
+			pr_err("Fail: refcount went past zero!\n");
+	} else {
+		pr_err("Fail: Zero not detected!?\n");
+	}
+
+	pr_info("attempting bad refcount_inc() from zero\n");
+	refcount_inc(&zero);
+
+	check_from_zero(&zero);
+}
+
+/*
+ * A refcount_add() should act like refcount_inc() above when starting
+ * at zero.
+ */
+void lkdtm_REFCOUNT_ADD_ZERO(void)
+{
+	refcount_t zero = REFCOUNT_INIT(0);
+
+	pr_info("attempting safe refcount_add_not_zero() from zero\n");
+	if (!refcount_add_not_zero(3, &zero)) {
+		pr_info("Good: zero detected\n");
+		if (refcount_read(&zero) == 0)
+			pr_info("Correctly stayed at zero\n");
+		else
+			pr_err("Fail: refcount went past zero\n");
+	} else {
+		pr_err("Fail: Zero not detected!?\n");
+	}
+
+	pr_info("attempting bad refcount_add() from zero\n");
+	refcount_add(3, &zero);
+
+	check_from_zero(&zero);
+}
+
+static void check_saturated(refcount_t *ref)
+{
+	switch (refcount_read(ref)) {
+	case REFCOUNT_SATURATED:
+		pr_info("Saturation detected: still saturated\n");
+		break;
+	case REFCOUNT_MAX:
+		pr_warn("Saturation detected: unsafely reset to max\n");
+		break;
+	default:
+		pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
+	}
+}
+
+/*
+ * A refcount_inc() from a saturated value should at most warn about
+ * being saturated already.
+ */
+void lkdtm_REFCOUNT_INC_SATURATED(void)
+{
+	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+	pr_info("attempting bad refcount_inc() from saturated\n");
+	refcount_inc(&sat);
+
+	check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_DEC_SATURATED(void)
+{
+	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+	pr_info("attempting bad refcount_dec() from saturated\n");
+	refcount_dec(&sat);
+
+	check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_ADD_SATURATED(void)
+{
+	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+	pr_info("attempting bad refcount_add() from saturated\n");
+	refcount_add(8, &sat);
+
+	check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
+{
+	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+	pr_info("attempting bad refcount_inc_not_zero() from saturated\n");
+	if (!refcount_inc_not_zero(&sat))
+		pr_warn("Weird: refcount_inc_not_zero() reported zero\n");
+
+	check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
+{
+	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+	pr_info("attempting bad refcount_add_not_zero() from saturated\n");
+	if (!refcount_add_not_zero(7, &sat))
+		pr_warn("Weird: refcount_add_not_zero() reported zero\n");
+
+	check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
+{
+	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+	pr_info("attempting bad refcount_dec_and_test() from saturated\n");
+	if (refcount_dec_and_test(&sat))
+		pr_warn("Weird: refcount_dec_and_test() reported zero\n");
+
+	check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. 
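+ * (i.e. stay pinned at REFCOUNT_SATURATED rather than move; see
+ * check_saturated() above.)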
*/ +void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void) +{ + refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED); + + pr_info("attempting bad refcount_sub_and_test() from saturated\n"); + if (refcount_sub_and_test(8, &sat)) + pr_warn("Weird: refcount_sub_and_test() reported zero\n"); + + check_saturated(&sat); +} + +/* Used to time the existing atomic_t when used for reference counting */ +void lkdtm_ATOMIC_TIMING(void) +{ + unsigned int i; + atomic_t count = ATOMIC_INIT(1); + + for (i = 0; i < INT_MAX - 1; i++) + atomic_inc(&count); + + for (i = INT_MAX; i > 0; i--) + if (atomic_dec_and_test(&count)) + break; + + if (i != 1) + pr_err("atomic timing: out of sync up/down cycle: %u\n", i - 1); + else + pr_info("atomic timing: done\n"); +} + +/* + * This can be compared to ATOMIC_TIMING when implementing fast refcount + * protections. Looking at the number of CPU cycles tells the real story + * about performance. For example: + * cd /sys/kernel/debug/provoke-crash + * perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT + */ +void lkdtm_REFCOUNT_TIMING(void) +{ + unsigned int i; + refcount_t count = REFCOUNT_INIT(1); + + for (i = 0; i < INT_MAX - 1; i++) + refcount_inc(&count); + + for (i = INT_MAX; i > 0; i--) + if (refcount_dec_and_test(&count)) + break; + + if (i != 1) + pr_err("refcount: out of sync up/down cycle: %u\n", i - 1); + else + pr_info("refcount timing: done\n"); +} diff --git a/drivers/misc/lkdtm/rodata.c b/drivers/misc/lkdtm/rodata.c new file mode 100644 index 000000000..baacb876d --- /dev/null +++ b/drivers/misc/lkdtm/rodata.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This includes functions that are meant to live entirely in .rodata + * (via objcopy tricks), to validate the non-executability of .rodata. + */ +#include "lkdtm.h" + +void noinstr lkdtm_rodata_do_nothing(void) +{ + /* Does nothing. We just want an architecture agnostic "return". */ +} diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c new file mode 100644 index 000000000..d1a5c0705 --- /dev/null +++ b/drivers/misc/lkdtm/stackleak.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This code tests that the current task stack is properly erased (filled + * with STACKLEAK_POISON). + * + * Authors: + * Alexander Popov <alex.popov@linux.com> + * Tycho Andersen <tycho@tycho.ws> + */ + +#include "lkdtm.h" +#include <linux/stackleak.h> + +void lkdtm_STACKLEAK_ERASING(void) +{ + unsigned long *sp, left, found, i; + const unsigned long check_depth = + STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long); + bool test_failed = false; + + /* + * For the details about the alignment of the poison values, see + * the comment in stackleak_track_stack(). + */ + sp = PTR_ALIGN(&i, sizeof(unsigned long)); + + left = ((unsigned long)sp & (THREAD_SIZE - 1)) / sizeof(unsigned long); + sp--; + + /* + * One 'long int' at the bottom of the thread stack is reserved + * and not poisoned. + */ + if (left > 1) { + left--; + } else { + pr_err("FAIL: not enough stack space for the test\n"); + test_failed = true; + goto end; + } + + pr_info("checking unused part of the thread stack (%lu bytes)...\n", + left * sizeof(unsigned long)); + + /* + * Search for 'check_depth' poison values in a row (just like + * stackleak_erase() does). 
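	 * A single stray STACKLEAK_POISON word on the stack must not be
	 * mistaken for the erased region, hence the run-length check.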
+ */
+	for (i = 0, found = 0; i < left && found <= check_depth; i++) {
+		if (*(sp - i) == STACKLEAK_POISON)
+			found++;
+		else
+			found = 0;
+	}
+
+	if (found <= check_depth) {
+		pr_err("FAIL: the erased part is not found (checked %lu bytes)\n",
+		       i * sizeof(unsigned long));
+		test_failed = true;
+		goto end;
+	}
+
+	pr_info("the erased part begins after %lu not poisoned bytes\n",
+		(i - found) * sizeof(unsigned long));
+
+	/* The rest of thread stack should be erased */
+	for (; i < left; i++) {
+		if (*(sp - i) != STACKLEAK_POISON) {
+			pr_err("FAIL: bad value number %lu in the erased part: 0x%lx\n",
+			       i, *(sp - i));
+			test_failed = true;
+		}
+	}
+
+end:
+	if (test_failed) {
+		pr_err("FAIL: the thread stack is NOT properly erased\n");
+		dump_stack();
+	} else {
+		pr_info("OK: the rest of the thread stack is properly erased\n");
+	}
+}
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
new file mode 100644
index 000000000..cde265548
--- /dev/null
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests related to copy_to_user() and copy_from_user()
+ * hardening.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Many of the tests here end up using const sizes, but those would
+ * normally be ignored by hardened usercopy, so force the compiler
+ * into choosing the non-const path to make sure we trigger the
+ * hardened usercopy checks by adding "unconst" to all the const copies,
+ * and making sure "cache_size" isn't optimized into a const.
+ */
+static volatile size_t unconst;
+static volatile size_t cache_size = 1024;
+static struct kmem_cache *whitelist_cache;
+
+static const unsigned char test_text[] = "This is a test.\n";
+
+/*
+ * Instead of adding -Wno-return-local-addr, just pass the stack address
+ * through a function to obfuscate it from the compiler.
+ */
+static noinline unsigned char *trick_compiler(unsigned char *stack)
+{
+	return stack + unconst;
+}
+
+static noinline unsigned char *do_usercopy_stack_callee(int value)
+{
+	unsigned char buf[128];
+	int i;
+
+	/* Exercise stack to avoid everything living in registers. */
+	for (i = 0; i < sizeof(buf); i++) {
+		buf[i] = value & 0xff;
+	}
+
+	/*
+	 * Put the target buffer in the middle of stack allocation
+	 * so that we don't step on future stack users regardless
+	 * of stack growth direction.
+	 */
+	return trick_compiler(&buf[(128/2)-32]);
+}
+
+static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
+{
+	unsigned long user_addr;
+	unsigned char good_stack[32];
+	unsigned char *bad_stack;
+	int i;
+
+	/* Exercise stack to avoid everything living in registers. */
+	for (i = 0; i < sizeof(good_stack); i++)
+		good_stack[i] = test_text[i % sizeof(test_text)];
+
+	/* This is a pointer to outside our current stack frame. */
+	if (bad_frame) {
+		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
+	} else {
+		/* Put start address just inside stack. 
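		 * (one word below the stack end, so copying
		 * sizeof(good_stack) bytes from here runs past the
		 * end of the thread stack.)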
*/ + bad_stack = task_stack_page(current) + THREAD_SIZE; + bad_stack -= sizeof(unsigned long); + } + +#ifdef ARCH_HAS_CURRENT_STACK_POINTER + pr_info("stack : %px\n", (void *)current_stack_pointer); +#endif + pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack)); + pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack)); + + user_addr = vm_mmap(NULL, 0, PAGE_SIZE, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, 0); + if (user_addr >= TASK_SIZE) { + pr_warn("Failed to allocate user memory\n"); + return; + } + + if (to_user) { + pr_info("attempting good copy_to_user of local stack\n"); + if (copy_to_user((void __user *)user_addr, good_stack, + unconst + sizeof(good_stack))) { + pr_warn("copy_to_user failed unexpectedly?!\n"); + goto free_user; + } + + pr_info("attempting bad copy_to_user of distant stack\n"); + if (copy_to_user((void __user *)user_addr, bad_stack, + unconst + sizeof(good_stack))) { + pr_warn("copy_to_user failed, but lacked Oops\n"); + goto free_user; + } + } else { + /* + * There isn't a safe way to not be protected by usercopy + * if we're going to write to another thread's stack. + */ + if (!bad_frame) + goto free_user; + + pr_info("attempting good copy_from_user of local stack\n"); + if (copy_from_user(good_stack, (void __user *)user_addr, + unconst + sizeof(good_stack))) { + pr_warn("copy_from_user failed unexpectedly?!\n"); + goto free_user; + } + + pr_info("attempting bad copy_from_user of distant stack\n"); + if (copy_from_user(bad_stack, (void __user *)user_addr, + unconst + sizeof(good_stack))) { + pr_warn("copy_from_user failed, but lacked Oops\n"); + goto free_user; + } + } + +free_user: + vm_munmap(user_addr, PAGE_SIZE); +} + +/* + * This checks for whole-object size validation with hardened usercopy, + * with or without usercopy whitelisting. 
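+ * The "bad" copies below start 16 bytes into a kmalloc() object but
+ * still pass the full object size, so they always run past the end of
+ * the allocation.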
+ */ +static void do_usercopy_heap_size(bool to_user) +{ + unsigned long user_addr; + unsigned char *one, *two; + void __user *test_user_addr; + void *test_kern_addr; + size_t size = unconst + 1024; + + one = kmalloc(size, GFP_KERNEL); + two = kmalloc(size, GFP_KERNEL); + if (!one || !two) { + pr_warn("Failed to allocate kernel memory\n"); + goto free_kernel; + } + + user_addr = vm_mmap(NULL, 0, PAGE_SIZE, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, 0); + if (user_addr >= TASK_SIZE) { + pr_warn("Failed to allocate user memory\n"); + goto free_kernel; + } + + memset(one, 'A', size); + memset(two, 'B', size); + + test_user_addr = (void __user *)(user_addr + 16); + test_kern_addr = one + 16; + + if (to_user) { + pr_info("attempting good copy_to_user of correct size\n"); + if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) { + pr_warn("copy_to_user failed unexpectedly?!\n"); + goto free_user; + } + + pr_info("attempting bad copy_to_user of too large size\n"); + if (copy_to_user(test_user_addr, test_kern_addr, size)) { + pr_warn("copy_to_user failed, but lacked Oops\n"); + goto free_user; + } + } else { + pr_info("attempting good copy_from_user of correct size\n"); + if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) { + pr_warn("copy_from_user failed unexpectedly?!\n"); + goto free_user; + } + + pr_info("attempting bad copy_from_user of too large size\n"); + if (copy_from_user(test_kern_addr, test_user_addr, size)) { + pr_warn("copy_from_user failed, but lacked Oops\n"); + goto free_user; + } + } + +free_user: + vm_munmap(user_addr, PAGE_SIZE); +free_kernel: + kfree(one); + kfree(two); +} + +/* + * This checks for the specific whitelist window within an object. If this + * test passes, then do_usercopy_heap_size() tests will pass too. + */ +static void do_usercopy_heap_whitelist(bool to_user) +{ + unsigned long user_alloc; + unsigned char *buf = NULL; + unsigned char __user *user_addr; + size_t offset, size; + + /* Make sure cache was prepared. */ + if (!whitelist_cache) { + pr_warn("Failed to allocate kernel cache\n"); + return; + } + + /* + * Allocate a buffer with a whitelisted window in the buffer. + */ + buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL); + if (!buf) { + pr_warn("Failed to allocate buffer from whitelist cache\n"); + goto free_alloc; + } + + /* Allocate user memory we'll poke at. */ + user_alloc = vm_mmap(NULL, 0, PAGE_SIZE, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, 0); + if (user_alloc >= TASK_SIZE) { + pr_warn("Failed to allocate user memory\n"); + goto free_alloc; + } + user_addr = (void __user *)user_alloc; + + memset(buf, 'B', cache_size); + + /* Whitelisted window in buffer, from kmem_cache_create_usercopy. 
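	 * These must match the useroffset/usersize arguments passed to
	 * kmem_cache_create_usercopy() in lkdtm_usercopy_init() below.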
 */
+	offset = (cache_size / 4) + unconst;
+	size = (cache_size / 16) + unconst;
+
+	if (to_user) {
+		pr_info("attempting good copy_to_user inside whitelist\n");
+		if (copy_to_user(user_addr, buf + offset, size)) {
+			pr_warn("copy_to_user failed unexpectedly?!\n");
+			goto free_user;
+		}
+
+		pr_info("attempting bad copy_to_user outside whitelist\n");
+		if (copy_to_user(user_addr, buf + offset - 1, size)) {
+			pr_warn("copy_to_user failed, but lacked Oops\n");
+			goto free_user;
+		}
+	} else {
+		pr_info("attempting good copy_from_user inside whitelist\n");
+		if (copy_from_user(buf + offset, user_addr, size)) {
+			pr_warn("copy_from_user failed unexpectedly?!\n");
+			goto free_user;
+		}
+
+		pr_info("attempting bad copy_from_user outside whitelist\n");
+		if (copy_from_user(buf + offset - 1, user_addr, size)) {
+			pr_warn("copy_from_user failed, but lacked Oops\n");
+			goto free_user;
+		}
+	}
+
+free_user:
+	vm_munmap(user_alloc, PAGE_SIZE);
+free_alloc:
+	if (buf)
+		kmem_cache_free(whitelist_cache, buf);
+}
+
+/* Callable tests. */
+void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
+{
+	do_usercopy_heap_size(true);
+}
+
+void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
+{
+	do_usercopy_heap_size(false);
+}
+
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
+{
+	do_usercopy_heap_whitelist(true);
+}
+
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
+{
+	do_usercopy_heap_whitelist(false);
+}
+
+void lkdtm_USERCOPY_STACK_FRAME_TO(void)
+{
+	do_usercopy_stack(true, true);
+}
+
+void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
+{
+	do_usercopy_stack(false, true);
+}
+
+void lkdtm_USERCOPY_STACK_BEYOND(void)
+{
+	do_usercopy_stack(true, false);
+}
+
+void lkdtm_USERCOPY_KERNEL(void)
+{
+	unsigned long user_addr;
+
+	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+			    PROT_READ | PROT_WRITE | PROT_EXEC,
+			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
+	if (user_addr >= TASK_SIZE) {
+		pr_warn("Failed to allocate user memory\n");
+		return;
+	}
+
+	pr_info("attempting good copy_to_user from kernel rodata: %px\n",
+		test_text);
+	if (copy_to_user((void __user *)user_addr, test_text,
+			 unconst + sizeof(test_text))) {
+		pr_warn("copy_to_user failed unexpectedly?!\n");
+		goto free_user;
+	}
+
+	pr_info("attempting bad copy_to_user from kernel text: %px\n",
+		vm_mmap);
+	if (copy_to_user((void __user *)user_addr, vm_mmap,
+			 unconst + PAGE_SIZE)) {
+		pr_warn("copy_to_user failed, but lacked Oops\n");
+		goto free_user;
+	}
+	pr_err("FAIL: survived bad copy_to_user()\n");
+
+free_user:
+	vm_munmap(user_addr, PAGE_SIZE);
+}
+
+void __init lkdtm_usercopy_init(void)
+{
+	/* Prepare cache that lacks SLAB_USERCOPY flag. */
+	whitelist_cache =
+		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
+					   0, 0,
+					   cache_size / 4,
+					   cache_size / 16,
+					   NULL);
+}
+
+void __exit lkdtm_usercopy_exit(void)
+{
+	kmem_cache_destroy(whitelist_cache);
+}
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
new file mode 100644
index 000000000..f5fd5b786
--- /dev/null
+++ b/drivers/misc/mei/Kconfig
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+config INTEL_MEI
+	tristate "Intel Management Engine Interface"
+	depends on X86 && PCI
+	help
+	  The Intel Management Engine (Intel ME) provides Manageability,
+	  Security and Media services for systems containing Intel chipsets.
+	  If selected, the /dev/mei misc device will be created. 
+
+	  For more information see
+	  <https://software.intel.com/en-us/manageability/>
+
+config INTEL_MEI_ME
+	tristate "ME Enabled Intel Chipsets"
+	select INTEL_MEI
+	depends on X86 && PCI
+	help
+	  MEI support for ME Enabled Intel chipsets.
+
+	  Supported Chipsets are:
+	  7 Series Chipset Family
+	  6 Series Chipset Family
+	  5 Series Chipset Family
+	  4 Series Chipset Family
+	  Mobile 4 Series Chipset Family
+	  ICH9
+	  82946GZ/GL
+	  82G35 Express
+	  82Q963/Q965
+	  82P965/G965
+	  Mobile PM965/GM965
+	  Mobile GME965/GLE960
+	  82Q35 Express
+	  82G33/G31/P35/P31 Express
+	  82Q33 Express
+	  82X38/X48 Express
+
+config INTEL_MEI_TXE
+	tristate "Intel Trusted Execution Environment with ME Interface"
+	select INTEL_MEI
+	depends on X86 && PCI
+	help
+	  MEI Support for Trusted Execution Environment device on Intel SoCs
+
+	  Supported SoCs:
+	  Intel Bay Trail
+
+source "drivers/misc/mei/hdcp/Kconfig"
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
new file mode 100644
index 000000000..f1c76f7ee
--- /dev/null
+++ b/drivers/misc/mei/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2010-2019, Intel Corporation. All rights reserved.
+# Makefile - Intel Management Engine Interface (Intel MEI) Linux driver
+#
+obj-$(CONFIG_INTEL_MEI) += mei.o
+mei-objs := init.o
+mei-objs += hbm.o
+mei-objs += interrupt.o
+mei-objs += client.o
+mei-objs += main.o
+mei-objs += dma-ring.o
+mei-objs += bus.o
+mei-objs += bus-fixup.o
+mei-$(CONFIG_DEBUG_FS) += debugfs.o
+
+obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o
+mei-me-objs := pci-me.o
+mei-me-objs += hw-me.o
+
+obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o
+mei-txe-objs := pci-txe.o
+mei-txe-objs += hw-txe.o
+
+mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
+CFLAGS_mei-trace.o = -I$(src)
+
+obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
new file mode 100644
index 000000000..c4c127558
--- /dev/null
+++ b/drivers/misc/mei/bus-fixup.c
@@ -0,0 +1,512 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+
+#include <linux/mei_cl_bus.h>
+
+#include "mei_dev.h"
+#include "client.h"
+
+#define MEI_UUID_NFC_INFO UUID_LE(0xd2de1625, 0x382d, 0x417d, \
+			0x48, 0xa4, 0xef, 0xab, 0xba, 0x8a, 0x12, 0x06)
+
+static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
+
+#define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \
+			0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
+
+#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
+			0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)
+
+#define MEI_UUID_MKHIF_FIX UUID_LE(0x55213584, 0x9a29, 0x4916, \
+			0xba, 0xdf, 0xf, 0xb7, 0xed, 0x68, 0x2a, 0xeb)
+
+#define MEI_UUID_HDCP UUID_LE(0xB638AB7E, 0x94E2, 0x4EA2, \
+			0xA5, 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
+
+#define MEI_UUID_ANY NULL_UUID_LE
+
+/**
+ * number_of_connections - determine whether a client may be on the bus
+ *	according to its number of connections
+ * We support only clients:
+ *	1. with a single connection
+ *	2. 
and fixed clients (max_number_of_connections == 0) + * + * @cldev: me clients device + */ +static void number_of_connections(struct mei_cl_device *cldev) +{ + if (cldev->me_cl->props.max_number_of_connections > 1) + cldev->do_match = 0; +} + +/** + * blacklist - blacklist a client from the bus + * + * @cldev: me clients device + */ +static void blacklist(struct mei_cl_device *cldev) +{ + cldev->do_match = 0; +} + +/** + * whitelist - forcefully whitelist client + * + * @cldev: me clients device + */ +static void whitelist(struct mei_cl_device *cldev) +{ + cldev->do_match = 1; +} + +#define OSTYPE_LINUX 2 +struct mei_os_ver { + __le16 build; + __le16 reserved1; + u8 os_type; + u8 major; + u8 minor; + u8 reserved2; +} __packed; + +#define MKHI_FEATURE_PTT 0x10 + +struct mkhi_rule_id { + __le16 rule_type; + u8 feature_id; + u8 reserved; +} __packed; + +struct mkhi_fwcaps { + struct mkhi_rule_id id; + u8 len; + u8 data[]; +} __packed; + +struct mkhi_fw_ver_block { + u16 minor; + u8 major; + u8 platform; + u16 buildno; + u16 hotfix; +} __packed; + +struct mkhi_fw_ver { + struct mkhi_fw_ver_block ver[MEI_MAX_FW_VER_BLOCKS]; +} __packed; + +#define MKHI_FWCAPS_GROUP_ID 0x3 +#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6 +#define MKHI_GEN_GROUP_ID 0xFF +#define MKHI_GEN_GET_FW_VERSION_CMD 0x2 +struct mkhi_msg_hdr { + u8 group_id; + u8 command; + u8 reserved; + u8 result; +} __packed; + +struct mkhi_msg { + struct mkhi_msg_hdr hdr; + u8 data[]; +} __packed; + +#define MKHI_OSVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \ + sizeof(struct mkhi_fwcaps) + \ + sizeof(struct mei_os_ver)) +static int mei_osver(struct mei_cl_device *cldev) +{ + const size_t size = MKHI_OSVER_BUF_LEN; + char buf[MKHI_OSVER_BUF_LEN]; + struct mkhi_msg *req; + struct mkhi_fwcaps *fwcaps; + struct mei_os_ver *os_ver; + unsigned int mode = MEI_CL_IO_TX_BLOCKING | MEI_CL_IO_TX_INTERNAL; + + memset(buf, 0, size); + + req = (struct mkhi_msg *)buf; + req->hdr.group_id = MKHI_FWCAPS_GROUP_ID; + req->hdr.command = MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD; + + fwcaps = (struct mkhi_fwcaps *)req->data; + + fwcaps->id.rule_type = 0x0; + fwcaps->id.feature_id = MKHI_FEATURE_PTT; + fwcaps->len = sizeof(*os_ver); + os_ver = (struct mei_os_ver *)fwcaps->data; + os_ver->os_type = OSTYPE_LINUX; + + return __mei_cl_send(cldev->cl, buf, size, mode); +} + +#define MKHI_FWVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \ + sizeof(struct mkhi_fw_ver)) +#define MKHI_FWVER_LEN(__num) (sizeof(struct mkhi_msg_hdr) + \ + sizeof(struct mkhi_fw_ver_block) * (__num)) +#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */ +static int mei_fwver(struct mei_cl_device *cldev) +{ + char buf[MKHI_FWVER_BUF_LEN]; + struct mkhi_msg req; + struct mkhi_msg *rsp; + struct mkhi_fw_ver *fwver; + int bytes_recv, ret, i; + + memset(buf, 0, sizeof(buf)); + + req.hdr.group_id = MKHI_GEN_GROUP_ID; + req.hdr.command = MKHI_GEN_GET_FW_VERSION_CMD; + + ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), + MEI_CL_IO_TX_BLOCKING); + if (ret < 0) { + dev_err(&cldev->dev, "Could not send ReqFWVersion cmd ret = %d\n", ret); + return ret; + } + + ret = 0; + bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), 0, + MKHI_RCV_TIMEOUT); + if (bytes_recv < 0 || (size_t)bytes_recv < MKHI_FWVER_LEN(1)) { + /* + * Should be at least one version block, + * error out if nothing found + */ + dev_err(&cldev->dev, "Could not read FW version ret = %d\n", bytes_recv); + return -EIO; + } + + rsp = (struct mkhi_msg *)buf; + fwver = (struct mkhi_fw_ver *)rsp->data; + memset(cldev->bus->fw_ver, 0, 
sizeof(cldev->bus->fw_ver)); + for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++) { + if ((size_t)bytes_recv < MKHI_FWVER_LEN(i + 1)) + break; + dev_dbg(&cldev->dev, "FW version%d %d:%d.%d.%d.%d\n", + i, fwver->ver[i].platform, + fwver->ver[i].major, fwver->ver[i].minor, + fwver->ver[i].hotfix, fwver->ver[i].buildno); + + cldev->bus->fw_ver[i].platform = fwver->ver[i].platform; + cldev->bus->fw_ver[i].major = fwver->ver[i].major; + cldev->bus->fw_ver[i].minor = fwver->ver[i].minor; + cldev->bus->fw_ver[i].hotfix = fwver->ver[i].hotfix; + cldev->bus->fw_ver[i].buildno = fwver->ver[i].buildno; + } + + return ret; +} + +static void mei_mkhi_fix(struct mei_cl_device *cldev) +{ + int ret; + + /* No need to enable the client if nothing is needed from it */ + if (!cldev->bus->fw_f_fw_ver_supported && + !cldev->bus->hbm_f_os_supported) + return; + + ret = mei_cldev_enable(cldev); + if (ret) + return; + + if (cldev->bus->fw_f_fw_ver_supported) { + ret = mei_fwver(cldev); + if (ret < 0) + dev_err(&cldev->dev, "FW version command failed %d\n", + ret); + } + + if (cldev->bus->hbm_f_os_supported) { + ret = mei_osver(cldev); + if (ret < 0) + dev_err(&cldev->dev, "OS version command failed %d\n", + ret); + } + mei_cldev_disable(cldev); +} + +/** + * mei_wd - wd client on the bus, change protocol version + * as the API has changed. + * + * @cldev: me clients device + */ +#if IS_ENABLED(CONFIG_INTEL_MEI_ME) +#include <linux/pci.h> +#include "hw-me-regs.h" +static void mei_wd(struct mei_cl_device *cldev) +{ + struct pci_dev *pdev = to_pci_dev(cldev->dev.parent); + + if (pdev->device == MEI_DEV_ID_WPT_LP || + pdev->device == MEI_DEV_ID_SPT || + pdev->device == MEI_DEV_ID_SPT_H) + cldev->me_cl->props.protocol_version = 0x2; + + cldev->do_match = 1; +} +#else +static inline void mei_wd(struct mei_cl_device *cldev) {} +#endif /* CONFIG_INTEL_MEI_ME */ + +struct mei_nfc_cmd { + u8 command; + u8 status; + u16 req_id; + u32 reserved; + u16 data_size; + u8 sub_command; + u8 data[]; +} __packed; + +struct mei_nfc_reply { + u8 command; + u8 status; + u16 req_id; + u32 reserved; + u16 data_size; + u8 sub_command; + u8 reply_status; + u8 data[]; +} __packed; + +struct mei_nfc_if_version { + u8 radio_version_sw[3]; + u8 reserved[3]; + u8 radio_version_hw[3]; + u8 i2c_addr; + u8 fw_ivn; + u8 vendor_id; + u8 radio_type; +} __packed; + + +#define MEI_NFC_CMD_MAINTENANCE 0x00 +#define MEI_NFC_SUBCMD_IF_VERSION 0x01 + +/* Vendors */ +#define MEI_NFC_VENDOR_INSIDE 0x00 +#define MEI_NFC_VENDOR_NXP 0x01 + +/* Radio types */ +#define MEI_NFC_VENDOR_INSIDE_UREAD 0x00 +#define MEI_NFC_VENDOR_NXP_PN544 0x01 + +/** + * mei_nfc_if_version - get NFC interface version + * + * @cl: host client (nfc info) + * @ver: NFC interface version to be filled in + * + * Return: 0 on success; < 0 otherwise + */ +static int mei_nfc_if_version(struct mei_cl *cl, + struct mei_nfc_if_version *ver) +{ + struct mei_device *bus; + struct mei_nfc_cmd cmd = { + .command = MEI_NFC_CMD_MAINTENANCE, + .data_size = 1, + .sub_command = MEI_NFC_SUBCMD_IF_VERSION, + }; + struct mei_nfc_reply *reply = NULL; + size_t if_version_length; + int bytes_recv, ret; + + bus = cl->dev; + + WARN_ON(mutex_is_locked(&bus->device_lock)); + + ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), MEI_CL_IO_TX_BLOCKING); + if (ret < 0) { + dev_err(bus->dev, "Could not send IF version cmd ret = %d\n", ret); + return ret; + } + + /* to be sure on the stack we alloc memory */ + if_version_length = sizeof(*reply) + sizeof(*ver); + + reply = kzalloc(if_version_length, GFP_KERNEL); + if (!reply) + 
return -ENOMEM;
+
+	ret = 0;
+	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0, 0);
+	if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) {
+		dev_err(bus->dev, "Could not read IF version ret = %d\n", bytes_recv);
+		ret = -EIO;
+		goto err;
+	}
+
+	memcpy(ver, reply->data, sizeof(*ver));
+
+	dev_info(bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
+		ver->fw_ivn, ver->vendor_id, ver->radio_type);
+
+err:
+	kfree(reply);
+	return ret;
+}
+
+/**
+ * mei_nfc_radio_name - derive nfc radio name from the interface version
+ *
+ * @ver: NFC radio version
+ *
+ * Return: radio name string
+ */
+static const char *mei_nfc_radio_name(struct mei_nfc_if_version *ver)
+{
+
+	if (ver->vendor_id == MEI_NFC_VENDOR_INSIDE) {
+		if (ver->radio_type == MEI_NFC_VENDOR_INSIDE_UREAD)
+			return "microread";
+	}
+
+	if (ver->vendor_id == MEI_NFC_VENDOR_NXP) {
+		if (ver->radio_type == MEI_NFC_VENDOR_NXP_PN544)
+			return "pn544";
+	}
+
+	return NULL;
+}
+
+/**
+ * mei_nfc - The nfc fixup function. The function retrieves the nfc radio
+ *	name and sets it as a device attribute so we can load
+ *	the proper device driver for it
+ *
+ * @cldev: me client device (nfc)
+ */
+static void mei_nfc(struct mei_cl_device *cldev)
+{
+	struct mei_device *bus;
+	struct mei_cl *cl;
+	struct mei_me_client *me_cl = NULL;
+	struct mei_nfc_if_version ver;
+	const char *radio_name = NULL;
+	int ret;
+
+	bus = cldev->bus;
+
+	mutex_lock(&bus->device_lock);
+	/* we need to connect to INFO GUID */
+	cl = mei_cl_alloc_linked(bus);
+	if (IS_ERR(cl)) {
+		ret = PTR_ERR(cl);
+		cl = NULL;
+		dev_err(bus->dev, "nfc hook alloc failed %d\n", ret);
+		goto out;
+	}
+
+	me_cl = mei_me_cl_by_uuid(bus, &mei_nfc_info_guid);
+	if (!me_cl) {
+		ret = -ENOTTY;
+		dev_err(bus->dev, "Cannot find nfc info %d\n", ret);
+		goto out;
+	}
+
+	ret = mei_cl_connect(cl, me_cl, NULL);
+	if (ret < 0) {
+		dev_err(&cldev->dev, "Can't connect to the NFC INFO ME ret = %d\n",
+			ret);
+		goto out;
+	}
+
+	mutex_unlock(&bus->device_lock);
+
+	ret = mei_nfc_if_version(cl, &ver);
+	if (ret)
+		goto disconnect;
+
+	radio_name = mei_nfc_radio_name(&ver);
+
+	if (!radio_name) {
+		ret = -ENOENT;
+		dev_err(&cldev->dev, "Can't get the NFC interface version ret = %d\n",
+			ret);
+		goto disconnect;
+	}
+
+	dev_dbg(bus->dev, "nfc radio %s\n", radio_name);
+	strlcpy(cldev->name, radio_name, sizeof(cldev->name));
+
+disconnect:
+	mutex_lock(&bus->device_lock);
+	if (mei_cl_disconnect(cl) < 0)
+		dev_err(bus->dev, "Can't disconnect the NFC INFO ME\n");
+
+	mei_cl_flush_queues(cl, NULL);
+
+out:
+	mei_cl_unlink(cl);
+	mutex_unlock(&bus->device_lock);
+	mei_me_cl_put(me_cl);
+	kfree(cl);
+
+	if (ret)
+		cldev->do_match = 0;
+
+	dev_dbg(bus->dev, "end of fixup match = %d\n", cldev->do_match);
+}
+
+/**
+ * vt_support - enable on bus clients with vtag support
+ *
+ * @cldev: me clients device
+ */
+static void vt_support(struct mei_cl_device *cldev)
+{
+	if (cldev->me_cl->props.vt_supported == 1)
+		cldev->do_match = 1;
+}
+
+#define MEI_FIXUP(_uuid, _hook) { _uuid, _hook }
+
+static struct mei_fixup {
+
+	const uuid_le uuid;
+	void (*hook)(struct mei_cl_device *cldev);
+} mei_fixups[] = {
+	MEI_FIXUP(MEI_UUID_ANY, number_of_connections),
+	MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
+	MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
+	MEI_FIXUP(MEI_UUID_WD, mei_wd),
+	MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
+	MEI_FIXUP(MEI_UUID_HDCP, whitelist),
+	MEI_FIXUP(MEI_UUID_ANY, vt_support),
+};
+
+/**
+ * mei_cl_bus_dev_fixup - run fixup handlers
+ *
+ * @cldev: me client device
+ */
+void 
+ +/** + * mei_cl_bus_dev_fixup - run fixup handlers + * + * @cldev: me client device + */ +void mei_cl_bus_dev_fixup(struct mei_cl_device *cldev) +{ + struct mei_fixup *f; + const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); + size_t i; + + for (i = 0; i < ARRAY_SIZE(mei_fixups); i++) { + + f = &mei_fixups[i]; + if (uuid_le_cmp(f->uuid, MEI_UUID_ANY) == 0 || + uuid_le_cmp(f->uuid, *uuid) == 0) + f->hook(cldev); + } +}
+ diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c new file mode 100644 index 000000000..9cdaa7f3a --- /dev/null +++ b/drivers/misc/mei/bus.c @@ -0,0 +1,1257 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2019, Intel Corporation. All rights reserved. + * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/sched/signal.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/interrupt.h> +#include <linux/mei_cl_bus.h> + +#include "mei_dev.h" +#include "client.h" + +#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
+ +/** + * __mei_cl_send - internal client send (write) + * + * @cl: host client + * @buf: buffer to send + * @length: buffer length + * @mode: sending mode + * + * Return: written size in bytes or < 0 on error + */ +ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, + unsigned int mode) +{ + struct mei_device *bus; + struct mei_cl_cb *cb; + ssize_t rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + bus = cl->dev; + + mutex_lock(&bus->device_lock); + if (bus->dev_state != MEI_DEV_ENABLED) { + rets = -ENODEV; + goto out; + } + + if (!mei_cl_is_connected(cl)) { + rets = -ENODEV; + goto out; + } + + /* Check if we have an ME client device */ + if (!mei_me_cl_is_active(cl->me_cl)) { + rets = -ENOTTY; + goto out; + } + + if (length > mei_cl_mtu(cl)) { + rets = -EFBIG; + goto out; + } + + while (cl->tx_cb_queued >= bus->tx_queue_limit) { + mutex_unlock(&bus->device_lock); + rets = wait_event_interruptible(cl->tx_wait, + cl->writing_state == MEI_WRITE_COMPLETE || + (!mei_cl_is_connected(cl))); + mutex_lock(&bus->device_lock); + if (rets) { + if (signal_pending(current)) + rets = -EINTR; + goto out; + } + if (!mei_cl_is_connected(cl)) { + rets = -ENODEV; + goto out; + } + } + + cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL); + if (!cb) { + rets = -ENOMEM; + goto out; + } + + cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL); + cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING); + memcpy(cb->buf.data, buf, length); + + rets = mei_cl_write(cl, cb); + +out: + mutex_unlock(&bus->device_lock); + + return rets; +}
+ +/** + * __mei_cl_recv - internal client receive (read) + * + * @cl: host client + * @buf: buffer to receive + * @length: buffer length + * @mode: io mode + * @timeout: recv timeout, 0 for infinite timeout + * + * Return: read size in bytes or < 0 on error + */ +ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, + unsigned int mode, unsigned long timeout) +{ + struct mei_device *bus; + struct mei_cl_cb *cb; + size_t r_length; + ssize_t rets; + bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK); + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + bus = cl->dev; + + mutex_lock(&bus->device_lock); + if (bus->dev_state != MEI_DEV_ENABLED) { + rets = -ENODEV; + goto out; + } + + cb = mei_cl_read_cb(cl, NULL); + if (cb) + goto copy; + + rets = mei_cl_read_start(cl, length, NULL); + if (rets && rets != -EBUSY) + goto out; + + if (nonblock) { + rets = -EAGAIN; + goto out; + }
+ + /* wait on event only if there is no other waiter */ + /* synchronized under device mutex */ + if (!waitqueue_active(&cl->rx_wait)) { + + mutex_unlock(&bus->device_lock); + + if (timeout) { + rets = wait_event_interruptible_timeout + (cl->rx_wait, + mei_cl_read_cb(cl, NULL) || + (!mei_cl_is_connected(cl)), + msecs_to_jiffies(timeout)); + if (rets == 0) + return -ETIME; + if (rets < 0) { + if (signal_pending(current)) + return -EINTR; + return -ERESTARTSYS; + } + } else { + if (wait_event_interruptible + (cl->rx_wait, + mei_cl_read_cb(cl, NULL) || + (!mei_cl_is_connected(cl)))) { + if (signal_pending(current)) + return -EINTR; + return -ERESTARTSYS; + } + } + + mutex_lock(&bus->device_lock); + + if (!mei_cl_is_connected(cl)) { + rets = -ENODEV; + goto out; + } + } + + cb = mei_cl_read_cb(cl, NULL); + if (!cb) { + rets = 0; + goto out; + } + +copy: + if (cb->status) { + rets = cb->status; + goto free; + } + + r_length = min_t(size_t, length, cb->buf_idx); + memcpy(buf, cb->buf.data, r_length); + rets = r_length; + +free: + mei_cl_del_rd_completed(cl, cb); +out: + mutex_unlock(&bus->device_lock); + + return rets; +}
+ +/** + * mei_cldev_send - me device send (write) + * + * @cldev: me client device + * @buf: buffer to send + * @length: buffer length + * + * Return: written size in bytes or < 0 on error + */ +ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length) +{ + struct mei_cl *cl = cldev->cl; + + return __mei_cl_send(cl, buf, length, MEI_CL_IO_TX_BLOCKING); +} +EXPORT_SYMBOL_GPL(mei_cldev_send);
+ +/** + * mei_cldev_recv_nonblock - non block client receive (read) + * + * @cldev: me client device + * @buf: buffer to receive + * @length: buffer length + * + * Return: read size in bytes or < 0 on error + * -EAGAIN if function will block. + */ +ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, + size_t length) +{ + struct mei_cl *cl = cldev->cl; + + return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK, 0); +} +EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
+ +/** + * mei_cldev_recv - client receive (read) + * + * @cldev: me client device + * @buf: buffer to receive + * @length: buffer length + * + * Return: read size in bytes or < 0 on error + */ +ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length) +{ + struct mei_cl *cl = cldev->cl; + + return __mei_cl_recv(cl, buf, length, 0, 0); +} +EXPORT_SYMBOL_GPL(mei_cldev_recv);
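+ +/* + * Usage sketch (added commentary, not part of the original patch): a bus + * client driver typically runs a blocking command/response exchange over + * its cldev; the command byte and buffer size below are hypothetical and + * protocol specific: + * + * u8 cmd = 0x01, reply[16]; + * ssize_t ret; + * + * ret = mei_cldev_send(cldev, &cmd, sizeof(cmd)); + * if (ret < 0) + * return ret; + * ret = mei_cldev_recv(cldev, reply, sizeof(reply)); + */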
+ +/** + * mei_cl_bus_rx_work - dispatch rx event for a bus device + * + * @work: work + */ +static void mei_cl_bus_rx_work(struct work_struct *work) +{ + struct mei_cl_device *cldev; + struct mei_device *bus; + + cldev = container_of(work, struct mei_cl_device, rx_work); + + bus = cldev->bus; + + if (cldev->rx_cb) + cldev->rx_cb(cldev); + + mutex_lock(&bus->device_lock); + mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); + mutex_unlock(&bus->device_lock); +}
+ +/** + * mei_cl_bus_notif_work - dispatch FW notif event for a bus device + * + * @work: work + */ +static void mei_cl_bus_notif_work(struct work_struct *work) +{ + struct mei_cl_device *cldev; + + cldev = container_of(work, struct mei_cl_device, notif_work); + + if (cldev->notif_cb) + cldev->notif_cb(cldev); +}
+ +/** + * mei_cl_bus_notify_event - schedule notify cb on bus client + * + * @cl: host client + * + * Return: true if event was scheduled + * false if the client is not waiting for event + */ +bool mei_cl_bus_notify_event(struct mei_cl *cl) +{ + struct mei_cl_device *cldev = cl->cldev; + + if (!cldev || !cldev->notif_cb) + return false; + + if (!cl->notify_ev) + return false; + + schedule_work(&cldev->notif_work); + + cl->notify_ev = false; + + return true; +}
+ +/** + * mei_cl_bus_rx_event - schedule rx event + * + * @cl: host client + * + * Return: true if event was scheduled + * false if the client is not waiting for event + */ +bool mei_cl_bus_rx_event(struct mei_cl *cl) +{ + struct mei_cl_device *cldev = cl->cldev; + + if (!cldev || !cldev->rx_cb) + return false; + + schedule_work(&cldev->rx_work); + + return true; +}
+ +/** + * mei_cldev_register_rx_cb - register Rx event callback + * + * @cldev: me client device + * @rx_cb: callback function + * + * Return: 0 on success + * -EALREADY if a callback is already registered + * <0 on other errors + */ +int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb) +{ + struct mei_device *bus = cldev->bus; + int ret; + + if (!rx_cb) + return -EINVAL; + if (cldev->rx_cb) + return -EALREADY; + + cldev->rx_cb = rx_cb; + INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work); + + mutex_lock(&bus->device_lock); + ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); + mutex_unlock(&bus->device_lock); + if (ret && ret != -EBUSY) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
+ +/** + * mei_cldev_register_notif_cb - register FW notification event callback + * + * @cldev: me client device + * @notif_cb: callback function + * + * Return: 0 on success + * -EALREADY if a callback is already registered + * <0 on other errors + */ +int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, + mei_cldev_cb_t notif_cb) +{ + struct mei_device *bus = cldev->bus; + int ret; + + if (!notif_cb) + return -EINVAL; + + if (cldev->notif_cb) + return -EALREADY; + + cldev->notif_cb = notif_cb; + INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work); + + mutex_lock(&bus->device_lock); + ret = mei_cl_notify_request(cldev->cl, NULL, 1); + mutex_unlock(&bus->device_lock); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
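+ +/* + * Usage sketch (added commentary): a driver that prefers event driven + * reads registers a callback once, usually from probe; my_rx and the + * buffer size are made-up names: + * + * static void my_rx(struct mei_cl_device *cldev) + * { + * u8 buf[32]; + * + * mei_cldev_recv(cldev, buf, sizeof(buf)); + * } + * + * ret = mei_cldev_register_rx_cb(cldev, my_rx); + * + * The next read is rearmed by mei_cl_bus_rx_work() after the callback + * returns. + */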
+ +/** + * mei_cldev_get_drvdata - driver data getter + * + * @cldev: mei client device + * + * Return: driver private data + */ +void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev) +{ + return dev_get_drvdata(&cldev->dev); +} +EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);
+ +/** + * mei_cldev_set_drvdata - driver data setter + * + * @cldev: mei client device + * @data: data to store + */ +void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data) +{ + dev_set_drvdata(&cldev->dev, data); +} +EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
+ +/** + * mei_cldev_uuid - return uuid of the underlying me client + * + * @cldev: mei client device + * + * Return: me client uuid + */ +const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev) +{ + return mei_me_cl_uuid(cldev->me_cl); +} +EXPORT_SYMBOL_GPL(mei_cldev_uuid);
+ +/** + * mei_cldev_ver - return protocol version of the underlying me client + * + * @cldev: mei client device + * + * Return: me client protocol version + */ +u8 mei_cldev_ver(const struct mei_cl_device *cldev) +{ + return mei_me_cl_ver(cldev->me_cl); +} +EXPORT_SYMBOL_GPL(mei_cldev_ver);
+ +/** + * mei_cldev_enabled - check whether the device is enabled + * + * @cldev: mei client device + * + * Return: true if me client is initialized and connected + */ +bool mei_cldev_enabled(struct mei_cl_device *cldev) +{ + return mei_cl_is_connected(cldev->cl); +} +EXPORT_SYMBOL_GPL(mei_cldev_enabled);
+ +/** + * mei_cl_bus_module_get - acquire module of the underlying + * hw driver. + * + * @cldev: mei client device + * + * Return: true on success; false if the module was removed. + */ +static bool mei_cl_bus_module_get(struct mei_cl_device *cldev) +{ + return try_module_get(cldev->bus->dev->driver->owner); +}
+ +/** + * mei_cl_bus_module_put - release the underlying hw module. + * + * @cldev: mei client device + */ +static void mei_cl_bus_module_put(struct mei_cl_device *cldev) +{ + module_put(cldev->bus->dev->driver->owner); +}
+ +/** + * mei_cl_bus_vtag - get bus vtag entry wrapper + * The tag for bus client is always first. + * + * @cl: host client + * + * Return: bus vtag or NULL + */ +static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl) +{ + return list_first_entry_or_null(&cl->vtag_map, + struct mei_cl_vtag, list); +}
+ +/** + * mei_cl_bus_vtag_alloc - add bus client entry to vtag map + * + * @cldev: me client device + * + * Return: + * * 0 on success + * * -ENOMEM if memory allocation failed + */ +static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev) +{ + struct mei_cl *cl = cldev->cl; + struct mei_cl_vtag *cl_vtag; + + /* + * Bail out if the client does not support vtags + * or has already allocated one + */ + if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl)) + return 0; + + cl_vtag = mei_cl_vtag_alloc(NULL, 0); + if (IS_ERR(cl_vtag)) + return -ENOMEM; + + list_add_tail(&cl_vtag->list, &cl->vtag_map); + + return 0; +}
+ +/** + * mei_cl_bus_vtag_free - remove the bus entry from vtag map + * + * @cldev: me client device + */ +static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev) +{ + struct mei_cl *cl = cldev->cl; + struct mei_cl_vtag *cl_vtag; + + cl_vtag = mei_cl_bus_vtag(cl); + if (!cl_vtag) + return; + + list_del(&cl_vtag->list); + kfree(cl_vtag); +}
+ +/** + * mei_cldev_enable - enable me client device + * create connection with me client + * + * @cldev: me client device + * + * Return: 0 on success and < 0 on error + */ +int mei_cldev_enable(struct mei_cl_device *cldev) +{ + struct mei_device *bus = cldev->bus; + struct mei_cl *cl; + int ret; + + cl = cldev->cl; + + mutex_lock(&bus->device_lock); + if (cl->state == MEI_FILE_UNINITIALIZED) { + ret = mei_cl_link(cl); + if (ret) + goto out; + /* update pointers */ + cl->cldev = cldev; + } + + if (mei_cl_is_connected(cl)) { + ret = 0; + goto out; + } + + if (!mei_me_cl_is_active(cldev->me_cl)) { + dev_err(&cldev->dev, "me client is not active\n"); + ret = -ENOTTY; + goto out; + } + + ret = mei_cl_bus_vtag_alloc(cldev); + if (ret) + goto out; + + ret = mei_cl_connect(cl, cldev->me_cl, NULL); + if (ret < 0) { + dev_err(&cldev->dev, "cannot connect\n"); + mei_cl_bus_vtag_free(cldev); + } + +out: + mutex_unlock(&bus->device_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mei_cldev_enable);
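+ +/* + * Usage sketch (added commentary): the usual probe/remove pairing in a + * bus client driver, error handling trimmed; my_probe and my_remove are + * hypothetical names: + * + * static int my_probe(struct mei_cl_device *cldev, + * const struct mei_cl_device_id *id) + * { + * return mei_cldev_enable(cldev); + * } + * + * static int my_remove(struct mei_cl_device *cldev) + * { + * return mei_cldev_disable(cldev); + * } + */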
+ +/** + * mei_cldev_unregister_callbacks - internal wrapper for unregistering + * callbacks. + * + * @cldev: client device + */ +static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev) +{ + if (cldev->rx_cb) { + cancel_work_sync(&cldev->rx_work); + cldev->rx_cb = NULL; + } + + if (cldev->notif_cb) { + cancel_work_sync(&cldev->notif_work); + cldev->notif_cb = NULL; + } +}
+ +/** + * mei_cldev_disable - disable me client device + * disconnect from the me client + * + * @cldev: me client device + * + * Return: 0 on success and < 0 on error + */ +int mei_cldev_disable(struct mei_cl_device *cldev) +{ + struct mei_device *bus; + struct mei_cl *cl; + int err; + + if (!cldev) + return -ENODEV; + + cl = cldev->cl; + + bus = cldev->bus; + + mei_cldev_unregister_callbacks(cldev); + + mutex_lock(&bus->device_lock); + + mei_cl_bus_vtag_free(cldev); + + if (!mei_cl_is_connected(cl)) { + dev_dbg(bus->dev, "Already disconnected\n"); + err = 0; + goto out; + } + + err = mei_cl_disconnect(cl); + if (err < 0) + dev_err(bus->dev, "Could not disconnect from the ME client\n"); + +out: + /* Flush queues and remove any pending read */ + mei_cl_flush_queues(cl, NULL); + mei_cl_unlink(cl); + + mutex_unlock(&bus->device_lock); + return err; +} +EXPORT_SYMBOL_GPL(mei_cldev_disable);
+ +/** + * mei_cl_device_find - find matching entry in the driver id table + * + * @cldev: me client device + * @cldrv: me client driver + * + * Return: id on success; NULL if no id is matching + */ +static const +struct mei_cl_device_id *mei_cl_device_find(struct mei_cl_device *cldev, + struct mei_cl_driver *cldrv) +{ + const struct mei_cl_device_id *id; + const uuid_le *uuid; + u8 version; + bool match; + + uuid = mei_me_cl_uuid(cldev->me_cl); + version = mei_me_cl_ver(cldev->me_cl); + + id = cldrv->id_table; + while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) { + if (!uuid_le_cmp(*uuid, id->uuid)) { + match = true; + + if (cldev->name[0]) + if (strncmp(cldev->name, id->name, + sizeof(id->name))) + match = false; + + if (id->version != MEI_CL_VERSION_ANY) + if (id->version != version) + match = false; + if (match) + return id; + } + + id++; + } + + return NULL; +}
+ +/** + * mei_cl_device_match - device match function + * + * @dev: device + * @drv: driver + * + * Return: 1 if matching device was found, 0 otherwise + */ +static int mei_cl_device_match(struct device *dev, struct device_driver *drv) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + struct mei_cl_driver *cldrv = to_mei_cl_driver(drv); + const struct mei_cl_device_id *found_id; + + if (!cldev) + return 0; + + if (!cldev->do_match) + return 0; + + if (!cldrv || !cldrv->id_table) + return 0; + + found_id = mei_cl_device_find(cldev, cldrv); + if (found_id) + return 1; + + return 0; +}
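+ +/* + * Illustrative example (added commentary): the id table walked by + * mei_cl_device_find() is declared in the client driver; the uuid macro + * and table name below are made-up: + * + * static const struct mei_cl_device_id my_id_table[] = { + * { .name = "", .uuid = MY_CLIENT_UUID, + * .version = MEI_CL_VERSION_ANY }, + * { } + * }; + * MODULE_DEVICE_TABLE(mei, my_id_table); + */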
+ +/** + * mei_cl_device_probe - bus probe function + * + * @dev: device + * + * Return: 0 on success; < 0 otherwise + */ +static int mei_cl_device_probe(struct device *dev) +{ + struct mei_cl_device *cldev; + struct mei_cl_driver *cldrv; + const struct mei_cl_device_id *id; + int ret; + + cldev = to_mei_cl_device(dev); + cldrv = to_mei_cl_driver(dev->driver); + + if (!cldev) + return 0; + + if (!cldrv || !cldrv->probe) + return -ENODEV; + + id = mei_cl_device_find(cldev, cldrv); + if (!id) + return -ENODEV; + + if (!mei_cl_bus_module_get(cldev)) { + dev_err(&cldev->dev, "get hw module failed"); + return -ENODEV; + } + + ret = cldrv->probe(cldev, id); + if (ret) { + mei_cl_bus_module_put(cldev); + return ret; + } + + __module_get(THIS_MODULE); + return 0; +}
+ +/** + * mei_cl_device_remove - remove device from the bus + * + * @dev: device + * + * Return: 0 on success; < 0 otherwise + */ +static int mei_cl_device_remove(struct device *dev) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + struct mei_cl_driver *cldrv; + int ret = 0; + + if (!cldev || !dev->driver) + return 0; + + cldrv = to_mei_cl_driver(dev->driver); + if (cldrv->remove) + ret = cldrv->remove(cldev); + + mei_cldev_unregister_callbacks(cldev); + + mei_cl_bus_module_put(cldev); + module_put(THIS_MODULE); + + return ret; +}
+ +static ssize_t name_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + + return scnprintf(buf, PAGE_SIZE, "%s", cldev->name); +} +static DEVICE_ATTR_RO(name);
+ +static ssize_t uuid_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); + + return sprintf(buf, "%pUl", uuid); +} +static DEVICE_ATTR_RO(uuid);
+ +static ssize_t version_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + u8 version = mei_me_cl_ver(cldev->me_cl); + + return sprintf(buf, "%02X", version); +} +static DEVICE_ATTR_RO(version);
+ +static ssize_t modalias_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); + u8 version = mei_me_cl_ver(cldev->me_cl); + + return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:", + cldev->name, uuid, version); +} +static DEVICE_ATTR_RO(modalias);
+ +static ssize_t max_conn_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + u8 maxconn = mei_me_cl_max_conn(cldev->me_cl); + + return sprintf(buf, "%d", maxconn); +} +static DEVICE_ATTR_RO(max_conn);
+ +static ssize_t fixed_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + u8 fixed = mei_me_cl_fixed(cldev->me_cl); + + return sprintf(buf, "%d", fixed); +} +static DEVICE_ATTR_RO(fixed);
+ +static ssize_t vtag_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + bool vt = mei_me_cl_vt(cldev->me_cl); + + return sprintf(buf, "%d", vt); +} +static DEVICE_ATTR_RO(vtag);
+ +static ssize_t max_len_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + u32 maxlen = mei_me_cl_max_len(cldev->me_cl); + + return sprintf(buf, "%u", maxlen); +} +static DEVICE_ATTR_RO(max_len);
+ +static struct attribute *mei_cldev_attrs[] = { + &dev_attr_name.attr, + &dev_attr_uuid.attr, + &dev_attr_version.attr, + &dev_attr_modalias.attr, + &dev_attr_max_conn.attr, + &dev_attr_fixed.attr, + &dev_attr_vtag.attr, + &dev_attr_max_len.attr, + NULL, +}; +ATTRIBUTE_GROUPS(mei_cldev);
"MEI_CL_NAME=%s", cldev->name)) + return -ENOMEM; + + if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:", + cldev->name, uuid, version)) + return -ENOMEM; + + return 0; +} + +static struct bus_type mei_cl_bus_type = { + .name = "mei", + .dev_groups = mei_cldev_groups, + .match = mei_cl_device_match, + .probe = mei_cl_device_probe, + .remove = mei_cl_device_remove, + .uevent = mei_cl_device_uevent, +}; + +static struct mei_device *mei_dev_bus_get(struct mei_device *bus) +{ + if (bus) + get_device(bus->dev); + + return bus; +} + +static void mei_dev_bus_put(struct mei_device *bus) +{ + if (bus) + put_device(bus->dev); +} + +static void mei_cl_bus_dev_release(struct device *dev) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + + if (!cldev) + return; + + mei_me_cl_put(cldev->me_cl); + mei_dev_bus_put(cldev->bus); + mei_cl_unlink(cldev->cl); + kfree(cldev->cl); + kfree(cldev); +} + +static const struct device_type mei_cl_device_type = { + .release = mei_cl_bus_dev_release, +}; + +/** + * mei_cl_bus_set_name - set device name for me client device + * <controller>-<client device> + * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb + * + * @cldev: me client device + */ +static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev) +{ + dev_set_name(&cldev->dev, "%s-%pUl", + dev_name(cldev->bus->dev), + mei_me_cl_uuid(cldev->me_cl)); +} + +/** + * mei_cl_bus_dev_alloc - initialize and allocate mei client device + * + * @bus: mei device + * @me_cl: me client + * + * Return: allocated device structur or NULL on allocation failure + */ +static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus, + struct mei_me_client *me_cl) +{ + struct mei_cl_device *cldev; + struct mei_cl *cl; + + cldev = kzalloc(sizeof(*cldev), GFP_KERNEL); + if (!cldev) + return NULL; + + cl = mei_cl_allocate(bus); + if (!cl) { + kfree(cldev); + return NULL; + } + + device_initialize(&cldev->dev); + cldev->dev.parent = bus->dev; + cldev->dev.bus = &mei_cl_bus_type; + cldev->dev.type = &mei_cl_device_type; + cldev->bus = mei_dev_bus_get(bus); + cldev->me_cl = mei_me_cl_get(me_cl); + cldev->cl = cl; + mei_cl_bus_set_name(cldev); + cldev->is_added = 0; + INIT_LIST_HEAD(&cldev->bus_list); + + return cldev; +} + +/** + * mei_cl_dev_setup - setup me client device + * run fix up routines and set the device name + * + * @bus: mei device + * @cldev: me client device + * + * Return: true if the device is eligible for enumeration + */ +static bool mei_cl_bus_dev_setup(struct mei_device *bus, + struct mei_cl_device *cldev) +{ + cldev->do_match = 1; + mei_cl_bus_dev_fixup(cldev); + + /* the device name can change during fix up */ + if (cldev->do_match) + mei_cl_bus_set_name(cldev); + + return cldev->do_match == 1; +} + +/** + * mei_cl_bus_dev_add - add me client devices + * + * @cldev: me client device + * + * Return: 0 on success; < 0 on failre + */ +static int mei_cl_bus_dev_add(struct mei_cl_device *cldev) +{ + int ret; + + dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n", + mei_me_cl_uuid(cldev->me_cl), + mei_me_cl_ver(cldev->me_cl)); + ret = device_add(&cldev->dev); + if (!ret) + cldev->is_added = 1; + + return ret; +} + +/** + * mei_cl_bus_dev_stop - stop the driver + * + * @cldev: me client device + */ +static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev) +{ + if (cldev->is_added) + device_release_driver(&cldev->dev); +} + +/** + * mei_cl_bus_dev_destroy - destroy me client devices object + * + * @cldev: me client device + * + * Locking: called under "dev->cl_bus_lock" lock + */ +static 
+ +/** + * mei_cl_bus_dev_destroy - destroy me client device object + * + * @cldev: me client device + * + * Locking: called under "dev->cl_bus_lock" lock + */ +static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev) +{ + + WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock)); + + if (!cldev->is_added) + return; + + device_del(&cldev->dev); + + list_del_init(&cldev->bus_list); + + cldev->is_added = 0; + put_device(&cldev->dev); +}
+ +/** + * mei_cl_bus_remove_device - remove a device from the bus + * + * @cldev: me client device + */ +static void mei_cl_bus_remove_device(struct mei_cl_device *cldev) +{ + mei_cl_bus_dev_stop(cldev); + mei_cl_bus_dev_destroy(cldev); +}
+ +/** + * mei_cl_bus_remove_devices - remove all devices from the bus + * + * @bus: mei device + */ +void mei_cl_bus_remove_devices(struct mei_device *bus) +{ + struct mei_cl_device *cldev, *next; + + mutex_lock(&bus->cl_bus_lock); + list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list) + mei_cl_bus_remove_device(cldev); + mutex_unlock(&bus->cl_bus_lock); +}
+ + +/** + * mei_cl_bus_dev_init - allocate and initialize a mei client device + * based on the me client + * + * @bus: mei device + * @me_cl: me client + * + * Locking: called under "dev->cl_bus_lock" lock + */ +static void mei_cl_bus_dev_init(struct mei_device *bus, + struct mei_me_client *me_cl) +{ + struct mei_cl_device *cldev; + + WARN_ON(!mutex_is_locked(&bus->cl_bus_lock)); + + dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl)); + + if (me_cl->bus_added) + return; + + cldev = mei_cl_bus_dev_alloc(bus, me_cl); + if (!cldev) + return; + + me_cl->bus_added = true; + list_add_tail(&cldev->bus_list, &bus->device_list); + +}
+ +/** + * mei_cl_bus_rescan - scan me clients list and create + * devices for eligible clients + * + * @bus: mei device + */ +static void mei_cl_bus_rescan(struct mei_device *bus) +{ + struct mei_cl_device *cldev, *n; + struct mei_me_client *me_cl; + + mutex_lock(&bus->cl_bus_lock); + + down_read(&bus->me_clients_rwsem); + list_for_each_entry(me_cl, &bus->me_clients, list) + mei_cl_bus_dev_init(bus, me_cl); + up_read(&bus->me_clients_rwsem); + + list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) { + + if (!mei_me_cl_is_active(cldev->me_cl)) { + mei_cl_bus_remove_device(cldev); + continue; + } + + if (cldev->is_added) + continue; + + if (mei_cl_bus_dev_setup(bus, cldev)) + mei_cl_bus_dev_add(cldev); + else { + list_del_init(&cldev->bus_list); + put_device(&cldev->dev); + } + } + mutex_unlock(&bus->cl_bus_lock); + + dev_dbg(bus->dev, "rescan end"); +}
+ +void mei_cl_bus_rescan_work(struct work_struct *work) +{ + struct mei_device *bus = + container_of(work, struct mei_device, bus_rescan_work); + + mei_cl_bus_rescan(bus); +}
+ +int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, + struct module *owner) +{ + int err; + + cldrv->driver.name = cldrv->name; + cldrv->driver.owner = owner; + cldrv->driver.bus = &mei_cl_bus_type; + + err = driver_register(&cldrv->driver); + if (err) + return err; + + pr_debug("mei: driver [%s] registered\n", cldrv->driver.name); + + return 0; +} +EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);
+ +void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv) +{ + driver_unregister(&cldrv->driver); + + pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name); +} +EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
+ + +int __init mei_cl_bus_init(void) +{ + return bus_register(&mei_cl_bus_type); +}
+ +void __exit mei_cl_bus_exit(void) +{ + bus_unregister(&mei_cl_bus_type); +}
 diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c new file mode 100644 index 000000000..aa87542f1 --- /dev/null +++ b/drivers/misc/mei/client.c
@@ -0,0 +1,2139 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. + * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#include <linux/sched/signal.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/pm_runtime.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +/** + * mei_me_cl_init - initialize me client + * + * @me_cl: me client + */ +void mei_me_cl_init(struct mei_me_client *me_cl) +{ + INIT_LIST_HEAD(&me_cl->list); + kref_init(&me_cl->refcnt); +} + +/** + * mei_me_cl_get - increases me client refcount + * + * @me_cl: me client + * + * Locking: called under "dev->device_lock" lock + * + * Return: me client or NULL + */ +struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl) +{ + if (me_cl && kref_get_unless_zero(&me_cl->refcnt)) + return me_cl; + + return NULL; +} + +/** + * mei_me_cl_release - free me client + * + * Locking: called under "dev->device_lock" lock + * + * @ref: me_client refcount + */ +static void mei_me_cl_release(struct kref *ref) +{ + struct mei_me_client *me_cl = + container_of(ref, struct mei_me_client, refcnt); + + kfree(me_cl); +} + +/** + * mei_me_cl_put - decrease me client refcount and free client if necessary + * + * Locking: called under "dev->device_lock" lock + * + * @me_cl: me client + */ +void mei_me_cl_put(struct mei_me_client *me_cl) +{ + if (me_cl) + kref_put(&me_cl->refcnt, mei_me_cl_release); +} + +/** + * __mei_me_cl_del - delete me client from the list and decrease + * reference counter + * + * @dev: mei device + * @me_cl: me client + * + * Locking: dev->me_clients_rwsem + */ +static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl) +{ + if (!me_cl) + return; + + list_del_init(&me_cl->list); + mei_me_cl_put(me_cl); +} + +/** + * mei_me_cl_del - delete me client from the list and decrease + * reference counter + * + * @dev: mei device + * @me_cl: me client + */ +void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl) +{ + down_write(&dev->me_clients_rwsem); + __mei_me_cl_del(dev, me_cl); + up_write(&dev->me_clients_rwsem); +} + +/** + * mei_me_cl_add - add me client to the list + * + * @dev: mei device + * @me_cl: me client + */ +void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl) +{ + down_write(&dev->me_clients_rwsem); + list_add(&me_cl->list, &dev->me_clients); + up_write(&dev->me_clients_rwsem); +} + +/** + * __mei_me_cl_by_uuid - locate me client by uuid + * increases ref count + * + * @dev: mei device + * @uuid: me client uuid + * + * Return: me client or NULL if not found + * + * Locking: dev->me_clients_rwsem + */ +static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev, + const uuid_le *uuid) +{ + struct mei_me_client *me_cl; + const uuid_le *pn; + + WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem)); + + list_for_each_entry(me_cl, &dev->me_clients, list) { + pn = &me_cl->props.protocol_name; + if (uuid_le_cmp(*uuid, *pn) == 0) + return mei_me_cl_get(me_cl); + } + + return NULL; +} + +/** + * mei_me_cl_by_uuid - locate me client by uuid + * increases ref count + * + * @dev: mei device + * @uuid: me client uuid + * + * Return: me client or NULL if not found + * + * Locking: dev->me_clients_rwsem + */ +struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev, + const uuid_le *uuid) +{ + struct mei_me_client *me_cl; + + down_read(&dev->me_clients_rwsem); + me_cl = __mei_me_cl_by_uuid(dev, 
uuid); + up_read(&dev->me_clients_rwsem); + + return me_cl; +} + +/** + * mei_me_cl_by_id - locate me client by client id + * increases ref count + * + * @dev: the device structure + * @client_id: me client id + * + * Return: me client or NULL if not found + * + * Locking: dev->me_clients_rwsem + */ +struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id) +{ + + struct mei_me_client *__me_cl, *me_cl = NULL; + + down_read(&dev->me_clients_rwsem); + list_for_each_entry(__me_cl, &dev->me_clients, list) { + if (__me_cl->client_id == client_id) { + me_cl = mei_me_cl_get(__me_cl); + break; + } + } + up_read(&dev->me_clients_rwsem); + + return me_cl; +} + +/** + * __mei_me_cl_by_uuid_id - locate me client by client id and uuid + * increases ref count + * + * @dev: the device structure + * @uuid: me client uuid + * @client_id: me client id + * + * Return: me client or null if not found + * + * Locking: dev->me_clients_rwsem + */ +static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev, + const uuid_le *uuid, u8 client_id) +{ + struct mei_me_client *me_cl; + const uuid_le *pn; + + WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem)); + + list_for_each_entry(me_cl, &dev->me_clients, list) { + pn = &me_cl->props.protocol_name; + if (uuid_le_cmp(*uuid, *pn) == 0 && + me_cl->client_id == client_id) + return mei_me_cl_get(me_cl); + } + + return NULL; +} + + +/** + * mei_me_cl_by_uuid_id - locate me client by client id and uuid + * increases ref count + * + * @dev: the device structure + * @uuid: me client uuid + * @client_id: me client id + * + * Return: me client or null if not found + */ +struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev, + const uuid_le *uuid, u8 client_id) +{ + struct mei_me_client *me_cl; + + down_read(&dev->me_clients_rwsem); + me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id); + up_read(&dev->me_clients_rwsem); + + return me_cl; +} + +/** + * mei_me_cl_rm_by_uuid - remove all me clients matching uuid + * + * @dev: the device structure + * @uuid: me client uuid + * + * Locking: called under "dev->device_lock" lock + */ +void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid) +{ + struct mei_me_client *me_cl; + + dev_dbg(dev->dev, "remove %pUl\n", uuid); + + down_write(&dev->me_clients_rwsem); + me_cl = __mei_me_cl_by_uuid(dev, uuid); + __mei_me_cl_del(dev, me_cl); + mei_me_cl_put(me_cl); + up_write(&dev->me_clients_rwsem); +} + +/** + * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id + * + * @dev: the device structure + * @uuid: me client uuid + * @id: me client id + * + * Locking: called under "dev->device_lock" lock + */ +void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id) +{ + struct mei_me_client *me_cl; + + dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id); + + down_write(&dev->me_clients_rwsem); + me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id); + __mei_me_cl_del(dev, me_cl); + mei_me_cl_put(me_cl); + up_write(&dev->me_clients_rwsem); +} + +/** + * mei_me_cl_rm_all - remove all me clients + * + * @dev: the device structure + * + * Locking: called under "dev->device_lock" lock + */ +void mei_me_cl_rm_all(struct mei_device *dev) +{ + struct mei_me_client *me_cl, *next; + + down_write(&dev->me_clients_rwsem); + list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) + __mei_me_cl_del(dev, me_cl); + up_write(&dev->me_clients_rwsem); +} + +/** + * mei_io_cb_free - free mei_cb_private related memory + * + * @cb: mei callback struct + */ +void 
mei_io_cb_free(struct mei_cl_cb *cb) +{ + if (cb == NULL) + return; + + list_del(&cb->list); + kfree(cb->buf.data); + kfree(cb); +}
+ +/** + * mei_tx_cb_enqueue - queue tx callback + * + * Locking: called under "dev->device_lock" lock + * + * @cb: mei callback struct + * @head: an instance of list to queue on + */ +static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb, + struct list_head *head) +{ + list_add_tail(&cb->list, head); + cb->cl->tx_cb_queued++; +}
+ +/** + * mei_tx_cb_dequeue - dequeue tx callback + * + * Locking: called under "dev->device_lock" lock + * + * @cb: mei callback struct to dequeue and free + */ +static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb) +{ + if (!WARN_ON(cb->cl->tx_cb_queued == 0)) + cb->cl->tx_cb_queued--; + + mei_io_cb_free(cb); +}
+ +/** + * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp + * + * Locking: called under "dev->device_lock" lock + * + * @cl: mei client + * @fp: pointer to file structure + */ +static void mei_cl_set_read_by_fp(const struct mei_cl *cl, + const struct file *fp) +{ + struct mei_cl_vtag *cl_vtag; + + list_for_each_entry(cl_vtag, &cl->vtag_map, list) { + if (cl_vtag->fp == fp) { + cl_vtag->pending_read = true; + return; + } + } +}
+ +/** + * mei_io_cb_init - allocate and initialize io callback + * + * @cl: mei client + * @type: operation type + * @fp: pointer to file structure + * + * Return: mei_cl_cb pointer or NULL + */ +static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, + enum mei_cb_file_ops type, + const struct file *fp) +{ + struct mei_cl_cb *cb; + + cb = kzalloc(sizeof(*cb), GFP_KERNEL); + if (!cb) + return NULL; + + INIT_LIST_HEAD(&cb->list); + cb->fp = fp; + cb->cl = cl; + cb->buf_idx = 0; + cb->fop_type = type; + cb->vtag = 0; + + return cb; +}
+ +/** + * mei_io_list_flush_cl - removes cbs belonging to the cl.
+ * + * @head: an instance of our list structure + * @cl: host client + */ +static void mei_io_list_flush_cl(struct list_head *head, + const struct mei_cl *cl) +{ + struct mei_cl_cb *cb, *next; + + list_for_each_entry_safe(cb, next, head, list) { + if (cl == cb->cl) { + list_del_init(&cb->list); + if (cb->fop_type == MEI_FOP_READ) + mei_io_cb_free(cb); + } + } +} + +/** + * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them + * + * @head: An instance of our list structure + * @cl: host client + * @fp: file pointer (matching cb file object), may be NULL + */ +static void mei_io_tx_list_free_cl(struct list_head *head, + const struct mei_cl *cl, + const struct file *fp) +{ + struct mei_cl_cb *cb, *next; + + list_for_each_entry_safe(cb, next, head, list) { + if (cl == cb->cl && (!fp || fp == cb->fp)) + mei_tx_cb_dequeue(cb); + } +} + +/** + * mei_io_list_free_fp - free cb from a list that matches file pointer + * + * @head: io list + * @fp: file pointer (matching cb file object), may be NULL + */ +static void mei_io_list_free_fp(struct list_head *head, const struct file *fp) +{ + struct mei_cl_cb *cb, *next; + + list_for_each_entry_safe(cb, next, head, list) + if (!fp || fp == cb->fp) + mei_io_cb_free(cb); +} + +/** + * mei_cl_free_pending - free pending cb + * + * @cl: host client + */ +static void mei_cl_free_pending(struct mei_cl *cl) +{ + struct mei_cl_cb *cb; + + cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); + mei_io_cb_free(cb); +} + +/** + * mei_cl_alloc_cb - a convenient wrapper for allocating read cb + * + * @cl: host client + * @length: size of the buffer + * @fop_type: operation type + * @fp: associated file pointer (might be NULL) + * + * Return: cb on success and NULL on failure + */ +struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, + enum mei_cb_file_ops fop_type, + const struct file *fp) +{ + struct mei_cl_cb *cb; + + cb = mei_io_cb_init(cl, fop_type, fp); + if (!cb) + return NULL; + + if (length == 0) + return cb; + + cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL); + if (!cb->buf.data) { + mei_io_cb_free(cb); + return NULL; + } + cb->buf.size = length; + + return cb; +} + +/** + * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating + * and enqueuing of the control commands cb + * + * @cl: host client + * @length: size of the buffer + * @fop_type: operation type + * @fp: associated file pointer (might be NULL) + * + * Return: cb on success and NULL on failure + * Locking: called under "dev->device_lock" lock + */ +struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length, + enum mei_cb_file_ops fop_type, + const struct file *fp) +{ + struct mei_cl_cb *cb; + + /* for RX always allocate at least client's mtu */ + if (length) + length = max_t(size_t, length, mei_cl_mtu(cl)); + + cb = mei_cl_alloc_cb(cl, length, fop_type, fp); + if (!cb) + return NULL; + + list_add_tail(&cb->list, &cl->dev->ctrl_wr_list); + return cb; +} + +/** + * mei_cl_read_cb - find this cl's callback in the read list + * for a specific file + * + * @cl: host client + * @fp: file pointer (matching cb file object), may be NULL + * + * Return: cb on success, NULL if cb is not found + */ +struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp) +{ + struct mei_cl_cb *cb; + struct mei_cl_cb *ret_cb = NULL; + + spin_lock(&cl->rd_completed_lock); + list_for_each_entry(cb, &cl->rd_completed, list) + if (!fp || fp == cb->fp) { + ret_cb = cb; + break; + } + 
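+ /* ret_cb stays NULL when no completed read matches @fp */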
spin_unlock(&cl->rd_completed_lock); + return ret_cb; +} + +/** + * mei_cl_flush_queues - flushes queue lists belonging to cl. + * + * @cl: host client + * @fp: file pointer (matching cb file object), may be NULL + * + * Return: 0 on success, -EINVAL if cl or cl->dev is NULL. + */ +int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp) +{ + struct mei_device *dev; + + if (WARN_ON(!cl || !cl->dev)) + return -EINVAL; + + dev = cl->dev; + + cl_dbg(dev, cl, "remove list entry belonging to cl\n"); + mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp); + mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp); + /* free pending and control cb only in final flush */ + if (!fp) { + mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl); + mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl); + mei_cl_free_pending(cl); + } + spin_lock(&cl->rd_completed_lock); + mei_io_list_free_fp(&cl->rd_completed, fp); + spin_unlock(&cl->rd_completed_lock); + + return 0; +} + +/** + * mei_cl_init - initializes cl. + * + * @cl: host client to be initialized + * @dev: mei device + */ +static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev) +{ + memset(cl, 0, sizeof(*cl)); + init_waitqueue_head(&cl->wait); + init_waitqueue_head(&cl->rx_wait); + init_waitqueue_head(&cl->tx_wait); + init_waitqueue_head(&cl->ev_wait); + INIT_LIST_HEAD(&cl->vtag_map); + spin_lock_init(&cl->rd_completed_lock); + INIT_LIST_HEAD(&cl->rd_completed); + INIT_LIST_HEAD(&cl->rd_pending); + INIT_LIST_HEAD(&cl->link); + cl->writing_state = MEI_IDLE; + cl->state = MEI_FILE_UNINITIALIZED; + cl->dev = dev; +} + +/** + * mei_cl_allocate - allocates cl structure and sets it up. + * + * @dev: mei device + * Return: The allocated file or NULL on failure + */ +struct mei_cl *mei_cl_allocate(struct mei_device *dev) +{ + struct mei_cl *cl; + + cl = kmalloc(sizeof(*cl), GFP_KERNEL); + if (!cl) + return NULL; + + mei_cl_init(cl, dev); + + return cl; +} + +/** + * mei_cl_link - allocate host id in the host map + * + * @cl: host client + * + * Return: 0 on success + * -EINVAL on incorrect values + * -EMFILE if open count exceeded. 
+ */ +int mei_cl_link(struct mei_cl *cl) +{ + struct mei_device *dev; + int id; + + if (WARN_ON(!cl || !cl->dev)) + return -EINVAL; + + dev = cl->dev; + + id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX); + if (id >= MEI_CLIENTS_MAX) { + dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX); + return -EMFILE; + } + + if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) { + dev_err(dev->dev, "open_handle_count exceeded %d", + MEI_MAX_OPEN_HANDLE_COUNT); + return -EMFILE; + } + + dev->open_handle_count++; + + cl->host_client_id = id; + list_add_tail(&cl->link, &dev->file_list); + + set_bit(id, dev->host_clients_map); + + cl->state = MEI_FILE_INITIALIZING; + + cl_dbg(dev, cl, "link cl\n"); + return 0; +} + +/** + * mei_cl_unlink - remove host client from the list + * + * @cl: host client + * + * Return: always 0 + */ +int mei_cl_unlink(struct mei_cl *cl) +{ + struct mei_device *dev; + + /* don't shout on error exit path */ + if (!cl) + return 0; + + if (WARN_ON(!cl->dev)) + return 0; + + dev = cl->dev; + + cl_dbg(dev, cl, "unlink client"); + + if (dev->open_handle_count > 0) + dev->open_handle_count--; + + /* never clear the 0 bit */ + if (cl->host_client_id) + clear_bit(cl->host_client_id, dev->host_clients_map); + + list_del_init(&cl->link); + + cl->state = MEI_FILE_UNINITIALIZED; + cl->writing_state = MEI_IDLE; + + WARN_ON(!list_empty(&cl->rd_completed) || + !list_empty(&cl->rd_pending) || + !list_empty(&cl->link)); + + return 0; +} + +void mei_host_client_init(struct mei_device *dev) +{ + mei_set_devstate(dev, MEI_DEV_ENABLED); + dev->reset_count = 0; + + schedule_work(&dev->bus_rescan_work); + + pm_runtime_mark_last_busy(dev->dev); + dev_dbg(dev->dev, "rpm: autosuspend\n"); + pm_request_autosuspend(dev->dev); +} + +/** + * mei_hbuf_acquire - try to acquire host buffer + * + * @dev: the device structure + * Return: true if host buffer was acquired + */ +bool mei_hbuf_acquire(struct mei_device *dev) +{ + if (mei_pg_state(dev) == MEI_PG_ON || + mei_pg_in_transition(dev)) { + dev_dbg(dev->dev, "device is in pg\n"); + return false; + } + + if (!dev->hbuf_is_ready) { + dev_dbg(dev->dev, "hbuf is not ready\n"); + return false; + } + + dev->hbuf_is_ready = false; + + return true; +} + +/** + * mei_cl_wake_all - wake up readers, writers and event waiters so + * they can be interrupted + * + * @cl: host client + */ +static void mei_cl_wake_all(struct mei_cl *cl) +{ + struct mei_device *dev = cl->dev; + + /* synchronized under device mutex */ + if (waitqueue_active(&cl->rx_wait)) { + cl_dbg(dev, cl, "Waking up reading client!\n"); + wake_up_interruptible(&cl->rx_wait); + } + /* synchronized under device mutex */ + if (waitqueue_active(&cl->tx_wait)) { + cl_dbg(dev, cl, "Waking up writing client!\n"); + wake_up_interruptible(&cl->tx_wait); + } + /* synchronized under device mutex */ + if (waitqueue_active(&cl->ev_wait)) { + cl_dbg(dev, cl, "Waking up waiting for event clients!\n"); + wake_up_interruptible(&cl->ev_wait); + } + /* synchronized under device mutex */ + if (waitqueue_active(&cl->wait)) { + cl_dbg(dev, cl, "Waking up ctrl write clients!\n"); + wake_up(&cl->wait); + } +} + +/** + * mei_cl_set_disconnected - set disconnected state and clear + * associated states and resources + * + * @cl: host client + */ +static void mei_cl_set_disconnected(struct mei_cl *cl) +{ + struct mei_device *dev = cl->dev; + + if (cl->state == MEI_FILE_DISCONNECTED || + cl->state <= MEI_FILE_INITIALIZING) + return; + + cl->state = MEI_FILE_DISCONNECTED; + mei_io_tx_list_free_cl(&dev->write_list, 
cl, NULL); + mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL); + mei_io_list_flush_cl(&dev->ctrl_rd_list, cl); + mei_io_list_flush_cl(&dev->ctrl_wr_list, cl); + mei_cl_wake_all(cl); + cl->rx_flow_ctrl_creds = 0; + cl->tx_flow_ctrl_creds = 0; + cl->timer_count = 0; + + if (!cl->me_cl) + return; + + if (!WARN_ON(cl->me_cl->connect_count == 0)) + cl->me_cl->connect_count--; + + if (cl->me_cl->connect_count == 0) + cl->me_cl->tx_flow_ctrl_creds = 0; + + mei_me_cl_put(cl->me_cl); + cl->me_cl = NULL; +} + +static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl) +{ + if (!mei_me_cl_get(me_cl)) + return -ENOENT; + + /* only one connection is allowed for fixed address clients */ + if (me_cl->props.fixed_address) { + if (me_cl->connect_count) { + mei_me_cl_put(me_cl); + return -EBUSY; + } + } + + cl->me_cl = me_cl; + cl->state = MEI_FILE_CONNECTING; + cl->me_cl->connect_count++; + + return 0; +} + +/* + * mei_cl_send_disconnect - send disconnect request + * + * @cl: host client + * @cb: callback block + * + * Return: 0, OK; otherwise, error. + */ +static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb) +{ + struct mei_device *dev; + int ret; + + dev = cl->dev; + + ret = mei_hbm_cl_disconnect_req(dev, cl); + cl->status = ret; + if (ret) { + cl->state = MEI_FILE_DISCONNECT_REPLY; + return ret; + } + + list_move_tail(&cb->list, &dev->ctrl_rd_list); + cl->timer_count = MEI_CONNECT_TIMEOUT; + mei_schedule_stall_timer(dev); + + return 0; +} + +/** + * mei_cl_irq_disconnect - processes close related operation from + * interrupt thread context - send disconnect request + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * Return: 0, OK; otherwise, error. + */ +int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, + struct list_head *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + int ret; + + msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request)); + slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; + + if ((u32)slots < msg_slots) + return -EMSGSIZE; + + ret = mei_cl_send_disconnect(cl, cb); + if (ret) + list_move_tail(&cb->list, cmpl_list); + + return ret; +} + +/** + * __mei_cl_disconnect - disconnect host client from the me one + * internal function runtime pm has to be already acquired + * + * @cl: host client + * + * Return: 0 on success, <0 on failure. 
+ */ +static int __mei_cl_disconnect(struct mei_cl *cl) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + int rets; + + dev = cl->dev; + + cl->state = MEI_FILE_DISCONNECTING; + + cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL); + if (!cb) { + rets = -ENOMEM; + goto out; + } + + if (mei_hbuf_acquire(dev)) { + rets = mei_cl_send_disconnect(cl, cb); + if (rets) { + cl_err(dev, cl, "failed to disconnect.\n"); + goto out; + } + } + + mutex_unlock(&dev->device_lock); + wait_event_timeout(cl->wait, + cl->state == MEI_FILE_DISCONNECT_REPLY || + cl->state == MEI_FILE_DISCONNECTED, + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + mutex_lock(&dev->device_lock); + + rets = cl->status; + if (cl->state != MEI_FILE_DISCONNECT_REPLY && + cl->state != MEI_FILE_DISCONNECTED) { + cl_dbg(dev, cl, "timeout on disconnect from FW client.\n"); + rets = -ETIME; + } + +out: + /* we disconnect also on error */ + mei_cl_set_disconnected(cl); + if (!rets) + cl_dbg(dev, cl, "successfully disconnected from FW client.\n"); + + mei_io_cb_free(cb); + return rets; +} + +/** + * mei_cl_disconnect - disconnect host client from the me one + * + * @cl: host client + * + * Locking: called under "dev->device_lock" lock + * + * Return: 0 on success, <0 on failure. + */ +int mei_cl_disconnect(struct mei_cl *cl) +{ + struct mei_device *dev; + int rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + cl_dbg(dev, cl, "disconnecting"); + + if (!mei_cl_is_connected(cl)) + return 0; + + if (mei_cl_is_fixed_address(cl)) { + mei_cl_set_disconnected(cl); + return 0; + } + + if (dev->dev_state == MEI_DEV_POWER_DOWN) { + cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n"); + mei_cl_set_disconnected(cl); + return 0; + } + + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + + rets = __mei_cl_disconnect(cl); + + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + return rets; +} + + +/** + * mei_cl_is_other_connecting - checks if other + * client with the same me client id is connecting + * + * @cl: private data of the file object + * + * Return: true if other client is connected, false - otherwise. + */ +static bool mei_cl_is_other_connecting(struct mei_cl *cl) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + + dev = cl->dev; + + list_for_each_entry(cb, &dev->ctrl_rd_list, list) { + if (cb->fop_type == MEI_FOP_CONNECT && + mei_cl_me_id(cl) == mei_cl_me_id(cb->cl)) + return true; + } + + return false; +} + +/** + * mei_cl_send_connect - send connect request + * + * @cl: host client + * @cb: callback block + * + * Return: 0, OK; otherwise, error. + */ +static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb) +{ + struct mei_device *dev; + int ret; + + dev = cl->dev; + + ret = mei_hbm_cl_connect_req(dev, cl); + cl->status = ret; + if (ret) { + cl->state = MEI_FILE_DISCONNECT_REPLY; + return ret; + } + + list_move_tail(&cb->list, &dev->ctrl_rd_list); + cl->timer_count = MEI_CONNECT_TIMEOUT; + mei_schedule_stall_timer(dev); + return 0; +} + +/** + * mei_cl_irq_connect - send connect request in irq_thread context + * + * @cl: host client + * @cb: callback block + * @cmpl_list: complete list + * + * Return: 0, OK; otherwise, error. 
+ */ +int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, + struct list_head *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + int rets; + + if (mei_cl_is_other_connecting(cl)) + return 0; + + msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request)); + slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; + + if ((u32)slots < msg_slots) + return -EMSGSIZE; + + rets = mei_cl_send_connect(cl, cb); + if (rets) + list_move_tail(&cb->list, cmpl_list); + + return rets; +}
+ +/** + * mei_cl_connect - connect host client to the me one + * + * @cl: host client + * @me_cl: me client + * @fp: pointer to file structure + * + * Locking: called under "dev->device_lock" lock + * + * Return: 0 on success, <0 on failure. + */ +int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, + const struct file *fp) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + int rets; + + if (WARN_ON(!cl || !cl->dev || !me_cl)) + return -ENODEV; + + dev = cl->dev; + + rets = mei_cl_set_connecting(cl, me_cl); + if (rets) + goto nortpm; + + if (mei_cl_is_fixed_address(cl)) { + cl->state = MEI_FILE_CONNECTED; + rets = 0; + goto nortpm; + } + + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + goto nortpm; + } + + cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp); + if (!cb) { + rets = -ENOMEM; + goto out; + } + + /* run hbuf acquire last so we don't have to undo */ + if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) { + rets = mei_cl_send_connect(cl, cb); + if (rets) + goto out; + } + + mutex_unlock(&dev->device_lock); + wait_event_timeout(cl->wait, + (cl->state == MEI_FILE_CONNECTED || + cl->state == MEI_FILE_DISCONNECTED || + cl->state == MEI_FILE_DISCONNECT_REQUIRED || + cl->state == MEI_FILE_DISCONNECT_REPLY), + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + mutex_lock(&dev->device_lock); + + if (!mei_cl_is_connected(cl)) { + if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) { + mei_io_list_flush_cl(&dev->ctrl_rd_list, cl); + mei_io_list_flush_cl(&dev->ctrl_wr_list, cl); + /* ignore disconnect return value; + * in case of failure reset will be invoked + */ + __mei_cl_disconnect(cl); + rets = -EFAULT; + goto out; + } + + /* timeout or something went really wrong */ + if (!cl->status) + cl->status = -EFAULT; + } + + rets = cl->status; +out: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + mei_io_cb_free(cb); + +nortpm: + if (!mei_cl_is_connected(cl)) + mei_cl_set_disconnected(cl); + + return rets; +}
+ +/** + * mei_cl_alloc_linked - allocate and link host client + * + * @dev: the device structure + * + * Return: cl on success, ERR_PTR on failure + */ +struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev) +{ + struct mei_cl *cl; + int ret; + + cl = mei_cl_allocate(dev); + if (!cl) { + ret = -ENOMEM; + goto err; + } + + ret = mei_cl_link(cl); + if (ret) + goto err; + + return cl; +err: + kfree(cl); + return ERR_PTR(ret); +}
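+ +/* + * Added commentary: transmit flow control below is credit based. A + * regular client may send one message per credit granted by the firmware + * side; fixed address clients are exempt, and clients sharing a single + * receive buffer draw from the me client wide pool + * (me_cl->tx_flow_ctrl_creds) instead of the per connection counter. + */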
+ +/** + * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl. + * + * @cl: host client + * + * Return: 1 if tx_flow_ctrl_creds > 0, 0 otherwise. + */ +static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl) +{ + if (WARN_ON(!cl || !cl->me_cl)) + return -EINVAL; + + if (cl->tx_flow_ctrl_creds > 0) + return 1; + + if (mei_cl_is_fixed_address(cl)) + return 1; + + if (mei_cl_is_single_recv_buf(cl)) { + if (cl->me_cl->tx_flow_ctrl_creds > 0) + return 1; + } + return 0; +}
+ +/** + * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits + * for a client + * + * @cl: host client + * + * Return: + * 0 on success + * -EINVAL when ctrl credits are <= 0 + */ +static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl) +{ + if (WARN_ON(!cl || !cl->me_cl)) + return -EINVAL; + + if (mei_cl_is_fixed_address(cl)) + return 0; + + if (mei_cl_is_single_recv_buf(cl)) { + if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0)) + return -EINVAL; + cl->me_cl->tx_flow_ctrl_creds--; + } else { + if (WARN_ON(cl->tx_flow_ctrl_creds <= 0)) + return -EINVAL; + cl->tx_flow_ctrl_creds--; + } + return 0; +}
+ +/** + * mei_cl_vtag_alloc - allocate and fill the vtag structure + * + * @fp: pointer to file structure + * @vtag: vm tag + * + * Return: + * * Pointer to allocated struct - on success + * * ERR_PTR(-ENOMEM) on memory allocation failure + */ +struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag) +{ + struct mei_cl_vtag *cl_vtag; + + cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL); + if (!cl_vtag) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&cl_vtag->list); + cl_vtag->vtag = vtag; + cl_vtag->fp = fp; + + return cl_vtag; +}
+ +/** + * mei_cl_fp_by_vtag - obtain the file pointer by vtag + * + * @cl: host client + * @vtag: vm tag + * + * Return: + * * A file pointer - on success + * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list + */ +const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag) +{ + struct mei_cl_vtag *vtag_l; + + list_for_each_entry(vtag_l, &cl->vtag_map, list) + if (vtag_l->vtag == vtag) + return vtag_l->fp; + + return ERR_PTR(-ENOENT); +}
+ +/** + * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag + * + * @cl: host client + * @vtag: vm tag + */ +static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag) +{ + struct mei_cl_vtag *vtag_l; + + list_for_each_entry(vtag_l, &cl->vtag_map, list) { + if (vtag_l->vtag == vtag) { + vtag_l->pending_read = false; + break; + } + } +}
+ +/** + * mei_cl_read_vtag_add_fc - add flow control for next pending reader + * in the vtag list + * + * @cl: host client + */ +static void mei_cl_read_vtag_add_fc(struct mei_cl *cl) +{ + struct mei_cl_vtag *cl_vtag; + + list_for_each_entry(cl_vtag, &cl->vtag_map, list) { + if (cl_vtag->pending_read) { + if (mei_cl_enqueue_ctrl_wr_cb(cl, + mei_cl_mtu(cl), + MEI_FOP_READ, + cl_vtag->fp)) + cl->rx_flow_ctrl_creds++; + break; + } + } +}
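+ +/* + * Usage sketch (added commentary): the character device path attaches + * one vtag entry per (file, vtag) pair, mirroring what + * mei_cl_bus_vtag_alloc() does with a NULL file pointer and vtag 0: + * + * cl_vtag = mei_cl_vtag_alloc(fp, vtag); + * if (IS_ERR(cl_vtag)) + * return -ENOMEM; + * list_add_tail(&cl_vtag->list, &cl->vtag_map); + */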
0 : -EOPNOTSUPP; +} + +/** + * mei_cl_add_rd_completed - add read completed callback to list with lock + * and vtag check + * + * @cl: host client + * @cb: callback block + * + */ +void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb) +{ + const struct file *fp; + + if (!mei_cl_vt_support_check(cl)) { + fp = mei_cl_fp_by_vtag(cl, cb->vtag); + if (IS_ERR(fp)) { + /* client already disconnected, discarding */ + mei_io_cb_free(cb); + return; + } + cb->fp = fp; + mei_cl_reset_read_by_vtag(cl, cb->vtag); + mei_cl_read_vtag_add_fc(cl); + } + + spin_lock(&cl->rd_completed_lock); + list_add_tail(&cb->list, &cl->rd_completed); + spin_unlock(&cl->rd_completed_lock); +} + +/** + * mei_cl_del_rd_completed - free read completed callback with lock + * + * @cl: host client + * @cb: callback block + * + */ +void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb) +{ + spin_lock(&cl->rd_completed_lock); + mei_io_cb_free(cb); + spin_unlock(&cl->rd_completed_lock); +} + +/** + * mei_cl_notify_fop2req - convert fop to proper request + * + * @fop: client notification file operation type + * + * Return: MEI_HBM_NOTIFICATION_START/STOP + */ +u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop) +{ + if (fop == MEI_FOP_NOTIFY_START) + return MEI_HBM_NOTIFICATION_START; + else + return MEI_HBM_NOTIFICATION_STOP; +} + +/** + * mei_cl_notify_req2fop - convert notification request to file operation type + * + * @req: hbm notification request type + * + * Return: MEI_FOP_NOTIFY_START/STOP + */ +enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req) +{ + if (req == MEI_HBM_NOTIFICATION_START) + return MEI_FOP_NOTIFY_START; + else + return MEI_FOP_NOTIFY_STOP; +} + +/** + * mei_cl_irq_notify - send notification request in irq_thread context + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * Return: 0 on success and error otherwise. + */ +int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, + struct list_head *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + int ret; + bool request; + + msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request)); + slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; + + if ((u32)slots < msg_slots) + return -EMSGSIZE; + + request = mei_cl_notify_fop2req(cb->fop_type); + ret = mei_hbm_cl_notify_req(dev, cl, request); + if (ret) { + cl->status = ret; + list_move_tail(&cb->list, cmpl_list); + return ret; + } + + list_move_tail(&cb->list, &dev->ctrl_rd_list); + return 0; +} + +/** + * mei_cl_notify_request - send notification stop/start request + * + * @cl: host client + * @fp: associate request with file + * @request: 1 for start or 0 for stop + * + * Locking: called under "dev->device_lock" lock + * + * Return: 0 on success and error otherwise.
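+ * + * For example (a caller's-eye sketch): mei_cl_notify_request(cl, fp, 1) + * sends a notification start request and blocks on cl->wait until the + * firmware acknowledges it, the client disconnects, or the connect + * timeout expires.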
+ */ +int mei_cl_notify_request(struct mei_cl *cl, + const struct file *fp, u8 request) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + enum mei_cb_file_ops fop_type; + int rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + if (!dev->hbm_f_ev_supported) { + cl_dbg(dev, cl, "notifications not supported\n"); + return -EOPNOTSUPP; + } + + if (!mei_cl_is_connected(cl)) + return -ENODEV; + + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + + fop_type = mei_cl_notify_req2fop(request); + cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp); + if (!cb) { + rets = -ENOMEM; + goto out; + } + + if (mei_hbuf_acquire(dev)) { + if (mei_hbm_cl_notify_req(dev, cl, request)) { + rets = -ENODEV; + goto out; + } + list_move_tail(&cb->list, &dev->ctrl_rd_list); + } + + mutex_unlock(&dev->device_lock); + wait_event_timeout(cl->wait, + cl->notify_en == request || + cl->status || + !mei_cl_is_connected(cl), + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + mutex_lock(&dev->device_lock); + + if (cl->notify_en != request && !cl->status) + cl->status = -EFAULT; + + rets = cl->status; + +out: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + mei_io_cb_free(cb); + return rets; +} + +/** + * mei_cl_notify - raise notification + * + * @cl: host client + * + * Locking: called under "dev->device_lock" lock + */ +void mei_cl_notify(struct mei_cl *cl) +{ + struct mei_device *dev; + + if (!cl || !cl->dev) + return; + + dev = cl->dev; + + if (!cl->notify_en) + return; + + cl_dbg(dev, cl, "notify event"); + cl->notify_ev = true; + if (!mei_cl_bus_notify_event(cl)) + wake_up_interruptible(&cl->ev_wait); + + if (cl->ev_async) + kill_fasync(&cl->ev_async, SIGIO, POLL_PRI); + +} + +/** + * mei_cl_notify_get - get or wait for notification event + * + * @cl: host client + * @block: this request is blocking + * @notify_ev: true if notification event was received + * + * Locking: called under "dev->device_lock" lock + * + * Return: 0 on success and error otherwise. + */ +int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev) +{ + struct mei_device *dev; + int rets; + + *notify_ev = false; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + if (!dev->hbm_f_ev_supported) { + cl_dbg(dev, cl, "notifications not supported\n"); + return -EOPNOTSUPP; + } + + if (!mei_cl_is_connected(cl)) + return -ENODEV; + + if (cl->notify_ev) + goto out; + + if (!block) + return -EAGAIN; + + mutex_unlock(&dev->device_lock); + rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev); + mutex_lock(&dev->device_lock); + + if (rets < 0) + return rets; + +out: + *notify_ev = cl->notify_ev; + cl->notify_ev = false; + return 0; +} + +/** + * mei_cl_read_start - start reading a client message + * + * @cl: host client + * @length: number of bytes to read + * @fp: pointer to file structure + * + * Return: 0 on success, <0 on failure.
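+ * + * Note: the hardware supports only one pending read per client, so while + * rx_flow_ctrl_creds is non-zero a further call only records the + * requesting file (mei_cl_set_read_by_fp()) and returns -EBUSY.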
+ */ +int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + int rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + if (!mei_cl_is_connected(cl)) + return -ENODEV; + + if (!mei_me_cl_is_active(cl->me_cl)) { + cl_err(dev, cl, "no such me client\n"); + return -ENOTTY; + } + + if (mei_cl_is_fixed_address(cl)) + return 0; + + /* HW currently supports only one pending read */ + if (cl->rx_flow_ctrl_creds) { + mei_cl_set_read_by_fp(cl, fp); + return -EBUSY; + } + + cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp); + if (!cb) + return -ENOMEM; + + mei_cl_set_read_by_fp(cl, fp); + + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + goto nortpm; + } + + rets = 0; + if (mei_hbuf_acquire(dev)) { + rets = mei_hbm_cl_flow_control_req(dev, cl); + if (rets < 0) + goto out; + + list_move_tail(&cb->list, &cl->rd_pending); + } + cl->rx_flow_ctrl_creds++; + +out: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); +nortpm: + if (rets) + mei_io_cb_free(cb); + + return rets; +} + +static inline u8 mei_ext_hdr_set_vtag(struct mei_ext_hdr *ext, u8 vtag) +{ + ext->type = MEI_EXT_HDR_VTAG; + ext->ext_payload[0] = vtag; + ext->length = mei_data2slots(sizeof(*ext)); + return ext->length; +} + +/** + * mei_msg_hdr_init - allocate and initialize mei message header + * + * @cb: message callback structure + * + * Return: a pointer to initialized header + */ +static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb) +{ + size_t hdr_len; + struct mei_ext_meta_hdr *meta; + struct mei_ext_hdr *ext; + struct mei_msg_hdr *mei_hdr; + bool is_ext, is_vtag; + + if (!cb) + return ERR_PTR(-EINVAL); + + /* Extended header for vtag is attached only on the first fragment */ + is_vtag = (cb->vtag && cb->buf_idx == 0); + is_ext = is_vtag; + + /* Compute extended header size */ + hdr_len = sizeof(*mei_hdr); + + if (!is_ext) + goto setup_hdr; + + hdr_len += sizeof(*meta); + if (is_vtag) + hdr_len += sizeof(*ext); + +setup_hdr: + mei_hdr = kzalloc(hdr_len, GFP_KERNEL); + if (!mei_hdr) + return ERR_PTR(-ENOMEM); + + mei_hdr->host_addr = mei_cl_host_addr(cb->cl); + mei_hdr->me_addr = mei_cl_me_id(cb->cl); + mei_hdr->internal = cb->internal; + mei_hdr->extended = is_ext; + + if (!is_ext) + goto out; + + meta = (struct mei_ext_meta_hdr *)mei_hdr->extension; + if (is_vtag) { + meta->count++; + meta->size += mei_ext_hdr_set_vtag(meta->hdrs, cb->vtag); + } +out: + mei_hdr->length = hdr_len - sizeof(*mei_hdr); + return mei_hdr; +} + +/** + * mei_cl_irq_write - write a message to device + * from the interrupt thread context + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * Return: 0, OK; otherwise error. + */ +int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + struct list_head *cmpl_list) +{ + struct mei_device *dev; + struct mei_msg_data *buf; + struct mei_msg_hdr *mei_hdr = NULL; + size_t hdr_len; + size_t hbuf_len, dr_len; + size_t buf_len; + size_t data_len; + int hbuf_slots; + u32 dr_slots; + u32 dma_len; + int rets; + bool first_chunk; + const void *data; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + buf = &cb->buf; + + first_chunk = cb->buf_idx == 0; + + rets = first_chunk ? 
mei_cl_tx_flow_ctrl_creds(cl) : 1; + if (rets < 0) + goto err; + + if (rets == 0) { + cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); + return 0; + } + + buf_len = buf->size - cb->buf_idx; + data = buf->data + cb->buf_idx; + hbuf_slots = mei_hbuf_empty_slots(dev); + if (hbuf_slots < 0) { + rets = -EOVERFLOW; + goto err; + } + + hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK; + dr_slots = mei_dma_ring_empty_slots(dev); + dr_len = mei_slots2data(dr_slots); + + mei_hdr = mei_msg_hdr_init(cb); + if (IS_ERR(mei_hdr)) { + rets = PTR_ERR(mei_hdr); + mei_hdr = NULL; + goto err; + } + + cl_dbg(dev, cl, "Extended Header %d vtag = %d\n", + mei_hdr->extended, cb->vtag); + + hdr_len = sizeof(*mei_hdr) + mei_hdr->length; + + /** + * Split the message only if we can write the whole host buffer + * otherwise wait for next time the host buffer is empty. + */ + if (hdr_len + buf_len <= hbuf_len) { + data_len = buf_len; + mei_hdr->msg_complete = 1; + } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) { + mei_hdr->dma_ring = 1; + if (buf_len > dr_len) + buf_len = dr_len; + else + mei_hdr->msg_complete = 1; + + data_len = sizeof(dma_len); + dma_len = buf_len; + data = &dma_len; + } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) { + buf_len = hbuf_len - hdr_len; + data_len = buf_len; + } else { + kfree(mei_hdr); + return 0; + } + mei_hdr->length += data_len; + + if (mei_hdr->dma_ring) + mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len); + rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len); + + if (rets) + goto err; + + cl->status = 0; + cl->writing_state = MEI_WRITING; + cb->buf_idx += buf_len; + + if (first_chunk) { + if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) { + rets = -EIO; + goto err; + } + } + + if (mei_hdr->msg_complete) + list_move_tail(&cb->list, &dev->write_waiting_list); + + kfree(mei_hdr); + return 0; + +err: + kfree(mei_hdr); + cl->status = rets; + list_move_tail(&cb->list, cmpl_list); + return rets; +} + +/** + * mei_cl_write - submit a write cb to mei device + * assumes device_lock is locked + * + * @cl: host client + * @cb: write callback with filled data + * + * Return: number of bytes sent on success, <0 on failure. 
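+ * + * Caller-visible behavior for a blocking cb (a sketch): the call returns + * the full buffer size once the write completes, -EINTR if a signal + * interrupts the wait, or -EFAULT if the client disconnected mid-write.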
+ */ +ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) +{ + struct mei_device *dev; + struct mei_msg_data *buf; + struct mei_msg_hdr *mei_hdr = NULL; + size_t hdr_len; + size_t hbuf_len, dr_len; + size_t buf_len; + size_t data_len; + int hbuf_slots; + u32 dr_slots; + u32 dma_len; + ssize_t rets; + bool blocking; + const void *data; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + if (WARN_ON(!cb)) + return -EINVAL; + + dev = cl->dev; + + buf = &cb->buf; + buf_len = buf->size; + + cl_dbg(dev, cl, "buf_len=%zd\n", buf_len); + + blocking = cb->blocking; + data = buf->data; + + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); + cl_err(dev, cl, "rpm: get failed %zd\n", rets); + goto free; + } + + cb->buf_idx = 0; + cl->writing_state = MEI_IDLE; + + + rets = mei_cl_tx_flow_ctrl_creds(cl); + if (rets < 0) + goto err; + + mei_hdr = mei_msg_hdr_init(cb); + if (IS_ERR(mei_hdr)) { + rets = PTR_ERR(mei_hdr); + mei_hdr = NULL; + goto err; + } + + cl_dbg(dev, cl, "Extended Header %d vtag = %d\n", + mei_hdr->extended, cb->vtag); + + hdr_len = sizeof(*mei_hdr) + mei_hdr->length; + + if (rets == 0) { + cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); + rets = buf_len; + goto out; + } + + if (!mei_hbuf_acquire(dev)) { + cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n"); + rets = buf_len; + goto out; + } + + hbuf_slots = mei_hbuf_empty_slots(dev); + if (hbuf_slots < 0) { + buf_len = -EOVERFLOW; + goto out; + } + + hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK; + dr_slots = mei_dma_ring_empty_slots(dev); + dr_len = mei_slots2data(dr_slots); + + if (hdr_len + buf_len <= hbuf_len) { + data_len = buf_len; + mei_hdr->msg_complete = 1; + } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) { + mei_hdr->dma_ring = 1; + if (buf_len > dr_len) + buf_len = dr_len; + else + mei_hdr->msg_complete = 1; + + data_len = sizeof(dma_len); + dma_len = buf_len; + data = &dma_len; + } else { + buf_len = hbuf_len - hdr_len; + data_len = buf_len; + } + + mei_hdr->length += data_len; + + if (mei_hdr->dma_ring) + mei_dma_ring_write(dev, buf->data, buf_len); + rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len); + + if (rets) + goto err; + + rets = mei_cl_tx_flow_ctrl_creds_reduce(cl); + if (rets) + goto err; + + cl->writing_state = MEI_WRITING; + cb->buf_idx = buf_len; + /* restore return value */ + buf_len = buf->size; + +out: + if (mei_hdr->msg_complete) + mei_tx_cb_enqueue(cb, &dev->write_waiting_list); + else + mei_tx_cb_enqueue(cb, &dev->write_list); + + cb = NULL; + if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) { + + mutex_unlock(&dev->device_lock); + rets = wait_event_interruptible(cl->tx_wait, + cl->writing_state == MEI_WRITE_COMPLETE || + (!mei_cl_is_connected(cl))); + mutex_lock(&dev->device_lock); + /* wait_event_interruptible returns -ERESTARTSYS */ + if (rets) { + if (signal_pending(current)) + rets = -EINTR; + goto err; + } + if (cl->writing_state != MEI_WRITE_COMPLETE) { + rets = -EFAULT; + goto err; + } + } + + rets = buf_len; +err: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); +free: + mei_io_cb_free(cb); + + kfree(mei_hdr); + + return rets; +} + +/** + * mei_cl_complete - processes completed operation for a client + * + * @cl: private data of the file object. + * @cb: callback block. 
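+ * + * Dispatches on cb->fop_type: completed writes wake tx_wait waiters (or + * request autosuspend), completed reads are queued on rd_completed, and + * connect/disconnect/notify responses wake cl->wait.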
+ */ +void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) +{ + struct mei_device *dev = cl->dev; + + switch (cb->fop_type) { + case MEI_FOP_WRITE: + mei_tx_cb_dequeue(cb); + cl->writing_state = MEI_WRITE_COMPLETE; + if (waitqueue_active(&cl->tx_wait)) { + wake_up_interruptible(&cl->tx_wait); + } else { + pm_runtime_mark_last_busy(dev->dev); + pm_request_autosuspend(dev->dev); + } + break; + + case MEI_FOP_READ: + mei_cl_add_rd_completed(cl, cb); + if (!mei_cl_is_fixed_address(cl) && + !WARN_ON(!cl->rx_flow_ctrl_creds)) + cl->rx_flow_ctrl_creds--; + if (!mei_cl_bus_rx_event(cl)) + wake_up_interruptible(&cl->rx_wait); + break; + + case MEI_FOP_CONNECT: + case MEI_FOP_DISCONNECT: + case MEI_FOP_NOTIFY_STOP: + case MEI_FOP_NOTIFY_START: + if (waitqueue_active(&cl->wait)) + wake_up(&cl->wait); + + break; + case MEI_FOP_DISCONNECT_RSP: + mei_io_cb_free(cb); + mei_cl_set_disconnected(cl); + break; + default: + BUG_ON(0); + } +} + + +/** + * mei_cl_all_disconnect - disconnect forcefully all connected clients + * + * @dev: mei device + */ +void mei_cl_all_disconnect(struct mei_device *dev) +{ + struct mei_cl *cl; + + list_for_each_entry(cl, &dev->file_list, link) + mei_cl_set_disconnected(cl); +} diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h new file mode 100644 index 000000000..9e08a9843 --- /dev/null +++ b/drivers/misc/mei/client.h @@ -0,0 +1,280 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2003-2018, Intel Corporation. All rights reserved. + * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#ifndef _MEI_CLIENT_H_ +#define _MEI_CLIENT_H_ + +#include <linux/types.h> +#include <linux/poll.h> +#include <linux/mei.h> + +#include "mei_dev.h" + +/* + * reference counting base function + */ +void mei_me_cl_init(struct mei_me_client *me_cl); +void mei_me_cl_put(struct mei_me_client *me_cl); +struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl); + +void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl); +void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl); + +struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev, + const uuid_le *uuid); +struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id); +struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev, + const uuid_le *uuid, u8 client_id); +void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid); +void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, + const uuid_le *uuid, u8 id); +void mei_me_cl_rm_all(struct mei_device *dev); + +/** + * mei_me_cl_is_active - check whether me client is active in the fw + * + * @me_cl: me client + * + * Return: true if the me client is active in the firmware + */ +static inline bool mei_me_cl_is_active(const struct mei_me_client *me_cl) +{ + return !list_empty_careful(&me_cl->list); +} + +/** + * mei_me_cl_uuid - return me client protocol name (uuid) + * + * @me_cl: me client + * + * Return: me client protocol name + */ +static inline const uuid_le *mei_me_cl_uuid(const struct mei_me_client *me_cl) +{ + return &me_cl->props.protocol_name; +} + +/** + * mei_me_cl_ver - return me client protocol version + * + * @me_cl: me client + * + * Return: me client protocol version + */ +static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl) +{ + return me_cl->props.protocol_version; +} + +/** + * mei_me_cl_max_conn - return me client max number of connections + * + * @me_cl: me client + * + * Return: me client max number of connections + */ +static inline 
u8 mei_me_cl_max_conn(const struct mei_me_client *me_cl) +{ + return me_cl->props.max_number_of_connections; +} + +/** + * mei_me_cl_fixed - return me client fixed address, if any + * + * @me_cl: me client + * + * Return: me client fixed address + */ +static inline u8 mei_me_cl_fixed(const struct mei_me_client *me_cl) +{ + return me_cl->props.fixed_address; +} + +/** + * mei_me_cl_vt - return me client vtag supported status + * + * @me_cl: me client + * + * Return: true if me client supports vt tagging + */ +static inline bool mei_me_cl_vt(const struct mei_me_client *me_cl) +{ + return me_cl->props.vt_supported == 1; +} + +/** + * mei_me_cl_max_len - return me client max msg length + * + * @me_cl: me client + * + * Return: me client max msg length + */ +static inline u32 mei_me_cl_max_len(const struct mei_me_client *me_cl) +{ + return me_cl->props.max_msg_length; +} + +/* + * MEI IO Functions + */ +void mei_io_cb_free(struct mei_cl_cb *priv_cb); + +/* + * MEI Host Client Functions + */ + +struct mei_cl *mei_cl_allocate(struct mei_device *dev); + +int mei_cl_link(struct mei_cl *cl); +int mei_cl_unlink(struct mei_cl *cl); + +struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev); + +struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp); + +void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb); +void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb); + +struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, + enum mei_cb_file_ops type, + const struct file *fp); +struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length, + enum mei_cb_file_ops type, + const struct file *fp); +int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp); + +struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag); +const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag); +int mei_cl_vt_support_check(const struct mei_cl *cl); +/* + * MEI input output function prototype + */ + +/** + * mei_cl_is_connected - host client is connected + * + * @cl: host client + * + * Return: true if the host client is connected + */ +static inline bool mei_cl_is_connected(struct mei_cl *cl) +{ + return cl->state == MEI_FILE_CONNECTED; +} + +/** + * mei_cl_me_id - me client id + * + * @cl: host client + * + * Return: me client id or 0 if client is not connected + */ +static inline u8 mei_cl_me_id(const struct mei_cl *cl) +{ + return cl->me_cl ? cl->me_cl->client_id : 0; +} + +/** + * mei_cl_mtu - maximal message that client can send and receive + * + * @cl: host client + * + * Return: mtu or 0 if client is not connected + */ +static inline size_t mei_cl_mtu(const struct mei_cl *cl) +{ + return cl->me_cl ? 
cl->me_cl->props.max_msg_length : 0; +} + +/** + * mei_cl_is_fixed_address - check whether the me client uses fixed address + * + * @cl: host client + * + * Return: true if the client is connected and it has fixed me address + */ +static inline bool mei_cl_is_fixed_address(const struct mei_cl *cl) +{ + return cl->me_cl && cl->me_cl->props.fixed_address; +} + +/** + * mei_cl_is_single_recv_buf - check whether the me client + * uses single receiving buffer + * + * @cl: host client + * + * Return: true if single_recv_buf == 1; false otherwise + */ +static inline bool mei_cl_is_single_recv_buf(const struct mei_cl *cl) +{ + return cl->me_cl->props.single_recv_buf; +} + +/** + * mei_cl_uuid - client's uuid + * + * @cl: host client + * + * Return: uuid of the connected me client + */ +static inline const uuid_le *mei_cl_uuid(const struct mei_cl *cl) +{ + return mei_me_cl_uuid(cl->me_cl); +} + +/** + * mei_cl_host_addr - client's host address + * + * @cl: host client + * + * Return: 0 for fixed address client, host address for dynamic client + */ +static inline u8 mei_cl_host_addr(const struct mei_cl *cl) +{ + return mei_cl_is_fixed_address(cl) ? 0 : cl->host_client_id; +} + +int mei_cl_disconnect(struct mei_cl *cl); +int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, + struct list_head *cmpl_list); +int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, + const struct file *file); +int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, + struct list_head *cmpl_list); +int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp); +ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb); +int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + struct list_head *cmpl_list); + +void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb); + +void mei_host_client_init(struct mei_device *dev); + +u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop); +enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request); +int mei_cl_notify_request(struct mei_cl *cl, + const struct file *file, u8 request); +int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, + struct list_head *cmpl_list); +int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev); +void mei_cl_notify(struct mei_cl *cl); + +void mei_cl_all_disconnect(struct mei_device *dev); + +#define MEI_CL_FMT "cl:host=%02d me=%02d " +#define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl) + +#define cl_dbg(dev, cl, format, arg...) \ + dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) + +#define cl_warn(dev, cl, format, arg...) \ + dev_warn((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) + +#define cl_err(dev, cl, format, arg...) \ + dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) + +#endif /* _MEI_CLIENT_H_ */ diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c new file mode 100644 index 000000000..3ab1a431d --- /dev/null +++ b/drivers/misc/mei/debugfs.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2016, Intel Corporation.
All rights reserved + * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "client.h" +#include "hw.h" + +static int mei_dbgfs_meclients_show(struct seq_file *m, void *unused) +{ + struct mei_device *dev = m->private; + struct mei_me_client *me_cl; + int i = 0; + + if (!dev) + return -ENODEV; + + down_read(&dev->me_clients_rwsem); + + seq_puts(m, " |id|fix| UUID |con|msg len|sb|refc|vt|\n"); + + /* if the driver is not enabled the list won't be consistent */ + if (dev->dev_state != MEI_DEV_ENABLED) + goto out; + + list_for_each_entry(me_cl, &dev->me_clients, list) { + if (!mei_me_cl_get(me_cl)) + continue; + + seq_printf(m, "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|%2d|\n", + i++, me_cl->client_id, + me_cl->props.fixed_address, + &me_cl->props.protocol_name, + me_cl->props.max_number_of_connections, + me_cl->props.max_msg_length, + me_cl->props.single_recv_buf, + kref_read(&me_cl->refcnt), + me_cl->props.vt_supported); + mei_me_cl_put(me_cl); + } + +out: + up_read(&dev->me_clients_rwsem); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_meclients); + +static int mei_dbgfs_active_show(struct seq_file *m, void *unused) +{ + struct mei_device *dev = m->private; + struct mei_cl *cl; + int i = 0; + + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + seq_puts(m, " |me|host|state|rd|wr|wrq\n"); + + /* if the driver is not enabled the list won't be consistent */ + if (dev->dev_state != MEI_DEV_ENABLED) + goto out; + + list_for_each_entry(cl, &dev->file_list, link) { + + seq_printf(m, "%3d|%2d|%4d|%5d|%2d|%2d|%3u\n", + i, mei_cl_me_id(cl), cl->host_client_id, cl->state, + !list_empty(&cl->rd_completed), cl->writing_state, + cl->tx_cb_queued); + i++; + } +out: + mutex_unlock(&dev->device_lock); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_active); + +static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused) +{ + struct mei_device *dev = m->private; + + seq_printf(m, "dev: %s\n", mei_dev_state_str(dev->dev_state)); + seq_printf(m, "hbm: %s\n", mei_hbm_state_str(dev->hbm_state)); + + if (dev->hbm_state >= MEI_HBM_ENUM_CLIENTS && + dev->hbm_state <= MEI_HBM_STARTED) { + seq_puts(m, "hbm features:\n"); + seq_printf(m, "\tPG: %01d\n", dev->hbm_f_pg_supported); + seq_printf(m, "\tDC: %01d\n", dev->hbm_f_dc_supported); + seq_printf(m, "\tIE: %01d\n", dev->hbm_f_ie_supported); + seq_printf(m, "\tDOT: %01d\n", dev->hbm_f_dot_supported); + seq_printf(m, "\tEV: %01d\n", dev->hbm_f_ev_supported); + seq_printf(m, "\tFA: %01d\n", dev->hbm_f_fa_supported); + seq_printf(m, "\tOS: %01d\n", dev->hbm_f_os_supported); + seq_printf(m, "\tDR: %01d\n", dev->hbm_f_dr_supported); + seq_printf(m, "\tVT: %01d\n", dev->hbm_f_vt_supported); + seq_printf(m, "\tCAP: %01d\n", dev->hbm_f_cap_supported); + } + + seq_printf(m, "pg: %s, %s\n", + mei_pg_is_enabled(dev) ? 
"ENABLED" : "DISABLED", + mei_pg_state_str(mei_pg_state(dev))); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_devstate); + +static ssize_t mei_dbgfs_write_allow_fa(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct mei_device *dev; + int ret; + + dev = container_of(file->private_data, + struct mei_device, allow_fixed_address); + + ret = debugfs_write_file_bool(file, user_buf, count, ppos); + if (ret < 0) + return ret; + dev->override_fixed_address = true; + return ret; +} + +static const struct file_operations mei_dbgfs_allow_fa_fops = { + .open = simple_open, + .read = debugfs_read_file_bool, + .write = mei_dbgfs_write_allow_fa, + .llseek = generic_file_llseek, +}; + +/** + * mei_dbgfs_deregister - Remove the debugfs files and directories + * + * @dev: the mei device structure + */ +void mei_dbgfs_deregister(struct mei_device *dev) +{ + if (!dev->dbgfs_dir) + return; + debugfs_remove_recursive(dev->dbgfs_dir); + dev->dbgfs_dir = NULL; +} + +/** + * mei_dbgfs_register - Add the debugfs files + * + * @dev: the mei device structure + * @name: the mei device name + */ +void mei_dbgfs_register(struct mei_device *dev, const char *name) +{ + struct dentry *dir; + + dir = debugfs_create_dir(name, NULL); + dev->dbgfs_dir = dir; + + debugfs_create_file("meclients", S_IRUSR, dir, dev, + &mei_dbgfs_meclients_fops); + debugfs_create_file("active", S_IRUSR, dir, dev, + &mei_dbgfs_active_fops); + debugfs_create_file("devstate", S_IRUSR, dir, dev, + &mei_dbgfs_devstate_fops); + debugfs_create_file("allow_fixed_address", S_IRUSR | S_IWUSR, dir, + &dev->allow_fixed_address, + &mei_dbgfs_allow_fa_fops); +} diff --git a/drivers/misc/mei/dma-ring.c b/drivers/misc/mei/dma-ring.c new file mode 100644 index 000000000..ef56f849b --- /dev/null +++ b/drivers/misc/mei/dma-ring.c @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(c) 2016-2018 Intel Corporation. All rights reserved. 
+ */ +#include <linux/dma-mapping.h> +#include <linux/mei.h> + +#include "mei_dev.h" + +/** + * mei_dmam_dscr_alloc() - allocate a managed coherent buffer + * for the dma descriptor + * @dev: mei_device + * @dscr: dma descriptor + * + * Return: + * * 0 - on success or zero allocation request + * * -EINVAL - if size is not power of 2 + * * -ENOMEM - if allocation has failed + */ +static int mei_dmam_dscr_alloc(struct mei_device *dev, + struct mei_dma_dscr *dscr) +{ + if (!dscr->size) + return 0; + + if (WARN_ON(!is_power_of_2(dscr->size))) + return -EINVAL; + + if (dscr->vaddr) + return 0; + + dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr, + GFP_KERNEL); + if (!dscr->vaddr) + return -ENOMEM; + + return 0; +} + +/** + * mei_dmam_dscr_free() - free a managed coherent buffer + * from the dma descriptor + * @dev: mei_device + * @dscr: dma descriptor + */ +static void mei_dmam_dscr_free(struct mei_device *dev, + struct mei_dma_dscr *dscr) +{ + if (!dscr->vaddr) + return; + + dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr); + dscr->vaddr = NULL; +} + +/** + * mei_dmam_ring_free() - free dma ring buffers + * @dev: mei device + */ +void mei_dmam_ring_free(struct mei_device *dev) +{ + int i; + + for (i = 0; i < DMA_DSCR_NUM; i++) + mei_dmam_dscr_free(dev, &dev->dr_dscr[i]); +} + +/** + * mei_dmam_ring_alloc() - allocate dma ring buffers + * @dev: mei device + * + * Return: -ENOMEM on allocation failure, 0 otherwise + */ +int mei_dmam_ring_alloc(struct mei_device *dev) +{ + int i; + + for (i = 0; i < DMA_DSCR_NUM; i++) + if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i])) + goto err; + + return 0; + +err: + mei_dmam_ring_free(dev); + return -ENOMEM; +} + +/** + * mei_dma_ring_is_allocated() - check if dma ring is allocated + * @dev: mei device + * + * Return: true if dma ring is allocated + */ +bool mei_dma_ring_is_allocated(struct mei_device *dev) +{ + return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr; +} + +static inline +struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev) +{ + return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr; +} + +/** + * mei_dma_ring_reset() - reset the dma control block + * @dev: mei device + */ +void mei_dma_ring_reset(struct mei_device *dev) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + + if (!ctrl) + return; + + memset(ctrl, 0, sizeof(*ctrl)); +} + +/** + * mei_dma_copy_from() - copy from dma ring into buffer + * @dev: mei device + * @buf: data buffer + * @offset: offset in slots. + * @n: number of slots to copy. + */ +static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf, + u32 offset, u32 n) +{ + unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr; + + size_t b_offset = offset << 2; + size_t b_n = n << 2; + + memcpy(buf, dbuf + b_offset, b_n); + + return b_n; +} + +/** + * mei_dma_copy_to() - copy a buffer to the dma ring + * @dev: mei device + * @buf: data buffer + * @offset: offset in slots. + * @n: number of slots to copy. + */ +static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf, + u32 offset, u32 n) +{ + unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr; + + size_t b_offset = offset << 2; + size_t b_n = n << 2; + + memcpy(hbuf + b_offset, buf, b_n); + + return b_n; +} + +/** + * mei_dma_ring_read() - read data from the ring + * @dev: mei device + * @buf: buffer to read into; may be NULL in case of dropping the data. + * @len: length to read.
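+ * + * The ring works in 4-byte slots: a read of len bytes consumes + * mei_data2slots(len) slots (for example, len = 6 occupies two slots) and + * the read index wraps at the power-of-2 buffer depth.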
+ */ +void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + u32 dbuf_depth; + u32 rd_idx, rem, slots; + + if (WARN_ON(!ctrl)) + return; + + dev_dbg(dev->dev, "reading from dma %u bytes\n", len); + + if (!len) + return; + + dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2; + rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1); + slots = mei_data2slots(len); + + /* if buf is NULL we drop the packet by advancing the pointer. */ + if (!buf) + goto out; + + if (rd_idx + slots > dbuf_depth) { + buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx); + rem = slots - (dbuf_depth - rd_idx); + rd_idx = 0; + } else { + rem = slots; + } + + mei_dma_copy_from(dev, buf, rd_idx, rem); +out: + WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots); +} + +static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev) +{ + return dev->dr_dscr[DMA_DSCR_HOST].size >> 2; +} + +/** + * mei_dma_ring_empty_slots() - calculate number of empty slots in dma ring + * @dev: mei_device + * + * Return: number of empty slots + */ +u32 mei_dma_ring_empty_slots(struct mei_device *dev) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + u32 wr_idx, rd_idx, hbuf_depth, empty; + + if (!mei_dma_ring_is_allocated(dev)) + return 0; + + if (WARN_ON(!ctrl)) + return 0; + + /* easier to work in slots */ + hbuf_depth = mei_dma_ring_hbuf_depth(dev); + rd_idx = READ_ONCE(ctrl->hbuf_rd_idx); + wr_idx = READ_ONCE(ctrl->hbuf_wr_idx); + + if (rd_idx > wr_idx) + empty = rd_idx - wr_idx; + else + empty = hbuf_depth - (wr_idx - rd_idx); + + return empty; +} + +/** + * mei_dma_ring_write - write data to dma ring host buffer + * + * @dev: mei_device + * @buf: data to be written + * @len: data length + */ +void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + u32 hbuf_depth; + u32 wr_idx, rem, slots; + + if (WARN_ON(!ctrl)) + return; + + dev_dbg(dev->dev, "writing to dma %u bytes\n", len); + hbuf_depth = mei_dma_ring_hbuf_depth(dev); + wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1); + slots = mei_data2slots(len); + + if (wr_idx + slots > hbuf_depth) { + buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx); + rem = slots - (hbuf_depth - wr_idx); + wr_idx = 0; + } else { + rem = slots; + } + + mei_dma_copy_to(dev, buf, wr_idx, rem); + + WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots); +} diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c new file mode 100644 index 000000000..33579d979 --- /dev/null +++ b/drivers/misc/mei/hbm.c @@ -0,0 +1,1437 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Intel Management Engine Interface (Intel MEI) Linux driver + */ +#include <linux/export.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +static const char *mei_hbm_status_str(enum mei_hbm_status status) +{ +#define MEI_HBM_STATUS(status) case MEI_HBMS_##status: return #status + switch (status) { + MEI_HBM_STATUS(SUCCESS); + MEI_HBM_STATUS(CLIENT_NOT_FOUND); + MEI_HBM_STATUS(ALREADY_EXISTS); + MEI_HBM_STATUS(REJECTED); + MEI_HBM_STATUS(INVALID_PARAMETER); + MEI_HBM_STATUS(NOT_ALLOWED); + MEI_HBM_STATUS(ALREADY_STARTED); + MEI_HBM_STATUS(NOT_STARTED); + default: return "unknown"; + } +#undef MEI_HBM_STATUS +} + +static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status) +{ +#define MEI_CL_CS(status) case MEI_CL_CONN_##status: return #status + switch (status) { + MEI_CL_CS(SUCCESS); + MEI_CL_CS(NOT_FOUND); + MEI_CL_CS(ALREADY_STARTED); + MEI_CL_CS(OUT_OF_RESOURCES); + MEI_CL_CS(MESSAGE_SMALL); + MEI_CL_CS(NOT_ALLOWED); + default: return "unknown"; + } +#undef MEI_CL_CS +} + +const char *mei_hbm_state_str(enum mei_hbm_state state) +{ +#define MEI_HBM_STATE(state) case MEI_HBM_##state: return #state + switch (state) { + MEI_HBM_STATE(IDLE); + MEI_HBM_STATE(STARTING); + MEI_HBM_STATE(STARTED); + MEI_HBM_STATE(DR_SETUP); + MEI_HBM_STATE(ENUM_CLIENTS); + MEI_HBM_STATE(CLIENT_PROPERTIES); + MEI_HBM_STATE(STOPPED); + default: + return "unknown"; + } +#undef MEI_HBM_STATE +} + +/** + * mei_cl_conn_status_to_errno - convert client connect response + * status to error code + * + * @status: client connect response status + * + * Return: corresponding error code + */ +static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status) +{ + switch (status) { + case MEI_CL_CONN_SUCCESS: return 0; + case MEI_CL_CONN_NOT_FOUND: return -ENOTTY; + case MEI_CL_CONN_ALREADY_STARTED: return -EBUSY; + case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY; + case MEI_CL_CONN_MESSAGE_SMALL: return -EINVAL; + case MEI_CL_CONN_NOT_ALLOWED: return -EBUSY; + default: return -EINVAL; + } +} + +/** + * mei_hbm_write_message - wrapper for sending hbm messages.
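+ * (the payload length is taken from hdr->length, so the header must be + * fully initialized before the call)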
+ * + * @dev: mei device + * @hdr: mei header + * @data: payload + */ +static inline int mei_hbm_write_message(struct mei_device *dev, + struct mei_msg_hdr *hdr, + const void *data) +{ + return mei_write_message(dev, hdr, sizeof(*hdr), data, hdr->length); +} + +/** + * mei_hbm_idle - set hbm to idle state + * + * @dev: the device structure + */ +void mei_hbm_idle(struct mei_device *dev) +{ + dev->init_clients_timer = 0; + dev->hbm_state = MEI_HBM_IDLE; +} + +/** + * mei_hbm_reset - reset hbm counters and bookkeeping data structures + * + * @dev: the device structure + */ +void mei_hbm_reset(struct mei_device *dev) +{ + mei_me_cl_rm_all(dev); + + mei_hbm_idle(dev); +} + +/** + * mei_hbm_hdr - construct hbm header + * + * @mei_hdr: hbm header + * @length: payload length + */ +static inline void mei_hbm_hdr(struct mei_msg_hdr *mei_hdr, size_t length) +{ + memset(mei_hdr, 0, sizeof(*mei_hdr)); + mei_hdr->length = length; + mei_hdr->msg_complete = 1; +} + +/** + * mei_hbm_cl_hdr - construct client hbm header + * + * @cl: client + * @hbm_cmd: host bus message command + * @buf: buffer for cl header + * @len: buffer length + */ +static inline +void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len) +{ + struct mei_hbm_cl_cmd *cmd = buf; + + memset(cmd, 0, len); + + cmd->hbm_cmd = hbm_cmd; + cmd->host_addr = mei_cl_host_addr(cl); + cmd->me_addr = mei_cl_me_id(cl); +} + +/** + * mei_hbm_cl_write - write simple hbm client message + * + * @dev: the device structure + * @cl: client + * @hbm_cmd: host bus message command + * @buf: message buffer + * @len: buffer length + * + * Return: 0 on success, <0 on failure. + */ +static inline int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl, + u8 hbm_cmd, void *buf, size_t len) +{ + struct mei_msg_hdr mei_hdr; + + mei_hbm_hdr(&mei_hdr, len); + mei_hbm_cl_hdr(cl, hbm_cmd, buf, len); + + return mei_hbm_write_message(dev, &mei_hdr, buf); +} + +/** + * mei_hbm_cl_addr_equal - check if the client's and + * the message address match + * + * @cl: client + * @cmd: hbm client message + * + * Return: true if addresses are the same + */ +static inline +bool mei_hbm_cl_addr_equal(struct mei_cl *cl, struct mei_hbm_cl_cmd *cmd) +{ + return mei_cl_host_addr(cl) == cmd->host_addr && + mei_cl_me_id(cl) == cmd->me_addr; +} + +/** + * mei_hbm_cl_find_by_cmd - find recipient client + * + * @dev: the device structure + * @buf: a buffer with hbm cl command + * + * Return: the recipient client or NULL if not found + */ +static inline +struct mei_cl *mei_hbm_cl_find_by_cmd(struct mei_device *dev, void *buf) +{ + struct mei_hbm_cl_cmd *cmd = (struct mei_hbm_cl_cmd *)buf; + struct mei_cl *cl; + + list_for_each_entry(cl, &dev->file_list, link) + if (mei_hbm_cl_addr_equal(cl, cmd)) + return cl; + return NULL; +} + + +/** + * mei_hbm_start_wait - wait for start response message. + * + * @dev: the device structure + * + * Return: 0 on success and < 0 on failure + */ +int mei_hbm_start_wait(struct mei_device *dev) +{ + int ret; + + if (dev->hbm_state > MEI_HBM_STARTING) + return 0; + + mutex_unlock(&dev->device_lock); + ret = wait_event_timeout(dev->wait_hbm_start, + dev->hbm_state != MEI_HBM_STARTING, + mei_secs_to_jiffies(MEI_HBM_TIMEOUT)); + mutex_lock(&dev->device_lock); + + if (ret == 0 && (dev->hbm_state <= MEI_HBM_STARTING)) { + dev->hbm_state = MEI_HBM_IDLE; + dev_err(dev->dev, "waiting for mei start failed\n"); + return -ETIME; + } + return 0; +} + +/** + * mei_hbm_start_req - sends start request message.
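+ * (first step of the HBM handshake: it advertises HBM_MAJOR_VERSION and + * HBM_MINOR_VERSION to the firmware and arms the init-clients stall timer)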
+ * + * @dev: the device structure + * + * Return: 0 on success and < 0 on failure + */ +int mei_hbm_start_req(struct mei_device *dev) +{ + struct mei_msg_hdr mei_hdr; + struct hbm_host_version_request req; + int ret; + + mei_hbm_reset(dev); + + mei_hbm_hdr(&mei_hdr, sizeof(req)); + + /* host start message */ + memset(&req, 0, sizeof(req)); + req.hbm_cmd = HOST_START_REQ_CMD; + req.host_version.major_version = HBM_MAJOR_VERSION; + req.host_version.minor_version = HBM_MINOR_VERSION; + + dev->hbm_state = MEI_HBM_IDLE; + ret = mei_hbm_write_message(dev, &mei_hdr, &req); + if (ret) { + dev_err(dev->dev, "version message write failed: ret = %d\n", + ret); + return ret; + } + + dev->hbm_state = MEI_HBM_STARTING; + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + mei_schedule_stall_timer(dev); + return 0; +} + +/** + * mei_hbm_dma_setup_req() - setup DMA request + * @dev: the device structure + * + * Return: 0 on success and < 0 on failure + */ +static int mei_hbm_dma_setup_req(struct mei_device *dev) +{ + struct mei_msg_hdr mei_hdr; + struct hbm_dma_setup_request req; + unsigned int i; + int ret; + + mei_hbm_hdr(&mei_hdr, sizeof(req)); + + memset(&req, 0, sizeof(req)); + req.hbm_cmd = MEI_HBM_DMA_SETUP_REQ_CMD; + for (i = 0; i < DMA_DSCR_NUM; i++) { + phys_addr_t paddr; + + paddr = dev->dr_dscr[i].daddr; + req.dma_dscr[i].addr_hi = upper_32_bits(paddr); + req.dma_dscr[i].addr_lo = lower_32_bits(paddr); + req.dma_dscr[i].size = dev->dr_dscr[i].size; + } + + mei_dma_ring_reset(dev); + + ret = mei_hbm_write_message(dev, &mei_hdr, &req); + if (ret) { + dev_err(dev->dev, "dma setup request write failed: ret = %d.\n", + ret); + return ret; + } + + dev->hbm_state = MEI_HBM_DR_SETUP; + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + mei_schedule_stall_timer(dev); + return 0; +} + +/** + * mei_hbm_capabilities_req - request capabilities + * + * @dev: the device structure + * + * Return: 0 on success and < 0 on failure + */ +static int mei_hbm_capabilities_req(struct mei_device *dev) +{ + struct mei_msg_hdr mei_hdr; + struct hbm_capability_request req; + int ret; + + mei_hbm_hdr(&mei_hdr, sizeof(req)); + + memset(&req, 0, sizeof(req)); + req.hbm_cmd = MEI_HBM_CAPABILITIES_REQ_CMD; + if (dev->hbm_f_vt_supported) + req.capability_requested[0] = HBM_CAP_VT; + + ret = mei_hbm_write_message(dev, &mei_hdr, &req); + if (ret) { + dev_err(dev->dev, + "capabilities request write failed: ret = %d.\n", ret); + return ret; + } + + dev->hbm_state = MEI_HBM_CAP_SETUP; + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + mei_schedule_stall_timer(dev); + return 0; +} + +/** + * mei_hbm_enum_clients_req - sends enumeration client request message. + * + * @dev: the device structure + * + * Return: 0 on success and < 0 on failure + */ +static int mei_hbm_enum_clients_req(struct mei_device *dev) +{ + struct mei_msg_hdr mei_hdr; + struct hbm_host_enum_request req; + int ret; + + /* enumerate clients */ + mei_hbm_hdr(&mei_hdr, sizeof(req)); + + memset(&req, 0, sizeof(req)); + req.hbm_cmd = HOST_ENUM_REQ_CMD; + req.flags |= dev->hbm_f_dc_supported ? MEI_HBM_ENUM_F_ALLOW_ADD : 0; + req.flags |= dev->hbm_f_ie_supported ? 
+ MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0; + + ret = mei_hbm_write_message(dev, &mei_hdr, &req); + if (ret) { + dev_err(dev->dev, "enumeration request write failed: ret = %d.\n", + ret); + return ret; + } + dev->hbm_state = MEI_HBM_ENUM_CLIENTS; + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + mei_schedule_stall_timer(dev); + return 0; +} + +/** + * mei_hbm_me_cl_add - add new me client to the list + * + * @dev: the device structure + * @res: hbm property response + * + * Return: 0 on success and -ENOMEM on allocation failure + */ + +static int mei_hbm_me_cl_add(struct mei_device *dev, + struct hbm_props_response *res) +{ + struct mei_me_client *me_cl; + const uuid_le *uuid = &res->client_properties.protocol_name; + + mei_me_cl_rm_by_uuid(dev, uuid); + + me_cl = kzalloc(sizeof(*me_cl), GFP_KERNEL); + if (!me_cl) + return -ENOMEM; + + mei_me_cl_init(me_cl); + + me_cl->props = res->client_properties; + me_cl->client_id = res->me_addr; + me_cl->tx_flow_ctrl_creds = 0; + + mei_me_cl_add(dev, me_cl); + + return 0; +} + +/** + * mei_hbm_add_cl_resp - send response to fw on client add request + * + * @dev: the device structure + * @addr: me address + * @status: response status + * + * Return: 0 on success and < 0 on failure + */ +static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status) +{ + struct mei_msg_hdr mei_hdr; + struct hbm_add_client_response resp; + int ret; + + dev_dbg(dev->dev, "adding client response\n"); + + mei_hbm_hdr(&mei_hdr, sizeof(resp)); + + memset(&resp, 0, sizeof(resp)); + resp.hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD; + resp.me_addr = addr; + resp.status = status; + + ret = mei_hbm_write_message(dev, &mei_hdr, &resp); + if (ret) + dev_err(dev->dev, "add client response write failed: ret = %d\n", + ret); + return ret; +} + +/** + * mei_hbm_fw_add_cl_req - request from the fw to add a client + * + * @dev: the device structure + * @req: add client request + * + * Return: 0 on success and < 0 on failure + */ +static int mei_hbm_fw_add_cl_req(struct mei_device *dev, + struct hbm_add_client_request *req) +{ + int ret; + u8 status = MEI_HBMS_SUCCESS; + + BUILD_BUG_ON(sizeof(struct hbm_add_client_request) != + sizeof(struct hbm_props_response)); + + ret = mei_hbm_me_cl_add(dev, (struct hbm_props_response *)req); + if (ret) + status = !MEI_HBMS_SUCCESS; + + if (dev->dev_state == MEI_DEV_ENABLED) + schedule_work(&dev->bus_rescan_work); + + return mei_hbm_add_cl_resp(dev, req->me_addr, status); +} + +/** + * mei_hbm_cl_notify_req - send notification request + * + * @dev: the device structure + * @cl: a client to disconnect from + * @start: true for start false for stop + * + * Return: 0 on success and -EIO on write failure + */ +int mei_hbm_cl_notify_req(struct mei_device *dev, + struct mei_cl *cl, u8 start) +{ + + struct mei_msg_hdr mei_hdr; + struct hbm_notification_request req; + int ret; + + mei_hbm_hdr(&mei_hdr, sizeof(req)); + mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, sizeof(req)); + + req.start = start; + + ret = mei_hbm_write_message(dev, &mei_hdr, &req); + if (ret) + dev_err(dev->dev, "notify request failed: ret = %d\n", ret); + + return ret; +} + +/** + * notify_res_to_fop - convert notification response to the proper + * notification FOP + * + * @cmd: client notification start response command + * + * Return: MEI_FOP_NOTIFY_START or MEI_FOP_NOTIFY_STOP; + */ +static inline enum mei_cb_file_ops notify_res_to_fop(struct mei_hbm_cl_cmd *cmd) +{ + struct hbm_notification_response *rs = + (struct hbm_notification_response *)cmd; + + return 
mei_cl_notify_req2fop(rs->start); +} + +/** + * mei_hbm_cl_notify_start_res - update the client state according to the + * notify start response + * + * @dev: the device structure + * @cl: mei host client + * @cmd: client notification start response command + */ +static void mei_hbm_cl_notify_start_res(struct mei_device *dev, + struct mei_cl *cl, + struct mei_hbm_cl_cmd *cmd) +{ + struct hbm_notification_response *rs = + (struct hbm_notification_response *)cmd; + + cl_dbg(dev, cl, "hbm: notify start response status=%d\n", rs->status); + + if (rs->status == MEI_HBMS_SUCCESS || + rs->status == MEI_HBMS_ALREADY_STARTED) { + cl->notify_en = true; + cl->status = 0; + } else { + cl->status = -EINVAL; + } +} + +/** + * mei_hbm_cl_notify_stop_res - update the client state according to the + * notify stop response + * + * @dev: the device structure + * @cl: mei host client + * @cmd: client notification stop response command + */ +static void mei_hbm_cl_notify_stop_res(struct mei_device *dev, + struct mei_cl *cl, + struct mei_hbm_cl_cmd *cmd) +{ + struct hbm_notification_response *rs = + (struct hbm_notification_response *)cmd; + + cl_dbg(dev, cl, "hbm: notify stop response status=%d\n", rs->status); + + if (rs->status == MEI_HBMS_SUCCESS || + rs->status == MEI_HBMS_NOT_STARTED) { + cl->notify_en = false; + cl->status = 0; + } else { + /* TODO: spec is not clear yet about other possible issues */ + cl->status = -EINVAL; + } +} + +/** + * mei_hbm_cl_notify - signal notification event + * + * @dev: the device structure + * @cmd: notification client message + */ +static void mei_hbm_cl_notify(struct mei_device *dev, + struct mei_hbm_cl_cmd *cmd) +{ + struct mei_cl *cl; + + cl = mei_hbm_cl_find_by_cmd(dev, cmd); + if (cl) + mei_cl_notify(cl); +} + +/** + * mei_hbm_prop_req - request property for a single client + * + * @dev: the device structure + * @start_idx: client index to start search + * + * Return: 0 on success and < 0 on failure + */ +static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx) +{ + struct mei_msg_hdr mei_hdr; + struct hbm_props_request req; + unsigned long addr; + int ret; + + addr = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX, start_idx); + + /* We got all client properties */ + if (addr == MEI_CLIENTS_MAX) { + dev->hbm_state = MEI_HBM_STARTED; + mei_host_client_init(dev); + return 0; + } + + mei_hbm_hdr(&mei_hdr, sizeof(req)); + + memset(&req, 0, sizeof(req)); + + req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; + req.me_addr = addr; + + ret = mei_hbm_write_message(dev, &mei_hdr, &req); + if (ret) { + dev_err(dev->dev, "properties request write failed: ret = %d\n", + ret); + return ret; + } + + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + mei_schedule_stall_timer(dev); + + return 0; +} + +/** + * mei_hbm_pg - sends pg command + * + * @dev: the device structure + * @pg_cmd: the pg command code + * + * Return: -EIO on write failure + * -EOPNOTSUPP if the operation is not supported by the protocol + */ +int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd) +{ + struct mei_msg_hdr mei_hdr; + struct hbm_power_gate req; + int ret; + + if (!dev->hbm_f_pg_supported) + return -EOPNOTSUPP; + + mei_hbm_hdr(&mei_hdr, sizeof(req)); + + memset(&req, 0, sizeof(req)); + req.hbm_cmd = pg_cmd; + + ret = mei_hbm_write_message(dev, &mei_hdr, &req); + if (ret) + dev_err(dev->dev, "power gate command write failed.\n"); + return ret; +} +EXPORT_SYMBOL_GPL(mei_hbm_pg); + +/** + * mei_hbm_stop_req - send stop request message + * + * @dev: mei device + * + * Return: -EIO on write failure +
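* (the request carries reason DRIVER_STOP_REQUEST; the host issues it when + * shutting the link down) +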
*/ +static int mei_hbm_stop_req(struct mei_device *dev) +{ + struct mei_msg_hdr mei_hdr; + struct hbm_host_stop_request req; + + mei_hbm_hdr(&mei_hdr, sizeof(req)); + + memset(&req, 0, sizeof(req)); + req.hbm_cmd = HOST_STOP_REQ_CMD; + req.reason = DRIVER_STOP_REQUEST; + + return mei_hbm_write_message(dev, &mei_hdr, &req); +} + +/** + * mei_hbm_cl_flow_control_req - sends flow control request. + * + * @dev: the device structure + * @cl: client info + * + * Return: -EIO on write failure + */ +int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl) +{ + struct hbm_flow_control req; + + cl_dbg(dev, cl, "sending flow control\n"); + return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD, + &req, sizeof(req)); +} + +/** + * mei_hbm_add_single_tx_flow_ctrl_creds - adds single buffer credentials. + * + * @dev: the device structure + * @fctrl: flow control response bus message + * + * Return: 0 on success, < 0 otherwise + */ +static int mei_hbm_add_single_tx_flow_ctrl_creds(struct mei_device *dev, + struct hbm_flow_control *fctrl) +{ + struct mei_me_client *me_cl; + int rets; + + me_cl = mei_me_cl_by_id(dev, fctrl->me_addr); + if (!me_cl) { + dev_err(dev->dev, "no such me client %d\n", fctrl->me_addr); + return -ENOENT; + } + + if (WARN_ON(me_cl->props.single_recv_buf == 0)) { + rets = -EINVAL; + goto out; + } + + me_cl->tx_flow_ctrl_creds++; + dev_dbg(dev->dev, "recv flow ctrl msg ME %d (single) creds = %d.\n", + fctrl->me_addr, me_cl->tx_flow_ctrl_creds); + + rets = 0; +out: + mei_me_cl_put(me_cl); + return rets; +} + +/** + * mei_hbm_cl_tx_flow_ctrl_creds_res - flow control response from me + * + * @dev: the device structure + * @fctrl: flow control response bus message + */ +static void mei_hbm_cl_tx_flow_ctrl_creds_res(struct mei_device *dev, + struct hbm_flow_control *fctrl) +{ + struct mei_cl *cl; + + if (!fctrl->host_addr) { + /* single receive buffer */ + mei_hbm_add_single_tx_flow_ctrl_creds(dev, fctrl); + return; + } + + cl = mei_hbm_cl_find_by_cmd(dev, fctrl); + if (cl) { + cl->tx_flow_ctrl_creds++; + cl_dbg(dev, cl, "flow control creds = %d.\n", + cl->tx_flow_ctrl_creds); + } +} + + +/** + * mei_hbm_cl_disconnect_req - sends disconnect message to fw.
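+ * (host-initiated direction; the firmware answers with + * CLIENT_DISCONNECT_RES_CMD, handled by mei_hbm_cl_disconnect_res() below)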
+ * + * @dev: the device structure + * @cl: a client to disconnect from + * + * Return: -EIO on write failure + */ +int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl) +{ + struct hbm_client_connect_request req; + + return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD, + &req, sizeof(req)); +} + +/** + * mei_hbm_cl_disconnect_rsp - sends disconnect response to the FW + * + * @dev: the device structure + * @cl: a client to disconnect from + * + * Return: -EIO on write failure + */ +int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl) +{ + struct hbm_client_connect_response resp; + + return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD, + &resp, sizeof(resp)); +} + +/** + * mei_hbm_cl_disconnect_res - update the client state according to the + * disconnect response + * + * @dev: the device structure + * @cl: mei host client + * @cmd: disconnect client response host bus message + */ +static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct mei_cl *cl, + struct mei_hbm_cl_cmd *cmd) +{ + struct hbm_client_connect_response *rs = + (struct hbm_client_connect_response *)cmd; + + cl_dbg(dev, cl, "hbm: disconnect response status=%d\n", rs->status); + + if (rs->status == MEI_CL_DISCONN_SUCCESS) + cl->state = MEI_FILE_DISCONNECT_REPLY; + cl->status = 0; +} + +/** + * mei_hbm_cl_connect_req - send connection request to specific me client + * + * @dev: the device structure + * @cl: a client to connect to + * + * Return: -EIO on write failure + */ +int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl) +{ + struct hbm_client_connect_request req; + + return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD, + &req, sizeof(req)); +} + +/** + * mei_hbm_cl_connect_res - update the client state according to the + * connection response + * + * @dev: the device structure + * @cl: mei host client + * @cmd: connect client response host bus message + */ +static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl, + struct mei_hbm_cl_cmd *cmd) +{ + struct hbm_client_connect_response *rs = + (struct hbm_client_connect_response *)cmd; + + cl_dbg(dev, cl, "hbm: connect response status=%s\n", + mei_cl_conn_status_str(rs->status)); + + if (rs->status == MEI_CL_CONN_SUCCESS) + cl->state = MEI_FILE_CONNECTED; + else { + cl->state = MEI_FILE_DISCONNECT_REPLY; + if (rs->status == MEI_CL_CONN_NOT_FOUND) { + mei_me_cl_del(dev, cl->me_cl); + if (dev->dev_state == MEI_DEV_ENABLED) + schedule_work(&dev->bus_rescan_work); + } + } + cl->status = mei_cl_conn_status_to_errno(rs->status); +} + +/** + * mei_hbm_cl_res - process hbm response received on behalf of + * a client + * + * @dev: the device structure + * @rs: hbm client message + * @fop_type: file operation type + */ +static void mei_hbm_cl_res(struct mei_device *dev, + struct mei_hbm_cl_cmd *rs, + enum mei_cb_file_ops fop_type) +{ + struct mei_cl *cl; + struct mei_cl_cb *cb, *next; + + cl = NULL; + list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) { + + cl = cb->cl; + + if (cb->fop_type != fop_type) + continue; + + if (mei_hbm_cl_addr_equal(cl, rs)) { + list_del_init(&cb->list); + break; + } + } + + if (!cl) + return; + + switch (fop_type) { + case MEI_FOP_CONNECT: + mei_hbm_cl_connect_res(dev, cl, rs); + break; + case MEI_FOP_DISCONNECT: + mei_hbm_cl_disconnect_res(dev, cl, rs); + break; + case MEI_FOP_NOTIFY_START: + mei_hbm_cl_notify_start_res(dev, cl, rs); + break; + case MEI_FOP_NOTIFY_STOP: + mei_hbm_cl_notify_stop_res(dev, cl, rs); + break; + default: + return; + } + +
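/* a matching response was consumed: cancel the cb timeout and wake the waiter */ +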
+	cl->timer_count = 0;
+	wake_up(&cl->wait);
+}
+
+
+/**
+ * mei_hbm_fw_disconnect_req - disconnect request initiated by ME firmware;
+ *	the host sends a disconnect response in reply
+ *
+ * @dev: the device structure.
+ * @disconnect_req: disconnect request bus message from the me
+ *
+ * Return: -ENOMEM on allocation failure
+ */
+static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
+				     struct hbm_client_connect_request *disconnect_req)
+{
+	struct mei_cl *cl;
+	struct mei_cl_cb *cb;
+
+	cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req);
+	if (cl) {
+		cl_warn(dev, cl, "fw disconnect request received\n");
+		cl->state = MEI_FILE_DISCONNECTING;
+		cl->timer_count = 0;
+
+		cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT_RSP,
+					       NULL);
+		if (!cb)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * mei_hbm_pg_enter_res - PG enter response received
+ *
+ * @dev: the device structure.
+ *
+ * Return: 0 on success, -EPROTO on state mismatch
+ */
+static int mei_hbm_pg_enter_res(struct mei_device *dev)
+{
+	if (mei_pg_state(dev) != MEI_PG_OFF ||
+	    dev->pg_event != MEI_PG_EVENT_WAIT) {
+		dev_err(dev->dev, "hbm: pg entry response: state mismatch [%s, %d]\n",
+			mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
+		return -EPROTO;
+	}
+
+	dev->pg_event = MEI_PG_EVENT_RECEIVED;
+	wake_up(&dev->wait_pg);
+
+	return 0;
+}
+
+/**
+ * mei_hbm_pg_resume - process a PG resume
+ *
+ * @dev: the device structure.
+ */
+void mei_hbm_pg_resume(struct mei_device *dev)
+{
+	pm_request_resume(dev->dev);
+}
+EXPORT_SYMBOL_GPL(mei_hbm_pg_resume);
+
+/**
+ * mei_hbm_pg_exit_res - PG exit response received
+ *
+ * @dev: the device structure.
+ *
+ * Return: 0 on success, -EPROTO on state mismatch
+ */
+static int mei_hbm_pg_exit_res(struct mei_device *dev)
+{
+	if (mei_pg_state(dev) != MEI_PG_ON ||
+	    (dev->pg_event != MEI_PG_EVENT_WAIT &&
+	     dev->pg_event != MEI_PG_EVENT_IDLE)) {
+		dev_err(dev->dev, "hbm: pg exit response: state mismatch [%s, %d]\n",
+			mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
+		return -EPROTO;
+	}
+
+	switch (dev->pg_event) {
+	case MEI_PG_EVENT_WAIT:
+		dev->pg_event = MEI_PG_EVENT_RECEIVED;
+		wake_up(&dev->wait_pg);
+		break;
+	case MEI_PG_EVENT_IDLE:
+		/*
+		 * If the driver is not waiting on this then
+		 * this is HW initiated exit from PG.
+		 * Start runtime pm resume sequence to exit from PG.
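+		 * No thread sleeps on dev->wait_pg in this case, so
+		 * instead of a wake_up() the exit is kicked off
+		 * asynchronously, in effect:
+		 *
+		 *	dev->pg_event = MEI_PG_EVENT_RECEIVED;
+		 *	pm_request_resume(dev->dev);   (via mei_hbm_pg_resume())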
+ */ + dev->pg_event = MEI_PG_EVENT_RECEIVED; + mei_hbm_pg_resume(dev); + break; + default: + WARN(1, "hbm: pg exit response: unexpected pg event = %d\n", + dev->pg_event); + return -EPROTO; + } + + return 0; +} + +/** + * mei_hbm_config_features - check what hbm features and commands + * are supported by the fw + * + * @dev: the device structure + */ +static void mei_hbm_config_features(struct mei_device *dev) +{ + /* Power Gating Isolation Support */ + dev->hbm_f_pg_supported = 0; + if (dev->version.major_version > HBM_MAJOR_VERSION_PGI) + dev->hbm_f_pg_supported = 1; + + if (dev->version.major_version == HBM_MAJOR_VERSION_PGI && + dev->version.minor_version >= HBM_MINOR_VERSION_PGI) + dev->hbm_f_pg_supported = 1; + + dev->hbm_f_dc_supported = 0; + if (dev->version.major_version >= HBM_MAJOR_VERSION_DC) + dev->hbm_f_dc_supported = 1; + + dev->hbm_f_ie_supported = 0; + if (dev->version.major_version >= HBM_MAJOR_VERSION_IE) + dev->hbm_f_ie_supported = 1; + + /* disconnect on connect timeout instead of link reset */ + dev->hbm_f_dot_supported = 0; + if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT) + dev->hbm_f_dot_supported = 1; + + /* Notification Event Support */ + dev->hbm_f_ev_supported = 0; + if (dev->version.major_version >= HBM_MAJOR_VERSION_EV) + dev->hbm_f_ev_supported = 1; + + /* Fixed Address Client Support */ + dev->hbm_f_fa_supported = 0; + if (dev->version.major_version >= HBM_MAJOR_VERSION_FA) + dev->hbm_f_fa_supported = 1; + + /* OS ver message Support */ + dev->hbm_f_os_supported = 0; + if (dev->version.major_version >= HBM_MAJOR_VERSION_OS) + dev->hbm_f_os_supported = 1; + + /* DMA Ring Support */ + dev->hbm_f_dr_supported = 0; + if (dev->version.major_version > HBM_MAJOR_VERSION_DR || + (dev->version.major_version == HBM_MAJOR_VERSION_DR && + dev->version.minor_version >= HBM_MINOR_VERSION_DR)) + dev->hbm_f_dr_supported = 1; + + /* VTag Support */ + dev->hbm_f_vt_supported = 0; + if (dev->version.major_version > HBM_MAJOR_VERSION_VT || + (dev->version.major_version == HBM_MAJOR_VERSION_VT && + dev->version.minor_version >= HBM_MINOR_VERSION_VT)) + dev->hbm_f_vt_supported = 1; + + /* Capability message Support */ + dev->hbm_f_cap_supported = 0; + if (dev->version.major_version > HBM_MAJOR_VERSION_CAP || + (dev->version.major_version == HBM_MAJOR_VERSION_CAP && + dev->version.minor_version >= HBM_MINOR_VERSION_CAP)) + dev->hbm_f_cap_supported = 1; +} + +/** + * mei_hbm_version_is_supported - checks whether the driver can + * support the hbm version of the device + * + * @dev: the device structure + * Return: true if driver can support hbm version of the device + */ +bool mei_hbm_version_is_supported(struct mei_device *dev) +{ + return (dev->version.major_version < HBM_MAJOR_VERSION) || + (dev->version.major_version == HBM_MAJOR_VERSION && + dev->version.minor_version <= HBM_MINOR_VERSION); +} + +/** + * mei_hbm_dispatch - bottom half read routine after ISR to + * handle the read bus message cmd processing. 
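+ *
+ * The handler copies the message into dev->rd_msg_buf and switches on
+ * the hbm_cmd field; -EPROTO and -EIO returns are treated by the
+ * caller as fatal for the link.  A sketch of the expected call-site
+ * handling (assumed shape; the real interrupt handler differs in
+ * detail):
+ *
+ *	ret = mei_hbm_dispatch(dev, mei_hdr);
+ *	if (ret)
+ *		schedule_work(&dev->reset_work);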
+ * + * @dev: the device structure + * @hdr: header of bus message + * + * Return: 0 on success and < 0 on failure + */ +int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) +{ + struct mei_bus_message *mei_msg; + struct hbm_host_version_response *version_res; + struct hbm_props_response *props_res; + struct hbm_host_enum_response *enum_res; + struct hbm_dma_setup_response *dma_setup_res; + struct hbm_add_client_request *add_cl_req; + struct hbm_capability_response *capability_res; + int ret; + + struct mei_hbm_cl_cmd *cl_cmd; + struct hbm_client_connect_request *disconnect_req; + struct hbm_flow_control *fctrl; + + /* read the message to our buffer */ + BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf)); + mei_read_slots(dev, dev->rd_msg_buf, hdr->length); + mei_msg = (struct mei_bus_message *)dev->rd_msg_buf; + cl_cmd = (struct mei_hbm_cl_cmd *)mei_msg; + + /* ignore spurious message and prevent reset nesting + * hbm is put to idle during system reset + */ + if (dev->hbm_state == MEI_HBM_IDLE) { + dev_dbg(dev->dev, "hbm: state is idle ignore spurious messages\n"); + return 0; + } + + switch (mei_msg->hbm_cmd) { + case HOST_START_RES_CMD: + dev_dbg(dev->dev, "hbm: start: response message received.\n"); + + dev->init_clients_timer = 0; + + version_res = (struct hbm_host_version_response *)mei_msg; + + dev_dbg(dev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n", + HBM_MAJOR_VERSION, HBM_MINOR_VERSION, + version_res->me_max_version.major_version, + version_res->me_max_version.minor_version); + + if (version_res->host_version_supported) { + dev->version.major_version = HBM_MAJOR_VERSION; + dev->version.minor_version = HBM_MINOR_VERSION; + } else { + dev->version.major_version = + version_res->me_max_version.major_version; + dev->version.minor_version = + version_res->me_max_version.minor_version; + } + + if (!mei_hbm_version_is_supported(dev)) { + dev_warn(dev->dev, "hbm: start: version mismatch - stopping the driver.\n"); + + dev->hbm_state = MEI_HBM_STOPPED; + if (mei_hbm_stop_req(dev)) { + dev_err(dev->dev, "hbm: start: failed to send stop request\n"); + return -EIO; + } + break; + } + + mei_hbm_config_features(dev); + + if (dev->dev_state != MEI_DEV_INIT_CLIENTS || + dev->hbm_state != MEI_HBM_STARTING) { + dev_err(dev->dev, "hbm: start: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + if (dev->hbm_f_cap_supported) { + if (mei_hbm_capabilities_req(dev)) + return -EIO; + wake_up(&dev->wait_hbm_start); + break; + } + + if (dev->hbm_f_dr_supported) { + if (mei_dmam_ring_alloc(dev)) + dev_info(dev->dev, "running w/o dma ring\n"); + if (mei_dma_ring_is_allocated(dev)) { + if (mei_hbm_dma_setup_req(dev)) + return -EIO; + + wake_up(&dev->wait_hbm_start); + break; + } + } + + dev->hbm_f_dr_supported = 0; + mei_dmam_ring_free(dev); + + if (mei_hbm_enum_clients_req(dev)) + return -EIO; + + wake_up(&dev->wait_hbm_start); + break; + + case MEI_HBM_CAPABILITIES_RES_CMD: + dev_dbg(dev->dev, "hbm: capabilities response: message received.\n"); + + dev->init_clients_timer = 0; + + if (dev->hbm_state != MEI_HBM_CAP_SETUP) { + dev_err(dev->dev, "hbm: capabilities response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + capability_res = (struct hbm_capability_response *)mei_msg; + if (!(capability_res->capability_granted[0] & HBM_CAP_VT)) + dev->hbm_f_vt_supported = 0; + + if (dev->hbm_f_dr_supported) { + if (mei_dmam_ring_alloc(dev)) + dev_info(dev->dev, "running w/o dma ring\n"); + if 
(mei_dma_ring_is_allocated(dev)) { + if (mei_hbm_dma_setup_req(dev)) + return -EIO; + break; + } + } + + dev->hbm_f_dr_supported = 0; + mei_dmam_ring_free(dev); + + if (mei_hbm_enum_clients_req(dev)) + return -EIO; + break; + + case MEI_HBM_DMA_SETUP_RES_CMD: + dev_dbg(dev->dev, "hbm: dma setup response: message received.\n"); + + dev->init_clients_timer = 0; + + if (dev->hbm_state != MEI_HBM_DR_SETUP) { + dev_err(dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + dma_setup_res = (struct hbm_dma_setup_response *)mei_msg; + + if (dma_setup_res->status) { + u8 status = dma_setup_res->status; + + if (status == MEI_HBMS_NOT_ALLOWED) { + dev_dbg(dev->dev, "hbm: dma setup not allowed\n"); + } else { + dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n", + status, + mei_hbm_status_str(status)); + } + dev->hbm_f_dr_supported = 0; + mei_dmam_ring_free(dev); + } + + if (mei_hbm_enum_clients_req(dev)) + return -EIO; + break; + + case CLIENT_CONNECT_RES_CMD: + dev_dbg(dev->dev, "hbm: client connect response: message received.\n"); + mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_CONNECT); + break; + + case CLIENT_DISCONNECT_RES_CMD: + dev_dbg(dev->dev, "hbm: client disconnect response: message received.\n"); + mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_DISCONNECT); + break; + + case MEI_FLOW_CONTROL_CMD: + dev_dbg(dev->dev, "hbm: client flow control response: message received.\n"); + + fctrl = (struct hbm_flow_control *)mei_msg; + mei_hbm_cl_tx_flow_ctrl_creds_res(dev, fctrl); + break; + + case MEI_PG_ISOLATION_ENTRY_RES_CMD: + dev_dbg(dev->dev, "hbm: power gate isolation entry response received\n"); + ret = mei_hbm_pg_enter_res(dev); + if (ret) + return ret; + break; + + case MEI_PG_ISOLATION_EXIT_REQ_CMD: + dev_dbg(dev->dev, "hbm: power gate isolation exit request received\n"); + ret = mei_hbm_pg_exit_res(dev); + if (ret) + return ret; + break; + + case HOST_CLIENT_PROPERTIES_RES_CMD: + dev_dbg(dev->dev, "hbm: properties response: message received.\n"); + + dev->init_clients_timer = 0; + + if (dev->dev_state != MEI_DEV_INIT_CLIENTS || + dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) { + dev_err(dev->dev, "hbm: properties response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + props_res = (struct hbm_props_response *)mei_msg; + + if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) { + dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n", + props_res->me_addr); + } else if (props_res->status) { + dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n", + props_res->status, + mei_hbm_status_str(props_res->status)); + return -EPROTO; + } else { + mei_hbm_me_cl_add(dev, props_res); + } + + /* request property for the next client */ + if (mei_hbm_prop_req(dev, props_res->me_addr + 1)) + return -EIO; + + break; + + case HOST_ENUM_RES_CMD: + dev_dbg(dev->dev, "hbm: enumeration response: message received\n"); + + dev->init_clients_timer = 0; + + enum_res = (struct hbm_host_enum_response *) mei_msg; + BUILD_BUG_ON(sizeof(dev->me_clients_map) + < sizeof(enum_res->valid_addresses)); + memcpy(dev->me_clients_map, enum_res->valid_addresses, + sizeof(enum_res->valid_addresses)); + + if (dev->dev_state != MEI_DEV_INIT_CLIENTS || + dev->hbm_state != MEI_HBM_ENUM_CLIENTS) { + dev_err(dev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; + + /* first property 
request */ + if (mei_hbm_prop_req(dev, 0)) + return -EIO; + + break; + + case HOST_STOP_RES_CMD: + dev_dbg(dev->dev, "hbm: stop response: message received\n"); + + dev->init_clients_timer = 0; + + if (dev->hbm_state != MEI_HBM_STOPPED) { + dev_err(dev->dev, "hbm: stop response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + mei_set_devstate(dev, MEI_DEV_POWER_DOWN); + dev_info(dev->dev, "hbm: stop response: resetting.\n"); + /* force the reset */ + return -EPROTO; + break; + + case CLIENT_DISCONNECT_REQ_CMD: + dev_dbg(dev->dev, "hbm: disconnect request: message received\n"); + + disconnect_req = (struct hbm_client_connect_request *)mei_msg; + mei_hbm_fw_disconnect_req(dev, disconnect_req); + break; + + case ME_STOP_REQ_CMD: + dev_dbg(dev->dev, "hbm: stop request: message received\n"); + dev->hbm_state = MEI_HBM_STOPPED; + if (mei_hbm_stop_req(dev)) { + dev_err(dev->dev, "hbm: stop request: failed to send stop request\n"); + return -EIO; + } + break; + + case MEI_HBM_ADD_CLIENT_REQ_CMD: + dev_dbg(dev->dev, "hbm: add client request received\n"); + /* + * after the host receives the enum_resp + * message clients may be added or removed + */ + if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS || + dev->hbm_state >= MEI_HBM_STOPPED) { + dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + add_cl_req = (struct hbm_add_client_request *)mei_msg; + ret = mei_hbm_fw_add_cl_req(dev, add_cl_req); + if (ret) { + dev_err(dev->dev, "hbm: add client: failed to send response %d\n", + ret); + return -EIO; + } + dev_dbg(dev->dev, "hbm: add client request processed\n"); + break; + + case MEI_HBM_NOTIFY_RES_CMD: + dev_dbg(dev->dev, "hbm: notify response received\n"); + mei_hbm_cl_res(dev, cl_cmd, notify_res_to_fop(cl_cmd)); + break; + + case MEI_HBM_NOTIFICATION_CMD: + dev_dbg(dev->dev, "hbm: notification\n"); + mei_hbm_cl_notify(dev, cl_cmd); + break; + + default: + WARN(1, "hbm: wrong command %d\n", mei_msg->hbm_cmd); + return -EPROTO; + + } + return 0; +} + diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h new file mode 100644 index 000000000..4d95e38e4 --- /dev/null +++ b/drivers/misc/mei/hbm.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2003-2018, Intel Corporation. All rights reserved. 
+ * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#ifndef _MEI_HBM_H_ +#define _MEI_HBM_H_ + +struct mei_device; +struct mei_msg_hdr; +struct mei_cl; + +/** + * enum mei_hbm_state - host bus message protocol state + * + * @MEI_HBM_IDLE : protocol not started + * @MEI_HBM_STARTING : start request message was sent + * @MEI_HBM_CAP_SETUP : capabilities request message was sent + * @MEI_HBM_DR_SETUP : dma ring setup request message was sent + * @MEI_HBM_ENUM_CLIENTS : enumeration request was sent + * @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties + * @MEI_HBM_STARTED : enumeration was completed + * @MEI_HBM_STOPPED : stopping exchange + */ +enum mei_hbm_state { + MEI_HBM_IDLE = 0, + MEI_HBM_STARTING, + MEI_HBM_CAP_SETUP, + MEI_HBM_DR_SETUP, + MEI_HBM_ENUM_CLIENTS, + MEI_HBM_CLIENT_PROPERTIES, + MEI_HBM_STARTED, + MEI_HBM_STOPPED, +}; + +const char *mei_hbm_state_str(enum mei_hbm_state state); + +int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr); + +void mei_hbm_idle(struct mei_device *dev); +void mei_hbm_reset(struct mei_device *dev); +int mei_hbm_start_req(struct mei_device *dev); +int mei_hbm_start_wait(struct mei_device *dev); +int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl); +int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl); +int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl); +int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl); +bool mei_hbm_version_is_supported(struct mei_device *dev); +int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd); +void mei_hbm_pg_resume(struct mei_device *dev); +int mei_hbm_cl_notify_req(struct mei_device *dev, + struct mei_cl *cl, u8 request); + +#endif /* _MEI_HBM_H_ */ + diff --git a/drivers/misc/mei/hdcp/Kconfig b/drivers/misc/mei/hdcp/Kconfig new file mode 100644 index 000000000..95b2d6d37 --- /dev/null +++ b/drivers/misc/mei/hdcp/Kconfig @@ -0,0 +1,13 @@ + +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2019, Intel Corporation. All rights reserved. +# +config INTEL_MEI_HDCP + tristate "Intel HDCP2.2 services of ME Interface" + select INTEL_MEI_ME + depends on DRM_I915 + help + MEI Support for HDCP2.2 Services on Intel platforms. + + Enables the ME FW services required for HDCP2.2 support through + I915 display driver of Intel. diff --git a/drivers/misc/mei/hdcp/Makefile b/drivers/misc/mei/hdcp/Makefile new file mode 100644 index 000000000..3fbb56485 --- /dev/null +++ b/drivers/misc/mei/hdcp/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (c) 2019, Intel Corporation. All rights reserved. +# +# Makefile - HDCP client driver for Intel MEI Bus Driver. + +obj-$(CONFIG_INTEL_MEI_HDCP) += mei_hdcp.o diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c new file mode 100644 index 000000000..9ae9669e4 --- /dev/null +++ b/drivers/misc/mei/hdcp/mei_hdcp.c @@ -0,0 +1,880 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2019 Intel Corporation + * + * mei_hdcp.c: HDCP client driver for mei bus + * + * Author: + * Ramalingam C <ramalingam.c@intel.com> + */ + +/** + * DOC: MEI_HDCP Client Driver + * + * The mei_hdcp driver acts as a translation layer between HDCP 2.2 + * protocol implementer (I915) and ME FW by translating HDCP2.2 + * negotiation messages to ME FW command payloads and vice versa. 
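+ *
+ * Every operation below follows one wire pattern: fill a
+ * wired_cmd_*_in structure (header, hdcp_port_id, payload), send it
+ * with mei_cldev_send(), receive the matching wired_cmd_*_out reply
+ * with mei_cldev_recv(), and check header.status.  Schematically (a
+ * condensed sketch of that pattern, not a helper from this file):
+ *
+ *	byte = mei_cldev_send(cldev, (u8 *)&in, sizeof(in));
+ *	if (byte < 0)
+ *		return byte;
+ *	byte = mei_cldev_recv(cldev, (u8 *)&out, sizeof(out));
+ *	if (byte < 0)
+ *		return byte;
+ *	if (out.header.status != ME_HDCP_STATUS_SUCCESS)
+ *		return -EIO;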
+ */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/uuid.h> +#include <linux/mei_cl_bus.h> +#include <linux/component.h> +#include <drm/drm_connector.h> +#include <drm/i915_component.h> +#include <drm/i915_mei_hdcp_interface.h> + +#include "mei_hdcp.h" + +/** + * mei_hdcp_initiate_session() - Initiate a Wired HDCP2.2 Tx Session in ME FW + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @ake_data: AKE_Init msg output. + * + * Return: 0 on Success, <0 on Failure. + */ +static int +mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_init *ake_data) +{ + struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } }; + struct wired_cmd_initiate_hdcp2_session_out + session_init_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data || !ake_data) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + session_init_in.header.api_version = HDCP_API_VERSION; + session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION; + session_init_in.header.status = ME_HDCP_STATUS_SUCCESS; + session_init_in.header.buffer_len = + WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN; + + session_init_in.port.integrated_port_type = data->port_type; + session_init_in.port.physical_port = (u8)data->fw_ddi; + session_init_in.port.attached_transcoder = (u8)data->fw_tc; + session_init_in.protocol = data->protocol; + + byte = mei_cldev_send(cldev, (u8 *)&session_init_in, + sizeof(session_init_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&session_init_out, + sizeof(session_init_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (session_init_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X Failed. 
Status: 0x%X\n", + WIRED_INITIATE_HDCP2_SESSION, + session_init_out.header.status); + return -EIO; + } + + ake_data->msg_id = HDCP_2_2_AKE_INIT; + ake_data->tx_caps = session_init_out.tx_caps; + memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN); + + return 0; +} + +/** + * mei_hdcp_verify_receiver_cert_prepare_km() - Verify the Receiver Certificate + * AKE_Send_Cert and prepare AKE_Stored_Km/AKE_No_Stored_Km + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @rx_cert: AKE_Send_Cert for verification + * @km_stored: Pairing status flag output + * @ek_pub_km: AKE_Stored_Km/AKE_No_Stored_Km output msg + * @msg_sz : size of AKE_XXXXX_Km output msg + * + * Return: 0 on Success, <0 on Failure + */ +static int +mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_cert *rx_cert, + bool *km_stored, + struct hdcp2_ake_no_stored_km + *ek_pub_km, + size_t *msg_sz) +{ + struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } }; + struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + verify_rxcert_in.header.api_version = HDCP_API_VERSION; + verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT; + verify_rxcert_in.header.status = ME_HDCP_STATUS_SUCCESS; + verify_rxcert_in.header.buffer_len = + WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN; + + verify_rxcert_in.port.integrated_port_type = data->port_type; + verify_rxcert_in.port.physical_port = (u8)data->fw_ddi; + verify_rxcert_in.port.attached_transcoder = (u8)data->fw_tc; + + verify_rxcert_in.cert_rx = rx_cert->cert_rx; + memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN); + memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN); + + byte = mei_cldev_send(cldev, (u8 *)&verify_rxcert_in, + sizeof(verify_rxcert_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed: %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&verify_rxcert_out, + sizeof(verify_rxcert_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed: %zd\n", byte); + return byte; + } + + if (verify_rxcert_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n", + WIRED_VERIFY_RECEIVER_CERT, + verify_rxcert_out.header.status); + return -EIO; + } + + *km_stored = !!verify_rxcert_out.km_stored; + if (verify_rxcert_out.km_stored) { + ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM; + *msg_sz = sizeof(struct hdcp2_ake_stored_km); + } else { + ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM; + *msg_sz = sizeof(struct hdcp2_ake_no_stored_km); + } + + memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff, + sizeof(verify_rxcert_out.ekm_buff)); + + return 0; +} + +/** + * mei_hdcp_verify_hprime() - Verify AKE_Send_H_prime at ME FW. 
+ * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @rx_hprime: AKE_Send_H_prime msg for ME FW verification + * + * Return: 0 on Success, <0 on Failure + */ +static int +mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_send_hprime *rx_hprime) +{ + struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } }; + struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data || !rx_hprime) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + send_hprime_in.header.api_version = HDCP_API_VERSION; + send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME; + send_hprime_in.header.status = ME_HDCP_STATUS_SUCCESS; + send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN; + + send_hprime_in.port.integrated_port_type = data->port_type; + send_hprime_in.port.physical_port = (u8)data->fw_ddi; + send_hprime_in.port.attached_transcoder = (u8)data->fw_tc; + + memcpy(send_hprime_in.h_prime, rx_hprime->h_prime, + HDCP_2_2_H_PRIME_LEN); + + byte = mei_cldev_send(cldev, (u8 *)&send_hprime_in, + sizeof(send_hprime_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&send_hprime_out, + sizeof(send_hprime_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (send_hprime_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n", + WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status); + return -EIO; + } + + return 0; +} + +/** + * mei_hdcp_store_pairing_info() - Store pairing info received at ME FW + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @pairing_info: AKE_Send_Pairing_Info msg input to ME FW + * + * Return: 0 on Success, <0 on Failure + */ +static int +mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_send_pairing_info *pairing_info) +{ + struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } }; + struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data || !pairing_info) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + pairing_info_in.header.api_version = HDCP_API_VERSION; + pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO; + pairing_info_in.header.status = ME_HDCP_STATUS_SUCCESS; + pairing_info_in.header.buffer_len = + WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN; + + pairing_info_in.port.integrated_port_type = data->port_type; + pairing_info_in.port.physical_port = (u8)data->fw_ddi; + pairing_info_in.port.attached_transcoder = (u8)data->fw_tc; + + memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km, + HDCP_2_2_E_KH_KM_LEN); + + byte = mei_cldev_send(cldev, (u8 *)&pairing_info_in, + sizeof(pairing_info_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&pairing_info_out, + sizeof(pairing_info_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (pairing_info_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X failed. 
Status: 0x%X\n", + WIRED_AKE_SEND_PAIRING_INFO, + pairing_info_out.header.status); + return -EIO; + } + + return 0; +} + +/** + * mei_hdcp_initiate_locality_check() - Prepare LC_Init + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @lc_init_data: LC_Init msg output + * + * Return: 0 on Success, <0 on Failure + */ +static int +mei_hdcp_initiate_locality_check(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_lc_init *lc_init_data) +{ + struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } }; + struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data || !lc_init_data) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + lc_init_in.header.api_version = HDCP_API_VERSION; + lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK; + lc_init_in.header.status = ME_HDCP_STATUS_SUCCESS; + lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN; + + lc_init_in.port.integrated_port_type = data->port_type; + lc_init_in.port.physical_port = (u8)data->fw_ddi; + lc_init_in.port.attached_transcoder = (u8)data->fw_tc; + + byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&lc_init_out, sizeof(lc_init_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (lc_init_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X Failed. status: 0x%X\n", + WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status); + return -EIO; + } + + lc_init_data->msg_id = HDCP_2_2_LC_INIT; + memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN); + + return 0; +} + +/** + * mei_hdcp_verify_lprime() - Verify lprime. + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @rx_lprime: LC_Send_L_prime msg for ME FW verification + * + * Return: 0 on Success, <0 on Failure + */ +static int +mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_lc_send_lprime *rx_lprime) +{ + struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } }; + struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data || !rx_lprime) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + verify_lprime_in.header.api_version = HDCP_API_VERSION; + verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY; + verify_lprime_in.header.status = ME_HDCP_STATUS_SUCCESS; + verify_lprime_in.header.buffer_len = + WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN; + + verify_lprime_in.port.integrated_port_type = data->port_type; + verify_lprime_in.port.physical_port = (u8)data->fw_ddi; + verify_lprime_in.port.attached_transcoder = (u8)data->fw_tc; + + memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime, + HDCP_2_2_L_PRIME_LEN); + + byte = mei_cldev_send(cldev, (u8 *)&verify_lprime_in, + sizeof(verify_lprime_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&verify_lprime_out, + sizeof(verify_lprime_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (verify_lprime_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X failed. 
status: 0x%X\n", + WIRED_VALIDATE_LOCALITY, + verify_lprime_out.header.status); + return -EIO; + } + + return 0; +} + +/** + * mei_hdcp_get_session_key() - Prepare SKE_Send_Eks. + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @ske_data: SKE_Send_Eks msg output from ME FW. + * + * Return: 0 on Success, <0 on Failure + */ +static int mei_hdcp_get_session_key(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ske_send_eks *ske_data) +{ + struct wired_cmd_get_session_key_in get_skey_in = { { 0 } }; + struct wired_cmd_get_session_key_out get_skey_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data || !ske_data) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + get_skey_in.header.api_version = HDCP_API_VERSION; + get_skey_in.header.command_id = WIRED_GET_SESSION_KEY; + get_skey_in.header.status = ME_HDCP_STATUS_SUCCESS; + get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN; + + get_skey_in.port.integrated_port_type = data->port_type; + get_skey_in.port.physical_port = (u8)data->fw_ddi; + get_skey_in.port.attached_transcoder = (u8)data->fw_tc; + + byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&get_skey_out, sizeof(get_skey_out)); + + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (get_skey_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n", + WIRED_GET_SESSION_KEY, get_skey_out.header.status); + return -EIO; + } + + ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS; + memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks, + HDCP_2_2_E_DKEY_KS_LEN); + memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN); + + return 0; +} + +/** + * mei_hdcp_repeater_check_flow_prepare_ack() - Validate the Downstream topology + * and prepare rep_ack. + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @rep_topology: Receiver ID List to be validated + * @rep_send_ack : repeater ack from ME FW. 
+ * + * Return: 0 on Success, <0 on Failure + */ +static int +mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_send_receiverid_list + *rep_topology, + struct hdcp2_rep_send_ack + *rep_send_ack) +{ + struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } }; + struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !rep_topology || !rep_send_ack || !data) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + verify_repeater_in.header.api_version = HDCP_API_VERSION; + verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER; + verify_repeater_in.header.status = ME_HDCP_STATUS_SUCCESS; + verify_repeater_in.header.buffer_len = + WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN; + + verify_repeater_in.port.integrated_port_type = data->port_type; + verify_repeater_in.port.physical_port = (u8)data->fw_ddi; + verify_repeater_in.port.attached_transcoder = (u8)data->fw_tc; + + memcpy(verify_repeater_in.rx_info, rep_topology->rx_info, + HDCP_2_2_RXINFO_LEN); + memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v, + HDCP_2_2_SEQ_NUM_LEN); + memcpy(verify_repeater_in.v_prime, rep_topology->v_prime, + HDCP_2_2_V_PRIME_HALF_LEN); + memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids, + HDCP_2_2_RECEIVER_IDS_MAX_LEN); + + byte = mei_cldev_send(cldev, (u8 *)&verify_repeater_in, + sizeof(verify_repeater_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&verify_repeater_out, + sizeof(verify_repeater_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (verify_repeater_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n", + WIRED_VERIFY_REPEATER, + verify_repeater_out.header.status); + return -EIO; + } + + memcpy(rep_send_ack->v, verify_repeater_out.v, + HDCP_2_2_V_PRIME_HALF_LEN); + rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK; + + return 0; +} + +/** + * mei_hdcp_verify_mprime() - Verify mprime. + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @stream_ready: RepeaterAuth_Stream_Ready msg for ME FW verification. 
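+ *
+ * Unlike the fixed-size commands, this request ends in a flexible
+ * array of data->k stream entries, so the command buffer is sized at
+ * run time and heap-allocated, as in the body below:
+ *
+ *	cmd_size = struct_size(verify_mprime_in, streams, data->k);
+ *	verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL);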
+ * + * Return: 0 on Success, <0 on Failure + */ +static int mei_hdcp_verify_mprime(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_stream_ready *stream_ready) +{ + struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in; + struct wired_cmd_repeater_auth_stream_req_out + verify_mprime_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + size_t cmd_size; + + if (!dev || !stream_ready || !data) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + cmd_size = struct_size(verify_mprime_in, streams, data->k); + if (cmd_size == SIZE_MAX) + return -EINVAL; + + verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL); + if (!verify_mprime_in) + return -ENOMEM; + + verify_mprime_in->header.api_version = HDCP_API_VERSION; + verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ; + verify_mprime_in->header.status = ME_HDCP_STATUS_SUCCESS; + verify_mprime_in->header.buffer_len = + WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN; + + verify_mprime_in->port.integrated_port_type = data->port_type; + verify_mprime_in->port.physical_port = (u8)data->fw_ddi; + verify_mprime_in->port.attached_transcoder = (u8)data->fw_tc; + + memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN); + drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m); + + memcpy(verify_mprime_in->streams, data->streams, + array_size(data->k, sizeof(*data->streams))); + + verify_mprime_in->k = cpu_to_be16(data->k); + + byte = mei_cldev_send(cldev, (u8 *)verify_mprime_in, cmd_size); + kfree(verify_mprime_in); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&verify_mprime_out, + sizeof(verify_mprime_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (verify_mprime_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n", + WIRED_REPEATER_AUTH_STREAM_REQ, + verify_mprime_out.header.status); + return -EIO; + } + + return 0; +} + +/** + * mei_hdcp_enable_authentication() - Mark a port as authenticated + * through ME FW + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * + * Return: 0 on Success, <0 on Failure + */ +static int mei_hdcp_enable_authentication(struct device *dev, + struct hdcp_port_data *data) +{ + struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } }; + struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + enable_auth_in.header.api_version = HDCP_API_VERSION; + enable_auth_in.header.command_id = WIRED_ENABLE_AUTH; + enable_auth_in.header.status = ME_HDCP_STATUS_SUCCESS; + enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN; + + enable_auth_in.port.integrated_port_type = data->port_type; + enable_auth_in.port.physical_port = (u8)data->fw_ddi; + enable_auth_in.port.attached_transcoder = (u8)data->fw_tc; + enable_auth_in.stream_type = data->streams[0].stream_type; + + byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in, + sizeof(enable_auth_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&enable_auth_out, + sizeof(enable_auth_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. 
%zd\n", byte); + return byte; + } + + if (enable_auth_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n", + WIRED_ENABLE_AUTH, enable_auth_out.header.status); + return -EIO; + } + + return 0; +} + +/** + * mei_hdcp_close_session() - Close the Wired HDCP Tx session of ME FW per port. + * This also disables the authenticated state of the port. + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * + * Return: 0 on Success, <0 on Failure + */ +static int +mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data) +{ + struct wired_cmd_close_session_in session_close_in = { { 0 } }; + struct wired_cmd_close_session_out session_close_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + session_close_in.header.api_version = HDCP_API_VERSION; + session_close_in.header.command_id = WIRED_CLOSE_SESSION; + session_close_in.header.status = ME_HDCP_STATUS_SUCCESS; + session_close_in.header.buffer_len = + WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN; + + session_close_in.port.integrated_port_type = data->port_type; + session_close_in.port.physical_port = (u8)data->fw_ddi; + session_close_in.port.attached_transcoder = (u8)data->fw_tc; + + byte = mei_cldev_send(cldev, (u8 *)&session_close_in, + sizeof(session_close_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&session_close_out, + sizeof(session_close_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (session_close_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "Session Close Failed. status: 0x%X\n", + session_close_out.header.status); + return -EIO; + } + + return 0; +} + +static const struct i915_hdcp_component_ops mei_hdcp_ops = { + .owner = THIS_MODULE, + .initiate_hdcp2_session = mei_hdcp_initiate_session, + .verify_receiver_cert_prepare_km = + mei_hdcp_verify_receiver_cert_prepare_km, + .verify_hprime = mei_hdcp_verify_hprime, + .store_pairing_info = mei_hdcp_store_pairing_info, + .initiate_locality_check = mei_hdcp_initiate_locality_check, + .verify_lprime = mei_hdcp_verify_lprime, + .get_session_key = mei_hdcp_get_session_key, + .repeater_check_flow_prepare_ack = + mei_hdcp_repeater_check_flow_prepare_ack, + .verify_mprime = mei_hdcp_verify_mprime, + .enable_hdcp_authentication = mei_hdcp_enable_authentication, + .close_hdcp_session = mei_hdcp_close_session, +}; + +static int mei_component_master_bind(struct device *dev) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + struct i915_hdcp_comp_master *comp_master = + mei_cldev_get_drvdata(cldev); + int ret; + + dev_dbg(dev, "%s\n", __func__); + comp_master->ops = &mei_hdcp_ops; + comp_master->mei_dev = dev; + ret = component_bind_all(dev, comp_master); + if (ret < 0) + return ret; + + return 0; +} + +static void mei_component_master_unbind(struct device *dev) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + struct i915_hdcp_comp_master *comp_master = + mei_cldev_get_drvdata(cldev); + + dev_dbg(dev, "%s\n", __func__); + component_unbind_all(dev, comp_master); +} + +static const struct component_master_ops mei_component_master_ops = { + .bind = mei_component_master_bind, + .unbind = mei_component_master_unbind, +}; + +/** + * mei_hdcp_component_match - compare function for matching mei hdcp. 
+ * + * The function checks if the driver is i915, the subcomponent is HDCP + * and the grand parent of hdcp and the parent of i915 are the same + * PCH device. + * + * @dev: master device + * @subcomponent: subcomponent to match (I915_COMPONENT_HDCP) + * @data: compare data (mei hdcp device) + * + * Return: + * * 1 - if components match + * * 0 - otherwise + */ +static int mei_hdcp_component_match(struct device *dev, int subcomponent, + void *data) +{ + struct device *base = data; + + if (strcmp(dev->driver->name, "i915") || + subcomponent != I915_COMPONENT_HDCP) + return 0; + + base = base->parent; + if (!base) + return 0; + + base = base->parent; + dev = dev->parent; + + return (base && dev && dev == base); +} + +static int mei_hdcp_probe(struct mei_cl_device *cldev, + const struct mei_cl_device_id *id) +{ + struct i915_hdcp_comp_master *comp_master; + struct component_match *master_match; + int ret; + + ret = mei_cldev_enable(cldev); + if (ret < 0) { + dev_err(&cldev->dev, "mei_cldev_enable Failed. %d\n", ret); + goto enable_err_exit; + } + + comp_master = kzalloc(sizeof(*comp_master), GFP_KERNEL); + if (!comp_master) { + ret = -ENOMEM; + goto err_exit; + } + + master_match = NULL; + component_match_add_typed(&cldev->dev, &master_match, + mei_hdcp_component_match, &cldev->dev); + if (IS_ERR_OR_NULL(master_match)) { + ret = -ENOMEM; + goto err_exit; + } + + mei_cldev_set_drvdata(cldev, comp_master); + ret = component_master_add_with_match(&cldev->dev, + &mei_component_master_ops, + master_match); + if (ret < 0) { + dev_err(&cldev->dev, "Master comp add failed %d\n", ret); + goto err_exit; + } + + return 0; + +err_exit: + mei_cldev_set_drvdata(cldev, NULL); + kfree(comp_master); + mei_cldev_disable(cldev); +enable_err_exit: + return ret; +} + +static int mei_hdcp_remove(struct mei_cl_device *cldev) +{ + struct i915_hdcp_comp_master *comp_master = + mei_cldev_get_drvdata(cldev); + + component_master_del(&cldev->dev, &mei_component_master_ops); + kfree(comp_master); + mei_cldev_set_drvdata(cldev, NULL); + + return mei_cldev_disable(cldev); +} + +#define MEI_UUID_HDCP GUID_INIT(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \ + 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04) + +static const struct mei_cl_device_id mei_hdcp_tbl[] = { + { .uuid = MEI_UUID_HDCP, .version = MEI_CL_VERSION_ANY }, + { } +}; +MODULE_DEVICE_TABLE(mei, mei_hdcp_tbl); + +static struct mei_cl_driver mei_hdcp_driver = { + .id_table = mei_hdcp_tbl, + .name = KBUILD_MODNAME, + .probe = mei_hdcp_probe, + .remove = mei_hdcp_remove, +}; + +module_mei_cl_driver(mei_hdcp_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MEI HDCP"); diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h new file mode 100644 index 000000000..834757f5e --- /dev/null +++ b/drivers/misc/mei/hdcp/mei_hdcp.h @@ -0,0 +1,368 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright © 2019 Intel Corporation + * + * Authors: + * Ramalingam C <ramalingam.c@intel.com> + */ + +#ifndef __MEI_HDCP_H__ +#define __MEI_HDCP_H__ + +#include <drm/drm_hdcp.h> + +/* me_hdcp_status: Enumeration of all HDCP Status Codes */ +enum me_hdcp_status { + ME_HDCP_STATUS_SUCCESS = 0x0000, + + /* WiDi Generic Status Codes */ + ME_HDCP_STATUS_INTERNAL_ERROR = 0x1000, + ME_HDCP_STATUS_UNKNOWN_ERROR = 0x1001, + ME_HDCP_STATUS_INCORRECT_API_VERSION = 0x1002, + ME_HDCP_STATUS_INVALID_FUNCTION = 0x1003, + ME_HDCP_STATUS_INVALID_BUFFER_LENGTH = 0x1004, + ME_HDCP_STATUS_INVALID_PARAMS = 0x1005, + ME_HDCP_STATUS_AUTHENTICATION_FAILED 
= 0x1006, + + /* WiDi Status Codes */ + ME_HDCP_INVALID_SESSION_STATE = 0x6000, + ME_HDCP_SRM_FRAGMENT_UNEXPECTED = 0x6001, + ME_HDCP_SRM_INVALID_LENGTH = 0x6002, + ME_HDCP_SRM_FRAGMENT_OFFSET_INVALID = 0x6003, + ME_HDCP_SRM_VERIFICATION_FAILED = 0x6004, + ME_HDCP_SRM_VERSION_TOO_OLD = 0x6005, + ME_HDCP_RX_CERT_VERIFICATION_FAILED = 0x6006, + ME_HDCP_RX_REVOKED = 0x6007, + ME_HDCP_H_VERIFICATION_FAILED = 0x6008, + ME_HDCP_REPEATER_CHECK_UNEXPECTED = 0x6009, + ME_HDCP_TOPOLOGY_MAX_EXCEEDED = 0x600A, + ME_HDCP_V_VERIFICATION_FAILED = 0x600B, + ME_HDCP_L_VERIFICATION_FAILED = 0x600C, + ME_HDCP_STREAM_KEY_ALLOC_FAILED = 0x600D, + ME_HDCP_BASE_KEY_RESET_FAILED = 0x600E, + ME_HDCP_NONCE_GENERATION_FAILED = 0x600F, + ME_HDCP_STATUS_INVALID_E_KEY_STATE = 0x6010, + ME_HDCP_STATUS_INVALID_CS_ICV = 0x6011, + ME_HDCP_STATUS_INVALID_KB_KEY_STATE = 0x6012, + ME_HDCP_STATUS_INVALID_PAVP_MODE_ICV = 0x6013, + ME_HDCP_STATUS_INVALID_PAVP_MODE = 0x6014, + ME_HDCP_STATUS_LC_MAX_ATTEMPTS = 0x6015, + + /* New status for HDCP 2.1 */ + ME_HDCP_STATUS_MISMATCH_IN_M = 0x6016, + + /* New status code for HDCP 2.2 Rx */ + ME_HDCP_STATUS_RX_PROV_NOT_ALLOWED = 0x6017, + ME_HDCP_STATUS_RX_PROV_WRONG_SUBJECT = 0x6018, + ME_HDCP_RX_NEEDS_PROVISIONING = 0x6019, + ME_HDCP_BKSV_ICV_AUTH_FAILED = 0x6020, + ME_HDCP_STATUS_INVALID_STREAM_ID = 0x6021, + ME_HDCP_STATUS_CHAIN_NOT_INITIALIZED = 0x6022, + ME_HDCP_FAIL_NOT_EXPECTED = 0x6023, + ME_HDCP_FAIL_HDCP_OFF = 0x6024, + ME_HDCP_FAIL_INVALID_PAVP_MEMORY_MODE = 0x6025, + ME_HDCP_FAIL_AES_ECB_FAILURE = 0x6026, + ME_HDCP_FEATURE_NOT_SUPPORTED = 0x6027, + ME_HDCP_DMA_READ_ERROR = 0x6028, + ME_HDCP_DMA_WRITE_ERROR = 0x6029, + ME_HDCP_FAIL_INVALID_PACKET_SIZE = 0x6030, + ME_HDCP_H264_PARSING_ERROR = 0x6031, + ME_HDCP_HDCP2_ERRATA_VIDEO_VIOLATION = 0x6032, + ME_HDCP_HDCP2_ERRATA_AUDIO_VIOLATION = 0x6033, + ME_HDCP_TX_ACTIVE_ERROR = 0x6034, + ME_HDCP_MODE_CHANGE_ERROR = 0x6035, + ME_HDCP_STREAM_TYPE_ERROR = 0x6036, + ME_HDCP_STREAM_MANAGE_NOT_POSSIBLE = 0x6037, + + ME_HDCP_STATUS_PORT_INVALID_COMMAND = 0x6038, + ME_HDCP_STATUS_UNSUPPORTED_PROTOCOL = 0x6039, + ME_HDCP_STATUS_INVALID_PORT_INDEX = 0x603a, + ME_HDCP_STATUS_TX_AUTH_NEEDED = 0x603b, + ME_HDCP_STATUS_NOT_INTEGRATED_PORT = 0x603c, + ME_HDCP_STATUS_SESSION_MAX_REACHED = 0x603d, + + /* hdcp capable bit is not set in rx_caps(error is unique to DP) */ + ME_HDCP_STATUS_NOT_HDCP_CAPABLE = 0x6041, + + ME_HDCP_STATUS_INVALID_STREAM_COUNT = 0x6042, +}; + +#define HDCP_API_VERSION 0x00010000 + +#define HDCP_M_LEN 16 +#define HDCP_KH_LEN 16 + +/* Payload Buffer size(Excluding Header) for CMDs and corresponding response */ +/* Wired_Tx_AKE */ +#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN (4 + 1) +#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_OUT (4 + 8 + 3) + +#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN (4 + 522 + 8 + 3) +#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MIN_OUT (4 + 1 + 3 + 16 + 16) +#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MAX_OUT (4 + 1 + 3 + 128) + +#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN (4 + 32) +#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_OUT (4) + +#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN (4 + 16) +#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_OUT (4) + +#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN (4) +#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_OUT (4) + +/* Wired_Tx_LC */ +#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN (4) +#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_OUT (4 + 8) + +#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN (4 + 32) +#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_OUT (4) 
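+
+/*
+ * The *_IN/*_OUT lengths above count only the payload that follows
+ * the hdcp_cmd_header, so each should equal the size of the matching
+ * wired_cmd_* structure below minus the header.  A compile-time
+ * cross-check could look like this (illustrative only, not part of
+ * the upstream header):
+ *
+ *	static_assert(sizeof(struct wired_cmd_validate_locality_in) ==
+ *		      sizeof(struct hdcp_cmd_header) +
+ *		      WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN);
+ */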
+ +/* Wired_Tx_SKE */ +#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN (4) +#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT (4 + 16 + 8) + +/* Wired_Tx_SKE */ +#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN (4 + 1) +#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_OUT (4) + +/* Wired_Tx_Repeater */ +#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN (4 + 2 + 3 + 16 + 155) +#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_OUT (4 + 1 + 16) + +#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN (4 + 3 + \ + 32 + 2 + 2) + +#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_OUT (4) + +/* hdcp_command_id: Enumeration of all WIRED HDCP Command IDs */ +enum hdcp_command_id { + _WIDI_COMMAND_BASE = 0x00030000, + WIDI_INITIATE_HDCP2_SESSION = _WIDI_COMMAND_BASE, + HDCP_GET_SRM_STATUS, + HDCP_SEND_SRM_FRAGMENT, + + /* The wired HDCP Tx commands */ + _WIRED_COMMAND_BASE = 0x00031000, + WIRED_INITIATE_HDCP2_SESSION = _WIRED_COMMAND_BASE, + WIRED_VERIFY_RECEIVER_CERT, + WIRED_AKE_SEND_HPRIME, + WIRED_AKE_SEND_PAIRING_INFO, + WIRED_INIT_LOCALITY_CHECK, + WIRED_VALIDATE_LOCALITY, + WIRED_GET_SESSION_KEY, + WIRED_ENABLE_AUTH, + WIRED_VERIFY_REPEATER, + WIRED_REPEATER_AUTH_STREAM_REQ, + WIRED_CLOSE_SESSION, + + _WIRED_COMMANDS_COUNT, +}; + +union encrypted_buff { + u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN]; + u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN]; + struct { + u8 e_kh_km[HDCP_KH_LEN]; + u8 m[HDCP_M_LEN]; + } __packed; +}; + +/* HDCP HECI message header. All header values are little endian. */ +struct hdcp_cmd_header { + u32 api_version; + u32 command_id; + enum me_hdcp_status status; + /* Length of the HECI message (excluding the header) */ + u32 buffer_len; +} __packed; + +/* Empty command request or response. No data follows the header. */ +struct hdcp_cmd_no_data { + struct hdcp_cmd_header header; +} __packed; + +/* Uniquely identifies the hdcp port being addressed for a given command. */ +struct hdcp_port_id { + u8 integrated_port_type; + /* physical_port is used until Gen11.5. Must be zero for Gen11.5+ */ + u8 physical_port; + /* attached_transcoder is for Gen11.5+. Set to zero for <Gen11.5 */ + u8 attached_transcoder; + u8 reserved; +} __packed; + +/* + * Data structures for integrated wired HDCP2 Tx in + * support of the AKE protocol + */ +/* HECI struct for integrated wired HDCP Tx session initiation. */ +struct wired_cmd_initiate_hdcp2_session_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 protocol; /* for HDMI vs DP */ +} __packed; + +struct wired_cmd_initiate_hdcp2_session_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 r_tx[HDCP_2_2_RTX_LEN]; + struct hdcp2_tx_caps tx_caps; +} __packed; + +/* HECI struct for ending an integrated wired HDCP Tx session. */ +struct wired_cmd_close_session_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +struct wired_cmd_close_session_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* HECI struct for integrated wired HDCP Tx Rx Cert verification. 
*/ +struct wired_cmd_verify_receiver_cert_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + struct hdcp2_cert_rx cert_rx; + u8 r_rx[HDCP_2_2_RRX_LEN]; + u8 rx_caps[HDCP_2_2_RXCAPS_LEN]; +} __packed; + +struct wired_cmd_verify_receiver_cert_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 km_stored; + u8 reserved[3]; + union encrypted_buff ekm_buff; +} __packed; + +/* HECI struct for verification of Rx's Hprime in a HDCP Tx session */ +struct wired_cmd_ake_send_hprime_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 h_prime[HDCP_2_2_H_PRIME_LEN]; +} __packed; + +struct wired_cmd_ake_send_hprime_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* + * HECI struct for sending in AKE pairing data generated by the Rx in an + * integrated wired HDCP Tx session. + */ +struct wired_cmd_ake_send_pairing_info_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN]; +} __packed; + +struct wired_cmd_ake_send_pairing_info_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* Data structures for integrated wired HDCP2 Tx in support of the LC protocol*/ +/* + * HECI struct for initiating locality check with an + * integrated wired HDCP Tx session. + */ +struct wired_cmd_init_locality_check_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +struct wired_cmd_init_locality_check_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 r_n[HDCP_2_2_RN_LEN]; +} __packed; + +/* + * HECI struct for validating an Rx's LPrime value in an + * integrated wired HDCP Tx session. + */ +struct wired_cmd_validate_locality_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 l_prime[HDCP_2_2_L_PRIME_LEN]; +} __packed; + +struct wired_cmd_validate_locality_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* + * Data structures for integrated wired HDCP2 Tx in support of the + * SKE protocol + */ +/* HECI struct for creating session key */ +struct wired_cmd_get_session_key_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +struct wired_cmd_get_session_key_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN]; + u8 r_iv[HDCP_2_2_RIV_LEN]; +} __packed; + +/* HECI struct for the Tx enable authentication command */ +struct wired_cmd_enable_auth_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 stream_type; +} __packed; + +struct wired_cmd_enable_auth_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* + * Data structures for integrated wired HDCP2 Tx in support of + * the repeater protocols + */ +/* + * HECI struct for verifying the downstream repeater's HDCP topology in an + * integrated wired HDCP Tx session. + */ +struct wired_cmd_verify_repeater_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 rx_info[HDCP_2_2_RXINFO_LEN]; + u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN]; + u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN]; + u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN]; +} __packed; + +struct wired_cmd_verify_repeater_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 content_type_supported; + u8 v[HDCP_2_2_V_PRIME_HALF_LEN]; +} __packed; + +/* + * HECI struct in support of stream management in an + * integrated wired HDCP Tx session. 
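+ *
+ * The trailing streams[] flexible array carries k entries, and k
+ * itself is big-endian on the wire; senders size and fill the buffer
+ * along the lines of mei_hdcp_verify_mprime() above:
+ *
+ *	in = kzalloc(struct_size(in, streams, k), GFP_KERNEL);
+ *	in->k = cpu_to_be16(k);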
+ */ +struct wired_cmd_repeater_auth_stream_req_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN]; + u8 m_prime[HDCP_2_2_MPRIME_LEN]; + __be16 k; + struct hdcp2_streamid_type streams[]; +} __packed; + +struct wired_cmd_repeater_auth_stream_req_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; +#endif /* __MEI_HDCP_H__ */ diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h new file mode 100644 index 000000000..eabbdf17b --- /dev/null +++ b/drivers/misc/mei/hw-me-regs.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (c) 2003-2019, Intel Corporation. All rights reserved. + * Intel Management Engine Interface (Intel MEI) Linux driver + */ +#ifndef _MEI_HW_MEI_REGS_H_ +#define _MEI_HW_MEI_REGS_H_ + +/* + * MEI device IDs + */ +#define MEI_DEV_ID_82946GZ 0x2974 /* 82946GZ/GL */ +#define MEI_DEV_ID_82G35 0x2984 /* 82G35 Express */ +#define MEI_DEV_ID_82Q965 0x2994 /* 82Q963/Q965 */ +#define MEI_DEV_ID_82G965 0x29A4 /* 82P965/G965 */ + +#define MEI_DEV_ID_82GM965 0x2A04 /* Mobile PM965/GM965 */ +#define MEI_DEV_ID_82GME965 0x2A14 /* Mobile GME965/GLE960 */ + +#define MEI_DEV_ID_ICH9_82Q35 0x29B4 /* 82Q35 Express */ +#define MEI_DEV_ID_ICH9_82G33 0x29C4 /* 82G33/G31/P35/P31 Express */ +#define MEI_DEV_ID_ICH9_82Q33 0x29D4 /* 82Q33 Express */ +#define MEI_DEV_ID_ICH9_82X38 0x29E4 /* 82X38/X48 Express */ +#define MEI_DEV_ID_ICH9_3200 0x29F4 /* 3200/3210 Server */ + +#define MEI_DEV_ID_ICH9_6 0x28B4 /* Bearlake */ +#define MEI_DEV_ID_ICH9_7 0x28C4 /* Bearlake */ +#define MEI_DEV_ID_ICH9_8 0x28D4 /* Bearlake */ +#define MEI_DEV_ID_ICH9_9 0x28E4 /* Bearlake */ +#define MEI_DEV_ID_ICH9_10 0x28F4 /* Bearlake */ + +#define MEI_DEV_ID_ICH9M_1 0x2A44 /* Cantiga */ +#define MEI_DEV_ID_ICH9M_2 0x2A54 /* Cantiga */ +#define MEI_DEV_ID_ICH9M_3 0x2A64 /* Cantiga */ +#define MEI_DEV_ID_ICH9M_4 0x2A74 /* Cantiga */ + +#define MEI_DEV_ID_ICH10_1 0x2E04 /* Eaglelake */ +#define MEI_DEV_ID_ICH10_2 0x2E14 /* Eaglelake */ +#define MEI_DEV_ID_ICH10_3 0x2E24 /* Eaglelake */ +#define MEI_DEV_ID_ICH10_4 0x2E34 /* Eaglelake */ + +#define MEI_DEV_ID_IBXPK_1 0x3B64 /* Calpella */ +#define MEI_DEV_ID_IBXPK_2 0x3B65 /* Calpella */ + +#define MEI_DEV_ID_CPT_1 0x1C3A /* Couger Point */ +#define MEI_DEV_ID_PBG_1 0x1D3A /* C600/X79 Patsburg */ + +#define MEI_DEV_ID_PPT_1 0x1E3A /* Panther Point */ +#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */ +#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */ + +#define MEI_DEV_ID_LPT_H 0x8C3A /* Lynx Point H */ +#define MEI_DEV_ID_LPT_W 0x8D3A /* Lynx Point - Wellsburg */ +#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */ +#define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */ + +#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */ +#define MEI_DEV_ID_WPT_LP_2 0x9CBB /* Wildcat Point LP 2 */ + +#define MEI_DEV_ID_SPT 0x9D3A /* Sunrise Point */ +#define MEI_DEV_ID_SPT_2 0x9D3B /* Sunrise Point 2 */ +#define MEI_DEV_ID_SPT_3 0x9D3E /* Sunrise Point 3 (iToutch) */ +#define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */ +#define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */ + +#define MEI_DEV_ID_LBG 0xA1BA /* Lewisburg (SPT) */ + +#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ +#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ + +#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */ + +#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ + +#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ +#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 
*/ +#define MEI_DEV_ID_KBP_3 0xA2BE /* Kaby Point 3 (iTouch) */ + +#define MEI_DEV_ID_CNP_LP 0x9DE0 /* Cannon Point LP */ +#define MEI_DEV_ID_CNP_LP_3 0x9DE4 /* Cannon Point LP 3 (iTouch) */ +#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ +#define MEI_DEV_ID_CNP_H_3 0xA364 /* Cannon Point H 3 (iTouch) */ + +#define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */ +#define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */ + +#define MEI_DEV_ID_CMP_V 0xA3BA /* Comet Lake V */ + +#define MEI_DEV_ID_CMP_H 0x06e0 /* Comet Lake H */ +#define MEI_DEV_ID_CMP_H_3 0x06e4 /* Comet Lake H 3 (iTouch) */ + +#define MEI_DEV_ID_CDF 0x18D3 /* Cedar Fork */ + +#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ +#define MEI_DEV_ID_ICP_N 0x38E0 /* Ice Lake Point N */ + +#define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */ + +#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */ +#define MEI_DEV_ID_TGP_H 0x43E0 /* Tiger Lake Point H */ + +#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */ +#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */ + +#define MEI_DEV_ID_EBG 0x1BE0 /* Emmitsburg WS */ + +#define MEI_DEV_ID_ADP_S 0x7AE8 /* Alder Lake Point S */ +#define MEI_DEV_ID_ADP_LP 0x7A60 /* Alder Lake Point LP */ +#define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */ +#define MEI_DEV_ID_ADP_N 0x54E0 /* Alder Lake Point N */ + +#define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */ + +#define MEI_DEV_ID_MTL_M 0x7E70 /* Meteor Lake Point M */ + +/* + * MEI HW Section + */ + +/* Host Firmware Status Registers in PCI Config Space */ +#define PCI_CFG_HFS_1 0x40 +# define PCI_CFG_HFS_1_D0I3_MSK 0x80000000 +# define PCI_CFG_HFS_1_OPMODE_MSK 0xf0000 /* OP MODE Mask: SPS <= 4.0 */ +# define PCI_CFG_HFS_1_OPMODE_SPS 0xf0000 /* SPS SKU : SPS <= 4.0 */ +#define PCI_CFG_HFS_2 0x48 +#define PCI_CFG_HFS_3 0x60 +# define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070 +# define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060 +#define PCI_CFG_HFS_4 0x64 +#define PCI_CFG_HFS_5 0x68 +#define PCI_CFG_HFS_6 0x6C + +/* MEI registers */ +/* H_CB_WW - Host Circular Buffer (CB) Write Window register */ +#define H_CB_WW 0 +/* H_CSR - Host Control Status register */ +#define H_CSR 4 +/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */ +#define ME_CB_RW 8 +/* ME_CSR_HA - ME Control Status Host Access register (read only) */ +#define ME_CSR_HA 0xC +/* H_HPG_CSR - PGI register */ +#define H_HPG_CSR 0x10 +/* H_D0I3C - D0I3 Control */ +#define H_D0I3C 0x800 + +/* register bits of H_CSR (Host Control Status register) */ +/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */ +#define H_CBD 0xFF000000 +/* Host Circular Buffer Write Pointer */ +#define H_CBWP 0x00FF0000 +/* Host Circular Buffer Read Pointer */ +#define H_CBRP 0x0000FF00 +/* Host Reset */ +#define H_RST 0x00000010 +/* Host Ready */ +#define H_RDY 0x00000008 +/* Host Interrupt Generate */ +#define H_IG 0x00000004 +/* Host Interrupt Status */ +#define H_IS 0x00000002 +/* Host Interrupt Enable */ +#define H_IE 0x00000001 +/* Host D0I3 Interrupt Enable */ +#define H_D0I3C_IE 0x00000020 +/* Host D0I3 Interrupt Status */ +#define H_D0I3C_IS 0x00000040 + +/* H_CSR masks */ +#define H_CSR_IE_MASK (H_IE | H_D0I3C_IE) +#define H_CSR_IS_MASK (H_IS | H_D0I3C_IS) + +/* register bits of ME_CSR_HA (ME Control Status Host Access register) */ +/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only +access to ME_CBD */ +#define ME_CBD_HRA 0xFF000000 +/* ME CB Write Pointer HRA - host read only access to 
ME_CBWP */ +#define ME_CBWP_HRA 0x00FF0000 +/* ME CB Read Pointer HRA - host read only access to ME_CBRP */ +#define ME_CBRP_HRA 0x0000FF00 +/* ME Power Gate Isolation Capability HRA - host read only access */ +#define ME_PGIC_HRA 0x00000040 +/* ME Reset HRA - host read only access to ME_RST */ +#define ME_RST_HRA 0x00000010 +/* ME Ready HRA - host read only access to ME_RDY */ +#define ME_RDY_HRA 0x00000008 +/* ME Interrupt Generate HRA - host read only access to ME_IG */ +#define ME_IG_HRA 0x00000004 +/* ME Interrupt Status HRA - host read only access to ME_IS */ +#define ME_IS_HRA 0x00000002 +/* ME Interrupt Enable HRA - host read only access to ME_IE */ +#define ME_IE_HRA 0x00000001 +/* TRC control shadow register */ +#define ME_TRC 0x00000030 + +/* H_HPG_CSR register bits */ +#define H_HPG_CSR_PGIHEXR 0x00000001 +#define H_HPG_CSR_PGI 0x00000002 + +/* H_D0I3C register bits */ +#define H_D0I3C_CIP 0x00000001 +#define H_D0I3C_IR 0x00000002 +#define H_D0I3C_I3 0x00000004 +#define H_D0I3C_RR 0x00000008 + +#endif /* _MEI_HW_MEI_REGS_H_ */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c new file mode 100644 index 000000000..cda0829ac --- /dev/null +++ b/drivers/misc/mei/hw-me.c @@ -0,0 +1,1633 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. + * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#include <linux/pci.h> + +#include <linux/kthread.h> +#include <linux/interrupt.h> +#include <linux/pm_runtime.h> +#include <linux/sizes.h> + +#include "mei_dev.h" +#include "hbm.h" + +#include "hw-me.h" +#include "hw-me-regs.h" + +#include "mei-trace.h" + +/** + * mei_me_reg_read - Reads 32bit data from the mei device + * + * @hw: the me hardware structure + * @offset: offset from which to read the data + * + * Return: register value (u32) + */ +static inline u32 mei_me_reg_read(const struct mei_me_hw *hw, + unsigned long offset) +{ + return ioread32(hw->mem_addr + offset); +} + + +/** + * mei_me_reg_write - Writes 32bit data to the mei device + * + * @hw: the me hardware structure + * @offset: offset to which to write the data + * @value: register value to write (u32) + */ +static inline void mei_me_reg_write(const struct mei_me_hw *hw, + unsigned long offset, u32 value) +{ + iowrite32(value, hw->mem_addr + offset); +} + +/** + * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer + * read window register + * + * @dev: the device structure + * + * Return: ME_CB_RW register value (u32) + */ +static inline u32 mei_me_mecbrw_read(const struct mei_device *dev) +{ + return mei_me_reg_read(to_me_hw(dev), ME_CB_RW); +} + +/** + * mei_me_hcbww_write - write 32bit data to the host circular buffer + * + * @dev: the device structure + * @data: 32bit data to be written to the host circular buffer + */ +static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data) +{ + mei_me_reg_write(to_me_hw(dev), H_CB_WW, data); +} + +/** + * mei_me_mecsr_read - Reads 32bit data from the ME CSR + * + * @dev: the device structure + * + * Return: ME_CSR_HA register value (u32) + */ +static inline u32 mei_me_mecsr_read(const struct mei_device *dev) +{ + u32 reg; + + reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA); + trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg); + + return reg; +} + +/** + * mei_hcsr_read - Reads 32bit data from the host CSR + * + * @dev: the device structure + * + * Return: H_CSR register value (u32) + */ +static inline u32 mei_hcsr_read(const struct mei_device *dev) +{ + u32 
reg; + + reg = mei_me_reg_read(to_me_hw(dev), H_CSR); + trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg); + + return reg; +} + +/** + * mei_hcsr_write - writes H_CSR register to the mei device + * + * @dev: the device structure + * @reg: new register value + */ +static inline void mei_hcsr_write(struct mei_device *dev, u32 reg) +{ + trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg); + mei_me_reg_write(to_me_hw(dev), H_CSR, reg); +} + +/** + * mei_hcsr_set - writes H_CSR register to the mei device, + * and ignores the H_IS bit for it is write-one-to-zero. + * + * @dev: the device structure + * @reg: new register value + */ +static inline void mei_hcsr_set(struct mei_device *dev, u32 reg) +{ + reg &= ~H_CSR_IS_MASK; + mei_hcsr_write(dev, reg); +} + +/** + * mei_hcsr_set_hig - set host interrupt (set H_IG) + * + * @dev: the device structure + */ +static inline void mei_hcsr_set_hig(struct mei_device *dev) +{ + u32 hcsr; + + hcsr = mei_hcsr_read(dev) | H_IG; + mei_hcsr_set(dev, hcsr); +} + +/** + * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register + * + * @dev: the device structure + * + * Return: H_D0I3C register value (u32) + */ +static inline u32 mei_me_d0i3c_read(const struct mei_device *dev) +{ + u32 reg; + + reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C); + trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg); + + return reg; +} + +/** + * mei_me_d0i3c_write - writes H_D0I3C register to device + * + * @dev: the device structure + * @reg: new register value + */ +static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg) +{ + trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg); + mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg); +} + +/** + * mei_me_trc_status - read trc status register + * + * @dev: mei device + * @trc: trc status register value + * + * Return: 0 on success, error otherwise + */ +static int mei_me_trc_status(struct mei_device *dev, u32 *trc) +{ + struct mei_me_hw *hw = to_me_hw(dev); + + if (!hw->cfg->hw_trc_supported) + return -EOPNOTSUPP; + + *trc = mei_me_reg_read(hw, ME_TRC); + trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc); + + return 0; +} + +/** + * mei_me_fw_status - read fw status register from pci config space + * + * @dev: mei device + * @fw_status: fw status register values + * + * Return: 0 on success, error otherwise + */ +static int mei_me_fw_status(struct mei_device *dev, + struct mei_fw_status *fw_status) +{ + struct mei_me_hw *hw = to_me_hw(dev); + const struct mei_fw_status *fw_src = &hw->cfg->fw_status; + int ret; + int i; + + if (!fw_status || !hw->read_fws) + return -EINVAL; + + fw_status->count = fw_src->count; + for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { + ret = hw->read_fws(dev, fw_src->status[i], + &fw_status->status[i]); + trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X", + fw_src->status[i], + fw_status->status[i]); + if (ret) + return ret; + } + + return 0; +} + +/** + * mei_me_hw_config - configure hw dependent settings + * + * @dev: mei device + * + * Return: + * * -EINVAL when read_fws is not set + * * 0 on success + * + */ +static int mei_me_hw_config(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr, reg; + + if (WARN_ON(!hw->read_fws)) + return -EINVAL; + + /* Doesn't change in runtime */ + hcsr = mei_hcsr_read(dev); + hw->hbuf_depth = (hcsr & H_CBD) >> 24; + + reg = 0; + hw->read_fws(dev, PCI_CFG_HFS_1, ®); + trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); + hw->d0i3_supported = + ((reg & PCI_CFG_HFS_1_D0I3_MSK) == 
PCI_CFG_HFS_1_D0I3_MSK); + + hw->pg_state = MEI_PG_OFF; + if (hw->d0i3_supported) { + reg = mei_me_d0i3c_read(dev); + if (reg & H_D0I3C_I3) + hw->pg_state = MEI_PG_ON; + } + + return 0; +} + +/** + * mei_me_pg_state - translate internal pg state + * to the mei power gating state + * + * @dev: mei device + * + * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise + */ +static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + + return hw->pg_state; +} + +static inline u32 me_intr_src(u32 hcsr) +{ + return hcsr & H_CSR_IS_MASK; +} + +/** + * me_intr_disable - disables mei device interrupts + * using supplied hcsr register value. + * + * @dev: the device structure + * @hcsr: supplied hcsr register value + */ +static inline void me_intr_disable(struct mei_device *dev, u32 hcsr) +{ + hcsr &= ~H_CSR_IE_MASK; + mei_hcsr_set(dev, hcsr); +} + +/** + * me_intr_clear - clear and stop interrupts + * + * @dev: the device structure + * @hcsr: supplied hcsr register value + */ +static inline void me_intr_clear(struct mei_device *dev, u32 hcsr) +{ + if (me_intr_src(hcsr)) + mei_hcsr_write(dev, hcsr); +} + +/** + * mei_me_intr_clear - clear and stop interrupts + * + * @dev: the device structure + */ +static void mei_me_intr_clear(struct mei_device *dev) +{ + u32 hcsr = mei_hcsr_read(dev); + + me_intr_clear(dev, hcsr); +} +/** + * mei_me_intr_enable - enables mei device interrupts + * + * @dev: the device structure + */ +static void mei_me_intr_enable(struct mei_device *dev) +{ + u32 hcsr = mei_hcsr_read(dev); + + hcsr |= H_CSR_IE_MASK; + mei_hcsr_set(dev, hcsr); +} + +/** + * mei_me_intr_disable - disables mei device interrupts + * + * @dev: the device structure + */ +static void mei_me_intr_disable(struct mei_device *dev) +{ + u32 hcsr = mei_hcsr_read(dev); + + me_intr_disable(dev, hcsr); +} + +/** + * mei_me_synchronize_irq - wait for pending IRQ handlers + * + * @dev: the device structure + */ +static void mei_me_synchronize_irq(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + + synchronize_irq(hw->irq); +} + +/** + * mei_me_hw_reset_release - release device from the reset + * + * @dev: the device structure + */ +static void mei_me_hw_reset_release(struct mei_device *dev) +{ + u32 hcsr = mei_hcsr_read(dev); + + hcsr |= H_IG; + hcsr &= ~H_RST; + mei_hcsr_set(dev, hcsr); +} + +/** + * mei_me_host_set_ready - enable device + * + * @dev: mei device + */ +static void mei_me_host_set_ready(struct mei_device *dev) +{ + u32 hcsr = mei_hcsr_read(dev); + + hcsr |= H_CSR_IE_MASK | H_IG | H_RDY; + mei_hcsr_set(dev, hcsr); +} + +/** + * mei_me_host_is_ready - check whether the host has turned ready + * + * @dev: mei device + * Return: bool + */ +static bool mei_me_host_is_ready(struct mei_device *dev) +{ + u32 hcsr = mei_hcsr_read(dev); + + return (hcsr & H_RDY) == H_RDY; +} + +/** + * mei_me_hw_is_ready - check whether the me(hw) has turned ready + * + * @dev: mei device + * Return: bool + */ +static bool mei_me_hw_is_ready(struct mei_device *dev) +{ + u32 mecsr = mei_me_mecsr_read(dev); + + return (mecsr & ME_RDY_HRA) == ME_RDY_HRA; +} + +/** + * mei_me_hw_is_resetting - check whether the me(hw) is in reset + * + * @dev: mei device + * Return: bool + */ +static bool mei_me_hw_is_resetting(struct mei_device *dev) +{ + u32 mecsr = mei_me_mecsr_read(dev); + + return (mecsr & ME_RST_HRA) == ME_RST_HRA; +} + +/** + * mei_me_hw_ready_wait - wait until the me(hw) has turned ready + * or timeout is reached + * + * @dev: mei device + * 
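Sleeps with device_lock released for up to MEI_HW_READY_TIMEOUT + * seconds on dev->wait_hw_ready; the interface reset is released on + * success. + *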
Return: 0 on success, error otherwise + */ +static int mei_me_hw_ready_wait(struct mei_device *dev) +{ + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_hw_ready, + dev->recvd_hw_ready, + mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); + mutex_lock(&dev->device_lock); + if (!dev->recvd_hw_ready) { + dev_err(dev->dev, "wait hw ready failed\n"); + return -ETIME; + } + + mei_me_hw_reset_release(dev); + dev->recvd_hw_ready = false; + return 0; +} + +/** + * mei_me_hw_start - hw start routine + * + * @dev: mei device + * Return: 0 on success, error otherwise + */ +static int mei_me_hw_start(struct mei_device *dev) +{ + int ret = mei_me_hw_ready_wait(dev); + + if (ret) + return ret; + dev_dbg(dev->dev, "hw is ready\n"); + + mei_me_host_set_ready(dev); + return ret; +} + + +/** + * mei_hbuf_filled_slots - gets number of device filled buffer slots + * + * @dev: the device structure + * + * Return: number of filled slots + */ +static unsigned char mei_hbuf_filled_slots(struct mei_device *dev) +{ + u32 hcsr; + char read_ptr, write_ptr; + + hcsr = mei_hcsr_read(dev); + + read_ptr = (char) ((hcsr & H_CBRP) >> 8); + write_ptr = (char) ((hcsr & H_CBWP) >> 16); + + return (unsigned char) (write_ptr - read_ptr); +} + +/** + * mei_me_hbuf_is_empty - checks if host buffer is empty. + * + * @dev: the device structure + * + * Return: true if empty, false - otherwise. + */ +static bool mei_me_hbuf_is_empty(struct mei_device *dev) +{ + return mei_hbuf_filled_slots(dev) == 0; +} + +/** + * mei_me_hbuf_empty_slots - counts write empty slots. + * + * @dev: the device structure + * + * Return: -EOVERFLOW if overflow, otherwise empty slots count + */ +static int mei_me_hbuf_empty_slots(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + unsigned char filled_slots, empty_slots; + + filled_slots = mei_hbuf_filled_slots(dev); + empty_slots = hw->hbuf_depth - filled_slots; + + /* check for overflow */ + if (filled_slots > hw->hbuf_depth) + return -EOVERFLOW; + + return empty_slots; +} + +/** + * mei_me_hbuf_depth - returns depth of the hw buffer. + * + * @dev: the device structure + * + * Return: size of hw buffer in slots + */ +static u32 mei_me_hbuf_depth(const struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + + return hw->hbuf_depth; +} + +/** + * mei_me_hbuf_write - writes a message to host hw buffer. + * + * @dev: the device structure + * @hdr: header of message + * @hdr_len: header length in bytes: must be multiplication of a slot (4bytes) + * @data: payload + * @data_len: payload length in bytes + * + * Return: 0 if success, < 0 - otherwise. 
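+ * + * For example, a 4-byte header with a 6-byte payload occupies + * mei_data2slots(4 + 6) = 3 slots: one header dword, one full payload + * dword, and a final dword whose trailing two bytes are zero-padded.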
+ */ +static int mei_me_hbuf_write(struct mei_device *dev, + const void *hdr, size_t hdr_len, + const void *data, size_t data_len) +{ + unsigned long rem; + unsigned long i; + const u32 *reg_buf; + u32 dw_cnt; + int empty_slots; + + if (WARN_ON(!hdr || !data || hdr_len & 0x3)) + return -EINVAL; + + dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr)); + + empty_slots = mei_hbuf_empty_slots(dev); + dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots); + + if (empty_slots < 0) + return -EOVERFLOW; + + dw_cnt = mei_data2slots(hdr_len + data_len); + if (dw_cnt > (u32)empty_slots) + return -EMSGSIZE; + + reg_buf = hdr; + for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++) + mei_me_hcbww_write(dev, reg_buf[i]); + + reg_buf = data; + for (i = 0; i < data_len / MEI_SLOT_SIZE; i++) + mei_me_hcbww_write(dev, reg_buf[i]); + + rem = data_len & 0x3; + if (rem > 0) { + u32 reg = 0; + + memcpy(®, (const u8 *)data + data_len - rem, rem); + mei_me_hcbww_write(dev, reg); + } + + mei_hcsr_set_hig(dev); + if (!mei_me_hw_is_ready(dev)) + return -EIO; + + return 0; +} + +/** + * mei_me_count_full_read_slots - counts read full slots. + * + * @dev: the device structure + * + * Return: -EOVERFLOW if overflow, otherwise filled slots count + */ +static int mei_me_count_full_read_slots(struct mei_device *dev) +{ + u32 me_csr; + char read_ptr, write_ptr; + unsigned char buffer_depth, filled_slots; + + me_csr = mei_me_mecsr_read(dev); + buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24); + read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8); + write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16); + filled_slots = (unsigned char) (write_ptr - read_ptr); + + /* check for overflow */ + if (filled_slots > buffer_depth) + return -EOVERFLOW; + + dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots); + return (int)filled_slots; +} + +/** + * mei_me_read_slots - reads a message from mei device. 
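+ * + * Whole 32-bit slots are read from the ME_CB_RW window; for a trailing + * partial slot one more dword is read and only the remaining bytes are + * copied out.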
+ * + * @dev: the device structure + * @buffer: message buffer will be written + * @buffer_length: message size will be read + * + * Return: always 0 + */ +static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, + unsigned long buffer_length) +{ + u32 *reg_buf = (u32 *)buffer; + + for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE) + *reg_buf++ = mei_me_mecbrw_read(dev); + + if (buffer_length > 0) { + u32 reg = mei_me_mecbrw_read(dev); + + memcpy(reg_buf, ®, buffer_length); + } + + mei_hcsr_set_hig(dev); + return 0; +} + +/** + * mei_me_pg_set - write pg enter register + * + * @dev: the device structure + */ +static void mei_me_pg_set(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 reg; + + reg = mei_me_reg_read(hw, H_HPG_CSR); + trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); + + reg |= H_HPG_CSR_PGI; + + trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); + mei_me_reg_write(hw, H_HPG_CSR, reg); +} + +/** + * mei_me_pg_unset - write pg exit register + * + * @dev: the device structure + */ +static void mei_me_pg_unset(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 reg; + + reg = mei_me_reg_read(hw, H_HPG_CSR); + trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); + + WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n"); + + reg |= H_HPG_CSR_PGIHEXR; + + trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); + mei_me_reg_write(hw, H_HPG_CSR, reg); +} + +/** + * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure + * + * @dev: the device structure + * + * Return: 0 on success an error code otherwise + */ +static int mei_me_pg_legacy_enter_sync(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); + int ret; + + dev->pg_event = MEI_PG_EVENT_WAIT; + + ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD); + if (ret) + return ret; + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_pg, + dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); + mutex_lock(&dev->device_lock); + + if (dev->pg_event == MEI_PG_EVENT_RECEIVED) { + mei_me_pg_set(dev); + ret = 0; + } else { + ret = -ETIME; + } + + dev->pg_event = MEI_PG_EVENT_IDLE; + hw->pg_state = MEI_PG_ON; + + return ret; +} + +/** + * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure + * + * @dev: the device structure + * + * Return: 0 on success an error code otherwise + */ +static int mei_me_pg_legacy_exit_sync(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); + int ret; + + if (dev->pg_event == MEI_PG_EVENT_RECEIVED) + goto reply; + + dev->pg_event = MEI_PG_EVENT_WAIT; + + mei_me_pg_unset(dev); + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_pg, + dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); + mutex_lock(&dev->device_lock); + +reply: + if (dev->pg_event != MEI_PG_EVENT_RECEIVED) { + ret = -ETIME; + goto out; + } + + dev->pg_event = MEI_PG_EVENT_INTR_WAIT; + ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD); + if (ret) + return ret; + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_pg, + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout); + mutex_lock(&dev->device_lock); + + if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED) + ret = 0; + else + ret = -ETIME; + +out: + dev->pg_event = MEI_PG_EVENT_IDLE; + hw->pg_state = MEI_PG_OFF; + + return ret; +} + +/** + * mei_me_pg_in_transition - is device 
now in pg transition + * + * @dev: the device structure + * + * Return: true if in pg transition, false otherwise + */ +static bool mei_me_pg_in_transition(struct mei_device *dev) +{ + return dev->pg_event >= MEI_PG_EVENT_WAIT && + dev->pg_event <= MEI_PG_EVENT_INTR_WAIT; +} + +/** + * mei_me_pg_is_enabled - detect if PG is supported by HW + * + * @dev: the device structure + * + * Return: true if pg supported, false otherwise + */ +static bool mei_me_pg_is_enabled(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 reg = mei_me_mecsr_read(dev); + + if (hw->d0i3_supported) + return true; + + if ((reg & ME_PGIC_HRA) == 0) + goto notsupported; + + if (!dev->hbm_f_pg_supported) + goto notsupported; + + return true; + +notsupported: + dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n", + hw->d0i3_supported, + !!(reg & ME_PGIC_HRA), + dev->version.major_version, + dev->version.minor_version, + HBM_MAJOR_VERSION_PGI, + HBM_MINOR_VERSION_PGI); + + return false; +} + +/** + * mei_me_d0i3_set - write d0i3 register bit on mei device. + * + * @dev: the device structure + * @intr: ask for interrupt + * + * Return: D0I3C register value + */ +static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr) +{ + u32 reg = mei_me_d0i3c_read(dev); + + reg |= H_D0I3C_I3; + if (intr) + reg |= H_D0I3C_IR; + else + reg &= ~H_D0I3C_IR; + mei_me_d0i3c_write(dev, reg); + /* read it to ensure HW consistency */ + reg = mei_me_d0i3c_read(dev); + return reg; +} + +/** + * mei_me_d0i3_unset - clean d0i3 register bit on mei device. + * + * @dev: the device structure + * + * Return: D0I3C register value + */ +static u32 mei_me_d0i3_unset(struct mei_device *dev) +{ + u32 reg = mei_me_d0i3c_read(dev); + + reg &= ~H_D0I3C_I3; + reg |= H_D0I3C_IR; + mei_me_d0i3c_write(dev, reg); + /* read it to ensure HW consistency */ + reg = mei_me_d0i3c_read(dev); + return reg; +} + +/** + * mei_me_d0i3_enter_sync - perform d0i3 entry procedure + * + * @dev: the device structure + * + * Return: 0 on success an error code otherwise + */ +static int mei_me_d0i3_enter_sync(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); + unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); + int ret; + u32 reg; + + reg = mei_me_d0i3c_read(dev); + if (reg & H_D0I3C_I3) { + /* we are in d0i3, nothing to do */ + dev_dbg(dev->dev, "d0i3 set not needed\n"); + ret = 0; + goto on; + } + + /* PGI entry procedure */ + dev->pg_event = MEI_PG_EVENT_WAIT; + + ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD); + if (ret) + /* FIXME: should we reset here? 
*/ + goto out; + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_pg, + dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout); + mutex_lock(&dev->device_lock); + + if (dev->pg_event != MEI_PG_EVENT_RECEIVED) { + ret = -ETIME; + goto out; + } + /* end PGI entry procedure */ + + dev->pg_event = MEI_PG_EVENT_INTR_WAIT; + + reg = mei_me_d0i3_set(dev, true); + if (!(reg & H_D0I3C_CIP)) { + dev_dbg(dev->dev, "d0i3 enter wait not needed\n"); + ret = 0; + goto on; + } + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_pg, + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout); + mutex_lock(&dev->device_lock); + + if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) { + reg = mei_me_d0i3c_read(dev); + if (!(reg & H_D0I3C_I3)) { + ret = -ETIME; + goto out; + } + } + + ret = 0; +on: + hw->pg_state = MEI_PG_ON; +out: + dev->pg_event = MEI_PG_EVENT_IDLE; + dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret); + return ret; +} + +/** + * mei_me_d0i3_enter - perform d0i3 entry procedure + * no hbm PG handshake + * no waiting for confirmation; runs with interrupts + * disabled + * + * @dev: the device structure + * + * Return: 0 on success an error code otherwise + */ +static int mei_me_d0i3_enter(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 reg; + + reg = mei_me_d0i3c_read(dev); + if (reg & H_D0I3C_I3) { + /* we are in d0i3, nothing to do */ + dev_dbg(dev->dev, "already d0i3 : set not needed\n"); + goto on; + } + + mei_me_d0i3_set(dev, false); +on: + hw->pg_state = MEI_PG_ON; + dev->pg_event = MEI_PG_EVENT_IDLE; + dev_dbg(dev->dev, "d0i3 enter\n"); + return 0; +} + +/** + * mei_me_d0i3_exit_sync - perform d0i3 exit procedure + * + * @dev: the device structure + * + * Return: 0 on success an error code otherwise + */ +static int mei_me_d0i3_exit_sync(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); + int ret; + u32 reg; + + dev->pg_event = MEI_PG_EVENT_INTR_WAIT; + + reg = mei_me_d0i3c_read(dev); + if (!(reg & H_D0I3C_I3)) { + /* we are not in d0i3, nothing to do */ + dev_dbg(dev->dev, "d0i3 exit not needed\n"); + ret = 0; + goto off; + } + + reg = mei_me_d0i3_unset(dev); + if (!(reg & H_D0I3C_CIP)) { + dev_dbg(dev->dev, "d0i3 exit wait not needed\n"); + ret = 0; + goto off; + } + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_pg, + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout); + mutex_lock(&dev->device_lock); + + if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) { + reg = mei_me_d0i3c_read(dev); + if (reg & H_D0I3C_I3) { + ret = -ETIME; + goto out; + } + } + + ret = 0; +off: + hw->pg_state = MEI_PG_OFF; +out: + dev->pg_event = MEI_PG_EVENT_IDLE; + + dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret); + return ret; +} + +/** + * mei_me_pg_legacy_intr - perform legacy pg processing + * in interrupt thread handler + * + * @dev: the device structure + */ +static void mei_me_pg_legacy_intr(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + + if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT) + return; + + dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED; + hw->pg_state = MEI_PG_OFF; + if (waitqueue_active(&dev->wait_pg)) + wake_up(&dev->wait_pg); +} + +/** + * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler + * + * @dev: the device structure + * @intr_source: interrupt source + */ +static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source) +{ + struct mei_me_hw *hw = to_me_hw(dev); + + if (dev->pg_event == 
MEI_PG_EVENT_INTR_WAIT && + (intr_source & H_D0I3C_IS)) { + dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED; + if (hw->pg_state == MEI_PG_ON) { + hw->pg_state = MEI_PG_OFF; + if (dev->hbm_state != MEI_HBM_IDLE) { + /* + * force H_RDY because it could be + * wiped off during PG + */ + dev_dbg(dev->dev, "d0i3 set host ready\n"); + mei_me_host_set_ready(dev); + } + } else { + hw->pg_state = MEI_PG_ON; + } + + wake_up(&dev->wait_pg); + } + + if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) { + /* + * HW sent some data and we are in D0i3, so + * we got here because of HW initiated exit from D0i3. + * Start runtime pm resume sequence to exit low power state. + */ + dev_dbg(dev->dev, "d0i3 want resume\n"); + mei_hbm_pg_resume(dev); + } +} + +/** + * mei_me_pg_intr - perform pg processing in interrupt thread handler + * + * @dev: the device structure + * @intr_source: interrupt source + */ +static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source) +{ + struct mei_me_hw *hw = to_me_hw(dev); + + if (hw->d0i3_supported) + mei_me_d0i3_intr(dev, intr_source); + else + mei_me_pg_legacy_intr(dev); +} + +/** + * mei_me_pg_enter_sync - perform runtime pm entry procedure + * + * @dev: the device structure + * + * Return: 0 on success an error code otherwise + */ +int mei_me_pg_enter_sync(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + + if (hw->d0i3_supported) + return mei_me_d0i3_enter_sync(dev); + else + return mei_me_pg_legacy_enter_sync(dev); +} + +/** + * mei_me_pg_exit_sync - perform runtime pm exit procedure + * + * @dev: the device structure + * + * Return: 0 on success an error code otherwise + */ +int mei_me_pg_exit_sync(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + + if (hw->d0i3_supported) + return mei_me_d0i3_exit_sync(dev); + else + return mei_me_pg_legacy_exit_sync(dev); +} + +/** + * mei_me_hw_reset - resets fw via mei csr register. + * + * @dev: the device structure + * @intr_enable: if interrupt should be enabled after reset. + * + * Return: 0 on success an error code otherwise + */ +static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) +{ + struct mei_me_hw *hw = to_me_hw(dev); + int ret; + u32 hcsr; + + if (intr_enable) { + mei_me_intr_enable(dev); + if (hw->d0i3_supported) { + ret = mei_me_d0i3_exit_sync(dev); + if (ret) + return ret; + } + } + + pm_runtime_set_active(dev->dev); + + hcsr = mei_hcsr_read(dev); + /* H_RST may be found lit before reset is started, + * for example if preceding reset flow hasn't completed. + * In that case asserting H_RST will be ignored, therefore + * we need to clean H_RST bit to start a successful reset sequence. + */ + if ((hcsr & H_RST) == H_RST) { + dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr); + hcsr &= ~H_RST; + mei_hcsr_set(dev, hcsr); + hcsr = mei_hcsr_read(dev); + } + + hcsr |= H_RST | H_IG | H_CSR_IS_MASK; + + if (!intr_enable) + hcsr &= ~H_CSR_IE_MASK; + + dev->recvd_hw_ready = false; + mei_hcsr_write(dev, hcsr); + + /* + * Host reads the H_CSR once to ensure that the + * posted write to H_CSR completes. 
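+ * (a read-back is the standard way to flush a posted MMIO write)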
+ */ + hcsr = mei_hcsr_read(dev); + + if ((hcsr & H_RST) == 0) + dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr); + + if ((hcsr & H_RDY) == H_RDY) + dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr); + + if (!intr_enable) { + mei_me_hw_reset_release(dev); + if (hw->d0i3_supported) { + ret = mei_me_d0i3_enter(dev); + if (ret) + return ret; + } + } + return 0; +} + +/** + * mei_me_irq_quick_handler - The ISR of the MEI device + * + * @irq: The irq number + * @dev_id: pointer to the device structure + * + * Return: irqreturn_t + */ +irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id) +{ + struct mei_device *dev = (struct mei_device *)dev_id; + u32 hcsr; + + hcsr = mei_hcsr_read(dev); + if (!me_intr_src(hcsr)) + return IRQ_NONE; + + dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr)); + + /* disable interrupts on device */ + me_intr_disable(dev, hcsr); + return IRQ_WAKE_THREAD; +} + +/** + * mei_me_irq_thread_handler - function called after ISR to handle the interrupt + * processing. + * + * @irq: The irq number + * @dev_id: pointer to the device structure + * + * Return: irqreturn_t + * + */ +irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) +{ + struct mei_device *dev = (struct mei_device *) dev_id; + struct list_head cmpl_list; + s32 slots; + u32 hcsr; + int rets = 0; + + dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n"); + /* initialize our complete list */ + mutex_lock(&dev->device_lock); + + hcsr = mei_hcsr_read(dev); + me_intr_clear(dev, hcsr); + + INIT_LIST_HEAD(&cmpl_list); + + /* check if ME wants a reset */ + if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { + dev_warn(dev->dev, "FW not ready: resetting.\n"); + schedule_work(&dev->reset_work); + goto end; + } + + if (mei_me_hw_is_resetting(dev)) + mei_hcsr_set_hig(dev); + + mei_me_pg_intr(dev, me_intr_src(hcsr)); + + /* check if we need to start the dev */ + if (!mei_host_is_ready(dev)) { + if (mei_hw_is_ready(dev)) { + dev_dbg(dev->dev, "we need to start the dev.\n"); + dev->recvd_hw_ready = true; + wake_up(&dev->wait_hw_ready); + } else { + dev_dbg(dev->dev, "Spurious Interrupt\n"); + } + goto end; + } + /* check slots available for reading */ + slots = mei_count_full_read_slots(dev); + while (slots > 0) { + dev_dbg(dev->dev, "slots to read = %08x\n", slots); + rets = mei_irq_read_handler(dev, &cmpl_list, &slots); + /* There is a race between ME write and interrupt delivery: + * Not all data is always available immediately after the + * interrupt, so try to read again on the next interrupt. 
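+ * -ENODATA from the read handler is therefore treated as benign + * and simply ends this pass.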
+ */ + if (rets == -ENODATA) + break; + + if (rets && + (dev->dev_state != MEI_DEV_RESETTING && + dev->dev_state != MEI_DEV_POWER_DOWN)) { + dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n", + rets); + schedule_work(&dev->reset_work); + goto end; + } + } + + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + + /* + * During PG handshake only allowed write is the replay to the + * PG exit message, so block calling write function + * if the pg event is in PG handshake + */ + if (dev->pg_event != MEI_PG_EVENT_WAIT && + dev->pg_event != MEI_PG_EVENT_RECEIVED) { + rets = mei_irq_write_handler(dev, &cmpl_list); + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + } + + mei_irq_compl_handler(dev, &cmpl_list); + +end: + dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets); + mei_me_intr_enable(dev); + mutex_unlock(&dev->device_lock); + return IRQ_HANDLED; +} + +static const struct mei_hw_ops mei_me_hw_ops = { + + .trc_status = mei_me_trc_status, + .fw_status = mei_me_fw_status, + .pg_state = mei_me_pg_state, + + .host_is_ready = mei_me_host_is_ready, + + .hw_is_ready = mei_me_hw_is_ready, + .hw_reset = mei_me_hw_reset, + .hw_config = mei_me_hw_config, + .hw_start = mei_me_hw_start, + + .pg_in_transition = mei_me_pg_in_transition, + .pg_is_enabled = mei_me_pg_is_enabled, + + .intr_clear = mei_me_intr_clear, + .intr_enable = mei_me_intr_enable, + .intr_disable = mei_me_intr_disable, + .synchronize_irq = mei_me_synchronize_irq, + + .hbuf_free_slots = mei_me_hbuf_empty_slots, + .hbuf_is_ready = mei_me_hbuf_is_empty, + .hbuf_depth = mei_me_hbuf_depth, + + .write = mei_me_hbuf_write, + + .rdbuf_full_slots = mei_me_count_full_read_slots, + .read_hdr = mei_me_mecbrw_read, + .read = mei_me_read_slots +}; + +/** + * mei_me_fw_type_nm() - check for nm sku + * + * Read ME FW Status register to check for the Node Manager (NM) Firmware. + * The NM FW is only signaled in PCI function 0. + * __Note__: Deprecated by PCH8 and newer. + * + * @pdev: pci device + * + * Return: true in case of NM firmware + */ +static bool mei_me_fw_type_nm(const struct pci_dev *pdev) +{ + u32 reg; + unsigned int devfn; + + devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); + pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_2, ®); + trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg); + /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */ + return (reg & 0x600) == 0x200; +} + +#define MEI_CFG_FW_NM \ + .quirk_probe = mei_me_fw_type_nm + +/** + * mei_me_fw_sku_sps_4() - check for sps 4.0 sku + * + * Read ME FW Status register to check for SPS Firmware. + * The SPS FW is only signaled in the PCI function 0. + * __Note__: Deprecated by SPS 5.0 and newer. + * + * @pdev: pci device + * + * Return: true in case of SPS firmware + */ +static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev) +{ + u32 reg; + unsigned int devfn; + + devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); + pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, ®); + trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); + return (reg & PCI_CFG_HFS_1_OPMODE_MSK) == PCI_CFG_HFS_1_OPMODE_SPS; +} + +#define MEI_CFG_FW_SPS_4 \ + .quirk_probe = mei_me_fw_type_sps_4 + +/** + * mei_me_fw_sku_sps() - check for sps sku + * + * Read ME FW Status register to check for SPS Firmware. 
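+ * Bits [6:4] of PCI_CFG_HFS_3 carry the FW SKU; the field value 6 + * (PCI_CFG_HFS_3_FW_SKU_SPS) denotes an SPS part.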
+ * The SPS FW is only signaled in pci function 0 + * + * @pdev: pci device + * + * Return: true in case of SPS firmware + */ +static bool mei_me_fw_type_sps(const struct pci_dev *pdev) +{ + u32 reg; + u32 fw_type; + unsigned int devfn; + + devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); + pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, ®); + trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg); + fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK); + + dev_dbg(&pdev->dev, "fw type is %d\n", fw_type); + + return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS; +} + +#define MEI_CFG_KIND_ITOUCH \ + .kind = "itouch" + +#define MEI_CFG_FW_SPS \ + .quirk_probe = mei_me_fw_type_sps + +#define MEI_CFG_FW_VER_SUPP \ + .fw_ver_supported = 1 + +#define MEI_CFG_ICH_HFS \ + .fw_status.count = 0 + +#define MEI_CFG_ICH10_HFS \ + .fw_status.count = 1, \ + .fw_status.status[0] = PCI_CFG_HFS_1 + +#define MEI_CFG_PCH_HFS \ + .fw_status.count = 2, \ + .fw_status.status[0] = PCI_CFG_HFS_1, \ + .fw_status.status[1] = PCI_CFG_HFS_2 + +#define MEI_CFG_PCH8_HFS \ + .fw_status.count = 6, \ + .fw_status.status[0] = PCI_CFG_HFS_1, \ + .fw_status.status[1] = PCI_CFG_HFS_2, \ + .fw_status.status[2] = PCI_CFG_HFS_3, \ + .fw_status.status[3] = PCI_CFG_HFS_4, \ + .fw_status.status[4] = PCI_CFG_HFS_5, \ + .fw_status.status[5] = PCI_CFG_HFS_6 + +#define MEI_CFG_DMA_128 \ + .dma_size[DMA_DSCR_HOST] = SZ_128K, \ + .dma_size[DMA_DSCR_DEVICE] = SZ_128K, \ + .dma_size[DMA_DSCR_CTRL] = PAGE_SIZE + +#define MEI_CFG_TRC \ + .hw_trc_supported = 1 + +/* ICH Legacy devices */ +static const struct mei_cfg mei_me_ich_cfg = { + MEI_CFG_ICH_HFS, +}; + +/* ICH devices */ +static const struct mei_cfg mei_me_ich10_cfg = { + MEI_CFG_ICH10_HFS, +}; + +/* PCH6 devices */ +static const struct mei_cfg mei_me_pch6_cfg = { + MEI_CFG_PCH_HFS, +}; + +/* PCH7 devices */ +static const struct mei_cfg mei_me_pch7_cfg = { + MEI_CFG_PCH_HFS, + MEI_CFG_FW_VER_SUPP, +}; + +/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */ +static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = { + MEI_CFG_PCH_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_FW_NM, +}; + +/* PCH8 Lynx Point and newer devices */ +static const struct mei_cfg mei_me_pch8_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, +}; + +/* PCH8 Lynx Point and newer devices - iTouch */ +static const struct mei_cfg mei_me_pch8_itouch_cfg = { + MEI_CFG_KIND_ITOUCH, + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, +}; + +/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */ +static const struct mei_cfg mei_me_pch8_sps_4_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_FW_SPS_4, +}; + +/* LBG with quirk for SPS (4.0) Firmware exclusion */ +static const struct mei_cfg mei_me_pch12_sps_4_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_FW_SPS_4, +}; + +/* Cannon Lake and newer devices */ +static const struct mei_cfg mei_me_pch12_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_DMA_128, +}; + +/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */ +static const struct mei_cfg mei_me_pch12_sps_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_DMA_128, + MEI_CFG_FW_SPS, +}; + +/* Cannon Lake itouch with quirk for SPS 5.0 and newer Firmware exclusion + * w/o DMA support. 
+ */ +static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = { + MEI_CFG_KIND_ITOUCH, + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_FW_SPS, +}; + +/* Tiger Lake and newer devices */ +static const struct mei_cfg mei_me_pch15_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_DMA_128, + MEI_CFG_TRC, +}; + +/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */ +static const struct mei_cfg mei_me_pch15_sps_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_DMA_128, + MEI_CFG_TRC, + MEI_CFG_FW_SPS, +}; + +/* + * mei_cfg_list - A list of platform specific configurations. + * Note: has to be synchronized with enum mei_cfg_idx. + */ +static const struct mei_cfg *const mei_cfg_list[] = { + [MEI_ME_UNDEF_CFG] = NULL, + [MEI_ME_ICH_CFG] = &mei_me_ich_cfg, + [MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg, + [MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg, + [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg, + [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg, + [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg, + [MEI_ME_PCH8_ITOUCH_CFG] = &mei_me_pch8_itouch_cfg, + [MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg, + [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg, + [MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg, + [MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg, + [MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg, + [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg, + [MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg, +}; + +const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx) +{ + BUILD_BUG_ON(ARRAY_SIZE(mei_cfg_list) != MEI_ME_NUM_CFG); + + if (idx >= MEI_ME_NUM_CFG) + return NULL; + + return mei_cfg_list[idx]; +} + +/** + * mei_me_dev_init - allocates and initializes the mei device structure + * + * @parent: device associated with physical device (pci/platform) + * @cfg: per device generation config + * + * Return: The mei_device pointer on success, NULL on failure. + */ +struct mei_device *mei_me_dev_init(struct device *parent, + const struct mei_cfg *cfg) +{ + struct mei_device *dev; + struct mei_me_hw *hw; + int i; + + dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL); + if (!dev) + return NULL; + + hw = to_me_hw(dev); + + for (i = 0; i < DMA_DSCR_NUM; i++) + dev->dr_dscr[i].size = cfg->dma_size[i]; + + mei_device_init(dev, parent, &mei_me_hw_ops); + hw->cfg = cfg; + + dev->fw_f_fw_ver_supported = cfg->fw_ver_supported; + + dev->kind = cfg->kind; + + return dev; +} + diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h new file mode 100644 index 000000000..00a7132ac --- /dev/null +++ b/drivers/misc/mei/hw-me.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2012-2020, Intel Corporation. All rights reserved. 
+ * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#ifndef _MEI_INTERFACE_H_ +#define _MEI_INTERFACE_H_ + +#include <linux/irqreturn.h> +#include <linux/pci.h> +#include <linux/mei.h> + +#include "mei_dev.h" +#include "client.h" + +/* + * mei_cfg - mei device configuration + * + * @fw_status: FW status + * @quirk_probe: device exclusion quirk + * @kind: MEI head kind + * @dma_size: device DMA buffers size + * @fw_ver_supported: is fw version retrievable from FW + * @hw_trc_supported: does the hw support trc register + */ +struct mei_cfg { + const struct mei_fw_status fw_status; + bool (*quirk_probe)(const struct pci_dev *pdev); + const char *kind; + size_t dma_size[DMA_DSCR_NUM]; + u32 fw_ver_supported:1; + u32 hw_trc_supported:1; +}; + + +#define MEI_PCI_DEVICE(dev, cfg) \ + .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \ + .driver_data = (kernel_ulong_t)(cfg), + +#define MEI_ME_RPM_TIMEOUT 500 /* ms */ + +/** + * struct mei_me_hw - me hw specific data + * + * @cfg: per device generation config and ops + * @mem_addr: io memory address + * @irq: irq number + * @pg_state: power gating state + * @d0i3_supported: di03 support + * @hbuf_depth: depth of hardware host/write buffer in slots + * @read_fws: read FW status register handler + */ +struct mei_me_hw { + const struct mei_cfg *cfg; + void __iomem *mem_addr; + int irq; + enum mei_pg_state pg_state; + bool d0i3_supported; + u8 hbuf_depth; + int (*read_fws)(const struct mei_device *dev, int where, u32 *val); +}; + +#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw) + +/** + * enum mei_cfg_idx - indices to platform specific configurations. + * + * Note: has to be synchronized with mei_cfg_list[] + * + * @MEI_ME_UNDEF_CFG: Lower sentinel. + * @MEI_ME_ICH_CFG: I/O Controller Hub legacy devices. + * @MEI_ME_ICH10_CFG: I/O Controller Hub platforms Gen10 + * @MEI_ME_PCH6_CFG: Platform Controller Hub platforms (Gen6). + * @MEI_ME_PCH7_CFG: Platform Controller Hub platforms (Gen7). + * @MEI_ME_PCH_CPT_PBG_CFG:Platform Controller Hub workstations + * with quirk for Node Manager exclusion. + * @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer + * client platforms. + * @MEI_ME_PCH8_ITOUCH_CFG:Platform Controller Hub Gen8 and newer + * client platforms (iTouch). + * @MEI_ME_PCH8_SPS_4_CFG: Platform Controller Hub Gen8 and newer + * servers platforms with quirk for + * SPS firmware exclusion. + * @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer + * @MEI_ME_PCH12_SPS_4_CFG:Platform Controller Hub Gen12 up to 4.0 + * servers platforms with quirk for + * SPS firmware exclusion. + * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 5.0 and newer + * servers platforms with quirk for + * SPS firmware exclusion. + * @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer + * @MEI_ME_PCH15_SPS_CFG: Platform Controller Hub Gen15 and newer + * servers platforms with quirk for + * SPS firmware exclusion. + * @MEI_ME_NUM_CFG: Upper Sentinel. 
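+ * + * The BUILD_BUG_ON() in mei_me_get_cfg() enforces that this enum and + * mei_cfg_list[] stay the same size.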
+ */ +enum mei_cfg_idx { + MEI_ME_UNDEF_CFG, + MEI_ME_ICH_CFG, + MEI_ME_ICH10_CFG, + MEI_ME_PCH6_CFG, + MEI_ME_PCH7_CFG, + MEI_ME_PCH_CPT_PBG_CFG, + MEI_ME_PCH8_CFG, + MEI_ME_PCH8_ITOUCH_CFG, + MEI_ME_PCH8_SPS_4_CFG, + MEI_ME_PCH12_CFG, + MEI_ME_PCH12_SPS_4_CFG, + MEI_ME_PCH12_SPS_CFG, + MEI_ME_PCH12_SPS_ITOUCH_CFG, + MEI_ME_PCH15_CFG, + MEI_ME_PCH15_SPS_CFG, + MEI_ME_NUM_CFG, +}; + +const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx); + +struct mei_device *mei_me_dev_init(struct device *parent, + const struct mei_cfg *cfg); + +int mei_me_pg_enter_sync(struct mei_device *dev); +int mei_me_pg_exit_sync(struct mei_device *dev); + +irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id); +irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id); + +#endif /* _MEI_INTERFACE_H_ */ diff --git a/drivers/misc/mei/hw-txe-regs.h b/drivers/misc/mei/hw-txe-regs.h new file mode 100644 index 000000000..a92b306da --- /dev/null +++ b/drivers/misc/mei/hw-txe-regs.h @@ -0,0 +1,239 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (c) 2013-2014, Intel Corporation. All rights reserved. + * Intel Management Engine Interface (Intel MEI) Linux driver + */ +#ifndef _MEI_HW_TXE_REGS_H_ +#define _MEI_HW_TXE_REGS_H_ + +#include "hw.h" + +#define SEC_ALIVENESS_TIMER_TIMEOUT (5 * MSEC_PER_SEC) +#define SEC_ALIVENESS_WAIT_TIMEOUT (1 * MSEC_PER_SEC) +#define SEC_RESET_WAIT_TIMEOUT (1 * MSEC_PER_SEC) +#define SEC_READY_WAIT_TIMEOUT (5 * MSEC_PER_SEC) +#define START_MESSAGE_RESPONSE_WAIT_TIMEOUT (5 * MSEC_PER_SEC) +#define RESET_CANCEL_WAIT_TIMEOUT (1 * MSEC_PER_SEC) + +enum { + SEC_BAR, + BRIDGE_BAR, + + NUM_OF_MEM_BARS +}; + +/* SeC FW Status Register + * + * FW uses this register in order to report its status to host. + * This register resides in PCI-E config space. + */ +#define PCI_CFG_TXE_FW_STS0 0x40 +# define PCI_CFG_TXE_FW_STS0_WRK_ST_MSK 0x0000000F +# define PCI_CFG_TXE_FW_STS0_OP_ST_MSK 0x000001C0 +# define PCI_CFG_TXE_FW_STS0_FW_INIT_CMPLT 0x00000200 +# define PCI_CFG_TXE_FW_STS0_ERR_CODE_MSK 0x0000F000 +# define PCI_CFG_TXE_FW_STS0_OP_MODE_MSK 0x000F0000 +# define PCI_CFG_TXE_FW_STS0_RST_CNT_MSK 0x00F00000 +#define PCI_CFG_TXE_FW_STS1 0x48 + +#define IPC_BASE_ADDR 0x80400 /* SeC IPC Base Address */ + +/* IPC Input Doorbell Register */ +#define SEC_IPC_INPUT_DOORBELL_REG (0x0000 + IPC_BASE_ADDR) + +/* IPC Input Status Register + * This register indicates whether or not processing of + * the most recent command has been completed by the SEC + * New commands and payloads should not be written by the Host + * until this indicates that the previous command has been processed. 
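+ * The host is expected to check SEC_IPC_INPUT_STATUS_RDY (bit 0) + * before queueing a new command.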
+ */ +#define SEC_IPC_INPUT_STATUS_REG (0x0008 + IPC_BASE_ADDR) +# define SEC_IPC_INPUT_STATUS_RDY BIT(0) + +/* IPC Host Interrupt Status Register */ +#define SEC_IPC_HOST_INT_STATUS_REG (0x0010 + IPC_BASE_ADDR) +#define SEC_IPC_HOST_INT_STATUS_OUT_DB BIT(0) +#define SEC_IPC_HOST_INT_STATUS_IN_RDY BIT(1) +#define SEC_IPC_HOST_INT_STATUS_HDCP_M0_RCVD BIT(5) +#define SEC_IPC_HOST_INT_STATUS_ILL_MEM_ACCESS BIT(17) +#define SEC_IPC_HOST_INT_STATUS_AES_HKEY_ERR BIT(18) +#define SEC_IPC_HOST_INT_STATUS_DES_HKEY_ERR BIT(19) +#define SEC_IPC_HOST_INT_STATUS_TMRMTB_OVERFLOW BIT(21) + +/* Convenient mask for pending interrupts */ +#define SEC_IPC_HOST_INT_STATUS_PENDING \ + (SEC_IPC_HOST_INT_STATUS_OUT_DB| \ + SEC_IPC_HOST_INT_STATUS_IN_RDY) + +/* IPC Host Interrupt Mask Register */ +#define SEC_IPC_HOST_INT_MASK_REG (0x0014 + IPC_BASE_ADDR) + +# define SEC_IPC_HOST_INT_MASK_OUT_DB BIT(0) /* Output Doorbell Int Mask */ +# define SEC_IPC_HOST_INT_MASK_IN_RDY BIT(1) /* Input Ready Int Mask */ + +/* IPC Input Payload RAM */ +#define SEC_IPC_INPUT_PAYLOAD_REG (0x0100 + IPC_BASE_ADDR) +/* IPC Shared Payload RAM */ +#define IPC_SHARED_PAYLOAD_REG (0x0200 + IPC_BASE_ADDR) + +/* SeC Address Translation Table Entry 2 - Ctrl + * + * This register resides also in SeC's PCI-E Memory space. + */ +#define SATT2_CTRL_REG 0x1040 +# define SATT2_CTRL_VALID_MSK BIT(0) +# define SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT 8 +# define SATT2_CTRL_BRIDGE_HOST_EN_MSK BIT(12) + +/* SATT Table Entry 2 SAP Base Address Register */ +#define SATT2_SAP_BA_REG 0x1044 +/* SATT Table Entry 2 SAP Size Register. */ +#define SATT2_SAP_SIZE_REG 0x1048 + /* SATT Table Entry 2 SAP Bridge Address - LSB Register */ +#define SATT2_BRG_BA_LSB_REG 0x104C + +/* Host High-level Interrupt Status Register */ +#define HHISR_REG 0x2020 +/* Host High-level Interrupt Enable Register + * + * Resides in PCI memory space. This is the top hierarchy for + * interrupts from SeC to host, aggregating both interrupts that + * arrive through HICR registers as well as interrupts + * that arrive via IPC. + */ +#define HHIER_REG 0x2024 +#define IPC_HHIER_SEC BIT(0) +#define IPC_HHIER_BRIDGE BIT(1) +#define IPC_HHIER_MSK (IPC_HHIER_SEC | IPC_HHIER_BRIDGE) + +/* Host High-level Interrupt Mask Register. + * + * Resides in PCI memory space. + * This is the top hierarchy for masking interrupts from SeC to host. + */ +#define HHIMR_REG 0x2028 +#define IPC_HHIMR_SEC BIT(0) +#define IPC_HHIMR_BRIDGE BIT(1) + +/* Host High-level IRQ Status Register */ +#define HHIRQSR_REG 0x202C + +/* Host Interrupt Cause Register 0 - SeC IPC Readiness + * + * This register is both an ICR to Host from PCI Memory Space + * and it is also exposed in the SeC memory space. + * This register is used by SeC's IPC driver in order + * to synchronize with host about IPC interface state. + */ +#define HICR_SEC_IPC_READINESS_REG 0x2040 +#define HICR_SEC_IPC_READINESS_HOST_RDY BIT(0) +#define HICR_SEC_IPC_READINESS_SEC_RDY BIT(1) +#define HICR_SEC_IPC_READINESS_SYS_RDY \ + (HICR_SEC_IPC_READINESS_HOST_RDY | \ + HICR_SEC_IPC_READINESS_SEC_RDY) +#define HICR_SEC_IPC_READINESS_RDY_CLR BIT(2) + +/* Host Interrupt Cause Register 1 - Aliveness Response */ +/* This register is both an ICR to Host from PCI Memory Space + * and it is also exposed in the SeC memory space. + * The register may be used by SeC to ACK a host request for aliveness. 
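+ * Paired with SICR_HOST_ALIVENESS_REQ_REG (below) it forms the + * two-register request/acknowledge aliveness handshake.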
+ */ +#define HICR_HOST_ALIVENESS_RESP_REG 0x2044 +#define HICR_HOST_ALIVENESS_RESP_ACK BIT(0) + +/* Host Interrupt Cause Register 2 - SeC IPC Output Doorbell */ +#define HICR_SEC_IPC_OUTPUT_DOORBELL_REG 0x2048 + +/* Host Interrupt Status Register. + * + * Resides in PCI memory space. + * This is the main register involved in generating interrupts + * from SeC to host via HICRs. + * The interrupt generation rules are as follows: + * An interrupt will be generated whenever for any i, + * there is a transition from a state where at least one of + * the following conditions did not hold, to a state where + * ALL the following conditions hold: + * A) HISR.INT[i]_STS == 1. + * B) HIER.INT[i]_EN == 1. + */ +#define HISR_REG 0x2060 +#define HISR_INT_0_STS BIT(0) +#define HISR_INT_1_STS BIT(1) +#define HISR_INT_2_STS BIT(2) +#define HISR_INT_3_STS BIT(3) +#define HISR_INT_4_STS BIT(4) +#define HISR_INT_5_STS BIT(5) +#define HISR_INT_6_STS BIT(6) +#define HISR_INT_7_STS BIT(7) +#define HISR_INT_STS_MSK \ + (HISR_INT_0_STS | HISR_INT_1_STS | HISR_INT_2_STS) + +/* Host Interrupt Enable Register. Resides in PCI memory space. */ +#define HIER_REG 0x2064 +#define HIER_INT_0_EN BIT(0) +#define HIER_INT_1_EN BIT(1) +#define HIER_INT_2_EN BIT(2) +#define HIER_INT_3_EN BIT(3) +#define HIER_INT_4_EN BIT(4) +#define HIER_INT_5_EN BIT(5) +#define HIER_INT_6_EN BIT(6) +#define HIER_INT_7_EN BIT(7) + +#define HIER_INT_EN_MSK \ + (HIER_INT_0_EN | HIER_INT_1_EN | HIER_INT_2_EN) + + +/* SEC Memory Space IPC output payload. + * + * This register is part of the output payload which SEC provides to host. + */ +#define BRIDGE_IPC_OUTPUT_PAYLOAD_REG 0x20C0 + +/* SeC Interrupt Cause Register - Host Aliveness Request + * This register is both an ICR to SeC and it is also exposed + * in the host-visible PCI memory space. + * The register is used by host to request SeC aliveness. + */ +#define SICR_HOST_ALIVENESS_REQ_REG 0x214C +#define SICR_HOST_ALIVENESS_REQ_REQUESTED BIT(0) + + +/* SeC Interrupt Cause Register - Host IPC Readiness + * + * This register is both an ICR to SeC and it is also exposed + * in the host-visible PCI memory space. + * This register is used by the host's SeC driver uses in order + * to synchronize with SeC about IPC interface state. + */ +#define SICR_HOST_IPC_READINESS_REQ_REG 0x2150 + + +#define SICR_HOST_IPC_READINESS_HOST_RDY BIT(0) +#define SICR_HOST_IPC_READINESS_SEC_RDY BIT(1) +#define SICR_HOST_IPC_READINESS_SYS_RDY \ + (SICR_HOST_IPC_READINESS_HOST_RDY | \ + SICR_HOST_IPC_READINESS_SEC_RDY) +#define SICR_HOST_IPC_READINESS_RDY_CLR BIT(2) + +/* SeC Interrupt Cause Register - SeC IPC Output Status + * + * This register indicates whether or not processing of the most recent + * command has been completed by the Host. + * New commands and payloads should not be written by SeC until this + * register indicates that the previous command has been processed. + */ +#define SICR_SEC_IPC_OUTPUT_STATUS_REG 0x2154 +# define SEC_IPC_OUTPUT_STATUS_RDY BIT(0) + + + +/* MEI IPC Message payload size 64 bytes */ +#define PAYLOAD_SIZE 64 + +/* MAX size for SATT range 32MB */ +#define SATT_RANGE_MAX (32 << 20) + + +#endif /* _MEI_HW_TXE_REGS_H_ */ + diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c new file mode 100644 index 000000000..a4e854b9b --- /dev/null +++ b/drivers/misc/mei/hw-txe.c @@ -0,0 +1,1260 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2020, Intel Corporation. All rights reserved. 
+ * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#include <linux/pci.h> +#include <linux/jiffies.h> +#include <linux/ktime.h> +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/interrupt.h> +#include <linux/pm_runtime.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hw-txe.h" +#include "client.h" +#include "hbm.h" + +#include "mei-trace.h" + +#define TXE_HBUF_DEPTH (PAYLOAD_SIZE / MEI_SLOT_SIZE) + +/** + * mei_txe_reg_read - Reads 32bit data from the txe device + * + * @base_addr: registers base address + * @offset: register offset + * + * Return: register value + */ +static inline u32 mei_txe_reg_read(void __iomem *base_addr, + unsigned long offset) +{ + return ioread32(base_addr + offset); +} + +/** + * mei_txe_reg_write - Writes 32bit data to the txe device + * + * @base_addr: registers base address + * @offset: register offset + * @value: the value to write + */ +static inline void mei_txe_reg_write(void __iomem *base_addr, + unsigned long offset, u32 value) +{ + iowrite32(value, base_addr + offset); +} + +/** + * mei_txe_sec_reg_read_silent - Reads 32bit data from the SeC BAR + * + * @hw: the txe hardware structure + * @offset: register offset + * + * Doesn't check for aliveness while Reads 32bit data from the SeC BAR + * + * Return: register value + */ +static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw, + unsigned long offset) +{ + return mei_txe_reg_read(hw->mem_addr[SEC_BAR], offset); +} + +/** + * mei_txe_sec_reg_read - Reads 32bit data from the SeC BAR + * + * @hw: the txe hardware structure + * @offset: register offset + * + * Reads 32bit data from the SeC BAR and shout loud if aliveness is not set + * + * Return: register value + */ +static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw, + unsigned long offset) +{ + WARN(!hw->aliveness, "sec read: aliveness not asserted\n"); + return mei_txe_sec_reg_read_silent(hw, offset); +} +/** + * mei_txe_sec_reg_write_silent - Writes 32bit data to the SeC BAR + * doesn't check for aliveness + * + * @hw: the txe hardware structure + * @offset: register offset + * @value: value to write + * + * Doesn't check for aliveness while writes 32bit data from to the SeC BAR + */ +static inline void mei_txe_sec_reg_write_silent(struct mei_txe_hw *hw, + unsigned long offset, u32 value) +{ + mei_txe_reg_write(hw->mem_addr[SEC_BAR], offset, value); +} + +/** + * mei_txe_sec_reg_write - Writes 32bit data to the SeC BAR + * + * @hw: the txe hardware structure + * @offset: register offset + * @value: value to write + * + * Writes 32bit data from the SeC BAR and shout loud if aliveness is not set + */ +static inline void mei_txe_sec_reg_write(struct mei_txe_hw *hw, + unsigned long offset, u32 value) +{ + WARN(!hw->aliveness, "sec write: aliveness not asserted\n"); + mei_txe_sec_reg_write_silent(hw, offset, value); +} +/** + * mei_txe_br_reg_read - Reads 32bit data from the Bridge BAR + * + * @hw: the txe hardware structure + * @offset: offset from which to read the data + * + * Return: the byte read. 
+ */ +static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw, + unsigned long offset) +{ + return mei_txe_reg_read(hw->mem_addr[BRIDGE_BAR], offset); +} + +/** + * mei_txe_br_reg_write - Writes 32bit data to the Bridge BAR + * + * @hw: the txe hardware structure + * @offset: offset from which to write the data + * @value: the byte to write + */ +static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw, + unsigned long offset, u32 value) +{ + mei_txe_reg_write(hw->mem_addr[BRIDGE_BAR], offset, value); +} + +/** + * mei_txe_aliveness_set - request for aliveness change + * + * @dev: the device structure + * @req: requested aliveness value + * + * Request for aliveness change and returns true if the change is + * really needed and false if aliveness is already + * in the requested state + * + * Locking: called under "dev->device_lock" lock + * + * Return: true if request was send + */ +static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req) +{ + + struct mei_txe_hw *hw = to_txe_hw(dev); + bool do_req = hw->aliveness != req; + + dev_dbg(dev->dev, "Aliveness current=%d request=%d\n", + hw->aliveness, req); + if (do_req) { + dev->pg_event = MEI_PG_EVENT_WAIT; + mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req); + } + return do_req; +} + + +/** + * mei_txe_aliveness_req_get - get aliveness requested register value + * + * @dev: the device structure + * + * Extract HICR_HOST_ALIVENESS_RESP_ACK bit from + * from HICR_HOST_ALIVENESS_REQ register value + * + * Return: SICR_HOST_ALIVENESS_REQ_REQUESTED bit value + */ +static u32 mei_txe_aliveness_req_get(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + u32 reg; + + reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG); + return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED; +} + +/** + * mei_txe_aliveness_get - get aliveness response register value + * + * @dev: the device structure + * + * Return: HICR_HOST_ALIVENESS_RESP_ACK bit from HICR_HOST_ALIVENESS_RESP + * register + */ +static u32 mei_txe_aliveness_get(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + u32 reg; + + reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG); + return reg & HICR_HOST_ALIVENESS_RESP_ACK; +} + +/** + * mei_txe_aliveness_poll - waits for aliveness to settle + * + * @dev: the device structure + * @expected: expected aliveness value + * + * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set + * + * Return: 0 if the expected value was received, -ETIME otherwise + */ +static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + ktime_t stop, start; + + start = ktime_get(); + stop = ktime_add(start, ms_to_ktime(SEC_ALIVENESS_WAIT_TIMEOUT)); + do { + hw->aliveness = mei_txe_aliveness_get(dev); + if (hw->aliveness == expected) { + dev->pg_event = MEI_PG_EVENT_IDLE; + dev_dbg(dev->dev, "aliveness settled after %lld usecs\n", + ktime_to_us(ktime_sub(ktime_get(), start))); + return 0; + } + usleep_range(20, 50); + } while (ktime_before(ktime_get(), stop)); + + dev->pg_event = MEI_PG_EVENT_IDLE; + dev_err(dev->dev, "aliveness timed out\n"); + return -ETIME; +} + +/** + * mei_txe_aliveness_wait - waits for aliveness to settle + * + * @dev: the device structure + * @expected: expected aliveness value + * + * Waits for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set + * + * Return: 0 on success and < 0 otherwise + */ +static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + const 
unsigned long timeout =
+			msecs_to_jiffies(SEC_ALIVENESS_WAIT_TIMEOUT);
+	long err;
+	int ret;
+
+	hw->aliveness = mei_txe_aliveness_get(dev);
+	if (hw->aliveness == expected)
+		return 0;
+
+	mutex_unlock(&dev->device_lock);
+	err = wait_event_timeout(hw->wait_aliveness_resp,
+			dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+	mutex_lock(&dev->device_lock);
+
+	hw->aliveness = mei_txe_aliveness_get(dev);
+	ret = hw->aliveness == expected ? 0 : -ETIME;
+
+	if (ret)
+		dev_warn(dev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
+			 err, hw->aliveness, dev->pg_event);
+	else
+		dev_dbg(dev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
+			jiffies_to_msecs(timeout - err),
+			hw->aliveness, dev->pg_event);
+
+	dev->pg_event = MEI_PG_EVENT_IDLE;
+	return ret;
+}
+
+/**
+ * mei_txe_aliveness_set_sync - sets and waits for aliveness change to complete
+ *
+ * @dev: the device structure
+ * @req: requested aliveness value
+ *
+ * Return: 0 on success and < 0 otherwise
+ */
+int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
+{
+	if (mei_txe_aliveness_set(dev, req))
+		return mei_txe_aliveness_wait(dev, req);
+	return 0;
+}
+
+/**
+ * mei_txe_pg_in_transition - is device now in pg transition
+ *
+ * @dev: the device structure
+ *
+ * Return: true if in pg transition, false otherwise
+ */
+static bool mei_txe_pg_in_transition(struct mei_device *dev)
+{
+	return dev->pg_event == MEI_PG_EVENT_WAIT;
+}
+
+/**
+ * mei_txe_pg_is_enabled - detect if PG is supported by HW
+ *
+ * @dev: the device structure
+ *
+ * Return: true if pg is supported, false otherwise
+ */
+static bool mei_txe_pg_is_enabled(struct mei_device *dev)
+{
+	return true;
+}
+
+/**
+ * mei_txe_pg_state - translate aliveness register value
+ *	to the mei power gating state
+ *
+ * @dev: the device structure
+ *
+ * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
+ */
+static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+
+	return hw->aliveness ? MEI_PG_OFF : MEI_PG_ON;
+}
+
+/**
+ * mei_txe_input_ready_interrupt_enable - enables the Input Ready Interrupt
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	u32 hintmsk;
+	/* Enable the SEC_IPC_HOST_INT_MASK_IN_RDY interrupt */
+	hintmsk = mei_txe_sec_reg_read(hw, SEC_IPC_HOST_INT_MASK_REG);
+	hintmsk |= SEC_IPC_HOST_INT_MASK_IN_RDY;
+	mei_txe_sec_reg_write(hw, SEC_IPC_HOST_INT_MASK_REG, hintmsk);
+}
+
+/**
+ * mei_txe_input_doorbell_set - sets bit 0 in
+ *	SEC_IPC_INPUT_DOORBELL.IPC_INPUT_DOORBELL.
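+ *
+ * Ringing the doorbell signals SeC that a complete message is waiting
+ * in the input payload registers; mei_txe_write() below writes the
+ * payload first and rings the doorbell last.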
+ * + * @hw: the txe hardware structure + */ +static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw) +{ + /* Clear the interrupt cause */ + clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause); + mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_DOORBELL_REG, 1); +} + +/** + * mei_txe_output_ready_set - Sets the SICR_SEC_IPC_OUTPUT_STATUS bit to 1 + * + * @hw: the txe hardware structure + */ +static void mei_txe_output_ready_set(struct mei_txe_hw *hw) +{ + mei_txe_br_reg_write(hw, + SICR_SEC_IPC_OUTPUT_STATUS_REG, + SEC_IPC_OUTPUT_STATUS_RDY); +} + +/** + * mei_txe_is_input_ready - check if TXE is ready for receiving data + * + * @dev: the device structure + * + * Return: true if INPUT STATUS READY bit is set + */ +static bool mei_txe_is_input_ready(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + u32 status; + + status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG); + return !!(SEC_IPC_INPUT_STATUS_RDY & status); +} + +/** + * mei_txe_intr_clear - clear all interrupts + * + * @dev: the device structure + */ +static inline void mei_txe_intr_clear(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + + mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG, + SEC_IPC_HOST_INT_STATUS_PENDING); + mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK); + mei_txe_br_reg_write(hw, HHISR_REG, IPC_HHIER_MSK); +} + +/** + * mei_txe_intr_disable - disable all interrupts + * + * @dev: the device structure + */ +static void mei_txe_intr_disable(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + + mei_txe_br_reg_write(hw, HHIER_REG, 0); + mei_txe_br_reg_write(hw, HIER_REG, 0); +} +/** + * mei_txe_intr_enable - enable all interrupts + * + * @dev: the device structure + */ +static void mei_txe_intr_enable(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + + mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK); + mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK); +} + +/** + * mei_txe_synchronize_irq - wait for pending IRQ handlers + * + * @dev: the device structure + */ +static void mei_txe_synchronize_irq(struct mei_device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev->dev); + + synchronize_irq(pdev->irq); +} + +/** + * mei_txe_pending_interrupts - check if there are pending interrupts + * only Aliveness, Input ready, and output doorbell are of relevance + * + * @dev: the device structure + * + * Checks if there are pending interrupts + * only Aliveness, Readiness, Input ready, and Output doorbell are relevant + * + * Return: true if there are pending interrupts + */ +static bool mei_txe_pending_interrupts(struct mei_device *dev) +{ + + struct mei_txe_hw *hw = to_txe_hw(dev); + bool ret = (hw->intr_cause & (TXE_INTR_READINESS | + TXE_INTR_ALIVENESS | + TXE_INTR_IN_READY | + TXE_INTR_OUT_DB)); + + if (ret) { + dev_dbg(dev->dev, + "Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n", + !!(hw->intr_cause & TXE_INTR_IN_READY), + !!(hw->intr_cause & TXE_INTR_READINESS), + !!(hw->intr_cause & TXE_INTR_ALIVENESS), + !!(hw->intr_cause & TXE_INTR_OUT_DB)); + } + return ret; +} + +/** + * mei_txe_input_payload_write - write a dword to the host buffer + * at offset idx + * + * @dev: the device structure + * @idx: index in the host buffer + * @value: value + */ +static void mei_txe_input_payload_write(struct mei_device *dev, + unsigned long idx, u32 value) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + + mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG + + (idx * sizeof(u32)), value); +} + +/** + * 
mei_txe_out_data_read - read dword from the device buffer + * at offset idx + * + * @dev: the device structure + * @idx: index in the device buffer + * + * Return: register value at index + */ +static u32 mei_txe_out_data_read(const struct mei_device *dev, + unsigned long idx) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + + return mei_txe_br_reg_read(hw, + BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32))); +} + +/* Readiness */ + +/** + * mei_txe_readiness_set_host_rdy - set host readiness bit + * + * @dev: the device structure + */ +static void mei_txe_readiness_set_host_rdy(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + + mei_txe_br_reg_write(hw, + SICR_HOST_IPC_READINESS_REQ_REG, + SICR_HOST_IPC_READINESS_HOST_RDY); +} + +/** + * mei_txe_readiness_clear - clear host readiness bit + * + * @dev: the device structure + */ +static void mei_txe_readiness_clear(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + + mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG, + SICR_HOST_IPC_READINESS_RDY_CLR); +} +/** + * mei_txe_readiness_get - Reads and returns + * the HICR_SEC_IPC_READINESS register value + * + * @dev: the device structure + * + * Return: the HICR_SEC_IPC_READINESS register value + */ +static u32 mei_txe_readiness_get(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + + return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG); +} + + +/** + * mei_txe_readiness_is_sec_rdy - check readiness + * for HICR_SEC_IPC_READINESS_SEC_RDY + * + * @readiness: cached readiness state + * + * Return: true if readiness bit is set + */ +static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness) +{ + return !!(readiness & HICR_SEC_IPC_READINESS_SEC_RDY); +} + +/** + * mei_txe_hw_is_ready - check if the hw is ready + * + * @dev: the device structure + * + * Return: true if sec is ready + */ +static bool mei_txe_hw_is_ready(struct mei_device *dev) +{ + u32 readiness = mei_txe_readiness_get(dev); + + return mei_txe_readiness_is_sec_rdy(readiness); +} + +/** + * mei_txe_host_is_ready - check if the host is ready + * + * @dev: the device structure + * + * Return: true if host is ready + */ +static inline bool mei_txe_host_is_ready(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG); + + return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY); +} + +/** + * mei_txe_readiness_wait - wait till readiness settles + * + * @dev: the device structure + * + * Return: 0 on success and -ETIME on timeout + */ +static int mei_txe_readiness_wait(struct mei_device *dev) +{ + if (mei_txe_hw_is_ready(dev)) + return 0; + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready, + msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT)); + mutex_lock(&dev->device_lock); + if (!dev->recvd_hw_ready) { + dev_err(dev->dev, "wait for readiness failed\n"); + return -ETIME; + } + + dev->recvd_hw_ready = false; + return 0; +} + +static const struct mei_fw_status mei_txe_fw_sts = { + .count = 2, + .status[0] = PCI_CFG_TXE_FW_STS0, + .status[1] = PCI_CFG_TXE_FW_STS1 +}; + +/** + * mei_txe_fw_status - read fw status register from pci config space + * + * @dev: mei device + * @fw_status: fw status register values + * + * Return: 0 on success, error otherwise + */ +static int mei_txe_fw_status(struct mei_device *dev, + struct mei_fw_status *fw_status) +{ + const struct mei_fw_status *fw_src = &mei_txe_fw_sts; + struct pci_dev *pdev = to_pci_dev(dev->dev); + int 
ret;
+	int i;
+
+	if (!fw_status)
+		return -EINVAL;
+
+	fw_status->count = fw_src->count;
+	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
+		ret = pci_read_config_dword(pdev, fw_src->status[i],
+					    &fw_status->status[i]);
+		trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+				       fw_src->status[i],
+				       fw_status->status[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * mei_txe_hw_config - configure hardware at the start of the device
+ *
+ * @dev: the device structure
+ *
+ * Configure hardware at the start of the device; this should be done
+ * only once, at device probe time.
+ *
+ * Return: always 0
+ */
+static int mei_txe_hw_config(struct mei_device *dev)
+{
+
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+
+	hw->aliveness = mei_txe_aliveness_get(dev);
+	hw->readiness = mei_txe_readiness_get(dev);
+
+	dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
+		hw->aliveness, hw->readiness);
+
+	return 0;
+}
+
+/**
+ * mei_txe_write - writes a message to the device.
+ *
+ * @dev: the device structure
+ * @hdr: header of message
+ * @hdr_len: header length in bytes - must be a multiple of a slot (4 bytes)
+ * @data: payload
+ * @data_len: payload length in bytes
+ *
+ * Return: 0 on success, < 0 otherwise.
+ */
+static int mei_txe_write(struct mei_device *dev,
+			 const void *hdr, size_t hdr_len,
+			 const void *data, size_t data_len)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	unsigned long rem;
+	const u32 *reg_buf;
+	u32 slots = TXE_HBUF_DEPTH;
+	u32 dw_cnt;
+	unsigned long i, j;
+
+	if (WARN_ON(!hdr || !data || hdr_len & 0x3))
+		return -EINVAL;
+
+	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
+
+	dw_cnt = mei_data2slots(hdr_len + data_len);
+	if (dw_cnt > slots)
+		return -EMSGSIZE;
+
+	if (WARN(!hw->aliveness, "txe write: aliveness not asserted\n"))
+		return -EAGAIN;
+
+	/* Enable Input Ready Interrupt.
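+	 *
+	 * The write path below is, in order: enable the IN_RDY interrupt,
+	 * verify SEC_IPC_INPUT_STATUS reports ready, copy header then
+	 * payload dword by dword into the input payload registers (padding
+	 * a trailing partial dword with zeroes), and ring the input
+	 * doorbell so SeC consumes the buffer.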
*/ + mei_txe_input_ready_interrupt_enable(dev); + + if (!mei_txe_is_input_ready(dev)) { + char fw_sts_str[MEI_FW_STATUS_STR_SZ]; + + mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ); + dev_err(dev->dev, "Input is not ready %s\n", fw_sts_str); + return -EAGAIN; + } + + reg_buf = hdr; + for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++) + mei_txe_input_payload_write(dev, i, reg_buf[i]); + + reg_buf = data; + for (j = 0; j < data_len / MEI_SLOT_SIZE; j++) + mei_txe_input_payload_write(dev, i + j, reg_buf[j]); + + rem = data_len & 0x3; + if (rem > 0) { + u32 reg = 0; + + memcpy(®, (const u8 *)data + data_len - rem, rem); + mei_txe_input_payload_write(dev, i + j, reg); + } + + /* after each write the whole buffer is consumed */ + hw->slots = 0; + + /* Set Input-Doorbell */ + mei_txe_input_doorbell_set(hw); + + return 0; +} + +/** + * mei_txe_hbuf_depth - mimics the me hbuf circular buffer + * + * @dev: the device structure + * + * Return: the TXE_HBUF_DEPTH + */ +static u32 mei_txe_hbuf_depth(const struct mei_device *dev) +{ + return TXE_HBUF_DEPTH; +} + +/** + * mei_txe_hbuf_empty_slots - mimics the me hbuf circular buffer + * + * @dev: the device structure + * + * Return: always TXE_HBUF_DEPTH + */ +static int mei_txe_hbuf_empty_slots(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + + return hw->slots; +} + +/** + * mei_txe_count_full_read_slots - mimics the me device circular buffer + * + * @dev: the device structure + * + * Return: always buffer size in dwords count + */ +static int mei_txe_count_full_read_slots(struct mei_device *dev) +{ + /* read buffers has static size */ + return TXE_HBUF_DEPTH; +} + +/** + * mei_txe_read_hdr - read message header which is always in 4 first bytes + * + * @dev: the device structure + * + * Return: mei message header + */ + +static u32 mei_txe_read_hdr(const struct mei_device *dev) +{ + return mei_txe_out_data_read(dev, 0); +} +/** + * mei_txe_read - reads a message from the txe device. + * + * @dev: the device structure + * @buf: message buffer will be written + * @len: message size will be read + * + * Return: -EINVAL on error wrong argument and 0 on success + */ +static int mei_txe_read(struct mei_device *dev, + unsigned char *buf, unsigned long len) +{ + + struct mei_txe_hw *hw = to_txe_hw(dev); + u32 *reg_buf, reg; + u32 rem; + u32 i; + + if (WARN_ON(!buf || !len)) + return -EINVAL; + + reg_buf = (u32 *)buf; + rem = len & 0x3; + + dev_dbg(dev->dev, "buffer-length = %lu buf[0]0x%08X\n", + len, mei_txe_out_data_read(dev, 0)); + + for (i = 0; i < len / MEI_SLOT_SIZE; i++) { + /* skip header: index starts from 1 */ + reg = mei_txe_out_data_read(dev, i + 1); + dev_dbg(dev->dev, "buf[%d] = 0x%08X\n", i, reg); + *reg_buf++ = reg; + } + + if (rem) { + reg = mei_txe_out_data_read(dev, i + 1); + memcpy(reg_buf, ®, rem); + } + + mei_txe_output_ready_set(hw); + return 0; +} + +/** + * mei_txe_hw_reset - resets host and fw. + * + * @dev: the device structure + * @intr_enable: if interrupt should be enabled after reset. 
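+ *
+ * The reset sequence below: read the input doorbell to re-sync Bridge
+ * and SeC, poll until the aliveness request and response registers
+ * agree, drop aliveness to 0 if it was set, and finally clear host
+ * readiness via the RDY_CLR bit.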
+ *
+ * Return: 0 on success and < 0 in case of error
+ */
+static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+
+	u32 aliveness_req;
+	/*
+	 * read input doorbell to ensure consistency between Bridge and SeC;
+	 * the return value might be garbage
+	 */
+	(void)mei_txe_sec_reg_read_silent(hw, SEC_IPC_INPUT_DOORBELL_REG);
+
+	aliveness_req = mei_txe_aliveness_req_get(dev);
+	hw->aliveness = mei_txe_aliveness_get(dev);
+
+	/* Disable interrupts: at this stage we will poll */
+	mei_txe_intr_disable(dev);
+
+	/*
+	 * If Aliveness Request and Aliveness Response are not equal then
+	 * wait for them to become equal.
+	 * Since we might have interrupts disabled - poll for it
+	 */
+	if (aliveness_req != hw->aliveness)
+		if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) {
+			dev_err(dev->dev, "wait for aliveness settle failed ... bailing out\n");
+			return -EIO;
+		}
+
+	/*
+	 * If Aliveness Request and Aliveness Response are set then clear them
+	 */
+	if (aliveness_req) {
+		mei_txe_aliveness_set(dev, 0);
+		if (mei_txe_aliveness_poll(dev, 0) < 0) {
+			dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
+			return -EIO;
+		}
+	}
+
+	/*
+	 * Set readiness RDY_CLR bit
+	 */
+	mei_txe_readiness_clear(dev);
+
+	return 0;
+}
+
+/**
+ * mei_txe_hw_start - start the hardware after reset
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+static int mei_txe_hw_start(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	int ret;
+
+	u32 hisr;
+
+	/* bring back interrupts */
+	mei_txe_intr_enable(dev);
+
+	ret = mei_txe_readiness_wait(dev);
+	if (ret < 0) {
+		dev_err(dev->dev, "waiting for readiness failed\n");
+		return ret;
+	}
+
+	/*
+	 * If HISR.INT2_STS interrupt status bit is set then clear it.
+	 */
+	hisr = mei_txe_br_reg_read(hw, HISR_REG);
+	if (hisr & HISR_INT_2_STS)
+		mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_2_STS);
+
+	/* Clear the interrupt cause of OutputDoorbell */
+	clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause);
+
+	ret = mei_txe_aliveness_set_sync(dev, 1);
+	if (ret < 0) {
+		dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
+		return ret;
+	}
+
+	pm_runtime_set_active(dev->dev);
+
+	/* enable input ready interrupts:
+	 * SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK
+	 */
+	mei_txe_input_ready_interrupt_enable(dev);
+
+
+	/* Set the SICR_SEC_IPC_OUTPUT_STATUS.IPC_OUTPUT_READY bit */
+	mei_txe_output_ready_set(hw);
+
+	/* Set bit SICR_HOST_IPC_READINESS.HOST_RDY
+	 */
+	mei_txe_readiness_set_host_rdy(dev);
+
+	return 0;
+}
+
+/**
+ * mei_txe_check_and_ack_intrs - translate multi BAR interrupt into
+ *  single bit mask and acknowledge the interrupts
+ *
+ * @dev: the device structure
+ * @do_ack: acknowledge interrupts
+ *
+ * Return: true if found interrupts to process.
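+ *
+ * The causes are hierarchical: HHISR gates the bridge-level HISR bits
+ * and, only while aliveness is asserted, the SeC IPC host interrupt
+ * status; acknowledgment below runs bottom-up (IPC status, then HISR,
+ * then HHISR).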
+ */ +static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + u32 hisr; + u32 hhisr; + u32 ipc_isr; + u32 aliveness; + bool generated; + + /* read interrupt registers */ + hhisr = mei_txe_br_reg_read(hw, HHISR_REG); + generated = (hhisr & IPC_HHIER_MSK); + if (!generated) + goto out; + + hisr = mei_txe_br_reg_read(hw, HISR_REG); + + aliveness = mei_txe_aliveness_get(dev); + if (hhisr & IPC_HHIER_SEC && aliveness) { + ipc_isr = mei_txe_sec_reg_read_silent(hw, + SEC_IPC_HOST_INT_STATUS_REG); + } else { + ipc_isr = 0; + hhisr &= ~IPC_HHIER_SEC; + } + + generated = generated || + (hisr & HISR_INT_STS_MSK) || + (ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING); + + if (generated && do_ack) { + /* Save the interrupt causes */ + hw->intr_cause |= hisr & HISR_INT_STS_MSK; + if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY) + hw->intr_cause |= TXE_INTR_IN_READY; + + + mei_txe_intr_disable(dev); + /* Clear the interrupts in hierarchy: + * IPC and Bridge, than the High Level */ + mei_txe_sec_reg_write_silent(hw, + SEC_IPC_HOST_INT_STATUS_REG, ipc_isr); + mei_txe_br_reg_write(hw, HISR_REG, hisr); + mei_txe_br_reg_write(hw, HHISR_REG, hhisr); + } + +out: + return generated; +} + +/** + * mei_txe_irq_quick_handler - The ISR of the MEI device + * + * @irq: The irq number + * @dev_id: pointer to the device structure + * + * Return: IRQ_WAKE_THREAD if interrupt is designed for the device + * IRQ_NONE otherwise + */ +irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id) +{ + struct mei_device *dev = dev_id; + + if (mei_txe_check_and_ack_intrs(dev, true)) + return IRQ_WAKE_THREAD; + return IRQ_NONE; +} + + +/** + * mei_txe_irq_thread_handler - txe interrupt thread + * + * @irq: The irq number + * @dev_id: pointer to the device structure + * + * Return: IRQ_HANDLED + */ +irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) +{ + struct mei_device *dev = (struct mei_device *) dev_id; + struct mei_txe_hw *hw = to_txe_hw(dev); + struct list_head cmpl_list; + s32 slots; + int rets = 0; + + dev_dbg(dev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n", + mei_txe_br_reg_read(hw, HHISR_REG), + mei_txe_br_reg_read(hw, HISR_REG), + mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG)); + + + /* initialize our complete list */ + mutex_lock(&dev->device_lock); + INIT_LIST_HEAD(&cmpl_list); + + if (pci_dev_msi_enabled(to_pci_dev(dev->dev))) + mei_txe_check_and_ack_intrs(dev, true); + + /* show irq events */ + mei_txe_pending_interrupts(dev); + + hw->aliveness = mei_txe_aliveness_get(dev); + hw->readiness = mei_txe_readiness_get(dev); + + /* Readiness: + * Detection of TXE driver going through reset + * or TXE driver resetting the HECI interface. + */ + if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) { + dev_dbg(dev->dev, "Readiness Interrupt was received...\n"); + + /* Check if SeC is going through reset */ + if (mei_txe_readiness_is_sec_rdy(hw->readiness)) { + dev_dbg(dev->dev, "we need to start the dev.\n"); + dev->recvd_hw_ready = true; + } else { + dev->recvd_hw_ready = false; + if (dev->dev_state != MEI_DEV_RESETTING) { + + dev_warn(dev->dev, "FW not ready: resetting.\n"); + schedule_work(&dev->reset_work); + goto end; + + } + } + wake_up(&dev->wait_hw_ready); + } + + /************************************************************/ + /* Check interrupt cause: + * Aliveness: Detection of SeC acknowledge of host request that + * it remain alive or host cancellation of that request. 
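+	 * On this event the handler records MEI_PG_EVENT_RECEIVED and
+	 * wakes wait_aliveness_resp, unblocking mei_txe_aliveness_wait().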
+ */ + + if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) { + /* Clear the interrupt cause */ + dev_dbg(dev->dev, + "Aliveness Interrupt: Status: %d\n", hw->aliveness); + dev->pg_event = MEI_PG_EVENT_RECEIVED; + if (waitqueue_active(&hw->wait_aliveness_resp)) + wake_up(&hw->wait_aliveness_resp); + } + + + /* Output Doorbell: + * Detection of SeC having sent output to host + */ + slots = mei_count_full_read_slots(dev); + if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) { + /* Read from TXE */ + rets = mei_irq_read_handler(dev, &cmpl_list, &slots); + if (rets && + (dev->dev_state != MEI_DEV_RESETTING && + dev->dev_state != MEI_DEV_POWER_DOWN)) { + dev_err(dev->dev, + "mei_irq_read_handler ret = %d.\n", rets); + + schedule_work(&dev->reset_work); + goto end; + } + } + /* Input Ready: Detection if host can write to SeC */ + if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) { + dev->hbuf_is_ready = true; + hw->slots = TXE_HBUF_DEPTH; + } + + if (hw->aliveness && dev->hbuf_is_ready) { + /* get the real register value */ + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + rets = mei_irq_write_handler(dev, &cmpl_list); + if (rets && rets != -EMSGSIZE) + dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n", + rets); + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + } + + mei_irq_compl_handler(dev, &cmpl_list); + +end: + dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets); + + mutex_unlock(&dev->device_lock); + + mei_enable_interrupts(dev); + return IRQ_HANDLED; +} + +static const struct mei_hw_ops mei_txe_hw_ops = { + + .host_is_ready = mei_txe_host_is_ready, + + .fw_status = mei_txe_fw_status, + .pg_state = mei_txe_pg_state, + + .hw_is_ready = mei_txe_hw_is_ready, + .hw_reset = mei_txe_hw_reset, + .hw_config = mei_txe_hw_config, + .hw_start = mei_txe_hw_start, + + .pg_in_transition = mei_txe_pg_in_transition, + .pg_is_enabled = mei_txe_pg_is_enabled, + + .intr_clear = mei_txe_intr_clear, + .intr_enable = mei_txe_intr_enable, + .intr_disable = mei_txe_intr_disable, + .synchronize_irq = mei_txe_synchronize_irq, + + .hbuf_free_slots = mei_txe_hbuf_empty_slots, + .hbuf_is_ready = mei_txe_is_input_ready, + .hbuf_depth = mei_txe_hbuf_depth, + + .write = mei_txe_write, + + .rdbuf_full_slots = mei_txe_count_full_read_slots, + .read_hdr = mei_txe_read_hdr, + + .read = mei_txe_read, + +}; + +/** + * mei_txe_dev_init - allocates and initializes txe hardware specific structure + * + * @pdev: pci device + * + * Return: struct mei_device * on success or NULL + */ +struct mei_device *mei_txe_dev_init(struct pci_dev *pdev) +{ + struct mei_device *dev; + struct mei_txe_hw *hw; + + dev = devm_kzalloc(&pdev->dev, sizeof(*dev) + sizeof(*hw), GFP_KERNEL); + if (!dev) + return NULL; + + mei_device_init(dev, &pdev->dev, &mei_txe_hw_ops); + + hw = to_txe_hw(dev); + + init_waitqueue_head(&hw->wait_aliveness_resp); + + return dev; +} + +/** + * mei_txe_setup_satt2 - SATT2 configuration for DMA support. 
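+ *
+ * The window programmed here must satisfy the SATT2 constraints checked
+ * below: a base address of at most 36 bits, 16-byte base alignment, and
+ * a size no larger than SATT_RANGE_MAX (32MB).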
+ * + * @dev: the device structure + * @addr: physical address start of the range + * @range: physical range size + * + * Return: 0 on success an error code otherwise + */ +int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + + u32 lo32 = lower_32_bits(addr); + u32 hi32 = upper_32_bits(addr); + u32 ctrl; + + /* SATT is limited to 36 Bits */ + if (hi32 & ~0xF) + return -EINVAL; + + /* SATT has to be 16Byte aligned */ + if (lo32 & 0xF) + return -EINVAL; + + /* SATT range has to be 4Bytes aligned */ + if (range & 0x4) + return -EINVAL; + + /* SATT is limited to 32 MB range*/ + if (range > SATT_RANGE_MAX) + return -EINVAL; + + ctrl = SATT2_CTRL_VALID_MSK; + ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT; + + mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range); + mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32); + mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl); + dev_dbg(dev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n", + range, lo32, ctrl); + + return 0; +} diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h new file mode 100644 index 000000000..96511b04b --- /dev/null +++ b/drivers/misc/mei/hw-txe.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2013-2016, Intel Corporation. All rights reserved. + * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#ifndef _MEI_HW_TXE_H_ +#define _MEI_HW_TXE_H_ + +#include <linux/irqreturn.h> + +#include "hw.h" +#include "hw-txe-regs.h" + +#define MEI_TXI_RPM_TIMEOUT 500 /* ms */ + +/* Flatten Hierarchy interrupt cause */ +#define TXE_INTR_READINESS_BIT 0 /* HISR_INT_0_STS */ +#define TXE_INTR_READINESS HISR_INT_0_STS +#define TXE_INTR_ALIVENESS_BIT 1 /* HISR_INT_1_STS */ +#define TXE_INTR_ALIVENESS HISR_INT_1_STS +#define TXE_INTR_OUT_DB_BIT 2 /* HISR_INT_2_STS */ +#define TXE_INTR_OUT_DB HISR_INT_2_STS +#define TXE_INTR_IN_READY_BIT 8 /* beyond HISR */ +#define TXE_INTR_IN_READY BIT(8) + +/** + * struct mei_txe_hw - txe hardware specifics + * + * @mem_addr: SeC and BRIDGE bars + * @aliveness: aliveness (power gating) state of the hardware + * @readiness: readiness state of the hardware + * @slots: number of empty slots + * @wait_aliveness_resp: aliveness wait queue + * @intr_cause: translated interrupt cause + */ +struct mei_txe_hw { + void __iomem * const *mem_addr; + u32 aliveness; + u32 readiness; + u32 slots; + + wait_queue_head_t wait_aliveness_resp; + + unsigned long intr_cause; +}; + +#define to_txe_hw(dev) (struct mei_txe_hw *)((dev)->hw) + +static inline struct mei_device *hw_txe_to_mei(struct mei_txe_hw *hw) +{ + return container_of((void *)hw, struct mei_device, hw); +} + +struct mei_device *mei_txe_dev_init(struct pci_dev *pdev); + +irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id); +irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id); + +int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req); + +int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range); + + +#endif /* _MEI_HW_TXE_H_ */ diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h new file mode 100644 index 000000000..df2fb9520 --- /dev/null +++ b/drivers/misc/mei/hw.h @@ -0,0 +1,674 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2003-2020, Intel Corporation. 
All rights reserved + * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#ifndef _MEI_HW_TYPES_H_ +#define _MEI_HW_TYPES_H_ + +#include <linux/uuid.h> + +/* + * Timeouts in Seconds + */ +#define MEI_HW_READY_TIMEOUT 2 /* Timeout on ready message */ +#define MEI_CONNECT_TIMEOUT 3 /* HPS: at least 2 seconds */ + +#define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */ +#define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */ + +#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ +#define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */ +#define MEI_HBM_TIMEOUT 1 /* 1 second */ + +/* + * MEI Version + */ +#define HBM_MINOR_VERSION 2 +#define HBM_MAJOR_VERSION 2 + +/* + * MEI version with PGI support + */ +#define HBM_MINOR_VERSION_PGI 1 +#define HBM_MAJOR_VERSION_PGI 1 + +/* + * MEI version with Dynamic clients support + */ +#define HBM_MINOR_VERSION_DC 0 +#define HBM_MAJOR_VERSION_DC 2 + +/* + * MEI version with immediate reply to enum request support + */ +#define HBM_MINOR_VERSION_IE 0 +#define HBM_MAJOR_VERSION_IE 2 + +/* + * MEI version with disconnect on connection timeout support + */ +#define HBM_MINOR_VERSION_DOT 0 +#define HBM_MAJOR_VERSION_DOT 2 + +/* + * MEI version with notification support + */ +#define HBM_MINOR_VERSION_EV 0 +#define HBM_MAJOR_VERSION_EV 2 + +/* + * MEI version with fixed address client support + */ +#define HBM_MINOR_VERSION_FA 0 +#define HBM_MAJOR_VERSION_FA 2 + +/* + * MEI version with OS ver message support + */ +#define HBM_MINOR_VERSION_OS 0 +#define HBM_MAJOR_VERSION_OS 2 + +/* + * MEI version with dma ring support + */ +#define HBM_MINOR_VERSION_DR 1 +#define HBM_MAJOR_VERSION_DR 2 + +/* + * MEI version with vm tag support + */ +#define HBM_MINOR_VERSION_VT 2 +#define HBM_MAJOR_VERSION_VT 2 + +/* + * MEI version with capabilities message support + */ +#define HBM_MINOR_VERSION_CAP 2 +#define HBM_MAJOR_VERSION_CAP 2 + +/* Host bus message command opcode */ +#define MEI_HBM_CMD_OP_MSK 0x7f +/* Host bus message command RESPONSE */ +#define MEI_HBM_CMD_RES_MSK 0x80 + +/* + * MEI Bus Message Command IDs + */ +#define HOST_START_REQ_CMD 0x01 +#define HOST_START_RES_CMD 0x81 + +#define HOST_STOP_REQ_CMD 0x02 +#define HOST_STOP_RES_CMD 0x82 + +#define ME_STOP_REQ_CMD 0x03 + +#define HOST_ENUM_REQ_CMD 0x04 +#define HOST_ENUM_RES_CMD 0x84 + +#define HOST_CLIENT_PROPERTIES_REQ_CMD 0x05 +#define HOST_CLIENT_PROPERTIES_RES_CMD 0x85 + +#define CLIENT_CONNECT_REQ_CMD 0x06 +#define CLIENT_CONNECT_RES_CMD 0x86 + +#define CLIENT_DISCONNECT_REQ_CMD 0x07 +#define CLIENT_DISCONNECT_RES_CMD 0x87 + +#define MEI_FLOW_CONTROL_CMD 0x08 + +#define MEI_PG_ISOLATION_ENTRY_REQ_CMD 0x0a +#define MEI_PG_ISOLATION_ENTRY_RES_CMD 0x8a +#define MEI_PG_ISOLATION_EXIT_REQ_CMD 0x0b +#define MEI_PG_ISOLATION_EXIT_RES_CMD 0x8b + +#define MEI_HBM_ADD_CLIENT_REQ_CMD 0x0f +#define MEI_HBM_ADD_CLIENT_RES_CMD 0x8f + +#define MEI_HBM_NOTIFY_REQ_CMD 0x10 +#define MEI_HBM_NOTIFY_RES_CMD 0x90 +#define MEI_HBM_NOTIFICATION_CMD 0x11 + +#define MEI_HBM_DMA_SETUP_REQ_CMD 0x12 +#define MEI_HBM_DMA_SETUP_RES_CMD 0x92 + +#define MEI_HBM_CAPABILITIES_REQ_CMD 0x13 +#define MEI_HBM_CAPABILITIES_RES_CMD 0x93 + +/* + * MEI Stop Reason + * used by hbm_host_stop_request.reason + */ +enum mei_stop_reason_types { + DRIVER_STOP_REQUEST = 0x00, + DEVICE_D1_ENTRY = 0x01, + DEVICE_D2_ENTRY = 0x02, + DEVICE_D3_ENTRY = 0x03, + SYSTEM_S1_ENTRY = 0x04, + SYSTEM_S2_ENTRY = 0x05, + SYSTEM_S3_ENTRY = 0x06, + SYSTEM_S4_ENTRY = 0x07, + SYSTEM_S5_ENTRY = 0x08 +}; 
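+
+/*
+ * Note on the command IDs above: each response opcode listed is its
+ * request opcode with MEI_HBM_CMD_RES_MSK (0x80) set, e.g.
+ * HOST_START_RES_CMD == (HOST_START_REQ_CMD | MEI_HBM_CMD_RES_MSK).
+ * An illustrative helper (not part of this header) mapping a request
+ * to its response would be:
+ *
+ *	static inline u8 mei_hbm_rsp_cmd(u8 req_cmd)
+ *	{
+ *		return req_cmd | MEI_HBM_CMD_RES_MSK;
+ *	}
+ */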
+ + +/** + * enum mei_hbm_status - mei host bus messages return values + * + * @MEI_HBMS_SUCCESS : status success + * @MEI_HBMS_CLIENT_NOT_FOUND : client not found + * @MEI_HBMS_ALREADY_EXISTS : connection already established + * @MEI_HBMS_REJECTED : connection is rejected + * @MEI_HBMS_INVALID_PARAMETER : invalid parameter + * @MEI_HBMS_NOT_ALLOWED : operation not allowed + * @MEI_HBMS_ALREADY_STARTED : system is already started + * @MEI_HBMS_NOT_STARTED : system not started + * + * @MEI_HBMS_MAX : sentinel + */ +enum mei_hbm_status { + MEI_HBMS_SUCCESS = 0, + MEI_HBMS_CLIENT_NOT_FOUND = 1, + MEI_HBMS_ALREADY_EXISTS = 2, + MEI_HBMS_REJECTED = 3, + MEI_HBMS_INVALID_PARAMETER = 4, + MEI_HBMS_NOT_ALLOWED = 5, + MEI_HBMS_ALREADY_STARTED = 6, + MEI_HBMS_NOT_STARTED = 7, + + MEI_HBMS_MAX +}; + + +/* + * Client Connect Status + * used by hbm_client_connect_response.status + */ +enum mei_cl_connect_status { + MEI_CL_CONN_SUCCESS = MEI_HBMS_SUCCESS, + MEI_CL_CONN_NOT_FOUND = MEI_HBMS_CLIENT_NOT_FOUND, + MEI_CL_CONN_ALREADY_STARTED = MEI_HBMS_ALREADY_EXISTS, + MEI_CL_CONN_OUT_OF_RESOURCES = MEI_HBMS_REJECTED, + MEI_CL_CONN_MESSAGE_SMALL = MEI_HBMS_INVALID_PARAMETER, + MEI_CL_CONN_NOT_ALLOWED = MEI_HBMS_NOT_ALLOWED, +}; + +/* + * Client Disconnect Status + */ +enum mei_cl_disconnect_status { + MEI_CL_DISCONN_SUCCESS = MEI_HBMS_SUCCESS +}; + +/** + * enum mei_ext_hdr_type - extended header type used in + * extended header TLV + * + * @MEI_EXT_HDR_NONE: sentinel + * @MEI_EXT_HDR_VTAG: vtag header + */ +enum mei_ext_hdr_type { + MEI_EXT_HDR_NONE = 0, + MEI_EXT_HDR_VTAG = 1, +}; + +/** + * struct mei_ext_hdr - extend header descriptor (TLV) + * @type: enum mei_ext_hdr_type + * @length: length excluding descriptor + * @ext_payload: payload of the specific extended header + * @hdr: place holder for actual header + */ +struct mei_ext_hdr { + u8 type; + u8 length; + u8 ext_payload[2]; + u8 hdr[]; +}; + +/** + * struct mei_ext_meta_hdr - extend header meta data + * @count: number of headers + * @size: total size of the extended header list excluding meta header + * @reserved: reserved + * @hdrs: extended headers TLV list + */ +struct mei_ext_meta_hdr { + u8 count; + u8 size; + u8 reserved[2]; + struct mei_ext_hdr hdrs[]; +}; + +/* + * Extended header iterator functions + */ +/** + * mei_ext_hdr - extended header iterator begin + * + * @meta: meta header of the extended header list + * + * Return: + * The first extended header + */ +static inline struct mei_ext_hdr *mei_ext_begin(struct mei_ext_meta_hdr *meta) +{ + return meta->hdrs; +} + +/** + * mei_ext_last - check if the ext is the last one in the TLV list + * + * @meta: meta header of the extended header list + * @ext: a meta header on the list + * + * Return: true if ext is the last header on the list + */ +static inline bool mei_ext_last(struct mei_ext_meta_hdr *meta, + struct mei_ext_hdr *ext) +{ + return (u8 *)ext >= (u8 *)meta + sizeof(*meta) + (meta->size * 4); +} + +/** + *mei_ext_next - following extended header on the TLV list + * + * @ext: current extend header + * + * Context: The function does not check for the overflows, + * one should call mei_ext_last before. 
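+ *
+ * A typical walk over the TLV list (sketch, mirroring the usage in
+ * mei_cl_irq_read_msg()):
+ *
+ *	struct mei_ext_hdr *ext = mei_ext_begin(meta);
+ *	do {
+ *		switch (ext->type) { ... }
+ *		ext = mei_ext_next(ext);
+ *	} while (!mei_ext_last(meta, ext));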
+ * + * Return: The following extend header after @ext + */ +static inline struct mei_ext_hdr *mei_ext_next(struct mei_ext_hdr *ext) +{ + return (struct mei_ext_hdr *)(ext->hdr + (ext->length * 4)); +} + +/** + * struct mei_msg_hdr - MEI BUS Interface Section + * + * @me_addr: device address + * @host_addr: host address + * @length: message length + * @reserved: reserved + * @extended: message has extended header + * @dma_ring: message is on dma ring + * @internal: message is internal + * @msg_complete: last packet of the message + * @extension: extension of the header + */ +struct mei_msg_hdr { + u32 me_addr:8; + u32 host_addr:8; + u32 length:9; + u32 reserved:3; + u32 extended:1; + u32 dma_ring:1; + u32 internal:1; + u32 msg_complete:1; + u32 extension[]; +} __packed; + +/* The length is up to 9 bits */ +#define MEI_MSG_MAX_LEN_MASK GENMASK(9, 0) + +struct mei_bus_message { + u8 hbm_cmd; + u8 data[]; +} __packed; + +/** + * struct hbm_cl_cmd - client specific host bus command + * CONNECT, DISCONNECT, and FlOW CONTROL + * + * @hbm_cmd: bus message command header + * @me_addr: address of the client in ME + * @host_addr: address of the client in the driver + * @data: generic data + */ +struct mei_hbm_cl_cmd { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 data; +}; + +struct hbm_version { + u8 minor_version; + u8 major_version; +} __packed; + +struct hbm_host_version_request { + u8 hbm_cmd; + u8 reserved; + struct hbm_version host_version; +} __packed; + +struct hbm_host_version_response { + u8 hbm_cmd; + u8 host_version_supported; + struct hbm_version me_max_version; +} __packed; + +struct hbm_host_stop_request { + u8 hbm_cmd; + u8 reason; + u8 reserved[2]; +} __packed; + +struct hbm_host_stop_response { + u8 hbm_cmd; + u8 reserved[3]; +} __packed; + +struct hbm_me_stop_request { + u8 hbm_cmd; + u8 reason; + u8 reserved[2]; +} __packed; + +/** + * enum hbm_host_enum_flags - enumeration request flags (HBM version >= 2.0) + * + * @MEI_HBM_ENUM_F_ALLOW_ADD: allow dynamic clients add + * @MEI_HBM_ENUM_F_IMMEDIATE_ENUM: allow FW to send answer immediately + */ +enum hbm_host_enum_flags { + MEI_HBM_ENUM_F_ALLOW_ADD = BIT(0), + MEI_HBM_ENUM_F_IMMEDIATE_ENUM = BIT(1), +}; + +/** + * struct hbm_host_enum_request - enumeration request from host to fw + * + * @hbm_cmd : bus message command header + * @flags : request flags + * @reserved: reserved + */ +struct hbm_host_enum_request { + u8 hbm_cmd; + u8 flags; + u8 reserved[2]; +} __packed; + +struct hbm_host_enum_response { + u8 hbm_cmd; + u8 reserved[3]; + u8 valid_addresses[32]; +} __packed; + +/** + * struct mei_client_properties - mei client properties + * + * @protocol_name: guid of the client + * @protocol_version: client protocol version + * @max_number_of_connections: number of possible connections. + * @fixed_address: fixed me address (0 if the client is dynamic) + * @single_recv_buf: 1 if all connections share a single receive buffer. 
+ * @vt_supported: the client support vtag + * @reserved: reserved + * @max_msg_length: MTU of the client + */ +struct mei_client_properties { + uuid_le protocol_name; + u8 protocol_version; + u8 max_number_of_connections; + u8 fixed_address; + u8 single_recv_buf:1; + u8 vt_supported:1; + u8 reserved:6; + u32 max_msg_length; +} __packed; + +struct hbm_props_request { + u8 hbm_cmd; + u8 me_addr; + u8 reserved[2]; +} __packed; + +struct hbm_props_response { + u8 hbm_cmd; + u8 me_addr; + u8 status; + u8 reserved; + struct mei_client_properties client_properties; +} __packed; + +/** + * struct hbm_add_client_request - request to add a client + * might be sent by fw after enumeration has already completed + * + * @hbm_cmd: bus message command header + * @me_addr: address of the client in ME + * @reserved: reserved + * @client_properties: client properties + */ +struct hbm_add_client_request { + u8 hbm_cmd; + u8 me_addr; + u8 reserved[2]; + struct mei_client_properties client_properties; +} __packed; + +/** + * struct hbm_add_client_response - response to add a client + * sent by the host to report client addition status to fw + * + * @hbm_cmd: bus message command header + * @me_addr: address of the client in ME + * @status: if HBMS_SUCCESS then the client can now accept connections. + * @reserved: reserved + */ +struct hbm_add_client_response { + u8 hbm_cmd; + u8 me_addr; + u8 status; + u8 reserved; +} __packed; + +/** + * struct hbm_power_gate - power gate request/response + * + * @hbm_cmd: bus message command header + * @reserved: reserved + */ +struct hbm_power_gate { + u8 hbm_cmd; + u8 reserved[3]; +} __packed; + +/** + * struct hbm_client_connect_request - connect/disconnect request + * + * @hbm_cmd: bus message command header + * @me_addr: address of the client in ME + * @host_addr: address of the client in the driver + * @reserved: reserved + */ +struct hbm_client_connect_request { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 reserved; +} __packed; + +/** + * struct hbm_client_connect_response - connect/disconnect response + * + * @hbm_cmd: bus message command header + * @me_addr: address of the client in ME + * @host_addr: address of the client in the driver + * @status: status of the request + */ +struct hbm_client_connect_response { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 status; +} __packed; + + +#define MEI_FC_MESSAGE_RESERVED_LENGTH 5 + +struct hbm_flow_control { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH]; +} __packed; + +#define MEI_HBM_NOTIFICATION_START 1 +#define MEI_HBM_NOTIFICATION_STOP 0 +/** + * struct hbm_notification_request - start/stop notification request + * + * @hbm_cmd: bus message command header + * @me_addr: address of the client in ME + * @host_addr: address of the client in the driver + * @start: start = 1 or stop = 0 asynchronous notifications + */ +struct hbm_notification_request { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 start; +} __packed; + +/** + * struct hbm_notification_response - start/stop notification response + * + * @hbm_cmd: bus message command header + * @me_addr: address of the client in ME + * @host_addr: - address of the client in the driver + * @status: (mei_hbm_status) response status for the request + * - MEI_HBMS_SUCCESS: successful stop/start + * - MEI_HBMS_CLIENT_NOT_FOUND: if the connection could not be found. + * - MEI_HBMS_ALREADY_STARTED: for start requests for a previously + * started notification. 
+ * - MEI_HBMS_NOT_STARTED: for stop request for a connected client for whom + * asynchronous notifications are currently disabled. + * + * @start: start = 1 or stop = 0 asynchronous notifications + * @reserved: reserved + */ +struct hbm_notification_response { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 status; + u8 start; + u8 reserved[3]; +} __packed; + +/** + * struct hbm_notification - notification event + * + * @hbm_cmd: bus message command header + * @me_addr: address of the client in ME + * @host_addr: address of the client in the driver + * @reserved: reserved for alignment + */ +struct hbm_notification { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 reserved; +} __packed; + +/** + * struct hbm_dma_mem_dscr - dma ring + * + * @addr_hi: the high 32bits of 64 bit address + * @addr_lo: the low 32bits of 64 bit address + * @size : size in bytes (must be power of 2) + */ +struct hbm_dma_mem_dscr { + u32 addr_hi; + u32 addr_lo; + u32 size; +} __packed; + +enum { + DMA_DSCR_HOST = 0, + DMA_DSCR_DEVICE = 1, + DMA_DSCR_CTRL = 2, + DMA_DSCR_NUM, +}; + +/** + * struct hbm_dma_setup_request - dma setup request + * + * @hbm_cmd: bus message command header + * @reserved: reserved for alignment + * @dma_dscr: dma descriptor for HOST, DEVICE, and CTRL + */ +struct hbm_dma_setup_request { + u8 hbm_cmd; + u8 reserved[3]; + struct hbm_dma_mem_dscr dma_dscr[DMA_DSCR_NUM]; +} __packed; + +/** + * struct hbm_dma_setup_response - dma setup response + * + * @hbm_cmd: bus message command header + * @status: 0 on success; otherwise DMA setup failed. + * @reserved: reserved for alignment + */ +struct hbm_dma_setup_response { + u8 hbm_cmd; + u8 status; + u8 reserved[2]; +} __packed; + +/** + * struct mei_dma_ring_ctrl - dma ring control block + * + * @hbuf_wr_idx: host circular buffer write index in slots + * @reserved1: reserved for alignment + * @hbuf_rd_idx: host circular buffer read index in slots + * @reserved2: reserved for alignment + * @dbuf_wr_idx: device circular buffer write index in slots + * @reserved3: reserved for alignment + * @dbuf_rd_idx: device circular buffer read index in slots + * @reserved4: reserved for alignment + */ +struct hbm_dma_ring_ctrl { + u32 hbuf_wr_idx; + u32 reserved1; + u32 hbuf_rd_idx; + u32 reserved2; + u32 dbuf_wr_idx; + u32 reserved3; + u32 dbuf_rd_idx; + u32 reserved4; +} __packed; + +/* virtual tag supported */ +#define HBM_CAP_VT BIT(0) + +/** + * struct hbm_capability_request - capability request from host to fw + * + * @hbm_cmd : bus message command header + * @capability_requested: bitmask of capabilities requested by host + */ +struct hbm_capability_request { + u8 hbm_cmd; + u8 capability_requested[3]; +} __packed; + +/** + * struct hbm_capability_response - capability response from fw to host + * + * @hbm_cmd : bus message command header + * @capability_granted: bitmask of capabilities granted by FW + */ +struct hbm_capability_response { + u8 hbm_cmd; + u8 capability_granted[3]; +} __packed; + +#endif diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c new file mode 100644 index 000000000..bcee77768 --- /dev/null +++ b/drivers/misc/mei/init.c @@ -0,0 +1,398 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2019, Intel Corporation. All rights reserved. 
+ * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#include <linux/export.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/delay.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +const char *mei_dev_state_str(int state) +{ +#define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state + switch (state) { + MEI_DEV_STATE(INITIALIZING); + MEI_DEV_STATE(INIT_CLIENTS); + MEI_DEV_STATE(ENABLED); + MEI_DEV_STATE(RESETTING); + MEI_DEV_STATE(DISABLED); + MEI_DEV_STATE(POWER_DOWN); + MEI_DEV_STATE(POWER_UP); + default: + return "unknown"; + } +#undef MEI_DEV_STATE +} + +const char *mei_pg_state_str(enum mei_pg_state state) +{ +#define MEI_PG_STATE(state) case MEI_PG_##state: return #state + switch (state) { + MEI_PG_STATE(OFF); + MEI_PG_STATE(ON); + default: + return "unknown"; + } +#undef MEI_PG_STATE +} + +/** + * mei_fw_status2str - convert fw status registers to printable string + * + * @fw_status: firmware status + * @buf: string buffer at minimal size MEI_FW_STATUS_STR_SZ + * @len: buffer len must be >= MEI_FW_STATUS_STR_SZ + * + * Return: number of bytes written or -EINVAL if buffer is to small + */ +ssize_t mei_fw_status2str(struct mei_fw_status *fw_status, + char *buf, size_t len) +{ + ssize_t cnt = 0; + int i; + + buf[0] = '\0'; + + if (len < MEI_FW_STATUS_STR_SZ) + return -EINVAL; + + for (i = 0; i < fw_status->count; i++) + cnt += scnprintf(buf + cnt, len - cnt, "%08X ", + fw_status->status[i]); + + /* drop last space */ + buf[cnt] = '\0'; + return cnt; +} +EXPORT_SYMBOL_GPL(mei_fw_status2str); + +/** + * mei_cancel_work - Cancel mei background jobs + * + * @dev: the device structure + */ +void mei_cancel_work(struct mei_device *dev) +{ + cancel_work_sync(&dev->reset_work); + cancel_work_sync(&dev->bus_rescan_work); + + cancel_delayed_work_sync(&dev->timer_work); +} +EXPORT_SYMBOL_GPL(mei_cancel_work); + +/** + * mei_reset - resets host and fw. 
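+ *
+ * The flow below: warn on an unexpected device state, clear interrupts,
+ * idle HBM, perform the hardware reset, tear down client and HBM
+ * software state, and (unless powering down) restart the hardware and
+ * issue the HBM start request.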
+ * + * @dev: the device structure + * + * Return: 0 on success or < 0 if the reset hasn't succeeded + */ +int mei_reset(struct mei_device *dev) +{ + enum mei_dev_state state = dev->dev_state; + bool interrupts_enabled; + int ret; + + if (state != MEI_DEV_INITIALIZING && + state != MEI_DEV_DISABLED && + state != MEI_DEV_POWER_DOWN && + state != MEI_DEV_POWER_UP) { + char fw_sts_str[MEI_FW_STATUS_STR_SZ]; + + mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ); + dev_warn(dev->dev, "unexpected reset: dev_state = %s fw status = %s\n", + mei_dev_state_str(state), fw_sts_str); + } + + mei_clear_interrupts(dev); + + /* we're already in reset, cancel the init timer + * if the reset was called due the hbm protocol error + * we need to call it before hw start + * so the hbm watchdog won't kick in + */ + mei_hbm_idle(dev); + + /* enter reset flow */ + interrupts_enabled = state != MEI_DEV_POWER_DOWN; + mei_set_devstate(dev, MEI_DEV_RESETTING); + + dev->reset_count++; + if (dev->reset_count > MEI_MAX_CONSEC_RESET) { + dev_err(dev->dev, "reset: reached maximal consecutive resets: disabling the device\n"); + mei_set_devstate(dev, MEI_DEV_DISABLED); + return -ENODEV; + } + + ret = mei_hw_reset(dev, interrupts_enabled); + /* fall through and remove the sw state even if hw reset has failed */ + + /* no need to clean up software state in case of power up */ + if (state != MEI_DEV_INITIALIZING && state != MEI_DEV_POWER_UP) + mei_cl_all_disconnect(dev); + + mei_hbm_reset(dev); + + memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr)); + + if (ret) { + dev_err(dev->dev, "hw_reset failed ret = %d\n", ret); + return ret; + } + + if (state == MEI_DEV_POWER_DOWN) { + dev_dbg(dev->dev, "powering down: end of reset\n"); + mei_set_devstate(dev, MEI_DEV_DISABLED); + return 0; + } + + ret = mei_hw_start(dev); + if (ret) { + dev_err(dev->dev, "hw_start failed ret = %d\n", ret); + return ret; + } + + dev_dbg(dev->dev, "link is established start sending messages.\n"); + + mei_set_devstate(dev, MEI_DEV_INIT_CLIENTS); + ret = mei_hbm_start_req(dev); + if (ret) { + dev_err(dev->dev, "hbm_start failed ret = %d\n", ret); + mei_set_devstate(dev, MEI_DEV_RESETTING); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mei_reset); + +/** + * mei_start - initializes host and fw to start work. + * + * @dev: the device structure + * + * Return: 0 on success, <0 on failure. 
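+ *
+ * Expected to be called once from the bus glue probe path, after the
+ * device structure has been initialized and the interrupt handlers
+ * have been requested; a sketch using the TXE helpers declared above:
+ *
+ *	dev = mei_txe_dev_init(pdev);
+ *	... map BARs, request the mei_txe_irq_* handlers ...
+ *	ret = mei_start(dev);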
+ */ +int mei_start(struct mei_device *dev) +{ + int ret; + + mutex_lock(&dev->device_lock); + + /* acknowledge interrupt and stop interrupts */ + mei_clear_interrupts(dev); + + ret = mei_hw_config(dev); + if (ret) + goto err; + + dev_dbg(dev->dev, "reset in start the mei device.\n"); + + dev->reset_count = 0; + do { + mei_set_devstate(dev, MEI_DEV_INITIALIZING); + ret = mei_reset(dev); + + if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { + dev_err(dev->dev, "reset failed ret = %d", ret); + goto err; + } + } while (ret); + + if (mei_hbm_start_wait(dev)) { + dev_err(dev->dev, "HBM haven't started"); + goto err; + } + + if (!mei_host_is_ready(dev)) { + dev_err(dev->dev, "host is not ready.\n"); + goto err; + } + + if (!mei_hw_is_ready(dev)) { + dev_err(dev->dev, "ME is not ready.\n"); + goto err; + } + + if (!mei_hbm_version_is_supported(dev)) { + dev_dbg(dev->dev, "MEI start failed.\n"); + goto err; + } + + dev_dbg(dev->dev, "link layer has been established.\n"); + + mutex_unlock(&dev->device_lock); + return 0; +err: + dev_err(dev->dev, "link layer initialization failed.\n"); + mei_set_devstate(dev, MEI_DEV_DISABLED); + mutex_unlock(&dev->device_lock); + return -ENODEV; +} +EXPORT_SYMBOL_GPL(mei_start); + +/** + * mei_restart - restart device after suspend + * + * @dev: the device structure + * + * Return: 0 on success or -ENODEV if the restart hasn't succeeded + */ +int mei_restart(struct mei_device *dev) +{ + int err; + + mutex_lock(&dev->device_lock); + + mei_set_devstate(dev, MEI_DEV_POWER_UP); + dev->reset_count = 0; + + err = mei_reset(dev); + + mutex_unlock(&dev->device_lock); + + if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { + dev_err(dev->dev, "device disabled = %d\n", err); + return -ENODEV; + } + + /* try to start again */ + if (err) + schedule_work(&dev->reset_work); + + + return 0; +} +EXPORT_SYMBOL_GPL(mei_restart); + +static void mei_reset_work(struct work_struct *work) +{ + struct mei_device *dev = + container_of(work, struct mei_device, reset_work); + int ret; + + mei_clear_interrupts(dev); + mei_synchronize_irq(dev); + + mutex_lock(&dev->device_lock); + + ret = mei_reset(dev); + + mutex_unlock(&dev->device_lock); + + if (dev->dev_state == MEI_DEV_DISABLED) { + dev_err(dev->dev, "device disabled = %d\n", ret); + return; + } + + /* retry reset in case of failure */ + if (ret) + schedule_work(&dev->reset_work); +} + +void mei_stop(struct mei_device *dev) +{ + dev_dbg(dev->dev, "stopping the device.\n"); + + mutex_lock(&dev->device_lock); + mei_set_devstate(dev, MEI_DEV_POWER_DOWN); + mutex_unlock(&dev->device_lock); + mei_cl_bus_remove_devices(dev); + + mei_cancel_work(dev); + + mei_clear_interrupts(dev); + mei_synchronize_irq(dev); + + mutex_lock(&dev->device_lock); + + mei_reset(dev); + /* move device to disabled state unconditionally */ + mei_set_devstate(dev, MEI_DEV_DISABLED); + + mutex_unlock(&dev->device_lock); +} +EXPORT_SYMBOL_GPL(mei_stop); + +/** + * mei_write_is_idle - check if the write queues are idle + * + * @dev: the device structure + * + * Return: true of there is no pending write + */ +bool mei_write_is_idle(struct mei_device *dev) +{ + bool idle = (dev->dev_state == MEI_DEV_ENABLED && + list_empty(&dev->ctrl_wr_list) && + list_empty(&dev->write_list) && + list_empty(&dev->write_waiting_list)); + + dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n", + idle, + mei_dev_state_str(dev->dev_state), + list_empty(&dev->ctrl_wr_list), + list_empty(&dev->write_list), + list_empty(&dev->write_waiting_list)); 
+ + return idle; +} +EXPORT_SYMBOL_GPL(mei_write_is_idle); + +/** + * mei_device_init -- initialize mei_device structure + * + * @dev: the mei device + * @device: the device structure + * @hw_ops: hw operations + */ +void mei_device_init(struct mei_device *dev, + struct device *device, + const struct mei_hw_ops *hw_ops) +{ + /* setup our list array */ + INIT_LIST_HEAD(&dev->file_list); + INIT_LIST_HEAD(&dev->device_list); + INIT_LIST_HEAD(&dev->me_clients); + mutex_init(&dev->device_lock); + init_rwsem(&dev->me_clients_rwsem); + mutex_init(&dev->cl_bus_lock); + init_waitqueue_head(&dev->wait_hw_ready); + init_waitqueue_head(&dev->wait_pg); + init_waitqueue_head(&dev->wait_hbm_start); + dev->dev_state = MEI_DEV_INITIALIZING; + dev->reset_count = 0; + + INIT_LIST_HEAD(&dev->write_list); + INIT_LIST_HEAD(&dev->write_waiting_list); + INIT_LIST_HEAD(&dev->ctrl_wr_list); + INIT_LIST_HEAD(&dev->ctrl_rd_list); + dev->tx_queue_limit = MEI_TX_QUEUE_LIMIT_DEFAULT; + + INIT_DELAYED_WORK(&dev->timer_work, mei_timer); + INIT_WORK(&dev->reset_work, mei_reset_work); + INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work); + + bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); + dev->open_handle_count = 0; + + /* + * Reserving the first client ID + * 0: Reserved for MEI Bus Message communications + */ + bitmap_set(dev->host_clients_map, 0, 1); + + dev->pg_event = MEI_PG_EVENT_IDLE; + dev->ops = hw_ops; + dev->dev = device; +} +EXPORT_SYMBOL_GPL(mei_device_init); + diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c new file mode 100644 index 000000000..ca3067fa6 --- /dev/null +++ b/drivers/misc/mei/interrupt.c @@ -0,0 +1,648 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2003-2018, Intel Corporation. All rights reserved. + * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#include <linux/export.h> +#include <linux/kthread.h> +#include <linux/interrupt.h> +#include <linux/fs.h> +#include <linux/jiffies.h> +#include <linux/slab.h> +#include <linux/pm_runtime.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + + +/** + * mei_irq_compl_handler - dispatch complete handlers + * for the completed callbacks + * + * @dev: mei device + * @cmpl_list: list of completed cbs + */ +void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list) +{ + struct mei_cl_cb *cb, *next; + struct mei_cl *cl; + + list_for_each_entry_safe(cb, next, cmpl_list, list) { + cl = cb->cl; + list_del_init(&cb->list); + + dev_dbg(dev->dev, "completing call back.\n"); + mei_cl_complete(cl, cb); + } +} +EXPORT_SYMBOL_GPL(mei_irq_compl_handler); + +/** + * mei_cl_hbm_equal - check if hbm is addressed to the client + * + * @cl: host client + * @mei_hdr: header of mei client message + * + * Return: true if matches, false otherwise + */ +static inline int mei_cl_hbm_equal(struct mei_cl *cl, + struct mei_msg_hdr *mei_hdr) +{ + return mei_cl_host_addr(cl) == mei_hdr->host_addr && + mei_cl_me_id(cl) == mei_hdr->me_addr; +} + +/** + * mei_irq_discard_msg - discard received message + * + * @dev: mei device + * @hdr: message header + * @discard_len: the length of the message to discard (excluding header) + */ +static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr, + size_t discard_len) +{ + if (hdr->dma_ring) { + mei_dma_ring_read(dev, NULL, + hdr->extension[dev->rd_msg_hdr_count - 2]); + discard_len = 0; + } + /* + * no need to check for size as it is guarantied + * that length fits into rd_msg_buf + */ + 
mei_read_slots(dev, dev->rd_msg_buf, discard_len); + dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n", + MEI_HDR_PRM(hdr)); +} + +/** + * mei_cl_irq_read_msg - process client message + * + * @cl: reading client + * @mei_hdr: header of mei client message + * @meta: extend meta header + * @cmpl_list: completion list + * + * Return: always 0 + */ +static int mei_cl_irq_read_msg(struct mei_cl *cl, + struct mei_msg_hdr *mei_hdr, + struct mei_ext_meta_hdr *meta, + struct list_head *cmpl_list) +{ + struct mei_device *dev = cl->dev; + struct mei_cl_cb *cb; + + size_t buf_sz; + u32 length; + int ext_len; + + length = mei_hdr->length; + ext_len = 0; + if (mei_hdr->extended) { + ext_len = sizeof(*meta) + mei_slots2data(meta->size); + length -= ext_len; + } + + cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); + if (!cb) { + if (!mei_cl_is_fixed_address(cl)) { + cl_err(dev, cl, "pending read cb not found\n"); + goto discard; + } + cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp); + if (!cb) + goto discard; + list_add_tail(&cb->list, &cl->rd_pending); + } + + if (mei_hdr->extended) { + struct mei_ext_hdr *ext; + struct mei_ext_hdr *vtag = NULL; + + ext = mei_ext_begin(meta); + do { + switch (ext->type) { + case MEI_EXT_HDR_VTAG: + vtag = ext; + break; + case MEI_EXT_HDR_NONE: + fallthrough; + default: + cb->status = -EPROTO; + break; + } + + ext = mei_ext_next(ext); + } while (!mei_ext_last(meta, ext)); + + if (!vtag) { + cl_dbg(dev, cl, "vtag not found in extended header.\n"); + cb->status = -EPROTO; + goto discard; + } + + cl_dbg(dev, cl, "vtag: %d\n", vtag->ext_payload[0]); + if (cb->vtag && cb->vtag != vtag->ext_payload[0]) { + cl_err(dev, cl, "mismatched tag: %d != %d\n", + cb->vtag, vtag->ext_payload[0]); + cb->status = -EPROTO; + goto discard; + } + cb->vtag = vtag->ext_payload[0]; + } + + if (!mei_cl_is_connected(cl)) { + cl_dbg(dev, cl, "not connected\n"); + cb->status = -ENODEV; + goto discard; + } + + if (mei_hdr->dma_ring) + length = mei_hdr->extension[mei_data2slots(ext_len)]; + + buf_sz = length + cb->buf_idx; + /* catch for integer overflow */ + if (buf_sz < cb->buf_idx) { + cl_err(dev, cl, "message is too big len %d idx %zu\n", + length, cb->buf_idx); + cb->status = -EMSGSIZE; + goto discard; + } + + if (cb->buf.size < buf_sz) { + cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n", + cb->buf.size, length, cb->buf_idx); + cb->status = -EMSGSIZE; + goto discard; + } + + if (mei_hdr->dma_ring) { + mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length); + /* for DMA read 0 length to generate interrupt to the device */ + mei_read_slots(dev, cb->buf.data + cb->buf_idx, 0); + } else { + mei_read_slots(dev, cb->buf.data + cb->buf_idx, length); + } + + cb->buf_idx += length; + + if (mei_hdr->msg_complete) { + cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx); + list_move_tail(&cb->list, cmpl_list); + } else { + pm_runtime_mark_last_busy(dev->dev); + pm_request_autosuspend(dev->dev); + } + + return 0; + +discard: + if (cb) + list_move_tail(&cb->list, cmpl_list); + mei_irq_discard_msg(dev, mei_hdr, length); + return 0; +} + +/** + * mei_cl_irq_disconnect_rsp - send disconnection response message + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * Return: 0, OK; otherwise, error. 
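+ *
+ * Editorial note: the body converts the response size into slot units
+ * (MEI_SLOT_SIZE, 4 bytes each) via mei_hbm2slots() and compares that
+ * against the free host-buffer slots; -EOVERFLOW flags an inconsistent
+ * host buffer, while -EMSGSIZE only means there is not enough room yet.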
+ */ +static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, + struct list_head *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + int ret; + + msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_response)); + slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; + + if ((u32)slots < msg_slots) + return -EMSGSIZE; + + ret = mei_hbm_cl_disconnect_rsp(dev, cl); + list_move_tail(&cb->list, cmpl_list); + + return ret; +} + +/** + * mei_cl_irq_read - processes client read related operation from the + * interrupt thread context - request for flow control credits + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * Return: 0, OK; otherwise, error. + */ +static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, + struct list_head *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + int ret; + + if (!list_empty(&cl->rd_pending)) + return 0; + + msg_slots = mei_hbm2slots(sizeof(struct hbm_flow_control)); + slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; + + if ((u32)slots < msg_slots) + return -EMSGSIZE; + + ret = mei_hbm_cl_flow_control_req(dev, cl); + if (ret) { + cl->status = ret; + cb->buf_idx = 0; + list_move_tail(&cb->list, cmpl_list); + return ret; + } + + pm_runtime_mark_last_busy(dev->dev); + pm_request_autosuspend(dev->dev); + + list_move_tail(&cb->list, &cl->rd_pending); + + return 0; +} + +static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr) +{ + return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0; +} + +static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr) +{ + return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0; +} + +static inline int hdr_is_valid(u32 msg_hdr) +{ + struct mei_msg_hdr *mei_hdr; + u32 expected_len = 0; + + mei_hdr = (struct mei_msg_hdr *)&msg_hdr; + if (!msg_hdr || mei_hdr->reserved) + return -EBADMSG; + + if (mei_hdr->dma_ring) + expected_len += MEI_SLOT_SIZE; + if (mei_hdr->extended) + expected_len += MEI_SLOT_SIZE; + if (mei_hdr->length < expected_len) + return -EBADMSG; + + return 0; +} + +/** + * mei_irq_read_handler - bottom half read routine after ISR to + * handle the read processing. + * + * @dev: the device structure + * @cmpl_list: An instance of our list structure + * @slots: slots to read. + * + * Return: 0 on success, <0 on failure. 
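+ *
+ * Editorial note: header dwords are accumulated into dev->rd_msg_hdr[]
+ * (base header, optional extended meta header plus its payload,
+ * optional DMA length dword), with rd_msg_hdr_count tracking progress,
+ * so a header only partially consumed in one invocation can be
+ * completed on the next one.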
+ */ +int mei_irq_read_handler(struct mei_device *dev, + struct list_head *cmpl_list, s32 *slots) +{ + struct mei_msg_hdr *mei_hdr; + struct mei_ext_meta_hdr *meta_hdr = NULL; + struct mei_cl *cl; + int ret; + u32 ext_meta_hdr_u32; + u32 hdr_size_left; + u32 hdr_size_ext; + int i; + int ext_hdr_end; + + if (!dev->rd_msg_hdr[0]) { + dev->rd_msg_hdr[0] = mei_read_hdr(dev); + dev->rd_msg_hdr_count = 1; + (*slots)--; + dev_dbg(dev->dev, "slots =%08x.\n", *slots); + + ret = hdr_is_valid(dev->rd_msg_hdr[0]); + if (ret) { + dev_err(dev->dev, "corrupted message header 0x%08X\n", + dev->rd_msg_hdr[0]); + goto end; + } + } + + mei_hdr = (struct mei_msg_hdr *)dev->rd_msg_hdr; + dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); + + if (mei_slots2data(*slots) < mei_hdr->length) { + dev_err(dev->dev, "less data available than length=%08x.\n", + *slots); + /* we can't read the message */ + ret = -ENODATA; + goto end; + } + + ext_hdr_end = 1; + hdr_size_left = mei_hdr->length; + + if (mei_hdr->extended) { + if (!dev->rd_msg_hdr[1]) { + ext_meta_hdr_u32 = mei_read_hdr(dev); + dev->rd_msg_hdr[1] = ext_meta_hdr_u32; + dev->rd_msg_hdr_count++; + (*slots)--; + dev_dbg(dev->dev, "extended header is %08x\n", + ext_meta_hdr_u32); + } + meta_hdr = ((struct mei_ext_meta_hdr *)dev->rd_msg_hdr + 1); + if (check_add_overflow((u32)sizeof(*meta_hdr), + mei_slots2data(meta_hdr->size), + &hdr_size_ext)) { + dev_err(dev->dev, "extended message size too big %d\n", + meta_hdr->size); + return -EBADMSG; + } + if (hdr_size_left < hdr_size_ext) { + dev_err(dev->dev, "corrupted message header len %d\n", + mei_hdr->length); + return -EBADMSG; + } + hdr_size_left -= hdr_size_ext; + + ext_hdr_end = meta_hdr->size + 2; + for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) { + dev->rd_msg_hdr[i] = mei_read_hdr(dev); + dev_dbg(dev->dev, "extended header %d is %08x\n", i, + dev->rd_msg_hdr[i]); + dev->rd_msg_hdr_count++; + (*slots)--; + } + } + + if (mei_hdr->dma_ring) { + if (hdr_size_left != sizeof(dev->rd_msg_hdr[ext_hdr_end])) { + dev_err(dev->dev, "corrupted message header len %d\n", + mei_hdr->length); + return -EBADMSG; + } + + dev->rd_msg_hdr[ext_hdr_end] = mei_read_hdr(dev); + dev->rd_msg_hdr_count++; + (*slots)--; + mei_hdr->length -= sizeof(dev->rd_msg_hdr[ext_hdr_end]); + } + + /* HBM message */ + if (hdr_is_hbm(mei_hdr)) { + ret = mei_hbm_dispatch(dev, mei_hdr); + if (ret) { + dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n", + ret); + goto end; + } + goto reset_slots; + } + + /* find recipient cl */ + list_for_each_entry(cl, &dev->file_list, link) { + if (mei_cl_hbm_equal(cl, mei_hdr)) { + cl_dbg(dev, cl, "got a message\n"); + ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list); + goto reset_slots; + } + } + + /* if no recipient cl was found we assume corrupted header */ + /* A message for not connected fixed address clients + * should be silently discarded + * On power down client may be force cleaned, + * silently discard such messages + */ + if (hdr_is_fixed(mei_hdr) || + dev->dev_state == MEI_DEV_POWER_DOWN) { + mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length); + ret = 0; + goto reset_slots; + } + dev_err(dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]); + ret = -EBADMSG; + goto end; + +reset_slots: + /* reset the number of slots and header */ + memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr)); + dev->rd_msg_hdr_count = 0; + *slots = mei_count_full_read_slots(dev); + if (*slots == -EOVERFLOW) { + /* overflow - reset */ + dev_err(dev->dev, "resetting due to slots 
overflow.\n"); + /* set the event since message has been read */ + ret = -ERANGE; + goto end; + } +end: + return ret; +} +EXPORT_SYMBOL_GPL(mei_irq_read_handler); + + +/** + * mei_irq_write_handler - dispatch write requests + * after irq received + * + * @dev: the device structure + * @cmpl_list: An instance of our list structure + * + * Return: 0 on success, <0 on failure. + */ +int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list) +{ + + struct mei_cl *cl; + struct mei_cl_cb *cb, *next; + s32 slots; + int ret; + + + if (!mei_hbuf_acquire(dev)) + return 0; + + slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; + + if (slots == 0) + return -EMSGSIZE; + + /* complete all waiting for write CB */ + dev_dbg(dev->dev, "complete all waiting for write cb.\n"); + + list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) { + cl = cb->cl; + + cl->status = 0; + cl_dbg(dev, cl, "MEI WRITE COMPLETE\n"); + cl->writing_state = MEI_WRITE_COMPLETE; + list_move_tail(&cb->list, cmpl_list); + } + + /* complete control write list CB */ + dev_dbg(dev->dev, "complete control write list cb.\n"); + list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) { + cl = cb->cl; + switch (cb->fop_type) { + case MEI_FOP_DISCONNECT: + /* send disconnect message */ + ret = mei_cl_irq_disconnect(cl, cb, cmpl_list); + if (ret) + return ret; + + break; + case MEI_FOP_READ: + /* send flow control message */ + ret = mei_cl_irq_read(cl, cb, cmpl_list); + if (ret) + return ret; + + break; + case MEI_FOP_CONNECT: + /* connect message */ + ret = mei_cl_irq_connect(cl, cb, cmpl_list); + if (ret) + return ret; + + break; + case MEI_FOP_DISCONNECT_RSP: + /* send disconnect resp */ + ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list); + if (ret) + return ret; + break; + + case MEI_FOP_NOTIFY_START: + case MEI_FOP_NOTIFY_STOP: + ret = mei_cl_irq_notify(cl, cb, cmpl_list); + if (ret) + return ret; + break; + default: + BUG(); + } + + } + /* complete write list CB */ + dev_dbg(dev->dev, "complete write list cb.\n"); + list_for_each_entry_safe(cb, next, &dev->write_list, list) { + cl = cb->cl; + ret = mei_cl_irq_write(cl, cb, cmpl_list); + if (ret) + return ret; + } + return 0; +} +EXPORT_SYMBOL_GPL(mei_irq_write_handler); + + +/** + * mei_connect_timeout - connect/disconnect timeouts + * + * @cl: host client + */ +static void mei_connect_timeout(struct mei_cl *cl) +{ + struct mei_device *dev = cl->dev; + + if (cl->state == MEI_FILE_CONNECTING) { + if (dev->hbm_f_dot_supported) { + cl->state = MEI_FILE_DISCONNECT_REQUIRED; + wake_up(&cl->wait); + return; + } + } + mei_reset(dev); +} + +#define MEI_STALL_TIMER_FREQ (2 * HZ) +/** + * mei_schedule_stall_timer - re-arm stall_timer work + * + * Schedule stall timer + * + * @dev: the device structure + */ +void mei_schedule_stall_timer(struct mei_device *dev) +{ + schedule_delayed_work(&dev->timer_work, MEI_STALL_TIMER_FREQ); +} + +/** + * mei_timer - timer function. 
+ * + * @work: pointer to the work_struct structure + * + */ +void mei_timer(struct work_struct *work) +{ + struct mei_cl *cl; + struct mei_device *dev = container_of(work, + struct mei_device, timer_work.work); + bool reschedule_timer = false; + + mutex_lock(&dev->device_lock); + + /* Catch interrupt stalls during HBM init handshake */ + if (dev->dev_state == MEI_DEV_INIT_CLIENTS && + dev->hbm_state != MEI_HBM_IDLE) { + + if (dev->init_clients_timer) { + if (--dev->init_clients_timer == 0) { + dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n", + dev->hbm_state); + mei_reset(dev); + goto out; + } + reschedule_timer = true; + } + } + + if (dev->dev_state != MEI_DEV_ENABLED) + goto out; + + /*** connect/disconnect timeouts ***/ + list_for_each_entry(cl, &dev->file_list, link) { + if (cl->timer_count) { + if (--cl->timer_count == 0) { + dev_err(dev->dev, "timer: connect/disconnect timeout.\n"); + mei_connect_timeout(cl); + goto out; + } + reschedule_timer = true; + } + } + +out: + if (dev->dev_state != MEI_DEV_DISABLED && reschedule_timer) + mei_schedule_stall_timer(dev); + + mutex_unlock(&dev->device_lock); +} diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c new file mode 100644 index 000000000..9f6682033 --- /dev/null +++ b/drivers/misc/mei/main.c @@ -0,0 +1,1322 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. + * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/poll.h> +#include <linux/init.h> +#include <linux/ioctl.h> +#include <linux/cdev.h> +#include <linux/sched/signal.h> +#include <linux/uuid.h> +#include <linux/compat.h> +#include <linux/jiffies.h> +#include <linux/interrupt.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "client.h" + +static struct class *mei_class; +static dev_t mei_devt; +#define MEI_MAX_DEVS MINORMASK +static DEFINE_MUTEX(mei_minor_lock); +static DEFINE_IDR(mei_idr); + +/** + * mei_open - the open function + * + * @inode: pointer to inode structure + * @file: pointer to file structure + * + * Return: 0 on success, <0 on error + */ +static int mei_open(struct inode *inode, struct file *file) +{ + struct mei_device *dev; + struct mei_cl *cl; + + int err; + + dev = container_of(inode->i_cdev, struct mei_device, cdev); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + if (dev->dev_state != MEI_DEV_ENABLED) { + dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n", + mei_dev_state_str(dev->dev_state)); + err = -ENODEV; + goto err_unlock; + } + + cl = mei_cl_alloc_linked(dev); + if (IS_ERR(cl)) { + err = PTR_ERR(cl); + goto err_unlock; + } + + cl->fp = file; + file->private_data = cl; + + mutex_unlock(&dev->device_lock); + + return nonseekable_open(inode, file); + +err_unlock: + mutex_unlock(&dev->device_lock); + return err; +} + +/** + * mei_cl_vtag_remove_by_fp - remove vtag that corresponds to fp from list + * + * @cl: host client + * @fp: pointer to file structure + * + */ +static void mei_cl_vtag_remove_by_fp(const struct mei_cl *cl, + const struct file *fp) +{ + struct mei_cl_vtag *vtag_l, *next; + + list_for_each_entry_safe(vtag_l, next, &cl->vtag_map, list) { + if (vtag_l->fp == fp) { + list_del(&vtag_l->list); + kfree(vtag_l); + return; + } + } +} + +/** + * 
mei_release - the release function + * + * @inode: pointer to inode structure + * @file: pointer to file structure + * + * Return: 0 on success, <0 on error + */ +static int mei_release(struct inode *inode, struct file *file) +{ + struct mei_cl *cl = file->private_data; + struct mei_device *dev; + int rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + mutex_lock(&dev->device_lock); + + mei_cl_vtag_remove_by_fp(cl, file); + + if (!list_empty(&cl->vtag_map)) { + cl_dbg(dev, cl, "not the last vtag\n"); + mei_cl_flush_queues(cl, file); + rets = 0; + goto out; + } + + rets = mei_cl_disconnect(cl); + /* + * Check again: This is necessary since disconnect releases the lock + * and another client can connect in the meantime. + */ + if (!list_empty(&cl->vtag_map)) { + cl_dbg(dev, cl, "not the last vtag after disconnect\n"); + mei_cl_flush_queues(cl, file); + goto out; + } + + mei_cl_flush_queues(cl, NULL); + cl_dbg(dev, cl, "removing\n"); + + mei_cl_unlink(cl); + kfree(cl); + +out: + file->private_data = NULL; + + mutex_unlock(&dev->device_lock); + return rets; +} + + +/** + * mei_read - the read function. + * + * @file: pointer to file structure + * @ubuf: pointer to user buffer + * @length: buffer length + * @offset: data offset in buffer + * + * Return: >=0 data length on success , <0 on error + */ +static ssize_t mei_read(struct file *file, char __user *ubuf, + size_t length, loff_t *offset) +{ + struct mei_cl *cl = file->private_data; + struct mei_device *dev; + struct mei_cl_cb *cb = NULL; + bool nonblock = !!(file->f_flags & O_NONBLOCK); + ssize_t rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + + mutex_lock(&dev->device_lock); + if (dev->dev_state != MEI_DEV_ENABLED) { + rets = -ENODEV; + goto out; + } + + if (length == 0) { + rets = 0; + goto out; + } + + if (ubuf == NULL) { + rets = -EMSGSIZE; + goto out; + } + + cb = mei_cl_read_cb(cl, file); + if (cb) + goto copy_buffer; + + if (*offset > 0) + *offset = 0; + + rets = mei_cl_read_start(cl, length, file); + if (rets && rets != -EBUSY) { + cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets); + goto out; + } + + if (nonblock) { + rets = -EAGAIN; + goto out; + } + + mutex_unlock(&dev->device_lock); + if (wait_event_interruptible(cl->rx_wait, + mei_cl_read_cb(cl, file) || + !mei_cl_is_connected(cl))) { + if (signal_pending(current)) + return -EINTR; + return -ERESTARTSYS; + } + mutex_lock(&dev->device_lock); + + if (!mei_cl_is_connected(cl)) { + rets = -ENODEV; + goto out; + } + + cb = mei_cl_read_cb(cl, file); + if (!cb) { + rets = 0; + goto out; + } + +copy_buffer: + /* now copy the data to user space */ + if (cb->status) { + rets = cb->status; + cl_dbg(dev, cl, "read operation failed %zd\n", rets); + goto free; + } + + cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n", + cb->buf.size, cb->buf_idx, *offset); + if (*offset >= cb->buf_idx) { + rets = 0; + goto free; + } + + /* length is being truncated to PAGE_SIZE, + * however buf_idx may point beyond that */ + length = min_t(size_t, length, cb->buf_idx - *offset); + + if (copy_to_user(ubuf, cb->buf.data + *offset, length)) { + dev_dbg(dev->dev, "failed to copy data to userland\n"); + rets = -EFAULT; + goto free; + } + + rets = length; + *offset += length; + /* not all data was read, keep the cb */ + if (*offset < cb->buf_idx) + goto out; + +free: + mei_cl_del_rd_completed(cl, cb); + *offset = 0; + +out: + cl_dbg(dev, cl, "end mei read rets = %zd\n", rets); + mutex_unlock(&dev->device_lock); + return rets; 
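+	/*
+	 * Editorial sketch (userspace side, illustrative only): a client
+	 * typically issues one read() per message after connecting:
+	 *
+	 *	unsigned char buf[512];
+	 *	ssize_t n = read(fd, buf, sizeof(buf));
+	 *	if (n < 0)
+	 *		perror("mei read");
+	 *
+	 * The 512-byte buffer is an arbitrary example; the negotiated
+	 * max_msg_length from the connect ioctl is the real bound.
+	 */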
+} + +/** + * mei_cl_vtag_by_fp - obtain the vtag by file pointer + * + * @cl: host client + * @fp: pointer to file structure + * + * Return: vtag value on success, otherwise 0 + */ +static u8 mei_cl_vtag_by_fp(const struct mei_cl *cl, const struct file *fp) +{ + struct mei_cl_vtag *cl_vtag; + + if (!fp) + return 0; + + list_for_each_entry(cl_vtag, &cl->vtag_map, list) + if (cl_vtag->fp == fp) + return cl_vtag->vtag; + return 0; +} + +/** + * mei_write - the write function. + * + * @file: pointer to file structure + * @ubuf: pointer to user buffer + * @length: buffer length + * @offset: data offset in buffer + * + * Return: >=0 data length on success , <0 on error + */ +static ssize_t mei_write(struct file *file, const char __user *ubuf, + size_t length, loff_t *offset) +{ + struct mei_cl *cl = file->private_data; + struct mei_cl_cb *cb; + struct mei_device *dev; + ssize_t rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + mutex_lock(&dev->device_lock); + + if (dev->dev_state != MEI_DEV_ENABLED) { + rets = -ENODEV; + goto out; + } + + if (!mei_cl_is_connected(cl)) { + cl_err(dev, cl, "is not connected"); + rets = -ENODEV; + goto out; + } + + if (!mei_me_cl_is_active(cl->me_cl)) { + rets = -ENOTTY; + goto out; + } + + if (length > mei_cl_mtu(cl)) { + rets = -EFBIG; + goto out; + } + + if (length == 0) { + rets = 0; + goto out; + } + + while (cl->tx_cb_queued >= dev->tx_queue_limit) { + if (file->f_flags & O_NONBLOCK) { + rets = -EAGAIN; + goto out; + } + mutex_unlock(&dev->device_lock); + rets = wait_event_interruptible(cl->tx_wait, + cl->writing_state == MEI_WRITE_COMPLETE || + (!mei_cl_is_connected(cl))); + mutex_lock(&dev->device_lock); + if (rets) { + if (signal_pending(current)) + rets = -EINTR; + goto out; + } + if (!mei_cl_is_connected(cl)) { + rets = -ENODEV; + goto out; + } + } + + cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file); + if (!cb) { + rets = -ENOMEM; + goto out; + } + cb->vtag = mei_cl_vtag_by_fp(cl, file); + + rets = copy_from_user(cb->buf.data, ubuf, length); + if (rets) { + dev_dbg(dev->dev, "failed to copy data from userland\n"); + rets = -EFAULT; + mei_io_cb_free(cb); + goto out; + } + + rets = mei_cl_write(cl, cb); +out: + mutex_unlock(&dev->device_lock); + return rets; +} + +/** + * mei_ioctl_connect_client - the connect to fw client IOCTL function + * + * @file: private data of the file object + * @in_client_uuid: requested UUID for connection + * @client: IOCTL connect data, output parameters + * + * Locking: called under "dev->device_lock" lock + * + * Return: 0 on success, <0 on failure. + */ +static int mei_ioctl_connect_client(struct file *file, + const uuid_le *in_client_uuid, + struct mei_client *client) +{ + struct mei_device *dev; + struct mei_me_client *me_cl; + struct mei_cl *cl; + int rets; + + cl = file->private_data; + dev = cl->dev; + + if (cl->state != MEI_FILE_INITIALIZING && + cl->state != MEI_FILE_DISCONNECTED) + return -EBUSY; + + /* find ME client we're trying to connect to */ + me_cl = mei_me_cl_by_uuid(dev, in_client_uuid); + if (!me_cl) { + dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", + in_client_uuid); + rets = -ENOTTY; + goto end; + } + + if (me_cl->props.fixed_address) { + bool forbidden = dev->override_fixed_address ? 
+ !dev->allow_fixed_address : !dev->hbm_f_fa_supported; + if (forbidden) { + dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n", + in_client_uuid); + rets = -ENOTTY; + goto end; + } + } + + dev_dbg(dev->dev, "Connect to FW Client ID = %d\n", + me_cl->client_id); + dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n", + me_cl->props.protocol_version); + dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n", + me_cl->props.max_msg_length); + + /* prepare the output buffer */ + client->max_msg_length = me_cl->props.max_msg_length; + client->protocol_version = me_cl->props.protocol_version; + dev_dbg(dev->dev, "Can connect?\n"); + + rets = mei_cl_connect(cl, me_cl, file); + +end: + mei_me_cl_put(me_cl); + return rets; +} + +/** + * mei_vt_support_check - check if client support vtags + * + * Locking: called under "dev->device_lock" lock + * + * @dev: mei_device + * @uuid: client UUID + * + * Return: + * 0 - supported + * -ENOTTY - no such client + * -EOPNOTSUPP - vtags are not supported by client + */ +static int mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid) +{ + struct mei_me_client *me_cl; + int ret; + + if (!dev->hbm_f_vt_supported) + return -EOPNOTSUPP; + + me_cl = mei_me_cl_by_uuid(dev, uuid); + if (!me_cl) { + dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", + uuid); + return -ENOTTY; + } + ret = me_cl->props.vt_supported ? 0 : -EOPNOTSUPP; + mei_me_cl_put(me_cl); + + return ret; +} + +/** + * mei_ioctl_connect_vtag - connect to fw client with vtag IOCTL function + * + * @file: private data of the file object + * @in_client_uuid: requested UUID for connection + * @client: IOCTL connect data, output parameters + * @vtag: vm tag + * + * Locking: called under "dev->device_lock" lock + * + * Return: 0 on success, <0 on failure. 
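+ *
+ * Editorial note: a vtag lets several file descriptors share one host
+ * client connected to the same FW client; a disconnected fd may only
+ * reconnect with its previously mapped vtag, while a fresh fd can be
+ * attached to an already connected host client with the same UUID.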
+ */ +static int mei_ioctl_connect_vtag(struct file *file, + const uuid_le *in_client_uuid, + struct mei_client *client, + u8 vtag) +{ + struct mei_device *dev; + struct mei_cl *cl; + struct mei_cl *pos; + struct mei_cl_vtag *cl_vtag; + + cl = file->private_data; + dev = cl->dev; + + dev_dbg(dev->dev, "FW Client %pUl vtag %d\n", in_client_uuid, vtag); + + switch (cl->state) { + case MEI_FILE_DISCONNECTED: + if (mei_cl_vtag_by_fp(cl, file) != vtag) { + dev_err(dev->dev, "reconnect with different vtag\n"); + return -EINVAL; + } + break; + case MEI_FILE_INITIALIZING: + /* malicious connect from another thread may push vtag */ + if (!IS_ERR(mei_cl_fp_by_vtag(cl, vtag))) { + dev_err(dev->dev, "vtag already filled\n"); + return -EINVAL; + } + + list_for_each_entry(pos, &dev->file_list, link) { + if (pos == cl) + continue; + if (!pos->me_cl) + continue; + + /* only search for same UUID */ + if (uuid_le_cmp(*mei_cl_uuid(pos), *in_client_uuid)) + continue; + + /* if tag already exist try another fp */ + if (!IS_ERR(mei_cl_fp_by_vtag(pos, vtag))) + continue; + + /* replace cl with acquired one */ + dev_dbg(dev->dev, "replacing with existing cl\n"); + mei_cl_unlink(cl); + kfree(cl); + file->private_data = pos; + cl = pos; + break; + } + + cl_vtag = mei_cl_vtag_alloc(file, vtag); + if (IS_ERR(cl_vtag)) + return -ENOMEM; + + list_add_tail(&cl_vtag->list, &cl->vtag_map); + break; + default: + return -EBUSY; + } + + while (cl->state != MEI_FILE_INITIALIZING && + cl->state != MEI_FILE_DISCONNECTED && + cl->state != MEI_FILE_CONNECTED) { + mutex_unlock(&dev->device_lock); + wait_event_timeout(cl->wait, + (cl->state == MEI_FILE_CONNECTED || + cl->state == MEI_FILE_DISCONNECTED || + cl->state == MEI_FILE_DISCONNECT_REQUIRED || + cl->state == MEI_FILE_DISCONNECT_REPLY), + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + mutex_lock(&dev->device_lock); + } + + if (!mei_cl_is_connected(cl)) + return mei_ioctl_connect_client(file, in_client_uuid, client); + + client->max_msg_length = cl->me_cl->props.max_msg_length; + client->protocol_version = cl->me_cl->props.protocol_version; + + return 0; +} + +/** + * mei_ioctl_client_notify_request - + * propagate event notification request to client + * + * @file: pointer to file structure + * @request: 0 - disable, 1 - enable + * + * Return: 0 on success , <0 on error + */ +static int mei_ioctl_client_notify_request(const struct file *file, u32 request) +{ + struct mei_cl *cl = file->private_data; + + if (request != MEI_HBM_NOTIFICATION_START && + request != MEI_HBM_NOTIFICATION_STOP) + return -EINVAL; + + return mei_cl_notify_request(cl, file, (u8)request); +} + +/** + * mei_ioctl_client_notify_get - wait for notification request + * + * @file: pointer to file structure + * @notify_get: 0 - disable, 1 - enable + * + * Return: 0 on success , <0 on error + */ +static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get) +{ + struct mei_cl *cl = file->private_data; + bool notify_ev; + bool block = (file->f_flags & O_NONBLOCK) == 0; + int rets; + + rets = mei_cl_notify_get(cl, block, ¬ify_ev); + if (rets) + return rets; + + *notify_get = notify_ev ? 
1 : 0; + return 0; +} + +/** + * mei_ioctl - the IOCTL function + * + * @file: pointer to file structure + * @cmd: ioctl command + * @data: pointer to mei message structure + * + * Return: 0 on success , <0 on error + */ +static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) +{ + struct mei_device *dev; + struct mei_cl *cl = file->private_data; + struct mei_connect_client_data conn; + struct mei_connect_client_data_vtag conn_vtag; + const uuid_le *cl_uuid; + struct mei_client *props; + u8 vtag; + u32 notify_get, notify_req; + int rets; + + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd); + + mutex_lock(&dev->device_lock); + if (dev->dev_state != MEI_DEV_ENABLED) { + rets = -ENODEV; + goto out; + } + + switch (cmd) { + case IOCTL_MEI_CONNECT_CLIENT: + dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n"); + if (copy_from_user(&conn, (char __user *)data, sizeof(conn))) { + dev_dbg(dev->dev, "failed to copy data from userland\n"); + rets = -EFAULT; + goto out; + } + cl_uuid = &conn.in_client_uuid; + props = &conn.out_client_properties; + vtag = 0; + + rets = mei_vt_support_check(dev, cl_uuid); + if (rets == -ENOTTY) + goto out; + if (!rets) + rets = mei_ioctl_connect_vtag(file, cl_uuid, props, + vtag); + else + rets = mei_ioctl_connect_client(file, cl_uuid, props); + if (rets) + goto out; + + /* if all is ok, copying the data back to user. */ + if (copy_to_user((char __user *)data, &conn, sizeof(conn))) { + dev_dbg(dev->dev, "failed to copy data to userland\n"); + rets = -EFAULT; + goto out; + } + + break; + + case IOCTL_MEI_CONNECT_CLIENT_VTAG: + dev_dbg(dev->dev, "IOCTL_MEI_CONNECT_CLIENT_VTAG\n"); + if (copy_from_user(&conn_vtag, (char __user *)data, + sizeof(conn_vtag))) { + dev_dbg(dev->dev, "failed to copy data from userland\n"); + rets = -EFAULT; + goto out; + } + + cl_uuid = &conn_vtag.connect.in_client_uuid; + props = &conn_vtag.out_client_properties; + vtag = conn_vtag.connect.vtag; + + rets = mei_vt_support_check(dev, cl_uuid); + if (rets == -EOPNOTSUPP) + dev_dbg(dev->dev, "FW Client %pUl does not support vtags\n", + cl_uuid); + if (rets) + goto out; + + if (!vtag) { + dev_dbg(dev->dev, "vtag can't be zero\n"); + rets = -EINVAL; + goto out; + } + + rets = mei_ioctl_connect_vtag(file, cl_uuid, props, vtag); + if (rets) + goto out; + + /* if all is ok, copying the data back to user. 
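+	 *
+	 * Editorial sketch (userspace, illustrative only):
+	 *
+	 *	struct mei_connect_client_data_vtag d = {0};
+	 *	memcpy(&d.connect.in_client_uuid, &uuid, sizeof(uuid));
+	 *	d.connect.vtag = 1;
+	 *	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT_VTAG, &d) < 0)
+	 *		perror("connect vtag");
+	 *
+	 * the uuid variable and the vtag value are assumed for the example.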
*/ + if (copy_to_user((char __user *)data, &conn_vtag, + sizeof(conn_vtag))) { + dev_dbg(dev->dev, "failed to copy data to userland\n"); + rets = -EFAULT; + goto out; + } + + break; + + case IOCTL_MEI_NOTIFY_SET: + dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n"); + if (copy_from_user(¬ify_req, + (char __user *)data, sizeof(notify_req))) { + dev_dbg(dev->dev, "failed to copy data from userland\n"); + rets = -EFAULT; + goto out; + } + rets = mei_ioctl_client_notify_request(file, notify_req); + break; + + case IOCTL_MEI_NOTIFY_GET: + dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n"); + rets = mei_ioctl_client_notify_get(file, ¬ify_get); + if (rets) + goto out; + + dev_dbg(dev->dev, "copy connect data to user\n"); + if (copy_to_user((char __user *)data, + ¬ify_get, sizeof(notify_get))) { + dev_dbg(dev->dev, "failed to copy data to userland\n"); + rets = -EFAULT; + goto out; + + } + break; + + default: + rets = -ENOIOCTLCMD; + } + +out: + mutex_unlock(&dev->device_lock); + return rets; +} + +/** + * mei_poll - the poll function + * + * @file: pointer to file structure + * @wait: pointer to poll_table structure + * + * Return: poll mask + */ +static __poll_t mei_poll(struct file *file, poll_table *wait) +{ + __poll_t req_events = poll_requested_events(wait); + struct mei_cl *cl = file->private_data; + struct mei_device *dev; + __poll_t mask = 0; + bool notify_en; + + if (WARN_ON(!cl || !cl->dev)) + return EPOLLERR; + + dev = cl->dev; + + mutex_lock(&dev->device_lock); + + notify_en = cl->notify_en && (req_events & EPOLLPRI); + + if (dev->dev_state != MEI_DEV_ENABLED || + !mei_cl_is_connected(cl)) { + mask = EPOLLERR; + goto out; + } + + if (notify_en) { + poll_wait(file, &cl->ev_wait, wait); + if (cl->notify_ev) + mask |= EPOLLPRI; + } + + if (req_events & (EPOLLIN | EPOLLRDNORM)) { + poll_wait(file, &cl->rx_wait, wait); + + if (mei_cl_read_cb(cl, file)) + mask |= EPOLLIN | EPOLLRDNORM; + else + mei_cl_read_start(cl, mei_cl_mtu(cl), file); + } + + if (req_events & (EPOLLOUT | EPOLLWRNORM)) { + poll_wait(file, &cl->tx_wait, wait); + if (cl->tx_cb_queued < dev->tx_queue_limit) + mask |= EPOLLOUT | EPOLLWRNORM; + } + +out: + mutex_unlock(&dev->device_lock); + return mask; +} + +/** + * mei_cl_is_write_queued - check if the client has pending writes. + * + * @cl: writing host client + * + * Return: true if client is writing, false otherwise. 
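+ *
+ * Editorial note: this scan backs mei_fsync() below, which blocks
+ * until none of the client's callbacks remain on either the
+ * write_list or the write_waiting_list.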
+ */ +static bool mei_cl_is_write_queued(struct mei_cl *cl) +{ + struct mei_device *dev = cl->dev; + struct mei_cl_cb *cb; + + list_for_each_entry(cb, &dev->write_list, list) + if (cb->cl == cl) + return true; + list_for_each_entry(cb, &dev->write_waiting_list, list) + if (cb->cl == cl) + return true; + return false; +} + +/** + * mei_fsync - the fsync handler + * + * @fp: pointer to file structure + * @start: unused + * @end: unused + * @datasync: unused + * + * Return: 0 on success, -ENODEV if client is not connected + */ +static int mei_fsync(struct file *fp, loff_t start, loff_t end, int datasync) +{ + struct mei_cl *cl = fp->private_data; + struct mei_device *dev; + int rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + mutex_lock(&dev->device_lock); + + if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) { + rets = -ENODEV; + goto out; + } + + while (mei_cl_is_write_queued(cl)) { + mutex_unlock(&dev->device_lock); + rets = wait_event_interruptible(cl->tx_wait, + cl->writing_state == MEI_WRITE_COMPLETE || + !mei_cl_is_connected(cl)); + mutex_lock(&dev->device_lock); + if (rets) { + if (signal_pending(current)) + rets = -EINTR; + goto out; + } + if (!mei_cl_is_connected(cl)) { + rets = -ENODEV; + goto out; + } + } + rets = 0; +out: + mutex_unlock(&dev->device_lock); + return rets; +} + +/** + * mei_fasync - asynchronous io support + * + * @fd: file descriptor + * @file: pointer to file structure + * @band: band bitmap + * + * Return: negative on error, + * 0 if it did no changes, + * and positive a process was added or deleted + */ +static int mei_fasync(int fd, struct file *file, int band) +{ + + struct mei_cl *cl = file->private_data; + + if (!mei_cl_is_connected(cl)) + return -ENODEV; + + return fasync_helper(fd, file, band, &cl->ev_async); +} + +/** + * trc_show - mei device trc attribute show method + * + * @device: device pointer + * @attr: attribute pointer + * @buf: char out buffer + * + * Return: number of the bytes printed into buf or error + */ +static ssize_t trc_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct mei_device *dev = dev_get_drvdata(device); + u32 trc; + int ret; + + ret = mei_trc_status(dev, &trc); + if (ret) + return ret; + return sprintf(buf, "%08X\n", trc); +} +static DEVICE_ATTR_RO(trc); + +/** + * fw_status_show - mei device fw_status attribute show method + * + * @device: device pointer + * @attr: attribute pointer + * @buf: char out buffer + * + * Return: number of the bytes printed into buf or error + */ +static ssize_t fw_status_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct mei_device *dev = dev_get_drvdata(device); + struct mei_fw_status fw_status; + int err, i; + ssize_t cnt = 0; + + mutex_lock(&dev->device_lock); + err = mei_fw_status(dev, &fw_status); + mutex_unlock(&dev->device_lock); + if (err) { + dev_err(device, "read fw_status error = %d\n", err); + return err; + } + + for (i = 0; i < fw_status.count; i++) + cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n", + fw_status.status[i]); + return cnt; +} +static DEVICE_ATTR_RO(fw_status); + +/** + * hbm_ver_show - display HBM protocol version negotiated with FW + * + * @device: device pointer + * @attr: attribute pointer + * @buf: char out buffer + * + * Return: number of the bytes printed into buf or error + */ +static ssize_t hbm_ver_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct mei_device *dev = dev_get_drvdata(device); + struct 
hbm_version ver; + + mutex_lock(&dev->device_lock); + ver = dev->version; + mutex_unlock(&dev->device_lock); + + return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version); +} +static DEVICE_ATTR_RO(hbm_ver); + +/** + * hbm_ver_drv_show - display HBM protocol version advertised by driver + * + * @device: device pointer + * @attr: attribute pointer + * @buf: char out buffer + * + * Return: number of the bytes printed into buf or error + */ +static ssize_t hbm_ver_drv_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION); +} +static DEVICE_ATTR_RO(hbm_ver_drv); + +static ssize_t tx_queue_limit_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct mei_device *dev = dev_get_drvdata(device); + u8 size = 0; + + mutex_lock(&dev->device_lock); + size = dev->tx_queue_limit; + mutex_unlock(&dev->device_lock); + + return snprintf(buf, PAGE_SIZE, "%u\n", size); +} + +static ssize_t tx_queue_limit_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mei_device *dev = dev_get_drvdata(device); + u8 limit; + unsigned int inp; + int err; + + err = kstrtouint(buf, 10, &inp); + if (err) + return err; + if (inp > MEI_TX_QUEUE_LIMIT_MAX || inp < MEI_TX_QUEUE_LIMIT_MIN) + return -EINVAL; + limit = inp; + + mutex_lock(&dev->device_lock); + dev->tx_queue_limit = limit; + mutex_unlock(&dev->device_lock); + + return count; +} +static DEVICE_ATTR_RW(tx_queue_limit); + +/** + * fw_ver_show - display ME FW version + * + * @device: device pointer + * @attr: attribute pointer + * @buf: char out buffer + * + * Return: number of the bytes printed into buf or error + */ +static ssize_t fw_ver_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct mei_device *dev = dev_get_drvdata(device); + struct mei_fw_version *ver; + ssize_t cnt = 0; + int i; + + ver = dev->fw_ver; + + for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++) + cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n", + ver[i].platform, ver[i].major, ver[i].minor, + ver[i].hotfix, ver[i].buildno); + return cnt; +} +static DEVICE_ATTR_RO(fw_ver); + +/** + * dev_state_show - display device state + * + * @device: device pointer + * @attr: attribute pointer + * @buf: char out buffer + * + * Return: number of the bytes printed into buf or error + */ +static ssize_t dev_state_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct mei_device *dev = dev_get_drvdata(device); + enum mei_dev_state dev_state; + + mutex_lock(&dev->device_lock); + dev_state = dev->dev_state; + mutex_unlock(&dev->device_lock); + + return sprintf(buf, "%s", mei_dev_state_str(dev_state)); +} +static DEVICE_ATTR_RO(dev_state); + +/** + * dev_set_devstate: set to new device state and notify sysfs file. 
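+ *
+ * Editorial sketch (userspace, illustrative): the sysfs_notify() call
+ * below lets a monitor wait on the dev_state attribute with POLLPRI:
+ *
+ *	int fd = open("/sys/class/mei/mei0/dev_state", O_RDONLY);
+ *	struct pollfd p = { .fd = fd, .events = POLLPRI | POLLERR };
+ *	poll(&p, 1, -1);
+ *
+ * "mei0" is an assumed device instance; a real monitor re-reads the
+ * attribute after each wakeup.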
+ * + * @dev: mei_device + * @state: new device state + */ +void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state) +{ + struct device *clsdev; + + if (dev->dev_state == state) + return; + + dev->dev_state = state; + + clsdev = class_find_device_by_devt(mei_class, dev->cdev.dev); + if (clsdev) { + sysfs_notify(&clsdev->kobj, NULL, "dev_state"); + put_device(clsdev); + } +} + +/** + * kind_show - display device kind + * + * @device: device pointer + * @attr: attribute pointer + * @buf: char out buffer + * + * Return: number of the bytes printed into buf or error + */ +static ssize_t kind_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct mei_device *dev = dev_get_drvdata(device); + ssize_t ret; + + if (dev->kind) + ret = sprintf(buf, "%s\n", dev->kind); + else + ret = sprintf(buf, "%s\n", "mei"); + + return ret; +} +static DEVICE_ATTR_RO(kind); + +static struct attribute *mei_attrs[] = { + &dev_attr_fw_status.attr, + &dev_attr_hbm_ver.attr, + &dev_attr_hbm_ver_drv.attr, + &dev_attr_tx_queue_limit.attr, + &dev_attr_fw_ver.attr, + &dev_attr_dev_state.attr, + &dev_attr_trc.attr, + &dev_attr_kind.attr, + NULL +}; +ATTRIBUTE_GROUPS(mei); + +/* + * file operations structure will be used for mei char device. + */ +static const struct file_operations mei_fops = { + .owner = THIS_MODULE, + .read = mei_read, + .unlocked_ioctl = mei_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .open = mei_open, + .release = mei_release, + .write = mei_write, + .poll = mei_poll, + .fsync = mei_fsync, + .fasync = mei_fasync, + .llseek = no_llseek +}; + +/** + * mei_minor_get - obtain next free device minor number + * + * @dev: device pointer + * + * Return: allocated minor, or -ENOSPC if no free minor left + */ +static int mei_minor_get(struct mei_device *dev) +{ + int ret; + + mutex_lock(&mei_minor_lock); + ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL); + if (ret >= 0) + dev->minor = ret; + else if (ret == -ENOSPC) + dev_err(dev->dev, "too many mei devices\n"); + + mutex_unlock(&mei_minor_lock); + return ret; +} + +/** + * mei_minor_free - mark device minor number as free + * + * @dev: device pointer + */ +static void mei_minor_free(struct mei_device *dev) +{ + mutex_lock(&mei_minor_lock); + idr_remove(&mei_idr, dev->minor); + mutex_unlock(&mei_minor_lock); +} + +int mei_register(struct mei_device *dev, struct device *parent) +{ + struct device *clsdev; /* class device */ + int ret, devno; + + ret = mei_minor_get(dev); + if (ret < 0) + return ret; + + /* Fill in the data structures */ + devno = MKDEV(MAJOR(mei_devt), dev->minor); + cdev_init(&dev->cdev, &mei_fops); + dev->cdev.owner = parent->driver->owner; + + /* Add the device */ + ret = cdev_add(&dev->cdev, devno, 1); + if (ret) { + dev_err(parent, "unable to add device %d:%d\n", + MAJOR(mei_devt), dev->minor); + goto err_dev_add; + } + + clsdev = device_create_with_groups(mei_class, parent, devno, + dev, mei_groups, + "mei%d", dev->minor); + + if (IS_ERR(clsdev)) { + dev_err(parent, "unable to create device %d:%d\n", + MAJOR(mei_devt), dev->minor); + ret = PTR_ERR(clsdev); + goto err_dev_create; + } + + mei_dbgfs_register(dev, dev_name(clsdev)); + + return 0; + +err_dev_create: + cdev_del(&dev->cdev); +err_dev_add: + mei_minor_free(dev); + return ret; +} +EXPORT_SYMBOL_GPL(mei_register); + +void mei_deregister(struct mei_device *dev) +{ + int devno; + + devno = dev->cdev.dev; + cdev_del(&dev->cdev); + + mei_dbgfs_deregister(dev); + + device_destroy(mei_class, devno); + + mei_minor_free(dev); +} 
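+/*
+ * Editorial note: mei_register()/mei_deregister() are the bookends a
+ * hardware glue layer (e.g. the PCI front end) calls around the char
+ * device lifetime; teardown above mirrors setup in reverse order:
+ * cdev, debugfs, class device, then the minor number.
+ */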
+EXPORT_SYMBOL_GPL(mei_deregister); + +static int __init mei_init(void) +{ + int ret; + + mei_class = class_create(THIS_MODULE, "mei"); + if (IS_ERR(mei_class)) { + pr_err("couldn't create class\n"); + ret = PTR_ERR(mei_class); + goto err; + } + + ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei"); + if (ret < 0) { + pr_err("unable to allocate char dev region\n"); + goto err_class; + } + + ret = mei_cl_bus_init(); + if (ret < 0) { + pr_err("unable to initialize bus\n"); + goto err_chrdev; + } + + return 0; + +err_chrdev: + unregister_chrdev_region(mei_devt, MEI_MAX_DEVS); +err_class: + class_destroy(mei_class); +err: + return ret; +} + +static void __exit mei_exit(void) +{ + unregister_chrdev_region(mei_devt, MEI_MAX_DEVS); + class_destroy(mei_class); + mei_cl_bus_exit(); +} + +module_init(mei_init); +module_exit(mei_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) Management Engine Interface"); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/misc/mei/mei-trace.c b/drivers/misc/mei/mei-trace.c new file mode 100644 index 000000000..48d4c4fce --- /dev/null +++ b/drivers/misc/mei/mei-trace.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015-2016, Intel Corporation. All rights reserved. + * Intel Management Engine Interface (Intel MEI) Linux driver + */ +#include <linux/module.h> + +/* sparse doesn't like tracepoint macros */ +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "mei-trace.h" + +EXPORT_TRACEPOINT_SYMBOL(mei_reg_read); +EXPORT_TRACEPOINT_SYMBOL(mei_reg_write); +EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_read); +#endif /* __CHECKER__ */ diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h new file mode 100644 index 000000000..df758033d --- /dev/null +++ b/drivers/misc/mei/mei-trace.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2015-2016, Intel Corporation. All rights reserved. 
+ * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#if !defined(_MEI_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _MEI_TRACE_H_ + +#include <linux/stringify.h> +#include <linux/types.h> +#include <linux/tracepoint.h> + +#include <linux/device.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mei + +TRACE_EVENT(mei_reg_read, + TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val), + TP_ARGS(dev, reg, offs, val), + TP_STRUCT__entry( + __string(dev, dev_name(dev)) + __field(const char *, reg) + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + __assign_str(dev, dev_name(dev)) + __entry->reg = reg; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%s] read %s:[%#x] = %#x", + __get_str(dev), __entry->reg, __entry->offs, __entry->val) +); + +TRACE_EVENT(mei_reg_write, + TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val), + TP_ARGS(dev, reg, offs, val), + TP_STRUCT__entry( + __string(dev, dev_name(dev)) + __field(const char *, reg) + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + __assign_str(dev, dev_name(dev)) + __entry->reg = reg; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%s] write %s[%#x] = %#x", + __get_str(dev), __entry->reg, __entry->offs, __entry->val) +); + +TRACE_EVENT(mei_pci_cfg_read, + TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val), + TP_ARGS(dev, reg, offs, val), + TP_STRUCT__entry( + __string(dev, dev_name(dev)) + __field(const char *, reg) + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + __assign_str(dev, dev_name(dev)) + __entry->reg = reg; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%s] pci cfg read %s:[%#x] = %#x", + __get_str(dev), __entry->reg, __entry->offs, __entry->val) +); + +#endif /* _MEI_TRACE_H_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE mei-trace +#include <trace/define_trace.h> diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h new file mode 100644 index 000000000..2f4cc1a8a --- /dev/null +++ b/drivers/misc/mei/mei_dev.h @@ -0,0 +1,810 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2003-2019, Intel Corporation. All rights reserved. + * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#ifndef _MEI_DEV_H_ +#define _MEI_DEV_H_ + +#include <linux/types.h> +#include <linux/cdev.h> +#include <linux/poll.h> +#include <linux/mei.h> +#include <linux/mei_cl_bus.h> + +#include "hw.h" +#include "hbm.h" + +#define MEI_SLOT_SIZE sizeof(u32) +#define MEI_RD_MSG_BUF_SIZE (128 * MEI_SLOT_SIZE) + +/* + * Number of Maximum MEI Clients + */ +#define MEI_CLIENTS_MAX 256 + +/* + * maximum number of consecutive resets + */ +#define MEI_MAX_CONSEC_RESET 3 + +/* + * Number of File descriptors/handles + * that can be opened to the driver. 
+ * + * Limit to 255: 256 Total Clients + * minus internal client for MEI Bus Messages + */ +#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1) + +/* File state */ +enum file_state { + MEI_FILE_UNINITIALIZED = 0, + MEI_FILE_INITIALIZING, + MEI_FILE_CONNECTING, + MEI_FILE_CONNECTED, + MEI_FILE_DISCONNECTING, + MEI_FILE_DISCONNECT_REPLY, + MEI_FILE_DISCONNECT_REQUIRED, + MEI_FILE_DISCONNECTED, +}; + +/* MEI device states */ +enum mei_dev_state { + MEI_DEV_INITIALIZING = 0, + MEI_DEV_INIT_CLIENTS, + MEI_DEV_ENABLED, + MEI_DEV_RESETTING, + MEI_DEV_DISABLED, + MEI_DEV_POWER_DOWN, + MEI_DEV_POWER_UP +}; + +const char *mei_dev_state_str(int state); + +enum mei_file_transaction_states { + MEI_IDLE, + MEI_WRITING, + MEI_WRITE_COMPLETE, +}; + +/** + * enum mei_cb_file_ops - file operation associated with the callback + * @MEI_FOP_READ: read + * @MEI_FOP_WRITE: write + * @MEI_FOP_CONNECT: connect + * @MEI_FOP_DISCONNECT: disconnect + * @MEI_FOP_DISCONNECT_RSP: disconnect response + * @MEI_FOP_NOTIFY_START: start notification + * @MEI_FOP_NOTIFY_STOP: stop notification + */ +enum mei_cb_file_ops { + MEI_FOP_READ = 0, + MEI_FOP_WRITE, + MEI_FOP_CONNECT, + MEI_FOP_DISCONNECT, + MEI_FOP_DISCONNECT_RSP, + MEI_FOP_NOTIFY_START, + MEI_FOP_NOTIFY_STOP, +}; + +/** + * enum mei_cl_io_mode - io mode between driver and fw + * + * @MEI_CL_IO_TX_BLOCKING: send is blocking + * @MEI_CL_IO_TX_INTERNAL: internal communication between driver and FW + * + * @MEI_CL_IO_RX_NONBLOCK: recv is non-blocking + */ +enum mei_cl_io_mode { + MEI_CL_IO_TX_BLOCKING = BIT(0), + MEI_CL_IO_TX_INTERNAL = BIT(1), + + MEI_CL_IO_RX_NONBLOCK = BIT(2), +}; + +/* + * Intel MEI message data struct + */ +struct mei_msg_data { + size_t size; + unsigned char *data; +}; + +/** + * struct mei_dma_dscr - dma address descriptor + * + * @vaddr: dma buffer virtual address + * @daddr: dma buffer physical address + * @size : dma buffer size + */ +struct mei_dma_dscr { + void *vaddr; + dma_addr_t daddr; + size_t size; +}; + +/* Maximum number of processed FW status registers */ +#define MEI_FW_STATUS_MAX 6 +/* Minimal buffer for FW status string (8 bytes in dw + space or '\0') */ +#define MEI_FW_STATUS_STR_SZ (MEI_FW_STATUS_MAX * (8 + 1)) + + +/* + * struct mei_fw_status - storage of FW status data + * + * @count: number of actually available elements in array + * @status: FW status registers + */ +struct mei_fw_status { + int count; + u32 status[MEI_FW_STATUS_MAX]; +}; + +/** + * struct mei_me_client - representation of me (fw) client + * + * @list: link in me client list + * @refcnt: struct reference count + * @props: client properties + * @client_id: me client id + * @tx_flow_ctrl_creds: flow control credits + * @connect_count: number connections to this client + * @bus_added: added to bus + */ +struct mei_me_client { + struct list_head list; + struct kref refcnt; + struct mei_client_properties props; + u8 client_id; + u8 tx_flow_ctrl_creds; + u8 connect_count; + u8 bus_added; +}; + + +struct mei_cl; + +/** + * struct mei_cl_cb - file operation callback structure + * + * @list: link in callback queue + * @cl: file client who is running this operation + * @fop_type: file operation type + * @buf: buffer for data associated with the callback + * @buf_idx: last read index + * @vtag: virtual tag + * @fp: pointer to file structure + * @status: io status of the cb + * @internal: communication between driver and FW flag + * @blocking: transmission blocking mode + */ +struct mei_cl_cb { + struct list_head list; + struct mei_cl *cl; + enum mei_cb_file_ops 
fop_type; + struct mei_msg_data buf; + size_t buf_idx; + u8 vtag; + const struct file *fp; + int status; + u32 internal:1; + u32 blocking:1; +}; + +/** + * struct mei_cl_vtag - file pointer to vtag mapping structure + * + * @list: link in map queue + * @fp: file pointer + * @vtag: corresponding vtag + * @pending_read: the read is pending on this file + */ +struct mei_cl_vtag { + struct list_head list; + const struct file *fp; + u8 vtag; + u8 pending_read:1; +}; + +/** + * struct mei_cl - me client host representation + * carried in file->private_data + * + * @link: link in the clients list + * @dev: mei parent device + * @state: file operation state + * @tx_wait: wait queue for tx completion + * @rx_wait: wait queue for rx completion + * @wait: wait queue for management operation + * @ev_wait: notification wait queue + * @ev_async: event async notification + * @status: connection status + * @me_cl: fw client connected + * @fp: file associated with client + * @host_client_id: host id + * @vtag_map: vtag map + * @tx_flow_ctrl_creds: transmit flow credentials + * @rx_flow_ctrl_creds: receive flow credentials + * @timer_count: watchdog timer for operation completion + * @notify_en: notification - enabled/disabled + * @notify_ev: pending notification event + * @tx_cb_queued: number of tx callbacks in queue + * @writing_state: state of the tx + * @rd_pending: pending read credits + * @rd_completed_lock: protects rd_completed queue + * @rd_completed: completed read + * + * @cldev: device on the mei client bus + */ +struct mei_cl { + struct list_head link; + struct mei_device *dev; + enum file_state state; + wait_queue_head_t tx_wait; + wait_queue_head_t rx_wait; + wait_queue_head_t wait; + wait_queue_head_t ev_wait; + struct fasync_struct *ev_async; + int status; + struct mei_me_client *me_cl; + const struct file *fp; + u8 host_client_id; + struct list_head vtag_map; + u8 tx_flow_ctrl_creds; + u8 rx_flow_ctrl_creds; + u8 timer_count; + u8 notify_en; + u8 notify_ev; + u8 tx_cb_queued; + enum mei_file_transaction_states writing_state; + struct list_head rd_pending; + spinlock_t rd_completed_lock; /* protects rd_completed queue */ + struct list_head rd_completed; + + struct mei_cl_device *cldev; +}; + +#define MEI_TX_QUEUE_LIMIT_DEFAULT 50 +#define MEI_TX_QUEUE_LIMIT_MAX 255 +#define MEI_TX_QUEUE_LIMIT_MIN 30 + +/** + * struct mei_hw_ops - hw specific ops + * + * @host_is_ready : query for host readiness + * + * @hw_is_ready : query if hw is ready + * @hw_reset : reset hw + * @hw_start : start hw after reset + * @hw_config : configure hw + * + * @fw_status : get fw status registers + * @trc_status : get trc status register + * @pg_state : power gating state of the device + * @pg_in_transition : is device now in pg transition + * @pg_is_enabled : is power gating enabled + * + * @intr_clear : clear pending interrupts + * @intr_enable : enable interrupts + * @intr_disable : disable interrupts + * @synchronize_irq : synchronize irqs + * + * @hbuf_free_slots : query for write buffer empty slots + * @hbuf_is_ready : query if write buffer is empty + * @hbuf_depth : query for write buffer depth + * + * @write : write a message to FW + * + * @rdbuf_full_slots : query how many slots are filled + * + * @read_hdr : get first 4 bytes (header) + * @read : read a buffer from the FW + */ +struct mei_hw_ops { + + bool (*host_is_ready)(struct mei_device *dev); + + bool (*hw_is_ready)(struct mei_device *dev); + int (*hw_reset)(struct mei_device *dev, bool enable); + int (*hw_start)(struct mei_device *dev); + int 
(*hw_config)(struct mei_device *dev); + + int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts); + int (*trc_status)(struct mei_device *dev, u32 *trc); + + enum mei_pg_state (*pg_state)(struct mei_device *dev); + bool (*pg_in_transition)(struct mei_device *dev); + bool (*pg_is_enabled)(struct mei_device *dev); + + void (*intr_clear)(struct mei_device *dev); + void (*intr_enable)(struct mei_device *dev); + void (*intr_disable)(struct mei_device *dev); + void (*synchronize_irq)(struct mei_device *dev); + + int (*hbuf_free_slots)(struct mei_device *dev); + bool (*hbuf_is_ready)(struct mei_device *dev); + u32 (*hbuf_depth)(const struct mei_device *dev); + int (*write)(struct mei_device *dev, + const void *hdr, size_t hdr_len, + const void *data, size_t data_len); + + int (*rdbuf_full_slots)(struct mei_device *dev); + + u32 (*read_hdr)(const struct mei_device *dev); + int (*read)(struct mei_device *dev, + unsigned char *buf, unsigned long len); +}; + +/* MEI bus API*/ +void mei_cl_bus_rescan_work(struct work_struct *work); +void mei_cl_bus_dev_fixup(struct mei_cl_device *dev); +ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, + unsigned int mode); +ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, + unsigned int mode, unsigned long timeout); +bool mei_cl_bus_rx_event(struct mei_cl *cl); +bool mei_cl_bus_notify_event(struct mei_cl *cl); +void mei_cl_bus_remove_devices(struct mei_device *bus); +int mei_cl_bus_init(void); +void mei_cl_bus_exit(void); + +/** + * enum mei_pg_event - power gating transition events + * + * @MEI_PG_EVENT_IDLE: the driver is not in power gating transition + * @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete + * @MEI_PG_EVENT_RECEIVED: the driver received pg event + * @MEI_PG_EVENT_INTR_WAIT: the driver is waiting for a pg event interrupt + * @MEI_PG_EVENT_INTR_RECEIVED: the driver received pg event interrupt + */ +enum mei_pg_event { + MEI_PG_EVENT_IDLE, + MEI_PG_EVENT_WAIT, + MEI_PG_EVENT_RECEIVED, + MEI_PG_EVENT_INTR_WAIT, + MEI_PG_EVENT_INTR_RECEIVED, +}; + +/** + * enum mei_pg_state - device internal power gating state + * + * @MEI_PG_OFF: device is not power gated - it is active + * @MEI_PG_ON: device is power gated - it is in lower power state + */ +enum mei_pg_state { + MEI_PG_OFF = 0, + MEI_PG_ON = 1, +}; + +const char *mei_pg_state_str(enum mei_pg_state state); + +/** + * struct mei_fw_version - MEI FW version struct + * + * @platform: platform identifier + * @major: major version field + * @minor: minor version field + * @buildno: build number version field + * @hotfix: hotfix number version field + */ +struct mei_fw_version { + u8 platform; + u8 major; + u16 minor; + u16 buildno; + u16 hotfix; +}; + +#define MEI_MAX_FW_VER_BLOCKS 3 + +/** + * struct mei_device - MEI private device struct + * + * @dev : device on a bus + * @cdev : character device + * @minor : minor number allocated for device + * + * @write_list : write pending list + * @write_waiting_list : write completion list + * @ctrl_wr_list : pending control write list + * @ctrl_rd_list : pending control read list + * @tx_queue_limit: tx queues per client linit + * + * @file_list : list of opened handles + * @open_handle_count: number of opened handles + * + * @device_lock : big device lock + * @timer_work : MEI timer delayed work (timeouts) + * + * @recvd_hw_ready : hw ready message received flag + * + * @wait_hw_ready : wait queue for receive HW ready message form FW + * @wait_pg : wait queue for receive PG message from FW + * 
@wait_hbm_start : wait queue for receive HBM start message from FW + * + * @reset_count : number of consecutive resets + * @dev_state : device state + * @hbm_state : state of host bus message protocol + * @init_clients_timer : HBM init handshake timeout + * + * @pg_event : power gating event + * @pg_domain : runtime PM domain + * + * @rd_msg_buf : control messages buffer + * @rd_msg_hdr : read message header storage + * @rd_msg_hdr_count : how many dwords were already read from header + * + * @hbuf_is_ready : query if the host/write buffer is ready + * @dr_dscr: DMA ring descriptors: TX, RX, and CTRL + * + * @version : HBM protocol version in use + * @hbm_f_pg_supported : hbm feature pgi protocol + * @hbm_f_dc_supported : hbm feature dynamic clients + * @hbm_f_dot_supported : hbm feature disconnect on timeout + * @hbm_f_ev_supported : hbm feature event notification + * @hbm_f_fa_supported : hbm feature fixed address client + * @hbm_f_ie_supported : hbm feature immediate reply to enum request + * @hbm_f_os_supported : hbm feature support OS ver message + * @hbm_f_dr_supported : hbm feature dma ring supported + * @hbm_f_vt_supported : hbm feature vtag supported + * @hbm_f_cap_supported : hbm feature capabilities message supported + * + * @fw_ver : FW versions + * + * @fw_f_fw_ver_supported : fw feature: fw version supported + * + * @me_clients_rwsem: rw lock over me_clients list + * @me_clients : list of FW clients + * @me_clients_map : FW clients bit map + * @host_clients_map : host clients id pool + * + * @allow_fixed_address: allow user space to connect a fixed client + * @override_fixed_address: force allow fixed address behavior + * + * @reset_work : work item for the device reset + * @bus_rescan_work : work item for the bus rescan + * + * @device_list : mei client bus list + * @cl_bus_lock : client bus list lock + * + * @kind : kind of mei device + * + * @dbgfs_dir : debugfs mei root directory + * + * @ops : hw specific operations + * @hw : hw specific data + */ +struct mei_device { + struct device *dev; + struct cdev cdev; + int minor; + + struct list_head write_list; + struct list_head write_waiting_list; + struct list_head ctrl_wr_list; + struct list_head ctrl_rd_list; + u8 tx_queue_limit; + + struct list_head file_list; + long open_handle_count; + + struct mutex device_lock; + struct delayed_work timer_work; + + bool recvd_hw_ready; + /* + * waiting queue for receive message from FW + */ + wait_queue_head_t wait_hw_ready; + wait_queue_head_t wait_pg; + wait_queue_head_t wait_hbm_start; + + /* + * mei device states + */ + unsigned long reset_count; + enum mei_dev_state dev_state; + enum mei_hbm_state hbm_state; + u16 init_clients_timer; + + /* + * Power Gating support + */ + enum mei_pg_event pg_event; +#ifdef CONFIG_PM + struct dev_pm_domain pg_domain; +#endif /* CONFIG_PM */ + + unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; + u32 rd_msg_hdr[MEI_RD_MSG_BUF_SIZE]; + int rd_msg_hdr_count; + + /* write buffer */ + bool hbuf_is_ready; + + struct mei_dma_dscr dr_dscr[DMA_DSCR_NUM]; + + struct hbm_version version; + unsigned int hbm_f_pg_supported:1; + unsigned int hbm_f_dc_supported:1; + unsigned int hbm_f_dot_supported:1; + unsigned int hbm_f_ev_supported:1; + unsigned int hbm_f_fa_supported:1; + unsigned int hbm_f_ie_supported:1; + unsigned int hbm_f_os_supported:1; + unsigned int hbm_f_dr_supported:1; + unsigned int hbm_f_vt_supported:1; + unsigned int hbm_f_cap_supported:1; + + struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS]; + + unsigned int fw_f_fw_ver_supported:1; + + 
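+	/*
+	 * ME client bookkeeping: me_clients below is the list of clients
+	 * exposed by the firmware, protected by me_clients_rwsem; the two
+	 * bitmaps track which fw client ids are known and which host
+	 * client ids are already allocated.
+	 */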
struct rw_semaphore me_clients_rwsem; + struct list_head me_clients; + DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); + DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); + + bool allow_fixed_address; + bool override_fixed_address; + + struct work_struct reset_work; + struct work_struct bus_rescan_work; + + /* List of bus devices */ + struct list_head device_list; + struct mutex cl_bus_lock; + + const char *kind; + +#if IS_ENABLED(CONFIG_DEBUG_FS) + struct dentry *dbgfs_dir; +#endif /* CONFIG_DEBUG_FS */ + + const struct mei_hw_ops *ops; + char hw[] __aligned(sizeof(void *)); +}; + +static inline unsigned long mei_secs_to_jiffies(unsigned long sec) +{ + return msecs_to_jiffies(sec * MSEC_PER_SEC); +} + +/** + * mei_data2slots - get slots number from a message length + * + * @length: size of the messages in bytes + * + * Return: number of slots + */ +static inline u32 mei_data2slots(size_t length) +{ + return DIV_ROUND_UP(length, MEI_SLOT_SIZE); +} + +/** + * mei_hbm2slots - get slots number from a hbm message length + * length + size of the mei message header + * + * @length: size of the messages in bytes + * + * Return: number of slots + */ +static inline u32 mei_hbm2slots(size_t length) +{ + return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, MEI_SLOT_SIZE); +} + +/** + * mei_slots2data - get data in slots - bytes from slots + * + * @slots: number of available slots + * + * Return: number of bytes in slots + */ +static inline u32 mei_slots2data(int slots) +{ + return slots * MEI_SLOT_SIZE; +} + +/* + * mei init function prototypes + */ +void mei_device_init(struct mei_device *dev, + struct device *device, + const struct mei_hw_ops *hw_ops); +int mei_reset(struct mei_device *dev); +int mei_start(struct mei_device *dev); +int mei_restart(struct mei_device *dev); +void mei_stop(struct mei_device *dev); +void mei_cancel_work(struct mei_device *dev); + +void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state); + +int mei_dmam_ring_alloc(struct mei_device *dev); +void mei_dmam_ring_free(struct mei_device *dev); +bool mei_dma_ring_is_allocated(struct mei_device *dev); +void mei_dma_ring_reset(struct mei_device *dev); +void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len); +void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len); +u32 mei_dma_ring_empty_slots(struct mei_device *dev); + +/* + * MEI interrupt functions prototype + */ + +void mei_timer(struct work_struct *work); +void mei_schedule_stall_timer(struct mei_device *dev); +int mei_irq_read_handler(struct mei_device *dev, + struct list_head *cmpl_list, s32 *slots); + +int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list); +void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list); + +/* + * Register Access Function + */ + + +static inline int mei_hw_config(struct mei_device *dev) +{ + return dev->ops->hw_config(dev); +} + +static inline enum mei_pg_state mei_pg_state(struct mei_device *dev) +{ + return dev->ops->pg_state(dev); +} + +static inline bool mei_pg_in_transition(struct mei_device *dev) +{ + return dev->ops->pg_in_transition(dev); +} + +static inline bool mei_pg_is_enabled(struct mei_device *dev) +{ + return dev->ops->pg_is_enabled(dev); +} + +static inline int mei_hw_reset(struct mei_device *dev, bool enable) +{ + return dev->ops->hw_reset(dev, enable); +} + +static inline int mei_hw_start(struct mei_device *dev) +{ + return dev->ops->hw_start(dev); +} + +static inline void mei_clear_interrupts(struct mei_device *dev) 
+{ + dev->ops->intr_clear(dev); +} + +static inline void mei_enable_interrupts(struct mei_device *dev) +{ + dev->ops->intr_enable(dev); +} + +static inline void mei_disable_interrupts(struct mei_device *dev) +{ + dev->ops->intr_disable(dev); +} + +static inline void mei_synchronize_irq(struct mei_device *dev) +{ + dev->ops->synchronize_irq(dev); +} + +static inline bool mei_host_is_ready(struct mei_device *dev) +{ + return dev->ops->host_is_ready(dev); +} +static inline bool mei_hw_is_ready(struct mei_device *dev) +{ + return dev->ops->hw_is_ready(dev); +} + +static inline bool mei_hbuf_is_ready(struct mei_device *dev) +{ + return dev->ops->hbuf_is_ready(dev); +} + +static inline int mei_hbuf_empty_slots(struct mei_device *dev) +{ + return dev->ops->hbuf_free_slots(dev); +} + +static inline u32 mei_hbuf_depth(const struct mei_device *dev) +{ + return dev->ops->hbuf_depth(dev); +} + +static inline int mei_write_message(struct mei_device *dev, + const void *hdr, size_t hdr_len, + const void *data, size_t data_len) +{ + return dev->ops->write(dev, hdr, hdr_len, data, data_len); +} + +static inline u32 mei_read_hdr(const struct mei_device *dev) +{ + return dev->ops->read_hdr(dev); +} + +static inline void mei_read_slots(struct mei_device *dev, + unsigned char *buf, unsigned long len) +{ + dev->ops->read(dev, buf, len); +} + +static inline int mei_count_full_read_slots(struct mei_device *dev) +{ + return dev->ops->rdbuf_full_slots(dev); +} + +static inline int mei_trc_status(struct mei_device *dev, u32 *trc) +{ + if (dev->ops->trc_status) + return dev->ops->trc_status(dev, trc); + return -EOPNOTSUPP; +} + +static inline int mei_fw_status(struct mei_device *dev, + struct mei_fw_status *fw_status) +{ + return dev->ops->fw_status(dev, fw_status); +} + +bool mei_hbuf_acquire(struct mei_device *dev); + +bool mei_write_is_idle(struct mei_device *dev); + +#if IS_ENABLED(CONFIG_DEBUG_FS) +void mei_dbgfs_register(struct mei_device *dev, const char *name); +void mei_dbgfs_deregister(struct mei_device *dev); +#else +static inline void mei_dbgfs_register(struct mei_device *dev, const char *name) {} +static inline void mei_dbgfs_deregister(struct mei_device *dev) {} +#endif /* CONFIG_DEBUG_FS */ + +int mei_register(struct mei_device *dev, struct device *parent); +void mei_deregister(struct mei_device *dev); + +#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d dma=%1d ext=%1d internal=%1d comp=%1d" +#define MEI_HDR_PRM(hdr) \ + (hdr)->host_addr, (hdr)->me_addr, \ + (hdr)->length, (hdr)->dma_ring, (hdr)->extended, \ + (hdr)->internal, (hdr)->msg_complete + +ssize_t mei_fw_status2str(struct mei_fw_status *fw_sts, char *buf, size_t len); +/** + * mei_fw_status_str - fetch and convert fw status registers to printable string + * + * @dev: the device structure + * @buf: string buffer at minimal size MEI_FW_STATUS_STR_SZ + * @len: buffer len must be >= MEI_FW_STATUS_STR_SZ + * + * Return: number of bytes written or < 0 on failure + */ +static inline ssize_t mei_fw_status_str(struct mei_device *dev, + char *buf, size_t len) +{ + struct mei_fw_status fw_status; + int ret; + + buf[0] = '\0'; + + ret = mei_fw_status(dev, &fw_status); + if (ret) + return ret; + + ret = mei_fw_status2str(&fw_status, buf, MEI_FW_STATUS_STR_SZ); + + return ret; +} + + +#endif diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c new file mode 100644 index 000000000..f2765d6b8 --- /dev/null +++ b/drivers/misc/mei/pci-me.c @@ -0,0 +1,539 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2003-2020, Intel Corporation. 
All rights reserved. + * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/interrupt.h> + +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "client.h" +#include "hw-me-regs.h" +#include "hw-me.h" + +/* mei_pci_tbl - PCI Device ID Table */ +static const struct pci_device_id mei_me_pci_tbl[] = { + {MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82G35, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82G965, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, MEI_ME_ICH_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, MEI_ME_ICH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, MEI_ME_ICH_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, MEI_ME_ICH10_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, MEI_ME_ICH10_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_4_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_4_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_4_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_3, MEI_ME_PCH8_ITOUCH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_3, MEI_ME_PCH8_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, 
MEI_ME_PCH8_ITOUCH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_ITOUCH_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_N, MEI_ME_PCH12_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_EBG, MEI_ME_PCH15_SPS_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)}, + + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl); + +#ifdef CONFIG_PM +static inline void mei_me_set_pm_domain(struct mei_device *dev); +static inline void mei_me_unset_pm_domain(struct mei_device *dev); +#else +static inline void mei_me_set_pm_domain(struct mei_device *dev) {} +static inline void mei_me_unset_pm_domain(struct mei_device *dev) {} +#endif /* CONFIG_PM */ + +static int mei_me_read_fws(const struct mei_device *dev, int where, u32 *val) +{ + struct pci_dev *pdev = to_pci_dev(dev->dev); + + return pci_read_config_dword(pdev, where, val); +} + +/** + * mei_me_quirk_probe - probe for devices that don't have a valid ME interface + * + * @pdev: PCI device structure + * @cfg: per generation config + * + * Return: true if ME Interface is valid, false otherwise + */ +static bool mei_me_quirk_probe(struct pci_dev *pdev, + const struct mei_cfg *cfg) +{ + if (cfg->quirk_probe && cfg->quirk_probe(pdev)) { + dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); + return false; + } + + return true; +} + +/** + * mei_me_probe - Device Initialization Routine + * + * @pdev: PCI device structure + * @ent: entry in mei_me_pci_tbl + * + * Return: 0 on success, <0 on failure. 
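+ *
+ * Rough flow of the probe routine below: enable the PCI device and
+ * map BAR 0, select a 64-bit DMA mask (with a 32-bit fallback),
+ * allocate the mei device, request the MSI or shared interrupt,
+ * start the hardware via mei_start(), register the character device
+ * and finally hand runtime PM over to the power gating domain.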
+ */ +static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct mei_cfg *cfg; + struct mei_device *dev; + struct mei_me_hw *hw; + unsigned int irqflags; + int err; + + cfg = mei_me_get_cfg(ent->driver_data); + if (!cfg) + return -ENODEV; + + if (!mei_me_quirk_probe(pdev, cfg)) + return -ENODEV; + + /* enable pci dev */ + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "failed to enable pci device.\n"); + goto end; + } + /* set PCI host mastering */ + pci_set_master(pdev); + /* pci request regions and mapping IO device memory for mei driver */ + err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "failed to get pci regions.\n"); + goto end; + } + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) || + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + } + if (err) { + dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); + goto end; + } + + /* allocates and initializes the mei dev structure */ + dev = mei_me_dev_init(&pdev->dev, cfg); + if (!dev) { + err = -ENOMEM; + goto end; + } + hw = to_me_hw(dev); + hw->mem_addr = pcim_iomap_table(pdev)[0]; + hw->read_fws = mei_me_read_fws; + + pci_enable_msi(pdev); + + hw->irq = pdev->irq; + + /* request and enable interrupt */ + irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED; + + err = request_threaded_irq(pdev->irq, + mei_me_irq_quick_handler, + mei_me_irq_thread_handler, + irqflags, KBUILD_MODNAME, dev); + if (err) { + dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", + pdev->irq); + goto end; + } + + if (mei_start(dev)) { + dev_err(&pdev->dev, "init hw failure.\n"); + err = -ENODEV; + goto release_irq; + } + + pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + + err = mei_register(dev, &pdev->dev); + if (err) + goto stop; + + pci_set_drvdata(pdev, dev); + + /* + * MEI requires to resume from runtime suspend mode + * in order to perform link reset flow upon system suspend. + */ + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + + /* + * ME maps runtime suspend/resume to D0i states, + * hence we need to go around native PCI runtime service which + * eventually brings the device into D3cold/hot state, + * but the mei device cannot wake up from D3 unlike from D0i3. + * To get around the PCI device native runtime pm, + * ME uses runtime pm domain handlers which take precedence + * over the driver's pm handlers. + */ + mei_me_set_pm_domain(dev); + + if (mei_pg_is_enabled(dev)) { + pm_runtime_put_noidle(&pdev->dev); + if (hw->d0i3_supported) + pm_runtime_allow(&pdev->dev); + } + + dev_dbg(&pdev->dev, "initialization successful.\n"); + + return 0; + +stop: + mei_stop(dev); +release_irq: + mei_cancel_work(dev); + mei_disable_interrupts(dev); + free_irq(pdev->irq, dev); +end: + dev_err(&pdev->dev, "initialization failed.\n"); + return err; +} + +/** + * mei_me_shutdown - Device Removal Routine + * + * @pdev: PCI device structure + * + * mei_me_shutdown is called from the reboot notifier + * it's a simplified version of remove so we go down + * faster. 
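+ *
+ * Unlike mei_me_remove() below, it neither deregisters the
+ * character device nor rebalances the runtime PM reference taken
+ * in probe.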
+ */ +static void mei_me_shutdown(struct pci_dev *pdev) +{ + struct mei_device *dev; + + dev = pci_get_drvdata(pdev); + if (!dev) + return; + + dev_dbg(&pdev->dev, "shutdown\n"); + mei_stop(dev); + + mei_me_unset_pm_domain(dev); + + mei_disable_interrupts(dev); + free_irq(pdev->irq, dev); +} + +/** + * mei_me_remove - Device Removal Routine + * + * @pdev: PCI device structure + * + * mei_me_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. + */ +static void mei_me_remove(struct pci_dev *pdev) +{ + struct mei_device *dev; + + dev = pci_get_drvdata(pdev); + if (!dev) + return; + + if (mei_pg_is_enabled(dev)) + pm_runtime_get_noresume(&pdev->dev); + + dev_dbg(&pdev->dev, "stop\n"); + mei_stop(dev); + + mei_me_unset_pm_domain(dev); + + mei_disable_interrupts(dev); + + free_irq(pdev->irq, dev); + + mei_deregister(dev); +} + +#ifdef CONFIG_PM_SLEEP +static int mei_me_pci_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev = pci_get_drvdata(pdev); + + if (!dev) + return -ENODEV; + + dev_dbg(&pdev->dev, "suspend\n"); + + mei_stop(dev); + + mei_disable_interrupts(dev); + + free_irq(pdev->irq, dev); + pci_disable_msi(pdev); + + return 0; +} + +static int mei_me_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + unsigned int irqflags; + int err; + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + pci_enable_msi(pdev); + + irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED; + + /* request and enable interrupt */ + err = request_threaded_irq(pdev->irq, + mei_me_irq_quick_handler, + mei_me_irq_thread_handler, + irqflags, KBUILD_MODNAME, dev); + + if (err) { + dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n", + pdev->irq); + return err; + } + + err = mei_restart(dev); + if (err) + return err; + + /* Start timer if stopped in suspend */ + schedule_delayed_work(&dev->timer_work, HZ); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static int mei_me_pm_runtime_idle(struct device *device) +{ + struct mei_device *dev; + + dev_dbg(device, "rpm: me: runtime_idle\n"); + + dev = dev_get_drvdata(device); + if (!dev) + return -ENODEV; + if (mei_write_is_idle(dev)) + pm_runtime_autosuspend(device); + + return -EBUSY; +} + +static int mei_me_pm_runtime_suspend(struct device *device) +{ + struct mei_device *dev; + int ret; + + dev_dbg(device, "rpm: me: runtime suspend\n"); + + dev = dev_get_drvdata(device); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + if (mei_write_is_idle(dev)) + ret = mei_me_pg_enter_sync(dev); + else + ret = -EAGAIN; + + mutex_unlock(&dev->device_lock); + + dev_dbg(device, "rpm: me: runtime suspend ret=%d\n", ret); + + if (ret && ret != -EAGAIN) + schedule_work(&dev->reset_work); + + return ret; +} + +static int mei_me_pm_runtime_resume(struct device *device) +{ + struct mei_device *dev; + int ret; + + dev_dbg(device, "rpm: me: runtime resume\n"); + + dev = dev_get_drvdata(device); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + ret = mei_me_pg_exit_sync(dev); + + mutex_unlock(&dev->device_lock); + + dev_dbg(device, "rpm: me: runtime resume ret = %d\n", ret); + + if (ret) + schedule_work(&dev->reset_work); + + return ret; +} + +/** + * mei_me_set_pm_domain - fill and set pm domain structure for device + * + * @dev: mei_device + */ +static inline void mei_me_set_pm_domain(struct mei_device *dev) +{ + struct pci_dev *pdev = 
to_pci_dev(dev->dev); + + if (pdev->dev.bus && pdev->dev.bus->pm) { + dev->pg_domain.ops = *pdev->dev.bus->pm; + + dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend; + dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume; + dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle; + + dev_pm_domain_set(&pdev->dev, &dev->pg_domain); + } +} + +/** + * mei_me_unset_pm_domain - clean pm domain structure for device + * + * @dev: mei_device + */ +static inline void mei_me_unset_pm_domain(struct mei_device *dev) +{ + /* stop using pm callbacks if any */ + dev_pm_domain_set(dev->dev, NULL); +} + +static const struct dev_pm_ops mei_me_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend, + mei_me_pci_resume) + SET_RUNTIME_PM_OPS( + mei_me_pm_runtime_suspend, + mei_me_pm_runtime_resume, + mei_me_pm_runtime_idle) +}; + +#define MEI_ME_PM_OPS (&mei_me_pm_ops) +#else +#define MEI_ME_PM_OPS NULL +#endif /* CONFIG_PM */ +/* + * PCI driver structure + */ +static struct pci_driver mei_me_driver = { + .name = KBUILD_MODNAME, + .id_table = mei_me_pci_tbl, + .probe = mei_me_probe, + .remove = mei_me_remove, + .shutdown = mei_me_shutdown, + .driver.pm = MEI_ME_PM_OPS, + .driver.probe_type = PROBE_PREFER_ASYNCHRONOUS, +}; + +module_pci_driver(mei_me_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) Management Engine Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c new file mode 100644 index 000000000..4bf26ce61 --- /dev/null +++ b/drivers/misc/mei/pci-txe.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2020, Intel Corporation. All rights reserved. + * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> + +#include <linux/mei.h> + + +#include "mei_dev.h" +#include "hw-txe.h" + +static const struct pci_device_id mei_txe_pci_tbl[] = { + {PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */ + {PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */ + + {0, } +}; +MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl); + +#ifdef CONFIG_PM +static inline void mei_txe_set_pm_domain(struct mei_device *dev); +static inline void mei_txe_unset_pm_domain(struct mei_device *dev); +#else +static inline void mei_txe_set_pm_domain(struct mei_device *dev) {} +static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {} +#endif /* CONFIG_PM */ + +/** + * mei_txe_probe - Device Initialization Routine + * + * @pdev: PCI device structure + * @ent: entry in mei_txe_pci_tbl + * + * Return: 0 on success, <0 on failure. 
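+ *
+ * The flow mirrors mei_me_probe(), except that two BARs (SEC and
+ * BRIDGE) are mapped and the device is limited to a 36-bit DMA
+ * mask.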
+ */ +static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct mei_device *dev; + struct mei_txe_hw *hw; + const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR); + int err; + + /* enable pci dev */ + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "failed to enable pci device.\n"); + goto end; + } + /* set PCI host mastering */ + pci_set_master(pdev); + /* pci request regions and mapping IO device memory for mei driver */ + err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "failed to get pci regions.\n"); + goto end; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (err) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "No suitable DMA available.\n"); + goto end; + } + } + + /* allocates and initializes the mei dev structure */ + dev = mei_txe_dev_init(pdev); + if (!dev) { + err = -ENOMEM; + goto end; + } + hw = to_txe_hw(dev); + hw->mem_addr = pcim_iomap_table(pdev); + + pci_enable_msi(pdev); + + /* clear spurious interrupts */ + mei_clear_interrupts(dev); + + /* request and enable interrupt */ + if (pci_dev_msi_enabled(pdev)) + err = request_threaded_irq(pdev->irq, + NULL, + mei_txe_irq_thread_handler, + IRQF_ONESHOT, KBUILD_MODNAME, dev); + else + err = request_threaded_irq(pdev->irq, + mei_txe_irq_quick_handler, + mei_txe_irq_thread_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (err) { + dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n", + pdev->irq); + goto end; + } + + if (mei_start(dev)) { + dev_err(&pdev->dev, "init hw failure.\n"); + err = -ENODEV; + goto release_irq; + } + + pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + + err = mei_register(dev, &pdev->dev); + if (err) + goto stop; + + pci_set_drvdata(pdev, dev); + + /* + * MEI requires to resume from runtime suspend mode + * in order to perform link reset flow upon system suspend. + */ + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + + /* + * TXE maps runtime suspend/resume to own power gating states, + * hence we need to go around native PCI runtime service which + * eventually brings the device into D3cold/hot state. + * But the TXE device cannot wake up from D3 unlike from own + * power gating. To get around PCI device native runtime pm, + * TXE uses runtime pm domain handlers which take precedence. + */ + mei_txe_set_pm_domain(dev); + + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +stop: + mei_stop(dev); +release_irq: + mei_cancel_work(dev); + mei_disable_interrupts(dev); + free_irq(pdev->irq, dev); +end: + dev_err(&pdev->dev, "initialization failed.\n"); + return err; +} + +/** + * mei_txe_shutdown - Device Shutdown Routine + * + * @pdev: PCI device structure + * + * mei_txe_shutdown is called from the reboot notifier; + * it's a simplified version of remove so we go down + * faster. + */ +static void mei_txe_shutdown(struct pci_dev *pdev) +{ + struct mei_device *dev; + + dev = pci_get_drvdata(pdev); + if (!dev) + return; + + dev_dbg(&pdev->dev, "shutdown\n"); + mei_stop(dev); + + mei_txe_unset_pm_domain(dev); + + mei_disable_interrupts(dev); + free_irq(pdev->irq, dev); +} + +/** + * mei_txe_remove - Device Removal Routine + * + * @pdev: PCI device structure + * + * mei_txe_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. 
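+ * It first takes a runtime PM reference (pm_runtime_get_noresume)
+ * to balance the pm_runtime_put_noidle done at the end of probe,
+ * then tears the device down.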
+ */ +static void mei_txe_remove(struct pci_dev *pdev) +{ + struct mei_device *dev; + + dev = pci_get_drvdata(pdev); + if (!dev) { + dev_err(&pdev->dev, "mei: dev == NULL\n"); + return; + } + + pm_runtime_get_noresume(&pdev->dev); + + mei_stop(dev); + + mei_txe_unset_pm_domain(dev); + + mei_disable_interrupts(dev); + free_irq(pdev->irq, dev); + + mei_deregister(dev); +} + + +#ifdef CONFIG_PM_SLEEP +static int mei_txe_pci_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev = pci_get_drvdata(pdev); + + if (!dev) + return -ENODEV; + + dev_dbg(&pdev->dev, "suspend\n"); + + mei_stop(dev); + + mei_disable_interrupts(dev); + + free_irq(pdev->irq, dev); + pci_disable_msi(pdev); + + return 0; +} + +static int mei_txe_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + int err; + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + pci_enable_msi(pdev); + + mei_clear_interrupts(dev); + + /* request and enable interrupt */ + if (pci_dev_msi_enabled(pdev)) + err = request_threaded_irq(pdev->irq, + NULL, + mei_txe_irq_thread_handler, + IRQF_ONESHOT, KBUILD_MODNAME, dev); + else + err = request_threaded_irq(pdev->irq, + mei_txe_irq_quick_handler, + mei_txe_irq_thread_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (err) { + dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n", + pdev->irq); + return err; + } + + err = mei_restart(dev); + + return err; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static int mei_txe_pm_runtime_idle(struct device *device) +{ + struct mei_device *dev; + + dev_dbg(device, "rpm: txe: runtime_idle\n"); + + dev = dev_get_drvdata(device); + if (!dev) + return -ENODEV; + if (mei_write_is_idle(dev)) + pm_runtime_autosuspend(device); + + return -EBUSY; +} +static int mei_txe_pm_runtime_suspend(struct device *device) +{ + struct mei_device *dev; + int ret; + + dev_dbg(device, "rpm: txe: runtime suspend\n"); + + dev = dev_get_drvdata(device); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + if (mei_write_is_idle(dev)) + ret = mei_txe_aliveness_set_sync(dev, 0); + else + ret = -EAGAIN; + + /* keep irq on we are staying in D0 */ + + dev_dbg(device, "rpm: txe: runtime suspend ret=%d\n", ret); + + mutex_unlock(&dev->device_lock); + + if (ret && ret != -EAGAIN) + schedule_work(&dev->reset_work); + + return ret; +} + +static int mei_txe_pm_runtime_resume(struct device *device) +{ + struct mei_device *dev; + int ret; + + dev_dbg(device, "rpm: txe: runtime resume\n"); + + dev = dev_get_drvdata(device); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + mei_enable_interrupts(dev); + + ret = mei_txe_aliveness_set_sync(dev, 1); + + mutex_unlock(&dev->device_lock); + + dev_dbg(device, "rpm: txe: runtime resume ret = %d\n", ret); + + if (ret) + schedule_work(&dev->reset_work); + + return ret; +} + +/** + * mei_txe_set_pm_domain - fill and set pm domain structure for device + * + * @dev: mei_device + */ +static inline void mei_txe_set_pm_domain(struct mei_device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev->dev); + + if (pdev->dev.bus && pdev->dev.bus->pm) { + dev->pg_domain.ops = *pdev->dev.bus->pm; + + dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend; + dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume; + dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle; + + dev_pm_domain_set(&pdev->dev, &dev->pg_domain); + } +} + +/** + * mei_txe_unset_pm_domain - clean pm domain structure for 
device + * + * @dev: mei_device + */ +static inline void mei_txe_unset_pm_domain(struct mei_device *dev) +{ + /* stop using pm callbacks if any */ + dev_pm_domain_set(dev->dev, NULL); +} + +static const struct dev_pm_ops mei_txe_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend, + mei_txe_pci_resume) + SET_RUNTIME_PM_OPS( + mei_txe_pm_runtime_suspend, + mei_txe_pm_runtime_resume, + mei_txe_pm_runtime_idle) +}; + +#define MEI_TXE_PM_OPS (&mei_txe_pm_ops) +#else +#define MEI_TXE_PM_OPS NULL +#endif /* CONFIG_PM */ + +/* + * PCI driver structure + */ +static struct pci_driver mei_txe_driver = { + .name = KBUILD_MODNAME, + .id_table = mei_txe_pci_tbl, + .probe = mei_txe_probe, + .remove = mei_txe_remove, + .shutdown = mei_txe_shutdown, + .driver.pm = MEI_TXE_PM_OPS, +}; + +module_pci_driver(mei_txe_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/ocxl/Kconfig b/drivers/misc/ocxl/Kconfig new file mode 100644 index 000000000..c9b0a27ca --- /dev/null +++ b/drivers/misc/ocxl/Kconfig @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Open Coherent Accelerator (OCXL) compatible devices +# + +config OCXL_BASE + bool + select PPC_COPRO_BASE + +config OCXL + tristate "OpenCAPI coherent accelerator support" + depends on HOTPLUG_PCI_POWERNV + select OCXL_BASE + default m + help + Select this option to enable the ocxl driver for Open + Coherent Accelerator Processor Interface (OpenCAPI) devices. + + OpenCAPI allows FPGA and ASIC accelerators to be coherently + attached to a CPU over an OpenCAPI link. + + The ocxl driver enables userspace programs to access these + accelerators through devices in /dev/ocxl/. + + For more information, see https://opencapi.org. + + This is not to be confused with the support for IBM CAPI + accelerators (CONFIG_CXL), which are PCI-based instead of a + dedicated OpenCAPI link, and don't follow the same protocol. + + If unsure, say N. diff --git a/drivers/misc/ocxl/Makefile b/drivers/misc/ocxl/Makefile new file mode 100644 index 000000000..d07d1bb8e --- /dev/null +++ b/drivers/misc/ocxl/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0+ +ccflags-$(CONFIG_PPC_WERROR) += -Werror + +ocxl-y += main.o pci.o config.o file.o pasid.o mmio.o +ocxl-y += link.o context.o afu_irq.o sysfs.o trace.o +ocxl-y += core.o +obj-$(CONFIG_OCXL) += ocxl.o + +# For tracepoints to include our trace.h from tracepoint infrastructure: +CFLAGS_trace.o := -I$(src) + +# ccflags-y += -DDEBUG diff --git a/drivers/misc/ocxl/afu_irq.c b/drivers/misc/ocxl/afu_irq.c new file mode 100644 index 000000000..ecdcfae02 --- /dev/null +++ b/drivers/misc/ocxl/afu_irq.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. 
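+//
+// Each AFU interrupt is exposed to userspace at its own page-aligned
+// mmap offset; ocxl_irq_offset_to_id() and ocxl_irq_id_to_offset()
+// below convert between the two, so that, for instance, irq_id 3
+// lives at irq_base_offset + 3 * PAGE_SIZE.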
+#include <linux/interrupt.h> +#include <asm/pnv-ocxl.h> +#include <asm/xive.h> +#include "ocxl_internal.h" +#include "trace.h" + +struct afu_irq { + int id; + int hw_irq; + unsigned int virq; + char *name; + irqreturn_t (*handler)(void *private); + void (*free_private)(void *private); + void *private; +}; + +int ocxl_irq_offset_to_id(struct ocxl_context *ctx, u64 offset) +{ + return (offset - ctx->afu->irq_base_offset) >> PAGE_SHIFT; +} + +u64 ocxl_irq_id_to_offset(struct ocxl_context *ctx, int irq_id) +{ + return ctx->afu->irq_base_offset + (irq_id << PAGE_SHIFT); +} + +int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id, + irqreturn_t (*handler)(void *private), + void (*free_private)(void *private), + void *private) +{ + struct afu_irq *irq; + int rc; + + mutex_lock(&ctx->irq_lock); + irq = idr_find(&ctx->irq_idr, irq_id); + if (!irq) { + rc = -EINVAL; + goto unlock; + } + + irq->handler = handler; + irq->private = private; + irq->free_private = free_private; + + rc = 0; + // Fall through to unlock + +unlock: + mutex_unlock(&ctx->irq_lock); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_irq_set_handler); + +static irqreturn_t afu_irq_handler(int virq, void *data) +{ + struct afu_irq *irq = (struct afu_irq *) data; + + trace_ocxl_afu_irq_receive(virq); + + if (irq->handler) + return irq->handler(irq->private); + + return IRQ_HANDLED; // Just drop it on the ground +} + +static int setup_afu_irq(struct ocxl_context *ctx, struct afu_irq *irq) +{ + int rc; + + irq->virq = irq_create_mapping(NULL, irq->hw_irq); + if (!irq->virq) { + pr_err("irq_create_mapping failed\n"); + return -ENOMEM; + } + pr_debug("hw_irq %d mapped to virq %u\n", irq->hw_irq, irq->virq); + + irq->name = kasprintf(GFP_KERNEL, "ocxl-afu-%u", irq->virq); + if (!irq->name) { + irq_dispose_mapping(irq->virq); + return -ENOMEM; + } + + rc = request_irq(irq->virq, afu_irq_handler, 0, irq->name, irq); + if (rc) { + kfree(irq->name); + irq->name = NULL; + irq_dispose_mapping(irq->virq); + pr_err("request_irq failed: %d\n", rc); + return rc; + } + return 0; +} + +static void release_afu_irq(struct afu_irq *irq) +{ + free_irq(irq->virq, irq); + irq_dispose_mapping(irq->virq); + kfree(irq->name); +} + +int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id) +{ + struct afu_irq *irq; + int rc; + + irq = kzalloc(sizeof(struct afu_irq), GFP_KERNEL); + if (!irq) + return -ENOMEM; + + /* + * We limit the number of afu irqs per context and per link to + * avoid a single process or user depleting the pool of IPIs + */ + + mutex_lock(&ctx->irq_lock); + + irq->id = idr_alloc(&ctx->irq_idr, irq, 0, MAX_IRQ_PER_CONTEXT, + GFP_KERNEL); + if (irq->id < 0) { + rc = -ENOSPC; + goto err_unlock; + } + + rc = ocxl_link_irq_alloc(ctx->afu->fn->link, &irq->hw_irq); + if (rc) + goto err_idr; + + rc = setup_afu_irq(ctx, irq); + if (rc) + goto err_alloc; + + trace_ocxl_afu_irq_alloc(ctx->pasid, irq->id, irq->virq, irq->hw_irq); + mutex_unlock(&ctx->irq_lock); + + *irq_id = irq->id; + + return 0; + +err_alloc: + ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq); +err_idr: + idr_remove(&ctx->irq_idr, irq->id); +err_unlock: + mutex_unlock(&ctx->irq_lock); + kfree(irq); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_afu_irq_alloc); + +static void afu_irq_free(struct afu_irq *irq, struct ocxl_context *ctx) +{ + trace_ocxl_afu_irq_free(ctx->pasid, irq->id); + if (ctx->mapping) + unmap_mapping_range(ctx->mapping, + ocxl_irq_id_to_offset(ctx, irq->id), + 1 << PAGE_SHIFT, 1); + release_afu_irq(irq); + if (irq->free_private) + irq->free_private(irq->private); 
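+	/* virq and handler private data are gone; return the hw irq to the link */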
+ ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq); + kfree(irq); +} + +int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id) +{ + struct afu_irq *irq; + + mutex_lock(&ctx->irq_lock); + + irq = idr_find(&ctx->irq_idr, irq_id); + if (!irq) { + mutex_unlock(&ctx->irq_lock); + return -EINVAL; + } + idr_remove(&ctx->irq_idr, irq->id); + afu_irq_free(irq, ctx); + mutex_unlock(&ctx->irq_lock); + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_afu_irq_free); + +void ocxl_afu_irq_free_all(struct ocxl_context *ctx) +{ + struct afu_irq *irq; + int id; + + mutex_lock(&ctx->irq_lock); + idr_for_each_entry(&ctx->irq_idr, irq, id) + afu_irq_free(irq, ctx); + mutex_unlock(&ctx->irq_lock); +} + +u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id) +{ + struct xive_irq_data *xd; + struct afu_irq *irq; + u64 addr = 0; + + mutex_lock(&ctx->irq_lock); + irq = idr_find(&ctx->irq_idr, irq_id); + if (irq) { + xd = irq_get_handler_data(irq->virq); + addr = xd ? xd->trig_page : 0; + } + mutex_unlock(&ctx->irq_lock); + return addr; +} +EXPORT_SYMBOL_GPL(ocxl_afu_irq_get_addr); diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c new file mode 100644 index 000000000..3ced98b50 --- /dev/null +++ b/drivers/misc/ocxl/config.c @@ -0,0 +1,948 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. +#include <linux/pci.h> +#include <asm/pnv-ocxl.h> +#include <misc/ocxl-config.h> +#include "ocxl_internal.h" + +#define EXTRACT_BIT(val, bit) (!!(val & BIT(bit))) +#define EXTRACT_BITS(val, s, e) ((val & GENMASK(e, s)) >> s) + +#define OCXL_DVSEC_AFU_IDX_MASK GENMASK(5, 0) +#define OCXL_DVSEC_ACTAG_MASK GENMASK(11, 0) +#define OCXL_DVSEC_PASID_MASK GENMASK(19, 0) +#define OCXL_DVSEC_PASID_LOG_MASK GENMASK(4, 0) + +#define OCXL_DVSEC_TEMPL_VERSION 0x0 +#define OCXL_DVSEC_TEMPL_NAME 0x4 +#define OCXL_DVSEC_TEMPL_AFU_VERSION 0x1C +#define OCXL_DVSEC_TEMPL_MMIO_GLOBAL 0x20 +#define OCXL_DVSEC_TEMPL_MMIO_GLOBAL_SZ 0x28 +#define OCXL_DVSEC_TEMPL_MMIO_PP 0x30 +#define OCXL_DVSEC_TEMPL_MMIO_PP_SZ 0x38 +#define OCXL_DVSEC_TEMPL_ALL_MEM_SZ 0x3C +#define OCXL_DVSEC_TEMPL_LPC_MEM_START 0x40 +#define OCXL_DVSEC_TEMPL_WWID 0x48 +#define OCXL_DVSEC_TEMPL_LPC_MEM_SZ 0x58 + +#define OCXL_MAX_AFU_PER_FUNCTION 64 +#define OCXL_TEMPL_LEN_1_0 0x58 +#define OCXL_TEMPL_LEN_1_1 0x60 +#define OCXL_TEMPL_NAME_LEN 24 +#define OCXL_CFG_TIMEOUT 3 + +static int find_dvsec(struct pci_dev *dev, int dvsec_id) +{ + int vsec = 0; + u16 vendor, id; + + while ((vsec = pci_find_next_ext_capability(dev, vsec, + OCXL_EXT_CAP_ID_DVSEC))) { + pci_read_config_word(dev, vsec + OCXL_DVSEC_VENDOR_OFFSET, + &vendor); + pci_read_config_word(dev, vsec + OCXL_DVSEC_ID_OFFSET, &id); + if (vendor == PCI_VENDOR_ID_IBM && id == dvsec_id) + return vsec; + } + return 0; +} + +static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx) +{ + int vsec = 0; + u16 vendor, id; + u8 idx; + + while ((vsec = pci_find_next_ext_capability(dev, vsec, + OCXL_EXT_CAP_ID_DVSEC))) { + pci_read_config_word(dev, vsec + OCXL_DVSEC_VENDOR_OFFSET, + &vendor); + pci_read_config_word(dev, vsec + OCXL_DVSEC_ID_OFFSET, &id); + + if (vendor == PCI_VENDOR_ID_IBM && + id == OCXL_DVSEC_AFU_CTRL_ID) { + pci_read_config_byte(dev, + vsec + OCXL_DVSEC_AFU_CTRL_AFU_IDX, + &idx); + if (idx == afu_idx) + return vsec; + } + } + return 0; +} + +/** + * get_function_0() - Find a related PCI device (function 0) + * @dev: PCI device to match + * + * Returns a pointer to the related device, or NULL if not found + */ +static struct pci_dev *get_function_0(struct pci_dev *dev) +{ 
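+	/*
+	 * Build the devfn of function 0 in the same slot and look it up.
+	 * pci_get_domain_bus_and_slot() takes a reference on the device
+	 * it returns; callers must drop it with pci_dev_put() when done.
+	 */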
+ unsigned int devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); + + return pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), + dev->bus->number, devfn); +} + +static void read_pasid(struct pci_dev *dev, struct ocxl_fn_config *fn) +{ + u16 val; + int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PASID); + if (!pos) { + /* + * PASID capability is not mandatory, but there + * shouldn't be any AFU + */ + dev_dbg(&dev->dev, "Function doesn't require any PASID\n"); + fn->max_pasid_log = -1; + goto out; + } + pci_read_config_word(dev, pos + PCI_PASID_CAP, &val); + fn->max_pasid_log = EXTRACT_BITS(val, 8, 12); + +out: + dev_dbg(&dev->dev, "PASID capability:\n"); + dev_dbg(&dev->dev, " Max PASID log = %d\n", fn->max_pasid_log); +} + +static int read_dvsec_tl(struct pci_dev *dev, struct ocxl_fn_config *fn) +{ + int pos; + + pos = find_dvsec(dev, OCXL_DVSEC_TL_ID); + if (!pos && PCI_FUNC(dev->devfn) == 0) { + dev_err(&dev->dev, "Can't find TL DVSEC\n"); + return -ENODEV; + } + if (pos && PCI_FUNC(dev->devfn) != 0) { + dev_err(&dev->dev, "TL DVSEC is only allowed on function 0\n"); + return -ENODEV; + } + fn->dvsec_tl_pos = pos; + return 0; +} + +static int read_dvsec_function(struct pci_dev *dev, struct ocxl_fn_config *fn) +{ + int pos, afu_present; + u32 val; + + pos = find_dvsec(dev, OCXL_DVSEC_FUNC_ID); + if (!pos) { + dev_err(&dev->dev, "Can't find function DVSEC\n"); + return -ENODEV; + } + fn->dvsec_function_pos = pos; + + pci_read_config_dword(dev, pos + OCXL_DVSEC_FUNC_OFF_INDEX, &val); + afu_present = EXTRACT_BIT(val, 31); + if (!afu_present) { + fn->max_afu_index = -1; + dev_dbg(&dev->dev, "Function doesn't define any AFU\n"); + goto out; + } + fn->max_afu_index = EXTRACT_BITS(val, 24, 29); + +out: + dev_dbg(&dev->dev, "Function DVSEC:\n"); + dev_dbg(&dev->dev, " Max AFU index = %d\n", fn->max_afu_index); + return 0; +} + +static int read_dvsec_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn) +{ + int pos; + + if (fn->max_afu_index < 0) { + fn->dvsec_afu_info_pos = -1; + return 0; + } + + pos = find_dvsec(dev, OCXL_DVSEC_AFU_INFO_ID); + if (!pos) { + dev_err(&dev->dev, "Can't find AFU information DVSEC\n"); + return -ENODEV; + } + fn->dvsec_afu_info_pos = pos; + return 0; +} + +static int read_dvsec_vendor(struct pci_dev *dev) +{ + int pos; + u32 cfg, tlx, dlx, reset_reload; + + /* + * vendor specific DVSEC, for IBM images only. Some older + * images may not have it + * + * It's only used on function 0 to specify the version of some + * logic blocks and to give access to special registers to + * enable host-based flashing. + */ + if (PCI_FUNC(dev->devfn) != 0) + return 0; + + pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID); + if (!pos) + return 0; + + pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_CFG_VERS, &cfg); + pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_TLX_VERS, &tlx); + pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_DLX_VERS, &dlx); + pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + &reset_reload); + + dev_dbg(&dev->dev, "Vendor specific DVSEC:\n"); + dev_dbg(&dev->dev, " CFG version = 0x%x\n", cfg); + dev_dbg(&dev->dev, " TLX version = 0x%x\n", tlx); + dev_dbg(&dev->dev, " DLX version = 0x%x\n", dlx); + dev_dbg(&dev->dev, " ResetReload = 0x%x\n", reset_reload); + return 0; +} + +/** + * get_dvsec_vendor0() - Find a related PCI device (function 0) + * @dev: PCI device to match + * @dev0: The PCI device (function 0) found + * @out_pos: The position of PCI device (function 0) + * + * Returns 0 on success, negative on failure. 
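+ *
+ * Minimal usage sketch, mirroring the callers below:
+ *
+ *	struct pci_dev *dev0;
+ *	int pos;
+ *
+ *	if (get_dvsec_vendor0(dev, &dev0, &pos))
+ *		return -1;
+ *	/* ... access the vendor DVSEC at pos on dev0 ... */
+ *	pci_dev_put(dev0);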
+ * + * NOTE: If it's successful, the reference of dev0 is increased, + * so after using it, the callers must call pci_dev_put() to give + * up the reference. + */ +static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0, + int *out_pos) +{ + int pos; + + if (PCI_FUNC(dev->devfn) != 0) { + dev = get_function_0(dev); + if (!dev) + return -1; + } else { + dev = pci_dev_get(dev); + } + pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID); + if (!pos) { + pci_dev_put(dev); + return -1; + } + *dev0 = dev; + *out_pos = pos; + return 0; +} + +int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val) +{ + struct pci_dev *dev0; + u32 reset_reload; + int pos; + + if (get_dvsec_vendor0(dev, &dev0, &pos)) + return -1; + + pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + &reset_reload); + pci_dev_put(dev0); + *val = !!(reset_reload & BIT(0)); + return 0; +} + +int ocxl_config_set_reset_reload(struct pci_dev *dev, int val) +{ + struct pci_dev *dev0; + u32 reset_reload; + int pos; + + if (get_dvsec_vendor0(dev, &dev0, &pos)) + return -1; + + pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + &reset_reload); + if (val) + reset_reload |= BIT(0); + else + reset_reload &= ~BIT(0); + pci_write_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + reset_reload); + pci_dev_put(dev0); + return 0; +} + +static int validate_function(struct pci_dev *dev, struct ocxl_fn_config *fn) +{ + if (fn->max_pasid_log == -1 && fn->max_afu_index >= 0) { + dev_err(&dev->dev, + "AFUs are defined but no PASIDs are requested\n"); + return -EINVAL; + } + + if (fn->max_afu_index > OCXL_MAX_AFU_PER_FUNCTION) { + dev_err(&dev->dev, + "Max AFU index out of architectural limit (%d vs %d)\n", + fn->max_afu_index, OCXL_MAX_AFU_PER_FUNCTION); + return -EINVAL; + } + return 0; +} + +int ocxl_config_read_function(struct pci_dev *dev, struct ocxl_fn_config *fn) +{ + int rc; + + read_pasid(dev, fn); + + rc = read_dvsec_tl(dev, fn); + if (rc) { + dev_err(&dev->dev, + "Invalid Transaction Layer DVSEC configuration: %d\n", + rc); + return -ENODEV; + } + + rc = read_dvsec_function(dev, fn); + if (rc) { + dev_err(&dev->dev, + "Invalid Function DVSEC configuration: %d\n", rc); + return -ENODEV; + } + + rc = read_dvsec_afu_info(dev, fn); + if (rc) { + dev_err(&dev->dev, "Invalid AFU configuration: %d\n", rc); + return -ENODEV; + } + + rc = read_dvsec_vendor(dev); + if (rc) { + dev_err(&dev->dev, + "Invalid vendor specific DVSEC configuration: %d\n", + rc); + return -ENODEV; + } + + rc = validate_function(dev, fn); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_config_read_function); + +static int read_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn, + int offset, u32 *data) +{ + u32 val; + unsigned long timeout = jiffies + (HZ * OCXL_CFG_TIMEOUT); + int pos = fn->dvsec_afu_info_pos; + + /* Protect 'data valid' bit */ + if (EXTRACT_BIT(offset, 31)) { + dev_err(&dev->dev, "Invalid offset in AFU info DVSEC\n"); + return -EINVAL; + } + + pci_write_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_OFF, offset); + pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_OFF, &val); + while (!EXTRACT_BIT(val, 31)) { + if (time_after_eq(jiffies, timeout)) { + dev_err(&dev->dev, + "Timeout while reading AFU info DVSEC (offset=%d)\n", + offset); + return -EBUSY; + } + cpu_relax(); + pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_OFF, &val); + } + pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_DATA, data); + return 0; +} + +/** + * read_template_version() - Read the template version from the AFU + * @dev: 
the device for the AFU + * @fn: the AFU offsets + * @len: outputs the template length + * @version: outputs the major<<8,minor version + * + * Returns 0 on success, negative on failure + */ +static int read_template_version(struct pci_dev *dev, struct ocxl_fn_config *fn, + u16 *len, u16 *version) +{ + u32 val32; + u8 major, minor; + int rc; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_VERSION, &val32); + if (rc) + return rc; + + *len = EXTRACT_BITS(val32, 16, 31); + major = EXTRACT_BITS(val32, 8, 15); + minor = EXTRACT_BITS(val32, 0, 7); + *version = (major << 8) + minor; + return 0; +} + +int ocxl_config_check_afu_index(struct pci_dev *dev, + struct ocxl_fn_config *fn, int afu_idx) +{ + int rc; + u16 templ_version; + u16 len, expected_len; + + pci_write_config_byte(dev, + fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX, + afu_idx); + + rc = read_template_version(dev, fn, &len, &templ_version); + if (rc) + return rc; + + /* AFU index map can have holes, in which case we read all 0's */ + if (!templ_version && !len) + return 0; + + dev_dbg(&dev->dev, "AFU descriptor template version %d.%d\n", + templ_version >> 8, templ_version & 0xFF); + + switch (templ_version) { + case 0x0005: // v0.5 was used prior to the spec approval + case 0x0100: + expected_len = OCXL_TEMPL_LEN_1_0; + break; + case 0x0101: + expected_len = OCXL_TEMPL_LEN_1_1; + break; + default: + dev_warn(&dev->dev, "Unknown AFU template version %#x\n", + templ_version); + expected_len = len; + } + if (len != expected_len) + dev_warn(&dev->dev, + "Unexpected template length %#x in AFU information, expected %#x for version %#x\n", + len, expected_len, templ_version); + return 1; +} + +static int read_afu_name(struct pci_dev *dev, struct ocxl_fn_config *fn, + struct ocxl_afu_config *afu) +{ + int i, rc; + u32 val, *ptr; + + BUILD_BUG_ON(OCXL_AFU_NAME_SZ < OCXL_TEMPL_NAME_LEN); + for (i = 0; i < OCXL_TEMPL_NAME_LEN; i += 4) { + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_NAME + i, &val); + if (rc) + return rc; + ptr = (u32 *) &afu->name[i]; + *ptr = le32_to_cpu((__force __le32) val); + } + afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */ + return 0; +} + +static int read_afu_mmio(struct pci_dev *dev, struct ocxl_fn_config *fn, + struct ocxl_afu_config *afu) +{ + int rc; + u32 val; + + /* + * Global MMIO + */ + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_GLOBAL, &val); + if (rc) + return rc; + afu->global_mmio_bar = EXTRACT_BITS(val, 0, 2); + afu->global_mmio_offset = EXTRACT_BITS(val, 16, 31) << 16; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_GLOBAL + 4, &val); + if (rc) + return rc; + afu->global_mmio_offset += (u64) val << 32; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_GLOBAL_SZ, &val); + if (rc) + return rc; + afu->global_mmio_size = val; + + /* + * Per-process MMIO + */ + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_PP, &val); + if (rc) + return rc; + afu->pp_mmio_bar = EXTRACT_BITS(val, 0, 2); + afu->pp_mmio_offset = EXTRACT_BITS(val, 16, 31) << 16; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_PP + 4, &val); + if (rc) + return rc; + afu->pp_mmio_offset += (u64) val << 32; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_PP_SZ, &val); + if (rc) + return rc; + afu->pp_mmio_stride = val; + + return 0; +} + +static int read_afu_control(struct pci_dev *dev, struct ocxl_afu_config *afu) +{ + int pos; + u8 val8; + u16 val16; + + pos = find_dvsec_afu_ctrl(dev, afu->idx); + if (!pos) { + dev_err(&dev->dev, "Can't find AFU control DVSEC for AFU %d\n", + afu->idx); + return 
-ENODEV; + } + afu->dvsec_afu_control_pos = pos; + + pci_read_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_SUP, &val8); + afu->pasid_supported_log = EXTRACT_BITS(val8, 0, 4); + + pci_read_config_word(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_SUP, &val16); + afu->actag_supported = EXTRACT_BITS(val16, 0, 11); + return 0; +} + +static bool char_allowed(int c) +{ + /* + * Permitted Characters : Alphanumeric, hyphen, underscore, comma + */ + if ((c >= 0x30 && c <= 0x39) /* digits */ || + (c >= 0x41 && c <= 0x5A) /* upper case */ || + (c >= 0x61 && c <= 0x7A) /* lower case */ || + c == 0 /* NULL */ || + c == 0x2D /* - */ || + c == 0x5F /* _ */ || + c == 0x2C /* , */) + return true; + return false; +} + +static int validate_afu(struct pci_dev *dev, struct ocxl_afu_config *afu) +{ + int i; + + if (!afu->name[0]) { + dev_err(&dev->dev, "Empty AFU name\n"); + return -EINVAL; + } + for (i = 0; i < OCXL_TEMPL_NAME_LEN; i++) { + if (!char_allowed(afu->name[i])) { + dev_err(&dev->dev, + "Invalid character in AFU name\n"); + return -EINVAL; + } + } + + if (afu->global_mmio_bar != 0 && + afu->global_mmio_bar != 2 && + afu->global_mmio_bar != 4) { + dev_err(&dev->dev, "Invalid global MMIO bar number\n"); + return -EINVAL; + } + if (afu->pp_mmio_bar != 0 && + afu->pp_mmio_bar != 2 && + afu->pp_mmio_bar != 4) { + dev_err(&dev->dev, "Invalid per-process MMIO bar number\n"); + return -EINVAL; + } + return 0; +} + +/** + * read_afu_lpc_memory_info() - Populate AFU metadata regarding LPC memory + * @dev: the device for the AFU + * @fn: the AFU offsets + * @afu: the AFU struct to populate the LPC metadata into + * + * Returns 0 on success, negative on failure + */ +static int read_afu_lpc_memory_info(struct pci_dev *dev, + struct ocxl_fn_config *fn, + struct ocxl_afu_config *afu) +{ + int rc; + u32 val32; + u16 templ_version; + u16 templ_len; + u64 total_mem_size = 0; + u64 lpc_mem_size = 0; + + afu->lpc_mem_offset = 0; + afu->lpc_mem_size = 0; + afu->special_purpose_mem_offset = 0; + afu->special_purpose_mem_size = 0; + /* + * For AFUs following template v1.0, the LPC memory covers the + * total memory. Its size is a power of 2. + * + * For AFUs with template >= v1.01, the total memory size is + * still a power of 2, but it is split in 2 parts: + * - the LPC memory, whose size can now be anything + * - the remainder memory is a special purpose memory, whose + * definition is AFU-dependent. It is not accessible through + * the usual commands for LPC memory + */ + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_ALL_MEM_SZ, &val32); + if (rc) + return rc; + + val32 = EXTRACT_BITS(val32, 0, 7); + if (!val32) + return 0; /* No LPC memory */ + + /* + * The configuration space spec allows for a memory size of up + * to 2^255 bytes. + * + * Current generation hardware uses 56-bit physical addresses, + * but we won't get anywhere close to that, as we won't + * have a hole big enough in the memory map. Let it pass in + * the driver for now. We'll get an error from the firmware + * when trying to configure something too big. 
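+ *
+ * Worked example with hypothetical values: a size field of 36 gives
+ * total_mem_size = 1ull << 36 (64GB). If a v1.1 template then
+ * reports a 48GB LPC memory, the remaining 16GB are exposed as
+ * special purpose memory, starting right after the LPC range.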
+ */ + total_mem_size = 1ull << val32; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_LPC_MEM_START, &val32); + if (rc) + return rc; + + afu->lpc_mem_offset = val32; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_LPC_MEM_START + 4, &val32); + if (rc) + return rc; + + afu->lpc_mem_offset |= (u64) val32 << 32; + + rc = read_template_version(dev, fn, &templ_len, &templ_version); + if (rc) + return rc; + + if (templ_version >= 0x0101) { + rc = read_afu_info(dev, fn, + OCXL_DVSEC_TEMPL_LPC_MEM_SZ, &val32); + if (rc) + return rc; + lpc_mem_size = val32; + + rc = read_afu_info(dev, fn, + OCXL_DVSEC_TEMPL_LPC_MEM_SZ + 4, &val32); + if (rc) + return rc; + lpc_mem_size |= (u64) val32 << 32; + } else { + lpc_mem_size = total_mem_size; + } + afu->lpc_mem_size = lpc_mem_size; + + if (lpc_mem_size < total_mem_size) { + afu->special_purpose_mem_offset = + afu->lpc_mem_offset + lpc_mem_size; + afu->special_purpose_mem_size = + total_mem_size - lpc_mem_size; + } + return 0; +} + +int ocxl_config_read_afu(struct pci_dev *dev, struct ocxl_fn_config *fn, + struct ocxl_afu_config *afu, u8 afu_idx) +{ + int rc; + u32 val32; + + /* + * First, we need to write the AFU idx for the AFU we want to + * access. + */ + WARN_ON((afu_idx & OCXL_DVSEC_AFU_IDX_MASK) != afu_idx); + afu->idx = afu_idx; + pci_write_config_byte(dev, + fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX, + afu->idx); + + rc = read_afu_name(dev, fn, afu); + if (rc) + return rc; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_AFU_VERSION, &val32); + if (rc) + return rc; + afu->version_major = EXTRACT_BITS(val32, 24, 31); + afu->version_minor = EXTRACT_BITS(val32, 16, 23); + afu->afuc_type = EXTRACT_BITS(val32, 14, 15); + afu->afum_type = EXTRACT_BITS(val32, 12, 13); + afu->profile = EXTRACT_BITS(val32, 0, 7); + + rc = read_afu_mmio(dev, fn, afu); + if (rc) + return rc; + + rc = read_afu_lpc_memory_info(dev, fn, afu); + if (rc) + return rc; + + rc = read_afu_control(dev, afu); + if (rc) + return rc; + + dev_dbg(&dev->dev, "AFU configuration:\n"); + dev_dbg(&dev->dev, " name = %s\n", afu->name); + dev_dbg(&dev->dev, " version = %d.%d\n", afu->version_major, + afu->version_minor); + dev_dbg(&dev->dev, " global mmio bar = %hhu\n", afu->global_mmio_bar); + dev_dbg(&dev->dev, " global mmio offset = %#llx\n", + afu->global_mmio_offset); + dev_dbg(&dev->dev, " global mmio size = %#x\n", afu->global_mmio_size); + dev_dbg(&dev->dev, " pp mmio bar = %hhu\n", afu->pp_mmio_bar); + dev_dbg(&dev->dev, " pp mmio offset = %#llx\n", afu->pp_mmio_offset); + dev_dbg(&dev->dev, " pp mmio stride = %#x\n", afu->pp_mmio_stride); + dev_dbg(&dev->dev, " lpc_mem offset = %#llx\n", afu->lpc_mem_offset); + dev_dbg(&dev->dev, " lpc_mem size = %#llx\n", afu->lpc_mem_size); + dev_dbg(&dev->dev, " special purpose mem offset = %#llx\n", + afu->special_purpose_mem_offset); + dev_dbg(&dev->dev, " special purpose mem size = %#llx\n", + afu->special_purpose_mem_size); + dev_dbg(&dev->dev, " pasid supported (log) = %u\n", + afu->pasid_supported_log); + dev_dbg(&dev->dev, " actag supported = %u\n", + afu->actag_supported); + + rc = validate_afu(dev, afu); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_config_read_afu); + +int ocxl_config_get_actag_info(struct pci_dev *dev, u16 *base, u16 *enabled, + u16 *supported) +{ + int rc; + + /* + * This is really a simple wrapper for the kernel API, to + * avoid an external driver using ocxl as a library to call + * platform-dependent code + */ + rc = pnv_ocxl_get_actag(dev, base, enabled, supported); + if (rc) { + dev_err(&dev->dev, "Can't get 
actag for device: %d\n", rc); + return rc; + } + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_config_get_actag_info); + +void ocxl_config_set_afu_actag(struct pci_dev *dev, int pos, int actag_base, + int actag_count) +{ + u16 val; + + val = actag_count & OCXL_DVSEC_ACTAG_MASK; + pci_write_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_EN, val); + + val = actag_base & OCXL_DVSEC_ACTAG_MASK; + pci_write_config_dword(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_BASE, val); +} +EXPORT_SYMBOL_GPL(ocxl_config_set_afu_actag); + +int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count) +{ + return pnv_ocxl_get_pasid_count(dev, count); +} + +void ocxl_config_set_afu_pasid(struct pci_dev *dev, int pos, int pasid_base, + u32 pasid_count_log) +{ + u8 val8; + u32 val32; + + val8 = pasid_count_log & OCXL_DVSEC_PASID_LOG_MASK; + pci_write_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_EN, val8); + + pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_BASE, + &val32); + val32 &= ~OCXL_DVSEC_PASID_MASK; + val32 |= pasid_base & OCXL_DVSEC_PASID_MASK; + pci_write_config_dword(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_BASE, + val32); +} +EXPORT_SYMBOL_GPL(ocxl_config_set_afu_pasid); + +void ocxl_config_set_afu_state(struct pci_dev *dev, int pos, int enable) +{ + u8 val; + + pci_read_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_ENABLE, &val); + if (enable) + val |= 1; + else + val &= 0xFE; + pci_write_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_ENABLE, val); +} +EXPORT_SYMBOL_GPL(ocxl_config_set_afu_state); + +int ocxl_config_set_TL(struct pci_dev *dev, int tl_dvsec) +{ + u32 val; + __be32 *be32ptr; + u8 timers; + int i, rc; + long recv_cap; + char *recv_rate; + + /* + * Skip on function != 0, as the TL can only be defined on 0 + */ + if (PCI_FUNC(dev->devfn) != 0) + return 0; + + recv_rate = kzalloc(PNV_OCXL_TL_RATE_BUF_SIZE, GFP_KERNEL); + if (!recv_rate) + return -ENOMEM; + /* + * The spec defines 64 templates for messages in the + * Transaction Layer (TL). + * + * The host and device each support a subset, so we need to + * configure the transmitters on each side to send only + * templates the receiver understands, at a rate the receiver + * can process. Per the spec, template 0 must be supported by + * everybody. That's the template which has been used by the + * host and device so far. + * + * The sending rate limit must be set before the template is + * enabled. 
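+ * + * Concretely, each side advertises a 64-bit capability mask (one + * bit per template) plus per-template rate limits. The code below + * first programs the device's transmit registers with the host's + * receive settings, then reads the device's receive settings and + * hands them to the platform code to configure the host transmitter.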
+ */ + + /* + * Device -> host + */ + rc = pnv_ocxl_get_tl_cap(dev, &recv_cap, recv_rate, + PNV_OCXL_TL_RATE_BUF_SIZE); + if (rc) + goto out; + + for (i = 0; i < PNV_OCXL_TL_RATE_BUF_SIZE; i += 4) { + be32ptr = (__be32 *) &recv_rate[i]; + pci_write_config_dword(dev, + tl_dvsec + OCXL_DVSEC_TL_SEND_RATE + i, + be32_to_cpu(*be32ptr)); + } + val = recv_cap >> 32; + pci_write_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_SEND_CAP, val); + val = recv_cap & GENMASK(31, 0); + pci_write_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_SEND_CAP + 4, val); + + /* + * Host -> device + */ + for (i = 0; i < PNV_OCXL_TL_RATE_BUF_SIZE; i += 4) { + pci_read_config_dword(dev, + tl_dvsec + OCXL_DVSEC_TL_RECV_RATE + i, + &val); + be32ptr = (__be32 *) &recv_rate[i]; + *be32ptr = cpu_to_be32(val); + } + pci_read_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_RECV_CAP, &val); + recv_cap = (long) val << 32; + pci_read_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_RECV_CAP + 4, &val); + recv_cap |= val; + + rc = pnv_ocxl_set_tl_conf(dev, recv_cap, __pa(recv_rate), + PNV_OCXL_TL_RATE_BUF_SIZE); + if (rc) + goto out; + + /* + * Opencapi commands needing to be retried are classified per + * the TL in 2 groups: short and long commands. + * + * The short back off timer is not used for now. It will be + * for opencapi 4.0. + * + * The long back off timer is typically used when an AFU hits + * a page fault but the NPU is already processing one. So the + * AFU needs to wait before it can resubmit. Having a value + * too low doesn't break anything, but can generate extra + * traffic on the link. + * We set it to 1.6 us for now. It's shorter than, but in the + * same order of magnitude as the time spent to process a page + * fault. + */ + timers = 0x2 << 4; /* long timer = 1.6 us */ + pci_write_config_byte(dev, tl_dvsec + OCXL_DVSEC_TL_BACKOFF_TIMERS, + timers); + + rc = 0; +out: + kfree(recv_rate); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_config_set_TL); + +int ocxl_config_terminate_pasid(struct pci_dev *dev, int afu_control, int pasid) +{ + u32 val; + unsigned long timeout; + + pci_read_config_dword(dev, afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID, + &val); + if (EXTRACT_BIT(val, 20)) { + dev_err(&dev->dev, + "Can't terminate PASID %#x, previous termination didn't complete\n", + pasid); + return -EBUSY; + } + + val &= ~OCXL_DVSEC_PASID_MASK; + val |= pasid & OCXL_DVSEC_PASID_MASK; + val |= BIT(20); + pci_write_config_dword(dev, + afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID, + val); + + timeout = jiffies + (HZ * OCXL_CFG_TIMEOUT); + pci_read_config_dword(dev, afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID, + &val); + while (EXTRACT_BIT(val, 20)) { + if (time_after_eq(jiffies, timeout)) { + dev_err(&dev->dev, + "Timeout while waiting for AFU to terminate PASID %#x\n", + pasid); + return -EBUSY; + } + cpu_relax(); + pci_read_config_dword(dev, + afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID, + &val); + } + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_config_terminate_pasid); + +void ocxl_config_set_actag(struct pci_dev *dev, int func_dvsec, u32 tag_first, + u32 tag_count) +{ + u32 val; + + val = (tag_first & OCXL_DVSEC_ACTAG_MASK) << 16; + val |= tag_count & OCXL_DVSEC_ACTAG_MASK; + pci_write_config_dword(dev, func_dvsec + OCXL_DVSEC_FUNC_OFF_ACTAG, + val); +} +EXPORT_SYMBOL_GPL(ocxl_config_set_actag); diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c new file mode 100644 index 000000000..c21f65a5c --- /dev/null +++ b/drivers/misc/ocxl/context.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp.
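+// +// Life cycle of ocxl contexts: allocation, attach/detach, and mmap +// of the per-process MMIO area and AFU interrupt trigger pages.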
+#include <linux/sched/mm.h> +#include "trace.h" +#include "ocxl_internal.h" + +int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu, + struct address_space *mapping) +{ + int pasid; + struct ocxl_context *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->afu = afu; + mutex_lock(&afu->contexts_lock); + pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base, + afu->pasid_base + afu->pasid_max, GFP_KERNEL); + if (pasid < 0) { + mutex_unlock(&afu->contexts_lock); + kfree(ctx); + return pasid; + } + afu->pasid_count++; + mutex_unlock(&afu->contexts_lock); + + ctx->pasid = pasid; + ctx->status = OPENED; + mutex_init(&ctx->status_mutex); + ctx->mapping = mapping; + mutex_init(&ctx->mapping_lock); + init_waitqueue_head(&ctx->events_wq); + mutex_init(&ctx->xsl_error_lock); + mutex_init(&ctx->irq_lock); + idr_init(&ctx->irq_idr); + ctx->tidr = 0; + + /* + * Keep a reference on the AFU to make sure it's valid for the + * duration of the life of the context + */ + ocxl_afu_get(afu); + *context = ctx; + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_context_alloc); + +/* + * Callback for when a translation fault triggers an error + * data: a pointer to the context which triggered the fault + * addr: the address that triggered the error + * dsisr: the value of the PPC64 dsisr register + */ +static void xsl_fault_error(void *data, u64 addr, u64 dsisr) +{ + struct ocxl_context *ctx = (struct ocxl_context *) data; + + mutex_lock(&ctx->xsl_error_lock); + ctx->xsl_error.addr = addr; + ctx->xsl_error.dsisr = dsisr; + ctx->xsl_error.count++; + mutex_unlock(&ctx->xsl_error_lock); + + wake_up_all(&ctx->events_wq); +} + +int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm) +{ + int rc; + unsigned long pidr = 0; + + // Locks both status & tidr + mutex_lock(&ctx->status_mutex); + if (ctx->status != OPENED) { + rc = -EIO; + goto out; + } + + if (mm) + pidr = mm->context.id; + + rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid, pidr, ctx->tidr, + amr, mm, xsl_fault_error, ctx); + if (rc) + goto out; + + ctx->status = ATTACHED; +out: + mutex_unlock(&ctx->status_mutex); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_context_attach); + +static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address, + u64 offset, struct ocxl_context *ctx) +{ + u64 trigger_addr; + int irq_id = ocxl_irq_offset_to_id(ctx, offset); + + trigger_addr = ocxl_afu_irq_get_addr(ctx, irq_id); + if (!trigger_addr) + return VM_FAULT_SIGBUS; + + return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT); +} + +static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address, + u64 offset, struct ocxl_context *ctx) +{ + u64 pp_mmio_addr; + int pasid_off; + vm_fault_t ret; + + if (offset >= ctx->afu->config.pp_mmio_stride) + return VM_FAULT_SIGBUS; + + mutex_lock(&ctx->status_mutex); + if (ctx->status != ATTACHED) { + mutex_unlock(&ctx->status_mutex); + pr_debug("%s: Context not attached, failing mmio mmap\n", + __func__); + return VM_FAULT_SIGBUS; + } + + pasid_off = ctx->pasid - ctx->afu->pasid_base; + pp_mmio_addr = ctx->afu->pp_mmio_start + + pasid_off * ctx->afu->config.pp_mmio_stride + + offset; + + ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT); + mutex_unlock(&ctx->status_mutex); + return ret; +} + +static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct ocxl_context *ctx = vma->vm_file->private_data; + u64 offset; + vm_fault_t ret; + + offset = vmf->pgoff << 
PAGE_SHIFT; + pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__, + ctx->pasid, vmf->address, offset); + + if (offset < ctx->afu->irq_base_offset) + ret = map_pp_mmio(vma, vmf->address, offset, ctx); + else + ret = map_afu_irq(vma, vmf->address, offset, ctx); + return ret; +} + +static const struct vm_operations_struct ocxl_vmops = { + .fault = ocxl_mmap_fault, +}; + +static int check_mmap_afu_irq(struct ocxl_context *ctx, + struct vm_area_struct *vma) +{ + int irq_id = ocxl_irq_offset_to_id(ctx, vma->vm_pgoff << PAGE_SHIFT); + + /* only one page */ + if (vma_pages(vma) != 1) + return -EINVAL; + + /* check offset validity */ + if (!ocxl_afu_irq_get_addr(ctx, irq_id)) + return -EINVAL; + + /* + * trigger page should only be accessible in write mode. + * + * It's a bit theoretical, as a page mmapped with only + * PROT_WRITE is currently readable, but it doesn't hurt. + */ + if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) || + !(vma->vm_flags & VM_WRITE)) + return -EINVAL; + vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC); + return 0; +} + +static int check_mmap_mmio(struct ocxl_context *ctx, + struct vm_area_struct *vma) +{ + if ((vma_pages(vma) + vma->vm_pgoff) > + (ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT)) + return -EINVAL; + return 0; +} + +int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma) +{ + int rc; + + if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset) + rc = check_mmap_mmio(ctx, vma); + else + rc = check_mmap_afu_irq(ctx, vma); + if (rc) + return rc; + + vma->vm_flags |= VM_IO | VM_PFNMAP; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_ops = &ocxl_vmops; + return 0; +} + +int ocxl_context_detach(struct ocxl_context *ctx) +{ + struct pci_dev *dev; + int afu_control_pos; + enum ocxl_context_status status; + int rc; + + mutex_lock(&ctx->status_mutex); + status = ctx->status; + ctx->status = CLOSED; + mutex_unlock(&ctx->status_mutex); + if (status != ATTACHED) + return 0; + + dev = to_pci_dev(ctx->afu->fn->dev.parent); + afu_control_pos = ctx->afu->config.dvsec_afu_control_pos; + + mutex_lock(&ctx->afu->afu_control_lock); + rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid); + mutex_unlock(&ctx->afu->afu_control_lock); + trace_ocxl_terminate_pasid(ctx->pasid, rc); + if (rc) { + /* + * If we time out waiting for the AFU to terminate the + * pasid, then it's dangerous to clean up the Process + * Element entry in the SPA, as it may be referenced + * in the future by the AFU. In which case, we would + * checkstop because of an invalid PE access (FIR + * register 2, bit 42). So leave the PE + * defined. Caller shouldn't free the context so that + * PASID remains allocated. + * + * A link reset will be required to clean up the AFU + * and the SPA. + */ + if (rc == -EBUSY) + return rc; + } + rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid); + if (rc) { + dev_warn(&dev->dev, + "Couldn't remove PE entry cleanly: %d\n", rc); + } + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_context_detach); + +void ocxl_context_detach_all(struct ocxl_afu *afu) +{ + struct ocxl_context *ctx; + int tmp; + + mutex_lock(&afu->contexts_lock); + idr_for_each_entry(&afu->contexts_idr, ctx, tmp) { + ocxl_context_detach(ctx); + /* + * We are force detaching - remove any active mmio + * mappings so userspace cannot interfere with the + * card if it comes back. Easiest way to exercise + * this is to unbind and rebind the driver via sysfs + * while it is in use.
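+ * + * For example, with a hypothetical device address: + * echo 0004:00:00.1 > /sys/bus/pci/drivers/ocxl/unbind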
+ */ + mutex_lock(&ctx->mapping_lock); + if (ctx->mapping) + unmap_mapping_range(ctx->mapping, 0, 0, 1); + mutex_unlock(&ctx->mapping_lock); + } + mutex_unlock(&afu->contexts_lock); +} + +void ocxl_context_free(struct ocxl_context *ctx) +{ + mutex_lock(&ctx->afu->contexts_lock); + ctx->afu->pasid_count--; + idr_remove(&ctx->afu->contexts_idr, ctx->pasid); + mutex_unlock(&ctx->afu->contexts_lock); + + ocxl_afu_irq_free_all(ctx); + idr_destroy(&ctx->irq_idr); + /* reference to the AFU taken in ocxl_context_alloc() */ + ocxl_afu_put(ctx->afu); + kfree(ctx); +} +EXPORT_SYMBOL_GPL(ocxl_context_free); diff --git a/drivers/misc/ocxl/core.c b/drivers/misc/ocxl/core.c new file mode 100644 index 000000000..aebfc53a2 --- /dev/null +++ b/drivers/misc/ocxl/core.c @@ -0,0 +1,569 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2019 IBM Corp. +#include <linux/idr.h> +#include "ocxl_internal.h" + +static struct ocxl_fn *ocxl_fn_get(struct ocxl_fn *fn) +{ + return (get_device(&fn->dev) == NULL) ? NULL : fn; +} + +static void ocxl_fn_put(struct ocxl_fn *fn) +{ + put_device(&fn->dev); +} + +static struct ocxl_afu *alloc_afu(struct ocxl_fn *fn) +{ + struct ocxl_afu *afu; + + afu = kzalloc(sizeof(struct ocxl_afu), GFP_KERNEL); + if (!afu) + return NULL; + + kref_init(&afu->kref); + mutex_init(&afu->contexts_lock); + mutex_init(&afu->afu_control_lock); + idr_init(&afu->contexts_idr); + afu->fn = fn; + ocxl_fn_get(fn); + return afu; +} + +static void free_afu(struct kref *kref) +{ + struct ocxl_afu *afu = container_of(kref, struct ocxl_afu, kref); + + idr_destroy(&afu->contexts_idr); + ocxl_fn_put(afu->fn); + kfree(afu); +} + +void ocxl_afu_get(struct ocxl_afu *afu) +{ + kref_get(&afu->kref); +} +EXPORT_SYMBOL_GPL(ocxl_afu_get); + +void ocxl_afu_put(struct ocxl_afu *afu) +{ + kref_put(&afu->kref, free_afu); +} +EXPORT_SYMBOL_GPL(ocxl_afu_put); + +static int assign_afu_actag(struct ocxl_afu *afu) +{ + struct ocxl_fn *fn = afu->fn; + int actag_count, actag_offset; + struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent); + + /* + * if there were not enough actags for the function, each afu + * reduces its count as well + */ + actag_count = afu->config.actag_supported * + fn->actag_enabled / fn->actag_supported; + actag_offset = ocxl_actag_afu_alloc(fn, actag_count); + if (actag_offset < 0) { + dev_err(&pci_dev->dev, "Can't allocate %d actags for AFU: %d\n", + actag_count, actag_offset); + return actag_offset; + } + afu->actag_base = fn->actag_base + actag_offset; + afu->actag_enabled = actag_count; + + ocxl_config_set_afu_actag(pci_dev, afu->config.dvsec_afu_control_pos, + afu->actag_base, afu->actag_enabled); + dev_dbg(&pci_dev->dev, "actag base=%d enabled=%d\n", + afu->actag_base, afu->actag_enabled); + return 0; +} + +static void reclaim_afu_actag(struct ocxl_afu *afu) +{ + struct ocxl_fn *fn = afu->fn; + int start_offset, size; + + start_offset = afu->actag_base - fn->actag_base; + size = afu->actag_enabled; + ocxl_actag_afu_free(afu->fn, start_offset, size); +} + +static int assign_afu_pasid(struct ocxl_afu *afu) +{ + struct ocxl_fn *fn = afu->fn; + int pasid_count, pasid_offset; + struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent); + + /* + * We only support the case where the function configuration + * requested enough PASIDs to cover all AFUs. 
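+ * + * In other words, the per-AFU ranges of 2^pasid_supported_log + * PASIDs must all fit within the 2^max_pasid_log range requested + * by the function.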
+ */ + pasid_count = 1 << afu->config.pasid_supported_log; + pasid_offset = ocxl_pasid_afu_alloc(fn, pasid_count); + if (pasid_offset < 0) { + dev_err(&pci_dev->dev, "Can't allocate %d PASIDs for AFU: %d\n", + pasid_count, pasid_offset); + return pasid_offset; + } + afu->pasid_base = fn->pasid_base + pasid_offset; + afu->pasid_count = 0; + afu->pasid_max = pasid_count; + + ocxl_config_set_afu_pasid(pci_dev, afu->config.dvsec_afu_control_pos, + afu->pasid_base, + afu->config.pasid_supported_log); + dev_dbg(&pci_dev->dev, "PASID base=%d, enabled=%d\n", + afu->pasid_base, pasid_count); + return 0; +} + +static void reclaim_afu_pasid(struct ocxl_afu *afu) +{ + struct ocxl_fn *fn = afu->fn; + int start_offset, size; + + start_offset = afu->pasid_base - fn->pasid_base; + size = 1 << afu->config.pasid_supported_log; + ocxl_pasid_afu_free(afu->fn, start_offset, size); +} + +static int reserve_fn_bar(struct ocxl_fn *fn, int bar) +{ + struct pci_dev *dev = to_pci_dev(fn->dev.parent); + int rc, idx; + + if (bar != 0 && bar != 2 && bar != 4) + return -EINVAL; + + idx = bar >> 1; + if (fn->bar_used[idx]++ == 0) { + rc = pci_request_region(dev, bar, "ocxl"); + if (rc) + return rc; + } + return 0; +} + +static void release_fn_bar(struct ocxl_fn *fn, int bar) +{ + struct pci_dev *dev = to_pci_dev(fn->dev.parent); + int idx; + + if (bar != 0 && bar != 2 && bar != 4) + return; + + idx = bar >> 1; + if (--fn->bar_used[idx] == 0) + pci_release_region(dev, bar); + WARN_ON(fn->bar_used[idx] < 0); +} + +static int map_mmio_areas(struct ocxl_afu *afu) +{ + int rc; + struct pci_dev *pci_dev = to_pci_dev(afu->fn->dev.parent); + + rc = reserve_fn_bar(afu->fn, afu->config.global_mmio_bar); + if (rc) + return rc; + + rc = reserve_fn_bar(afu->fn, afu->config.pp_mmio_bar); + if (rc) { + release_fn_bar(afu->fn, afu->config.global_mmio_bar); + return rc; + } + + afu->global_mmio_start = + pci_resource_start(pci_dev, afu->config.global_mmio_bar) + + afu->config.global_mmio_offset; + afu->pp_mmio_start = + pci_resource_start(pci_dev, afu->config.pp_mmio_bar) + + afu->config.pp_mmio_offset; + + afu->global_mmio_ptr = ioremap(afu->global_mmio_start, + afu->config.global_mmio_size); + if (!afu->global_mmio_ptr) { + release_fn_bar(afu->fn, afu->config.pp_mmio_bar); + release_fn_bar(afu->fn, afu->config.global_mmio_bar); + dev_err(&pci_dev->dev, "Error mapping global mmio area\n"); + return -ENOMEM; + } + + /* + * Leave an empty page between the per-process mmio area and + * the AFU interrupt mappings + */ + afu->irq_base_offset = afu->config.pp_mmio_stride + PAGE_SIZE; + return 0; +} + +static void unmap_mmio_areas(struct ocxl_afu *afu) +{ + if (afu->global_mmio_ptr) { + iounmap(afu->global_mmio_ptr); + afu->global_mmio_ptr = NULL; + } + afu->global_mmio_start = 0; + afu->pp_mmio_start = 0; + release_fn_bar(afu->fn, afu->config.pp_mmio_bar); + release_fn_bar(afu->fn, afu->config.global_mmio_bar); +} + +static int configure_afu(struct ocxl_afu *afu, u8 afu_idx, struct pci_dev *dev) +{ + int rc; + + rc = ocxl_config_read_afu(dev, &afu->fn->config, &afu->config, afu_idx); + if (rc) + return rc; + + rc = assign_afu_actag(afu); + if (rc) + return rc; + + rc = assign_afu_pasid(afu); + if (rc) + goto err_free_actag; + + rc = map_mmio_areas(afu); + if (rc) + goto err_free_pasid; + + return 0; + +err_free_pasid: + reclaim_afu_pasid(afu); +err_free_actag: + reclaim_afu_actag(afu); + return rc; +} + +static void deconfigure_afu(struct ocxl_afu *afu) +{ + unmap_mmio_areas(afu); + reclaim_afu_pasid(afu); + reclaim_afu_actag(afu); +} + 
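+/* + * Enable the AFU: set the enable bit in its control DVSEC. Pairs + * with deactivate_afu() below, which clears it. + */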
+static int activate_afu(struct pci_dev *dev, struct ocxl_afu *afu) +{ + ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 1); + + return 0; +} + +static void deactivate_afu(struct ocxl_afu *afu) +{ + struct pci_dev *dev = to_pci_dev(afu->fn->dev.parent); + + ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 0); +} + +static int init_afu(struct pci_dev *dev, struct ocxl_fn *fn, u8 afu_idx) +{ + int rc; + struct ocxl_afu *afu; + + afu = alloc_afu(fn); + if (!afu) + return -ENOMEM; + + rc = configure_afu(afu, afu_idx, dev); + if (rc) { + ocxl_afu_put(afu); + return rc; + } + + rc = activate_afu(dev, afu); + if (rc) { + deconfigure_afu(afu); + ocxl_afu_put(afu); + return rc; + } + + list_add_tail(&afu->list, &fn->afu_list); + + return 0; +} + +static void remove_afu(struct ocxl_afu *afu) +{ + list_del(&afu->list); + ocxl_context_detach_all(afu); + deactivate_afu(afu); + deconfigure_afu(afu); + ocxl_afu_put(afu); // matches the implicit get in alloc_afu +} + +static struct ocxl_fn *alloc_function(void) +{ + struct ocxl_fn *fn; + + fn = kzalloc(sizeof(struct ocxl_fn), GFP_KERNEL); + if (!fn) + return NULL; + + INIT_LIST_HEAD(&fn->afu_list); + INIT_LIST_HEAD(&fn->pasid_list); + INIT_LIST_HEAD(&fn->actag_list); + + return fn; +} + +static void free_function(struct ocxl_fn *fn) +{ + WARN_ON(!list_empty(&fn->afu_list)); + WARN_ON(!list_empty(&fn->pasid_list)); + kfree(fn); +} + +static void free_function_dev(struct device *dev) +{ + struct ocxl_fn *fn = container_of(dev, struct ocxl_fn, dev); + + free_function(fn); +} + +static int set_function_device(struct ocxl_fn *fn, struct pci_dev *dev) +{ + fn->dev.parent = &dev->dev; + fn->dev.release = free_function_dev; + return dev_set_name(&fn->dev, "ocxlfn.%s", dev_name(&dev->dev)); +} + +static int assign_function_actag(struct ocxl_fn *fn) +{ + struct pci_dev *dev = to_pci_dev(fn->dev.parent); + u16 base, enabled, supported; + int rc; + + rc = ocxl_config_get_actag_info(dev, &base, &enabled, &supported); + if (rc) + return rc; + + fn->actag_base = base; + fn->actag_enabled = enabled; + fn->actag_supported = supported; + + ocxl_config_set_actag(dev, fn->config.dvsec_function_pos, + fn->actag_base, fn->actag_enabled); + dev_dbg(&fn->dev, "actag range starting at %d, enabled %d\n", + fn->actag_base, fn->actag_enabled); + return 0; +} + +static int set_function_pasid(struct ocxl_fn *fn) +{ + struct pci_dev *dev = to_pci_dev(fn->dev.parent); + int rc, desired_count, max_count; + + /* A function may not require any PASID */ + if (fn->config.max_pasid_log < 0) + return 0; + + rc = ocxl_config_get_pasid_info(dev, &max_count); + if (rc) + return rc; + + desired_count = 1 << fn->config.max_pasid_log; + + if (desired_count > max_count) { + dev_err(&fn->dev, + "Function requires more PASIDs than are available (%d vs. %d)\n", + desired_count, max_count); + return -ENOSPC; + } + + fn->pasid_base = 0; + return 0; +} + +static int configure_function(struct ocxl_fn *fn, struct pci_dev *dev) +{ + int rc; + + rc = pci_enable_device(dev); + if (rc) { + dev_err(&dev->dev, "pci_enable_device failed: %d\n", rc); + return rc; + } + + /* + * Once it has been confirmed to work on our hardware, we + * should reset the function, to force the adapter to restart + * from scratch. + * A function reset would also reset all its AFUs. + * + * Some hints for implementation: + * + * - there's no status bit to know when the reset is done. We + * should try reading the config space to know when it's + * done.
+ * - probably something like: + * Reset + * wait 100ms + * issue config read + * allow device up to 1 sec to return success on config + * read before declaring it broken + * + * Some shared logic on the card (CFG, TLX) won't be reset, so + * there's no guarantee that it will be enough. + */ + rc = ocxl_config_read_function(dev, &fn->config); + if (rc) + return rc; + + rc = set_function_device(fn, dev); + if (rc) + return rc; + + rc = assign_function_actag(fn); + if (rc) + return rc; + + rc = set_function_pasid(fn); + if (rc) + return rc; + + rc = ocxl_link_setup(dev, 0, &fn->link); + if (rc) + return rc; + + rc = ocxl_config_set_TL(dev, fn->config.dvsec_tl_pos); + if (rc) { + ocxl_link_release(dev, fn->link); + return rc; + } + return 0; +} + +static void deconfigure_function(struct ocxl_fn *fn) +{ + struct pci_dev *dev = to_pci_dev(fn->dev.parent); + + ocxl_link_release(dev, fn->link); + pci_disable_device(dev); +} + +static struct ocxl_fn *init_function(struct pci_dev *dev) +{ + struct ocxl_fn *fn; + int rc; + + fn = alloc_function(); + if (!fn) + return ERR_PTR(-ENOMEM); + + rc = configure_function(fn, dev); + if (rc) { + free_function(fn); + return ERR_PTR(rc); + } + + rc = device_register(&fn->dev); + if (rc) { + deconfigure_function(fn); + put_device(&fn->dev); + return ERR_PTR(rc); + } + return fn; +} + +// Device detection & initialisation + +struct ocxl_fn *ocxl_function_open(struct pci_dev *dev) +{ + int rc, afu_count = 0; + u8 afu; + struct ocxl_fn *fn; + + if (!radix_enabled()) { + dev_err(&dev->dev, "Unsupported memory model (hash)\n"); + return ERR_PTR(-ENODEV); + } + + fn = init_function(dev); + if (IS_ERR(fn)) { + dev_err(&dev->dev, "function init failed: %li\n", + PTR_ERR(fn)); + return fn; + } + + for (afu = 0; afu <= fn->config.max_afu_index; afu++) { + rc = ocxl_config_check_afu_index(dev, &fn->config, afu); + if (rc > 0) { + rc = init_afu(dev, fn, afu); + if (rc) { + dev_err(&dev->dev, + "Can't initialize AFU index %d\n", afu); + continue; + } + afu_count++; + } + } + dev_info(&dev->dev, "%d AFU(s) configured\n", afu_count); + return fn; +} +EXPORT_SYMBOL_GPL(ocxl_function_open); + +struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn) +{ + return &fn->afu_list; +} +EXPORT_SYMBOL_GPL(ocxl_function_afu_list); + +struct ocxl_afu *ocxl_function_fetch_afu(struct ocxl_fn *fn, u8 afu_idx) +{ + struct ocxl_afu *afu; + + list_for_each_entry(afu, &fn->afu_list, list) { + if (afu->config.idx == afu_idx) + return afu; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(ocxl_function_fetch_afu); + +const struct ocxl_fn_config *ocxl_function_config(struct ocxl_fn *fn) +{ + return &fn->config; +} +EXPORT_SYMBOL_GPL(ocxl_function_config); + +void ocxl_function_close(struct ocxl_fn *fn) +{ + struct ocxl_afu *afu, *tmp; + + list_for_each_entry_safe(afu, tmp, &fn->afu_list, list) { + remove_afu(afu); + } + + deconfigure_function(fn); + device_unregister(&fn->dev); +} +EXPORT_SYMBOL_GPL(ocxl_function_close); + +// AFU Metadata + +struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu) +{ + return &afu->config; +} +EXPORT_SYMBOL_GPL(ocxl_afu_config); + +void ocxl_afu_set_private(struct ocxl_afu *afu, void *private) +{ + afu->private = private; +} +EXPORT_SYMBOL_GPL(ocxl_afu_set_private); + +void *ocxl_afu_get_private(struct ocxl_afu *afu) +{ + if (afu) + return afu->private; + + return NULL; +} +EXPORT_SYMBOL_GPL(ocxl_afu_get_private); diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c new file mode 100644 index 000000000..524ded879 --- /dev/null +++ 
b/drivers/misc/ocxl/file.c @@ -0,0 +1,623 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. +#include <linux/fs.h> +#include <linux/poll.h> +#include <linux/sched/signal.h> +#include <linux/eventfd.h> +#include <linux/uaccess.h> +#include <uapi/misc/ocxl.h> +#include <asm/reg.h> +#include <asm/switch_to.h> +#include "ocxl_internal.h" + + +#define OCXL_NUM_MINORS 256 /* Total to reserve */ + +static dev_t ocxl_dev; +static struct class *ocxl_class; +static struct mutex minors_idr_lock; +static struct idr minors_idr; + +static struct ocxl_file_info *find_and_get_file_info(dev_t devno) +{ + struct ocxl_file_info *info; + + mutex_lock(&minors_idr_lock); + info = idr_find(&minors_idr, MINOR(devno)); + if (info) + get_device(&info->dev); + mutex_unlock(&minors_idr_lock); + return info; +} + +static int allocate_minor(struct ocxl_file_info *info) +{ + int minor; + + mutex_lock(&minors_idr_lock); + minor = idr_alloc(&minors_idr, info, 0, OCXL_NUM_MINORS, GFP_KERNEL); + mutex_unlock(&minors_idr_lock); + return minor; +} + +static void free_minor(struct ocxl_file_info *info) +{ + mutex_lock(&minors_idr_lock); + idr_remove(&minors_idr, MINOR(info->dev.devt)); + mutex_unlock(&minors_idr_lock); +} + +static int afu_open(struct inode *inode, struct file *file) +{ + struct ocxl_file_info *info; + struct ocxl_context *ctx; + int rc; + + pr_debug("%s for device %x\n", __func__, inode->i_rdev); + + info = find_and_get_file_info(inode->i_rdev); + if (!info) + return -ENODEV; + + rc = ocxl_context_alloc(&ctx, info->afu, inode->i_mapping); + if (rc) { + put_device(&info->dev); + return rc; + } + put_device(&info->dev); + file->private_data = ctx; + return 0; +} + +static long afu_ioctl_attach(struct ocxl_context *ctx, + struct ocxl_ioctl_attach __user *uarg) +{ + struct ocxl_ioctl_attach arg; + u64 amr = 0; + int rc; + + pr_debug("%s for context %d\n", __func__, ctx->pasid); + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + /* Make sure reserved fields are not set for forward compatibility */ + if (arg.reserved1 || arg.reserved2 || arg.reserved3) + return -EINVAL; + + amr = arg.amr & mfspr(SPRN_UAMOR); + rc = ocxl_context_attach(ctx, amr, current->mm); + return rc; +} + +static long afu_ioctl_get_metadata(struct ocxl_context *ctx, + struct ocxl_ioctl_metadata __user *uarg) +{ + struct ocxl_ioctl_metadata arg; + + memset(&arg, 0, sizeof(arg)); + + arg.version = 0; + + arg.afu_version_major = ctx->afu->config.version_major; + arg.afu_version_minor = ctx->afu->config.version_minor; + arg.pasid = ctx->pasid; + arg.pp_mmio_size = ctx->afu->config.pp_mmio_stride; + arg.global_mmio_size = ctx->afu->config.global_mmio_size; + + if (copy_to_user(uarg, &arg, sizeof(arg))) + return -EFAULT; + + return 0; +} + +#ifdef CONFIG_PPC64 +static long afu_ioctl_enable_p9_wait(struct ocxl_context *ctx, + struct ocxl_ioctl_p9_wait __user *uarg) +{ + struct ocxl_ioctl_p9_wait arg; + + memset(&arg, 0, sizeof(arg)); + + if (cpu_has_feature(CPU_FTR_P9_TIDR)) { + enum ocxl_context_status status; + + // Locks both status & tidr + mutex_lock(&ctx->status_mutex); + if (!ctx->tidr) { + if (set_thread_tidr(current)) { + mutex_unlock(&ctx->status_mutex); + return -ENOENT; + } + + ctx->tidr = current->thread.tidr; + } + + status = ctx->status; + mutex_unlock(&ctx->status_mutex); + + if (status == ATTACHED) { + int rc = ocxl_link_update_pe(ctx->afu->fn->link, + ctx->pasid, ctx->tidr); + + if (rc) + return rc; + } + + arg.thread_id = ctx->tidr; + } else + return -ENOENT; + + if (copy_to_user(uarg, &arg, 
sizeof(arg))) + return -EFAULT; + + return 0; +} +#endif + + +static long afu_ioctl_get_features(struct ocxl_context *ctx, + struct ocxl_ioctl_features __user *uarg) +{ + struct ocxl_ioctl_features arg; + + memset(&arg, 0, sizeof(arg)); + +#ifdef CONFIG_PPC64 + if (cpu_has_feature(CPU_FTR_P9_TIDR)) + arg.flags[0] |= OCXL_IOCTL_FEATURES_FLAGS0_P9_WAIT; +#endif + + if (copy_to_user(uarg, &arg, sizeof(arg))) + return -EFAULT; + + return 0; +} + +#define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \ + x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" : \ + x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" : \ + x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" : \ + x == OCXL_IOCTL_GET_METADATA ? "GET_METADATA" : \ + x == OCXL_IOCTL_ENABLE_P9_WAIT ? "ENABLE_P9_WAIT" : \ + x == OCXL_IOCTL_GET_FEATURES ? "GET_FEATURES" : \ + "UNKNOWN") + +static irqreturn_t irq_handler(void *private) +{ + struct eventfd_ctx *ev_ctx = private; + + eventfd_signal(ev_ctx, 1); + return IRQ_HANDLED; +} + +static void irq_free(void *private) +{ + struct eventfd_ctx *ev_ctx = private; + + eventfd_ctx_put(ev_ctx); +} + +static long afu_ioctl(struct file *file, unsigned int cmd, + unsigned long args) +{ + struct ocxl_context *ctx = file->private_data; + struct ocxl_ioctl_irq_fd irq_fd; + struct eventfd_ctx *ev_ctx; + int irq_id; + u64 irq_offset; + long rc; + bool closed; + + pr_debug("%s for context %d, command %s\n", __func__, ctx->pasid, + CMD_STR(cmd)); + + mutex_lock(&ctx->status_mutex); + closed = (ctx->status == CLOSED); + mutex_unlock(&ctx->status_mutex); + + if (closed) + return -EIO; + + switch (cmd) { + case OCXL_IOCTL_ATTACH: + rc = afu_ioctl_attach(ctx, + (struct ocxl_ioctl_attach __user *) args); + break; + + case OCXL_IOCTL_IRQ_ALLOC: + rc = ocxl_afu_irq_alloc(ctx, &irq_id); + if (!rc) { + irq_offset = ocxl_irq_id_to_offset(ctx, irq_id); + rc = copy_to_user((u64 __user *) args, &irq_offset, + sizeof(irq_offset)); + if (rc) { + ocxl_afu_irq_free(ctx, irq_id); + return -EFAULT; + } + } + break; + + case OCXL_IOCTL_IRQ_FREE: + rc = copy_from_user(&irq_offset, (u64 __user *) args, + sizeof(irq_offset)); + if (rc) + return -EFAULT; + irq_id = ocxl_irq_offset_to_id(ctx, irq_offset); + rc = ocxl_afu_irq_free(ctx, irq_id); + break; + + case OCXL_IOCTL_IRQ_SET_FD: + rc = copy_from_user(&irq_fd, (u64 __user *) args, + sizeof(irq_fd)); + if (rc) + return -EFAULT; + if (irq_fd.reserved) + return -EINVAL; + irq_id = ocxl_irq_offset_to_id(ctx, irq_fd.irq_offset); + ev_ctx = eventfd_ctx_fdget(irq_fd.eventfd); + if (IS_ERR(ev_ctx)) + return PTR_ERR(ev_ctx); + rc = ocxl_irq_set_handler(ctx, irq_id, irq_handler, irq_free, ev_ctx); + if (rc) + eventfd_ctx_put(ev_ctx); + break; + + case OCXL_IOCTL_GET_METADATA: + rc = afu_ioctl_get_metadata(ctx, + (struct ocxl_ioctl_metadata __user *) args); + break; + +#ifdef CONFIG_PPC64 + case OCXL_IOCTL_ENABLE_P9_WAIT: + rc = afu_ioctl_enable_p9_wait(ctx, + (struct ocxl_ioctl_p9_wait __user *) args); + break; +#endif + + case OCXL_IOCTL_GET_FEATURES: + rc = afu_ioctl_get_features(ctx, + (struct ocxl_ioctl_features __user *) args); + break; + + default: + rc = -EINVAL; + } + return rc; +} + +static long afu_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long args) +{ + return afu_ioctl(file, cmd, args); +} + +static int afu_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct ocxl_context *ctx = file->private_data; + + pr_debug("%s for context %d\n", __func__, ctx->pasid); + return ocxl_context_mmap(ctx, vma); +} + +static bool has_xsl_error(struct ocxl_context *ctx) +{ + bool ret; + + 
mutex_lock(&ctx->xsl_error_lock); + ret = !!ctx->xsl_error.addr; + mutex_unlock(&ctx->xsl_error_lock); + + return ret; +} + +/* + * Are there any events pending on the AFU + * ctx: The AFU context + * Returns: true if there are events pending + */ +static bool afu_events_pending(struct ocxl_context *ctx) +{ + if (has_xsl_error(ctx)) + return true; + return false; +} + +static unsigned int afu_poll(struct file *file, struct poll_table_struct *wait) +{ + struct ocxl_context *ctx = file->private_data; + unsigned int mask = 0; + bool closed; + + pr_debug("%s for context %d\n", __func__, ctx->pasid); + + poll_wait(file, &ctx->events_wq, wait); + + mutex_lock(&ctx->status_mutex); + closed = (ctx->status == CLOSED); + mutex_unlock(&ctx->status_mutex); + + if (afu_events_pending(ctx)) + mask = EPOLLIN | EPOLLRDNORM; + else if (closed) + mask = EPOLLERR; + + return mask; +} + +/* + * Populate the supplied buffer with a single XSL error + * ctx: The AFU context to report the error from + * header: the event header to populate + * buf: The buffer to write the body into (should be at least + * AFU_EVENT_BODY_XSL_ERROR_SIZE) + * Return: the amount of buffer that was populated + */ +static ssize_t append_xsl_error(struct ocxl_context *ctx, + struct ocxl_kernel_event_header *header, + char __user *buf) +{ + struct ocxl_kernel_event_xsl_fault_error body; + + memset(&body, 0, sizeof(body)); + + mutex_lock(&ctx->xsl_error_lock); + if (!ctx->xsl_error.addr) { + mutex_unlock(&ctx->xsl_error_lock); + return 0; + } + + body.addr = ctx->xsl_error.addr; + body.dsisr = ctx->xsl_error.dsisr; + body.count = ctx->xsl_error.count; + + ctx->xsl_error.addr = 0; + ctx->xsl_error.dsisr = 0; + ctx->xsl_error.count = 0; + + mutex_unlock(&ctx->xsl_error_lock); + + header->type = OCXL_AFU_EVENT_XSL_FAULT_ERROR; + + if (copy_to_user(buf, &body, sizeof(body))) + return -EFAULT; + + return sizeof(body); +} + +#define AFU_EVENT_BODY_MAX_SIZE sizeof(struct ocxl_kernel_event_xsl_fault_error) + +/* + * Reports events on the AFU + * Format: + * Header (struct ocxl_kernel_event_header) + * Body (struct ocxl_kernel_event_*) + * Header... 
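+ * + * Reads block until an event is pending or the context is closed, + * unless O_NONBLOCK is set. The supplied buffer must be large + * enough for a header plus the largest event body.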
+ */ +static ssize_t afu_read(struct file *file, char __user *buf, size_t count, + loff_t *off) +{ + struct ocxl_context *ctx = file->private_data; + struct ocxl_kernel_event_header header; + ssize_t rc; + ssize_t used = 0; + DEFINE_WAIT(event_wait); + + memset(&header, 0, sizeof(header)); + + /* Require offset to be 0 */ + if (*off != 0) + return -EINVAL; + + if (count < (sizeof(struct ocxl_kernel_event_header) + + AFU_EVENT_BODY_MAX_SIZE)) + return -EINVAL; + + for (;;) { + prepare_to_wait(&ctx->events_wq, &event_wait, + TASK_INTERRUPTIBLE); + + if (afu_events_pending(ctx)) + break; + + if (ctx->status == CLOSED) + break; + + if (file->f_flags & O_NONBLOCK) { + finish_wait(&ctx->events_wq, &event_wait); + return -EAGAIN; + } + + if (signal_pending(current)) { + finish_wait(&ctx->events_wq, &event_wait); + return -ERESTARTSYS; + } + + schedule(); + } + + finish_wait(&ctx->events_wq, &event_wait); + + if (has_xsl_error(ctx)) { + used = append_xsl_error(ctx, &header, buf + sizeof(header)); + if (used < 0) + return used; + } + + if (!afu_events_pending(ctx)) + header.flags |= OCXL_KERNEL_EVENT_FLAG_LAST; + + if (copy_to_user(buf, &header, sizeof(header))) + return -EFAULT; + + used += sizeof(header); + + rc = used; + return rc; +} + +static int afu_release(struct inode *inode, struct file *file) +{ + struct ocxl_context *ctx = file->private_data; + int rc; + + pr_debug("%s for device %x\n", __func__, inode->i_rdev); + rc = ocxl_context_detach(ctx); + mutex_lock(&ctx->mapping_lock); + ctx->mapping = NULL; + mutex_unlock(&ctx->mapping_lock); + wake_up_all(&ctx->events_wq); + if (rc != -EBUSY) + ocxl_context_free(ctx); + return 0; +} + +static const struct file_operations ocxl_afu_fops = { + .owner = THIS_MODULE, + .open = afu_open, + .unlocked_ioctl = afu_ioctl, + .compat_ioctl = afu_compat_ioctl, + .mmap = afu_mmap, + .poll = afu_poll, + .read = afu_read, + .release = afu_release, +}; + +// Free the info struct +static void info_release(struct device *dev) +{ + struct ocxl_file_info *info = container_of(dev, struct ocxl_file_info, dev); + + ocxl_afu_put(info->afu); + kfree(info); +} + +static int ocxl_file_make_visible(struct ocxl_file_info *info) +{ + int rc; + + cdev_init(&info->cdev, &ocxl_afu_fops); + rc = cdev_add(&info->cdev, info->dev.devt, 1); + if (rc) { + dev_err(&info->dev, "Unable to add afu char device: %d\n", rc); + return rc; + } + + return 0; +} + +static void ocxl_file_make_invisible(struct ocxl_file_info *info) +{ + cdev_del(&info->cdev); +} + +int ocxl_file_register_afu(struct ocxl_afu *afu) +{ + int minor; + int rc; + struct ocxl_file_info *info; + struct ocxl_fn *fn = afu->fn; + struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent); + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (info == NULL) + return -ENOMEM; + + minor = allocate_minor(info); + if (minor < 0) { + kfree(info); + return minor; + } + + info->dev.parent = &fn->dev; + info->dev.devt = MKDEV(MAJOR(ocxl_dev), minor); + info->dev.class = ocxl_class; + info->dev.release = info_release; + + info->afu = afu; + ocxl_afu_get(afu); + + rc = dev_set_name(&info->dev, "%s.%s.%hhu", + afu->config.name, dev_name(&pci_dev->dev), afu->config.idx); + if (rc) + goto err_put; + + rc = device_register(&info->dev); + if (rc) { + free_minor(info); + put_device(&info->dev); + return rc; + } + + rc = ocxl_sysfs_register_afu(info); + if (rc) + goto err_unregister; + + rc = ocxl_file_make_visible(info); + if (rc) + goto err_unregister; + + ocxl_afu_set_private(afu, info); + + return 0; + +err_unregister: + 
ocxl_sysfs_unregister_afu(info); // safe to call even if register failed + free_minor(info); + device_unregister(&info->dev); + return rc; +err_put: + ocxl_afu_put(afu); + free_minor(info); + kfree(info); + return rc; +} + +void ocxl_file_unregister_afu(struct ocxl_afu *afu) +{ + struct ocxl_file_info *info = ocxl_afu_get_private(afu); + + if (!info) + return; + + ocxl_file_make_invisible(info); + ocxl_sysfs_unregister_afu(info); + free_minor(info); + device_unregister(&info->dev); +} + +static char *ocxl_devnode(struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "ocxl/%s", dev_name(dev)); +} + +int ocxl_file_init(void) +{ + int rc; + + mutex_init(&minors_idr_lock); + idr_init(&minors_idr); + + rc = alloc_chrdev_region(&ocxl_dev, 0, OCXL_NUM_MINORS, "ocxl"); + if (rc) { + pr_err("Unable to allocate ocxl major number: %d\n", rc); + return rc; + } + + ocxl_class = class_create(THIS_MODULE, "ocxl"); + if (IS_ERR(ocxl_class)) { + pr_err("Unable to create ocxl class\n"); + unregister_chrdev_region(ocxl_dev, OCXL_NUM_MINORS); + return PTR_ERR(ocxl_class); + } + + ocxl_class->devnode = ocxl_devnode; + return 0; +} + +void ocxl_file_exit(void) +{ + class_destroy(ocxl_class); + unregister_chrdev_region(ocxl_dev, OCXL_NUM_MINORS); + idr_destroy(&minors_idr); +} diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c new file mode 100644 index 000000000..fd73d3bc0 --- /dev/null +++ b/drivers/misc/ocxl/link.c @@ -0,0 +1,712 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. +#include <linux/sched/mm.h> +#include <linux/mutex.h> +#include <linux/mm_types.h> +#include <linux/mmu_context.h> +#include <asm/copro.h> +#include <asm/pnv-ocxl.h> +#include <asm/xive.h> +#include <misc/ocxl.h> +#include "ocxl_internal.h" +#include "trace.h" + + +#define SPA_PASID_BITS 15 +#define SPA_PASID_MAX ((1 << SPA_PASID_BITS) - 1) +#define SPA_PE_MASK SPA_PASID_MAX +#define SPA_SPA_SIZE_LOG 22 /* Each SPA is 4 MB */ + +#define SPA_CFG_SF (1ull << (63-0)) +#define SPA_CFG_TA (1ull << (63-1)) +#define SPA_CFG_HV (1ull << (63-3)) +#define SPA_CFG_UV (1ull << (63-4)) +#define SPA_CFG_XLAT_hpt (0ull << (63-6)) /* Hashed page table (HPT) mode */ +#define SPA_CFG_XLAT_roh (2ull << (63-6)) /* Radix on HPT mode */ +#define SPA_CFG_XLAT_ror (3ull << (63-6)) /* Radix on Radix mode */ +#define SPA_CFG_PR (1ull << (63-49)) +#define SPA_CFG_TC (1ull << (63-54)) +#define SPA_CFG_DR (1ull << (63-59)) + +#define SPA_XSL_TF (1ull << (63-3)) /* Translation fault */ +#define SPA_XSL_S (1ull << (63-38)) /* Store operation */ + +#define SPA_PE_VALID 0x80000000 + + +struct pe_data { + struct mm_struct *mm; + /* callback to trigger when a translation fault occurs */ + void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr); + /* opaque pointer to be passed to the above callback */ + void *xsl_err_data; + struct rcu_head rcu; +}; + +struct spa { + struct ocxl_process_element *spa_mem; + int spa_order; + struct mutex spa_lock; + struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */ + char *irq_name; + int virq; + void __iomem *reg_dsisr; + void __iomem *reg_dar; + void __iomem *reg_tfc; + void __iomem *reg_pe_handle; + /* + * The following fields are used by the memory fault + * interrupt handler. We can only have one interrupt at a + * time.
The NPU won't raise another interrupt until the + * previous one has been ack'd by writing to the TFC register. + */ + struct xsl_fault { + struct work_struct fault_work; + u64 pe; + u64 dsisr; + u64 dar; + struct pe_data pe_data; + } xsl_fault; +}; + +/* + * An opencapi link can be used by several PCI functions. We have + * one link per device slot. + * + * A linked list of opencapi links should suffice, as there's a + * limited number of opencapi slots on a system and lookup is only + * done when the device is probed. + */ +struct ocxl_link { + struct list_head list; + struct kref ref; + int domain; + int bus; + int dev; + atomic_t irq_available; + struct spa *spa; + void *platform_data; +}; +static struct list_head links_list = LIST_HEAD_INIT(links_list); +static DEFINE_MUTEX(links_list_lock); + +enum xsl_response { + CONTINUE, + ADDRESS_ERROR, + RESTART, +}; + + +static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe) +{ + u64 reg; + + *dsisr = in_be64(spa->reg_dsisr); + *dar = in_be64(spa->reg_dar); + reg = in_be64(spa->reg_pe_handle); + *pe = reg & SPA_PE_MASK; +} + +static void ack_irq(struct spa *spa, enum xsl_response r) +{ + u64 reg = 0; + + /* continue is not supported */ + if (r == RESTART) + reg = PPC_BIT(31); + else if (r == ADDRESS_ERROR) + reg = PPC_BIT(30); + else + WARN(1, "Invalid irq response %d\n", r); + + if (reg) { + trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe, + spa->xsl_fault.dsisr, spa->xsl_fault.dar, reg); + out_be64(spa->reg_tfc, reg); + } +} + +static void xsl_fault_handler_bh(struct work_struct *fault_work) +{ + vm_fault_t flt = 0; + unsigned long access, flags, inv_flags = 0; + enum xsl_response r; + struct xsl_fault *fault = container_of(fault_work, struct xsl_fault, + fault_work); + struct spa *spa = container_of(fault, struct spa, xsl_fault); + + int rc; + + /* + * We must release a reference on mm_users whenever exiting this + * function (taken in the memory fault interrupt handler) + */ + rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr, + &flt); + if (rc) { + pr_debug("copro_handle_mm_fault failed: %d\n", rc); + if (fault->pe_data.xsl_err_cb) { + fault->pe_data.xsl_err_cb( + fault->pe_data.xsl_err_data, + fault->dar, fault->dsisr); + } + r = ADDRESS_ERROR; + goto ack; + } + + if (!radix_enabled()) { + /* + * update_mmu_cache() will not have loaded the hash + * since current->trap is not a 0x400 or 0x300, so + * just call hash_page_mm() here. + */ + access = _PAGE_PRESENT | _PAGE_READ; + if (fault->dsisr & SPA_XSL_S) + access |= _PAGE_WRITE; + + if (get_region_id(fault->dar) != USER_REGION_ID) + access |= _PAGE_PRIVILEGED; + + local_irq_save(flags); + hash_page_mm(fault->pe_data.mm, fault->dar, access, 0x300, + inv_flags); + local_irq_restore(flags); + } + r = RESTART; +ack: + mmput(fault->pe_data.mm); + ack_irq(spa, r); +} + +static irqreturn_t xsl_fault_handler(int irq, void *data) +{ + struct ocxl_link *link = (struct ocxl_link *) data; + struct spa *spa = link->spa; + u64 dsisr, dar, pe_handle; + struct pe_data *pe_data; + struct ocxl_process_element *pe; + int pid; + bool schedule = false; + + read_irq(spa, &dsisr, &dar, &pe_handle); + trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1); + + WARN_ON(pe_handle > SPA_PE_MASK); + pe = spa->spa_mem + pe_handle; + pid = be32_to_cpu(pe->pid); + /* We could be reading all null values here if the PE is being + * removed while an interrupt kicks in.
It's not supposed to + * happen if the driver notified the AFU to terminate the + * PASID, and the AFU waited for pending operations before + * acknowledging. But even if it happens, we won't find a + * memory context below and fail silently, so it should be ok. + */ + if (!(dsisr & SPA_XSL_TF)) { + WARN(1, "Invalid xsl interrupt fault register %#llx\n", dsisr); + ack_irq(spa, ADDRESS_ERROR); + return IRQ_HANDLED; + } + + rcu_read_lock(); + pe_data = radix_tree_lookup(&spa->pe_tree, pe_handle); + if (!pe_data) { + /* + * Could only happen if the driver didn't notify the + * AFU about PASID termination before removing the PE, + * or the AFU didn't wait for all memory access to + * have completed. + * + * Either way, we fail early, but we shouldn't log an + * error message, as it is a valid (if unexpected) + * scenario + */ + rcu_read_unlock(); + pr_debug("Unknown mm context for xsl interrupt\n"); + ack_irq(spa, ADDRESS_ERROR); + return IRQ_HANDLED; + } + + if (!pe_data->mm) { + /* + * translation fault from a kernel context - an OpenCAPI + * device tried to access a bad kernel address + */ + rcu_read_unlock(); + pr_warn("Unresolved OpenCAPI xsl fault in kernel context\n"); + ack_irq(spa, ADDRESS_ERROR); + return IRQ_HANDLED; + } + WARN_ON(pe_data->mm->context.id != pid); + + if (mmget_not_zero(pe_data->mm)) { + spa->xsl_fault.pe = pe_handle; + spa->xsl_fault.dar = dar; + spa->xsl_fault.dsisr = dsisr; + spa->xsl_fault.pe_data = *pe_data; + schedule = true; + /* mm_users count released by bottom half */ + } + rcu_read_unlock(); + if (schedule) + schedule_work(&spa->xsl_fault.fault_work); + else + ack_irq(spa, ADDRESS_ERROR); + return IRQ_HANDLED; +} + +static void unmap_irq_registers(struct spa *spa) +{ + pnv_ocxl_unmap_xsl_regs(spa->reg_dsisr, spa->reg_dar, spa->reg_tfc, + spa->reg_pe_handle); +} + +static int map_irq_registers(struct pci_dev *dev, struct spa *spa) +{ + return pnv_ocxl_map_xsl_regs(dev, &spa->reg_dsisr, &spa->reg_dar, + &spa->reg_tfc, &spa->reg_pe_handle); +} + +static int setup_xsl_irq(struct pci_dev *dev, struct ocxl_link *link) +{ + struct spa *spa = link->spa; + int rc; + int hwirq; + + rc = pnv_ocxl_get_xsl_irq(dev, &hwirq); + if (rc) + return rc; + + rc = map_irq_registers(dev, spa); + if (rc) + return rc; + + spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x", + link->domain, link->bus, link->dev); + if (!spa->irq_name) { + dev_err(&dev->dev, "Can't allocate name for xsl interrupt\n"); + rc = -ENOMEM; + goto err_xsl; + } + /* + * At some point, we'll need to look into allowing a higher + * number of interrupts. Could we have an IRQ domain per link? 
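+ * + * For now, the number of AFU interrupts usable on a link is capped + * by MAX_IRQ_PER_LINK, enforced through the irq_available counter + * in ocxl_link_irq_alloc().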
+ */ + spa->virq = irq_create_mapping(NULL, hwirq); + if (!spa->virq) { + dev_err(&dev->dev, + "irq_create_mapping failed for translation interrupt\n"); + rc = -EINVAL; + goto err_name; + } + + dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq); + + rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name, + link); + if (rc) { + dev_err(&dev->dev, + "request_irq failed for translation interrupt: %d\n", + rc); + rc = -EINVAL; + goto err_mapping; + } + return 0; + +err_mapping: + irq_dispose_mapping(spa->virq); +err_name: + kfree(spa->irq_name); +err_xsl: + unmap_irq_registers(spa); + return rc; +} + +static void release_xsl_irq(struct ocxl_link *link) +{ + struct spa *spa = link->spa; + + if (spa->virq) { + free_irq(spa->virq, link); + irq_dispose_mapping(spa->virq); + } + kfree(spa->irq_name); + unmap_irq_registers(spa); +} + +static int alloc_spa(struct pci_dev *dev, struct ocxl_link *link) +{ + struct spa *spa; + + spa = kzalloc(sizeof(struct spa), GFP_KERNEL); + if (!spa) + return -ENOMEM; + + mutex_init(&spa->spa_lock); + INIT_RADIX_TREE(&spa->pe_tree, GFP_KERNEL); + INIT_WORK(&spa->xsl_fault.fault_work, xsl_fault_handler_bh); + + spa->spa_order = SPA_SPA_SIZE_LOG - PAGE_SHIFT; + spa->spa_mem = (struct ocxl_process_element *) + __get_free_pages(GFP_KERNEL | __GFP_ZERO, spa->spa_order); + if (!spa->spa_mem) { + dev_err(&dev->dev, "Can't allocate Shared Process Area\n"); + kfree(spa); + return -ENOMEM; + } + pr_debug("Allocated SPA for %x:%x:%x at %p\n", link->domain, link->bus, + link->dev, spa->spa_mem); + + link->spa = spa; + return 0; +} + +static void free_spa(struct ocxl_link *link) +{ + struct spa *spa = link->spa; + + pr_debug("Freeing SPA for %x:%x:%x\n", link->domain, link->bus, + link->dev); + + if (spa && spa->spa_mem) { + free_pages((unsigned long) spa->spa_mem, spa->spa_order); + kfree(spa); + link->spa = NULL; + } +} + +static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_link) +{ + struct ocxl_link *link; + int rc; + + link = kzalloc(sizeof(struct ocxl_link), GFP_KERNEL); + if (!link) + return -ENOMEM; + + kref_init(&link->ref); + link->domain = pci_domain_nr(dev->bus); + link->bus = dev->bus->number; + link->dev = PCI_SLOT(dev->devfn); + atomic_set(&link->irq_available, MAX_IRQ_PER_LINK); + + rc = alloc_spa(dev, link); + if (rc) + goto err_free; + + rc = setup_xsl_irq(dev, link); + if (rc) + goto err_spa; + + /* platform specific hook */ + rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask, + &link->platform_data); + if (rc) + goto err_xsl_irq; + + *out_link = link; + return 0; + +err_xsl_irq: + release_xsl_irq(link); +err_spa: + free_spa(link); +err_free: + kfree(link); + return rc; +} + +static void free_link(struct ocxl_link *link) +{ + release_xsl_irq(link); + free_spa(link); + kfree(link); +} + +int ocxl_link_setup(struct pci_dev *dev, int PE_mask, void **link_handle) +{ + int rc = 0; + struct ocxl_link *link; + + mutex_lock(&links_list_lock); + list_for_each_entry(link, &links_list, list) { + /* The functions of a device all share the same link */ + if (link->domain == pci_domain_nr(dev->bus) && + link->bus == dev->bus->number && + link->dev == PCI_SLOT(dev->devfn)) { + kref_get(&link->ref); + *link_handle = link; + goto unlock; + } + } + rc = alloc_link(dev, PE_mask, &link); + if (rc) + goto unlock; + + list_add(&link->list, &links_list); + *link_handle = link; +unlock: + mutex_unlock(&links_list_lock); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_link_setup); + +static void release_xsl(struct kref *ref) +{ + 
struct ocxl_link *link = container_of(ref, struct ocxl_link, ref); + + list_del(&link->list); + /* call platform code before releasing data */ + pnv_ocxl_spa_release(link->platform_data); + free_link(link); +} + +void ocxl_link_release(struct pci_dev *dev, void *link_handle) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + + mutex_lock(&links_list_lock); + kref_put(&link->ref, release_xsl); + mutex_unlock(&links_list_lock); +} +EXPORT_SYMBOL_GPL(ocxl_link_release); + +static u64 calculate_cfg_state(bool kernel) +{ + u64 state; + + state = SPA_CFG_DR; + if (mfspr(SPRN_LPCR) & LPCR_TC) + state |= SPA_CFG_TC; + if (radix_enabled()) + state |= SPA_CFG_XLAT_ror; + else + state |= SPA_CFG_XLAT_hpt; + state |= SPA_CFG_HV; + if (kernel) { + if (mfmsr() & MSR_SF) + state |= SPA_CFG_SF; + } else { + state |= SPA_CFG_PR; + if (!test_tsk_thread_flag(current, TIF_32BIT)) + state |= SPA_CFG_SF; + } + return state; +} + +int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr, + u64 amr, struct mm_struct *mm, + void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr), + void *xsl_err_data) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct spa *spa = link->spa; + struct ocxl_process_element *pe; + int pe_handle, rc = 0; + struct pe_data *pe_data; + + BUILD_BUG_ON(sizeof(struct ocxl_process_element) != 128); + if (pasid > SPA_PASID_MAX) + return -EINVAL; + + mutex_lock(&spa->spa_lock); + pe_handle = pasid & SPA_PE_MASK; + pe = spa->spa_mem + pe_handle; + + if (pe->software_state) { + rc = -EBUSY; + goto unlock; + } + + pe_data = kmalloc(sizeof(*pe_data), GFP_KERNEL); + if (!pe_data) { + rc = -ENOMEM; + goto unlock; + } + + pe_data->mm = mm; + pe_data->xsl_err_cb = xsl_err_cb; + pe_data->xsl_err_data = xsl_err_data; + + memset(pe, 0, sizeof(struct ocxl_process_element)); + pe->config_state = cpu_to_be64(calculate_cfg_state(pidr == 0)); + pe->lpid = cpu_to_be32(mfspr(SPRN_LPID)); + pe->pid = cpu_to_be32(pidr); + pe->tid = cpu_to_be32(tidr); + pe->amr = cpu_to_be64(amr); + pe->software_state = cpu_to_be32(SPA_PE_VALID); + + /* + * For user contexts, register a copro so that TLBIs are seen + * by the nest MMU. If we have a kernel context, TLBIs are + * already global. + */ + if (mm) + mm_context_add_copro(mm); + /* + * Barrier is to make sure PE is visible in the SPA before it + * is used by the device. It also helps with the global TLBI + * invalidation + */ + mb(); + radix_tree_insert(&spa->pe_tree, pe_handle, pe_data); + + /* + * The mm must stay valid for as long as the device uses it. We + * lower the count when the context is removed from the SPA. + * + * We grab mm_count (and not mm_users), as we don't want to + * end up in a circular dependency if a process mmaps its + * mmio, therefore incrementing the file ref count when + * calling mmap(), and forgets to unmap before exiting. In + * that scenario, when the kernel handles the death of the + * process, the file is not cleaned because unmap was not + * called, and the mm wouldn't be freed because we would still + * have a reference on mm_users. Incrementing mm_count solves + * the problem. 
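+ * + * (mm_count, taken with mmgrab() and dropped with mmdrop(), pins + * the mm_struct itself; mm_users, managed with mmget()/mmput(), + * counts live users of the address space.)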
+	 */
+	if (mm)
+		mmgrab(mm);
+	trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr);
+unlock:
+	mutex_unlock(&spa->spa_lock);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ocxl_link_add_pe);
+
+int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
+{
+	struct ocxl_link *link = (struct ocxl_link *) link_handle;
+	struct spa *spa = link->spa;
+	struct ocxl_process_element *pe;
+	int pe_handle, rc;
+
+	if (pasid > SPA_PASID_MAX)
+		return -EINVAL;
+
+	pe_handle = pasid & SPA_PE_MASK;
+	pe = spa->spa_mem + pe_handle;
+
+	mutex_lock(&spa->spa_lock);
+
+	pe->tid = cpu_to_be32(tid);
+
+	/*
+	 * The barrier makes sure the PE is updated
+	 * before we clear the NPU context cache below, so that the
+	 * old PE cannot be reloaded erroneously.
+	 */
+	mb();
+
+	/*
+	 * hook to platform code
+	 * On powerpc, the entry needs to be cleared from the context
+	 * cache of the NPU.
+	 */
+	rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
+	WARN_ON(rc);
+
+	mutex_unlock(&spa->spa_lock);
+	return rc;
+}
+
+int ocxl_link_remove_pe(void *link_handle, int pasid)
+{
+	struct ocxl_link *link = (struct ocxl_link *) link_handle;
+	struct spa *spa = link->spa;
+	struct ocxl_process_element *pe;
+	struct pe_data *pe_data;
+	int pe_handle, rc;
+
+	if (pasid > SPA_PASID_MAX)
+		return -EINVAL;
+
+	/*
+	 * About synchronization with our memory fault handler:
+	 *
+	 * Before removing the PE, the driver is supposed to have
+	 * notified the AFU, which should have cleaned up and made
+	 * sure the PASID is no longer in use, including pending
+	 * interrupts. However, there's no way to be sure...
+	 *
+	 * We clear the PE and remove the context from our radix
+	 * tree. From that point on, any new interrupt for that
+	 * context will fail silently, which is OK. As mentioned
+	 * above, that's not expected, but it could happen if the
+	 * driver or AFU didn't do the right thing.
+	 *
+	 * There could still be a bottom half running, but we don't
+	 * need to wait/flush, as it is managing a reference count on
+	 * the mm it reads from the radix tree.
+	 */
+	pe_handle = pasid & SPA_PE_MASK;
+	pe = spa->spa_mem + pe_handle;
+
+	mutex_lock(&spa->spa_lock);
+
+	if (!(be32_to_cpu(pe->software_state) & SPA_PE_VALID)) {
+		rc = -EINVAL;
+		goto unlock;
+	}
+
+	trace_ocxl_context_remove(current->pid, spa->spa_mem, pasid,
+				be32_to_cpu(pe->pid), be32_to_cpu(pe->tid));
+
+	memset(pe, 0, sizeof(struct ocxl_process_element));
+	/*
+	 * The barrier makes sure the PE is removed from the SPA
+	 * before we clear the NPU context cache below, so that the
+	 * old PE cannot be reloaded erroneously.
+	 */
+	mb();
+
+	/*
+	 * hook to platform code
+	 * On powerpc, the entry needs to be cleared from the context
+	 * cache of the NPU.
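+	 * A stale cached entry could otherwise let the device keep
+	 * using the PE we are about to tear down.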
+ */ + rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle); + WARN_ON(rc); + + pe_data = radix_tree_delete(&spa->pe_tree, pe_handle); + if (!pe_data) { + WARN(1, "Couldn't find pe data when removing PE\n"); + } else { + if (pe_data->mm) { + mm_context_remove_copro(pe_data->mm); + mmdrop(pe_data->mm); + } + kfree_rcu(pe_data, rcu); + } +unlock: + mutex_unlock(&spa->spa_lock); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_link_remove_pe); + +int ocxl_link_irq_alloc(void *link_handle, int *hw_irq) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + int irq; + + if (atomic_dec_if_positive(&link->irq_available) < 0) + return -ENOSPC; + + irq = xive_native_alloc_irq(); + if (!irq) { + atomic_inc(&link->irq_available); + return -ENXIO; + } + + *hw_irq = irq; + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc); + +void ocxl_link_free_irq(void *link_handle, int hw_irq) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + + xive_native_free_irq(hw_irq); + atomic_inc(&link->irq_available); +} +EXPORT_SYMBOL_GPL(ocxl_link_free_irq); diff --git a/drivers/misc/ocxl/main.c b/drivers/misc/ocxl/main.c new file mode 100644 index 000000000..ef73cf35d --- /dev/null +++ b/drivers/misc/ocxl/main.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. +#include <linux/module.h> +#include <linux/pci.h> +#include <asm/mmu.h> +#include "ocxl_internal.h" + +static int __init init_ocxl(void) +{ + int rc = 0; + + if (!tlbie_capable) + return -EINVAL; + + rc = ocxl_file_init(); + if (rc) + return rc; + + rc = pci_register_driver(&ocxl_pci_driver); + if (rc) { + ocxl_file_exit(); + return rc; + } + return 0; +} + +static void exit_ocxl(void) +{ + pci_unregister_driver(&ocxl_pci_driver); + ocxl_file_exit(); +} + +module_init(init_ocxl); +module_exit(exit_ocxl); + +MODULE_DESCRIPTION("Open Coherent Accelerator"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/ocxl/mmio.c b/drivers/misc/ocxl/mmio.c new file mode 100644 index 000000000..aae713db4 --- /dev/null +++ b/drivers/misc/ocxl/mmio.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2019 IBM Corp. 
+#include <linux/sched/mm.h> +#include "trace.h" +#include "ocxl_internal.h" + +int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset, + enum ocxl_endian endian, u32 *val) +{ + if (offset > afu->config.global_mmio_size - 4) + return -EINVAL; + +#ifdef __BIG_ENDIAN__ + if (endian == OCXL_HOST_ENDIAN) + endian = OCXL_BIG_ENDIAN; +#endif + + switch (endian) { + case OCXL_BIG_ENDIAN: + *val = readl_be((char *)afu->global_mmio_ptr + offset); + break; + + default: + *val = readl((char *)afu->global_mmio_ptr + offset); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_global_mmio_read32); + +int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset, + enum ocxl_endian endian, u64 *val) +{ + if (offset > afu->config.global_mmio_size - 8) + return -EINVAL; + +#ifdef __BIG_ENDIAN__ + if (endian == OCXL_HOST_ENDIAN) + endian = OCXL_BIG_ENDIAN; +#endif + + switch (endian) { + case OCXL_BIG_ENDIAN: + *val = readq_be((char *)afu->global_mmio_ptr + offset); + break; + + default: + *val = readq((char *)afu->global_mmio_ptr + offset); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_global_mmio_read64); + +int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset, + enum ocxl_endian endian, u32 val) +{ + if (offset > afu->config.global_mmio_size - 4) + return -EINVAL; + +#ifdef __BIG_ENDIAN__ + if (endian == OCXL_HOST_ENDIAN) + endian = OCXL_BIG_ENDIAN; +#endif + + switch (endian) { + case OCXL_BIG_ENDIAN: + writel_be(val, (char *)afu->global_mmio_ptr + offset); + break; + + default: + writel(val, (char *)afu->global_mmio_ptr + offset); + break; + } + + + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_global_mmio_write32); + +int ocxl_global_mmio_write64(struct ocxl_afu *afu, size_t offset, + enum ocxl_endian endian, u64 val) +{ + if (offset > afu->config.global_mmio_size - 8) + return -EINVAL; + +#ifdef __BIG_ENDIAN__ + if (endian == OCXL_HOST_ENDIAN) + endian = OCXL_BIG_ENDIAN; +#endif + + switch (endian) { + case OCXL_BIG_ENDIAN: + writeq_be(val, (char *)afu->global_mmio_ptr + offset); + break; + + default: + writeq(val, (char *)afu->global_mmio_ptr + offset); + break; + } + + + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_global_mmio_write64); + +int ocxl_global_mmio_set32(struct ocxl_afu *afu, size_t offset, + enum ocxl_endian endian, u32 mask) +{ + u32 tmp; + + if (offset > afu->config.global_mmio_size - 4) + return -EINVAL; + +#ifdef __BIG_ENDIAN__ + if (endian == OCXL_HOST_ENDIAN) + endian = OCXL_BIG_ENDIAN; +#endif + + switch (endian) { + case OCXL_BIG_ENDIAN: + tmp = readl_be((char *)afu->global_mmio_ptr + offset); + tmp |= mask; + writel_be(tmp, (char *)afu->global_mmio_ptr + offset); + break; + + default: + tmp = readl((char *)afu->global_mmio_ptr + offset); + tmp |= mask; + writel(tmp, (char *)afu->global_mmio_ptr + offset); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_global_mmio_set32); + +int ocxl_global_mmio_set64(struct ocxl_afu *afu, size_t offset, + enum ocxl_endian endian, u64 mask) +{ + u64 tmp; + + if (offset > afu->config.global_mmio_size - 8) + return -EINVAL; + +#ifdef __BIG_ENDIAN__ + if (endian == OCXL_HOST_ENDIAN) + endian = OCXL_BIG_ENDIAN; +#endif + + switch (endian) { + case OCXL_BIG_ENDIAN: + tmp = readq_be((char *)afu->global_mmio_ptr + offset); + tmp |= mask; + writeq_be(tmp, (char *)afu->global_mmio_ptr + offset); + break; + + default: + tmp = readq((char *)afu->global_mmio_ptr + offset); + tmp |= mask; + writeq(tmp, (char *)afu->global_mmio_ptr + offset); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_global_mmio_set64); + +int 
ocxl_global_mmio_clear32(struct ocxl_afu *afu, size_t offset,
+			 enum ocxl_endian endian, u32 mask)
+{
+	u32 tmp;
+
+	if (offset > afu->config.global_mmio_size - 4)
+		return -EINVAL;
+
+#ifdef __BIG_ENDIAN__
+	if (endian == OCXL_HOST_ENDIAN)
+		endian = OCXL_BIG_ENDIAN;
+#endif
+
+	switch (endian) {
+	case OCXL_BIG_ENDIAN:
+		tmp = readl_be((char *)afu->global_mmio_ptr + offset);
+		tmp &= ~mask;
+		writel_be(tmp, (char *)afu->global_mmio_ptr + offset);
+		break;
+
+	default:
+		tmp = readl((char *)afu->global_mmio_ptr + offset);
+		tmp &= ~mask;
+		writel(tmp, (char *)afu->global_mmio_ptr + offset);
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ocxl_global_mmio_clear32);
+
+int ocxl_global_mmio_clear64(struct ocxl_afu *afu, size_t offset,
+			     enum ocxl_endian endian, u64 mask)
+{
+	u64 tmp;
+
+	if (offset > afu->config.global_mmio_size - 8)
+		return -EINVAL;
+
+#ifdef __BIG_ENDIAN__
+	if (endian == OCXL_HOST_ENDIAN)
+		endian = OCXL_BIG_ENDIAN;
+#endif
+
+	switch (endian) {
+	case OCXL_BIG_ENDIAN:
+		tmp = readq_be((char *)afu->global_mmio_ptr + offset);
+		tmp &= ~mask;
+		writeq_be(tmp, (char *)afu->global_mmio_ptr + offset);
+		break;
+
+	default:
+		tmp = readq((char *)afu->global_mmio_ptr + offset);
+		tmp &= ~mask;
+		writeq(tmp, (char *)afu->global_mmio_ptr + offset);
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ocxl_global_mmio_clear64);
diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h
new file mode 100644
index 000000000..0bad0a123
--- /dev/null
+++ b/drivers/misc/ocxl/ocxl_internal.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright 2017 IBM Corp.
+#ifndef _OCXL_INTERNAL_H_
+#define _OCXL_INTERNAL_H_
+
+#include <linux/pci.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <misc/ocxl.h>
+
+#define MAX_IRQ_PER_LINK	2000
+#define MAX_IRQ_PER_CONTEXT	MAX_IRQ_PER_LINK
+
+extern struct pci_driver ocxl_pci_driver;
+
+struct ocxl_fn {
+	struct device dev;
+	int bar_used[3];
+	struct ocxl_fn_config config;
+	struct list_head afu_list;
+	int pasid_base;
+	int actag_base;
+	int actag_enabled;
+	int actag_supported;
+	struct list_head pasid_list;
+	struct list_head actag_list;
+	void *link;
+};
+
+struct ocxl_file_info {
+	struct ocxl_afu *afu;
+	struct device dev;
+	struct cdev cdev;
+	struct bin_attribute attr_global_mmio;
+};
+
+struct ocxl_afu {
+	struct kref kref;
+	struct ocxl_fn *fn;
+	struct list_head list;
+	struct ocxl_afu_config config;
+	int pasid_base;
+	int pasid_count; /* opened contexts */
+	int pasid_max; /* maximum number of contexts */
+	int actag_base;
+	int actag_enabled;
+	struct mutex contexts_lock;
+	struct idr contexts_idr;
+	struct mutex afu_control_lock;
+	u64 global_mmio_start;
+	u64 irq_base_offset;
+	void __iomem *global_mmio_ptr;
+	u64 pp_mmio_start;
+	void *private;
+};
+
+enum ocxl_context_status {
+	CLOSED,
+	OPENED,
+	ATTACHED,
+};
+
+// Contains metadata about a translation fault
+struct ocxl_xsl_error {
+	u64 addr; // The address that triggered the fault
+	u64 dsisr; // the value of the dsisr register
+	u64 count; // The number of times this fault has been triggered
+};
+
+struct ocxl_context {
+	struct ocxl_afu *afu;
+	int pasid;
+	struct mutex status_mutex;
+	enum ocxl_context_status status;
+	struct address_space *mapping;
+	struct mutex mapping_lock;
+	wait_queue_head_t events_wq;
+	struct mutex xsl_error_lock;
+	struct ocxl_xsl_error xsl_error;
+	struct mutex irq_lock;
+	struct idr irq_idr;
+	u16 tidr; // Thread ID used for P9 wait
implementation +}; + +struct ocxl_process_element { + __be64 config_state; + __be32 reserved1[11]; + __be32 lpid; + __be32 tid; + __be32 pid; + __be32 reserved2[10]; + __be64 amr; + __be32 reserved3[3]; + __be32 software_state; +}; + +int ocxl_create_cdev(struct ocxl_afu *afu); +void ocxl_destroy_cdev(struct ocxl_afu *afu); +int ocxl_file_register_afu(struct ocxl_afu *afu); +void ocxl_file_unregister_afu(struct ocxl_afu *afu); + +int ocxl_file_init(void); +void ocxl_file_exit(void); + +int ocxl_pasid_afu_alloc(struct ocxl_fn *fn, u32 size); +void ocxl_pasid_afu_free(struct ocxl_fn *fn, u32 start, u32 size); +int ocxl_actag_afu_alloc(struct ocxl_fn *fn, u32 size); +void ocxl_actag_afu_free(struct ocxl_fn *fn, u32 start, u32 size); + +/* + * Get the max PASID value that can be used by the function + */ +int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count); + +/* + * Control whether the FPGA is reloaded on a link reset + */ +int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val); +int ocxl_config_set_reset_reload(struct pci_dev *dev, int val); + +/* + * Check if an AFU index is valid for the given function. + * + * AFU indexes can be sparse, so a driver should check all indexes up + * to the maximum found in the function description + */ +int ocxl_config_check_afu_index(struct pci_dev *dev, + struct ocxl_fn_config *fn, int afu_idx); + +/** + * ocxl_link_update_pe() - Update values within a Process Element + * @link_handle: the link handle associated with the process element + * @pasid: the PASID for the AFU context + * @tid: the new thread id for the process element + * + * Returns 0 on success + */ +int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid); + +int ocxl_context_mmap(struct ocxl_context *ctx, + struct vm_area_struct *vma); +void ocxl_context_detach_all(struct ocxl_afu *afu); + +int ocxl_sysfs_register_afu(struct ocxl_file_info *info); +void ocxl_sysfs_unregister_afu(struct ocxl_file_info *info); + +int ocxl_irq_offset_to_id(struct ocxl_context *ctx, u64 offset); +u64 ocxl_irq_id_to_offset(struct ocxl_context *ctx, int irq_id); +void ocxl_afu_irq_free_all(struct ocxl_context *ctx); + +#endif /* _OCXL_INTERNAL_H_ */ diff --git a/drivers/misc/ocxl/pasid.c b/drivers/misc/ocxl/pasid.c new file mode 100644 index 000000000..d14cb56e6 --- /dev/null +++ b/drivers/misc/ocxl/pasid.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. 
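+/*
+ * Simple first-fit allocator for PASID and actag ranges, kept as sorted
+ * lists of [start, end] intervals. For example (illustrative values):
+ * with ranges 0-3 and 8-11 already allocated, a request for 4 IDs fits
+ * in the gap and returns start 4, while a request for 5 IDs goes after
+ * 11 and returns start 12.
+ */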
+#include "ocxl_internal.h" + + +struct id_range { + struct list_head list; + u32 start; + u32 end; +}; + +#ifdef DEBUG +static void dump_list(struct list_head *head, char *type_str) +{ + struct id_range *cur; + + pr_debug("%s ranges allocated:\n", type_str); + list_for_each_entry(cur, head, list) { + pr_debug("Range %d->%d\n", cur->start, cur->end); + } +} +#endif + +static int range_alloc(struct list_head *head, u32 size, int max_id, + char *type_str) +{ + struct list_head *pos; + struct id_range *cur, *new; + int rc, last_end; + + new = kmalloc(sizeof(struct id_range), GFP_KERNEL); + if (!new) + return -ENOMEM; + + pos = head; + last_end = -1; + list_for_each_entry(cur, head, list) { + if ((cur->start - last_end) > size) + break; + last_end = cur->end; + pos = &cur->list; + } + + new->start = last_end + 1; + new->end = new->start + size - 1; + + if (new->end > max_id) { + kfree(new); + rc = -ENOSPC; + } else { + list_add(&new->list, pos); + rc = new->start; + } + +#ifdef DEBUG + dump_list(head, type_str); +#endif + return rc; +} + +static void range_free(struct list_head *head, u32 start, u32 size, + char *type_str) +{ + bool found = false; + struct id_range *cur, *tmp; + + list_for_each_entry_safe(cur, tmp, head, list) { + if (cur->start == start && cur->end == (start + size - 1)) { + found = true; + list_del(&cur->list); + kfree(cur); + break; + } + } + WARN_ON(!found); +#ifdef DEBUG + dump_list(head, type_str); +#endif +} + +int ocxl_pasid_afu_alloc(struct ocxl_fn *fn, u32 size) +{ + int max_pasid; + + if (fn->config.max_pasid_log < 0) + return -ENOSPC; + max_pasid = 1 << fn->config.max_pasid_log; + return range_alloc(&fn->pasid_list, size, max_pasid, "afu pasid"); +} + +void ocxl_pasid_afu_free(struct ocxl_fn *fn, u32 start, u32 size) +{ + return range_free(&fn->pasid_list, start, size, "afu pasid"); +} + +int ocxl_actag_afu_alloc(struct ocxl_fn *fn, u32 size) +{ + int max_actag; + + max_actag = fn->actag_enabled; + return range_alloc(&fn->actag_list, size, max_actag, "afu actag"); +} + +void ocxl_actag_afu_free(struct ocxl_fn *fn, u32 start, u32 size) +{ + return range_free(&fn->actag_list, start, size, "afu actag"); +} diff --git a/drivers/misc/ocxl/pci.c b/drivers/misc/ocxl/pci.c new file mode 100644 index 000000000..cb920aa88 --- /dev/null +++ b/drivers/misc/ocxl/pci.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2019 IBM Corp. +#include <linux/module.h> +#include "ocxl_internal.h" + +/* + * Any opencapi device which wants to use this 'generic' driver should + * use the 0x062B device ID. Vendors should define the subsystem + * vendor/device ID to help differentiate devices. 
+ */ +static const struct pci_device_id ocxl_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x062B), }, + { } +}; +MODULE_DEVICE_TABLE(pci, ocxl_pci_tbl); + +static int ocxl_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + int rc; + struct ocxl_afu *afu, *tmp; + struct ocxl_fn *fn; + struct list_head *afu_list; + + fn = ocxl_function_open(dev); + if (IS_ERR(fn)) + return PTR_ERR(fn); + + pci_set_drvdata(dev, fn); + + afu_list = ocxl_function_afu_list(fn); + + list_for_each_entry_safe(afu, tmp, afu_list, list) { + // Cleanup handled within ocxl_file_register_afu() + rc = ocxl_file_register_afu(afu); + if (rc) { + dev_err(&dev->dev, "Failed to register AFU '%s' index %d", + afu->config.name, afu->config.idx); + } + } + + return 0; +} + +static void ocxl_remove(struct pci_dev *dev) +{ + struct ocxl_fn *fn; + struct ocxl_afu *afu; + struct list_head *afu_list; + + fn = pci_get_drvdata(dev); + afu_list = ocxl_function_afu_list(fn); + + list_for_each_entry(afu, afu_list, list) { + ocxl_file_unregister_afu(afu); + } + + ocxl_function_close(fn); +} + +struct pci_driver ocxl_pci_driver = { + .name = "ocxl", + .id_table = ocxl_pci_tbl, + .probe = ocxl_probe, + .remove = ocxl_remove, + .shutdown = ocxl_remove, +}; diff --git a/drivers/misc/ocxl/sysfs.c b/drivers/misc/ocxl/sysfs.c new file mode 100644 index 000000000..25c78df80 --- /dev/null +++ b/drivers/misc/ocxl/sysfs.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. +#include <linux/sysfs.h> +#include "ocxl_internal.h" + +static inline struct ocxl_afu *to_afu(struct device *device) +{ + struct ocxl_file_info *info = container_of(device, struct ocxl_file_info, dev); + + return info->afu; +} + +static ssize_t global_mmio_size_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct ocxl_afu *afu = to_afu(device); + + return scnprintf(buf, PAGE_SIZE, "%d\n", + afu->config.global_mmio_size); +} + +static ssize_t pp_mmio_size_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct ocxl_afu *afu = to_afu(device); + + return scnprintf(buf, PAGE_SIZE, "%d\n", + afu->config.pp_mmio_stride); +} + +static ssize_t afu_version_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct ocxl_afu *afu = to_afu(device); + + return scnprintf(buf, PAGE_SIZE, "%hhu:%hhu\n", + afu->config.version_major, + afu->config.version_minor); +} + +static ssize_t contexts_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct ocxl_afu *afu = to_afu(device); + + return scnprintf(buf, PAGE_SIZE, "%d/%d\n", + afu->pasid_count, afu->pasid_max); +} + +static ssize_t reload_on_reset_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct ocxl_afu *afu = to_afu(device); + struct ocxl_fn *fn = afu->fn; + struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent); + int val; + + if (ocxl_config_get_reset_reload(pci_dev, &val)) + return scnprintf(buf, PAGE_SIZE, "unavailable\n"); + + return scnprintf(buf, PAGE_SIZE, "%d\n", val); +} + +static ssize_t reload_on_reset_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ocxl_afu *afu = to_afu(device); + struct ocxl_fn *fn = afu->fn; + struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent); + int rc, val; + + rc = kstrtoint(buf, 0, &val); + if (rc || (val != 0 && val != 1)) + return -EINVAL; + + if (ocxl_config_set_reset_reload(pci_dev, val)) + return -ENODEV; + + return count; +} + +static struct 
device_attribute afu_attrs[] = { + __ATTR_RO(global_mmio_size), + __ATTR_RO(pp_mmio_size), + __ATTR_RO(afu_version), + __ATTR_RO(contexts), + __ATTR_RW(reload_on_reset), +}; + +static ssize_t global_mmio_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct ocxl_afu *afu = to_afu(kobj_to_dev(kobj)); + + if (count == 0 || off < 0 || + off >= afu->config.global_mmio_size) + return 0; + memcpy_fromio(buf, afu->global_mmio_ptr + off, count); + return count; +} + +static vm_fault_t global_mmio_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct ocxl_afu *afu = vma->vm_private_data; + unsigned long offset; + + if (vmf->pgoff >= (afu->config.global_mmio_size >> PAGE_SHIFT)) + return VM_FAULT_SIGBUS; + + offset = vmf->pgoff; + offset += (afu->global_mmio_start >> PAGE_SHIFT); + return vmf_insert_pfn(vma, vmf->address, offset); +} + +static const struct vm_operations_struct global_mmio_vmops = { + .fault = global_mmio_fault, +}; + +static int global_mmio_mmap(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + struct vm_area_struct *vma) +{ + struct ocxl_afu *afu = to_afu(kobj_to_dev(kobj)); + + if ((vma_pages(vma) + vma->vm_pgoff) > + (afu->config.global_mmio_size >> PAGE_SHIFT)) + return -EINVAL; + + vma->vm_flags |= VM_IO | VM_PFNMAP; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_ops = &global_mmio_vmops; + vma->vm_private_data = afu; + return 0; +} + +int ocxl_sysfs_register_afu(struct ocxl_file_info *info) +{ + int i, rc; + + for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) { + rc = device_create_file(&info->dev, &afu_attrs[i]); + if (rc) + goto err; + } + + sysfs_attr_init(&info->attr_global_mmio.attr); + info->attr_global_mmio.attr.name = "global_mmio_area"; + info->attr_global_mmio.attr.mode = 0600; + info->attr_global_mmio.size = info->afu->config.global_mmio_size; + info->attr_global_mmio.read = global_mmio_read; + info->attr_global_mmio.mmap = global_mmio_mmap; + rc = device_create_bin_file(&info->dev, &info->attr_global_mmio); + if (rc) { + dev_err(&info->dev, "Unable to create global mmio attr for afu: %d\n", rc); + goto err; + } + + return 0; + +err: + for (i--; i >= 0; i--) + device_remove_file(&info->dev, &afu_attrs[i]); + + return rc; +} + +void ocxl_sysfs_unregister_afu(struct ocxl_file_info *info) +{ + int i; + + /* + * device_remove_bin_file is safe to call if the file is not added as + * the files are removed by name, and early exit if not found + */ + for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) + device_remove_file(&info->dev, &afu_attrs[i]); + device_remove_bin_file(&info->dev, &info->attr_global_mmio); +} diff --git a/drivers/misc/ocxl/trace.c b/drivers/misc/ocxl/trace.c new file mode 100644 index 000000000..1e6947049 --- /dev/null +++ b/drivers/misc/ocxl/trace.c @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "trace.h" +#endif diff --git a/drivers/misc/ocxl/trace.h b/drivers/misc/ocxl/trace.h new file mode 100644 index 000000000..17e21cb2a --- /dev/null +++ b/drivers/misc/ocxl/trace.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright 2017 IBM Corp. 
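+/*
+ * Tracepoint definitions for the ocxl driver. Each event defined here
+ * becomes callable as trace_<event>(), e.g. the context add/remove
+ * events are emitted from link.c as:
+ *
+ *	trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr);
+ *
+ * The tracepoints are instantiated exactly once, via CREATE_TRACE_POINTS
+ * in trace.c.
+ */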
+#undef TRACE_SYSTEM +#define TRACE_SYSTEM ocxl + +#if !defined(_TRACE_OCXL_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_OCXL_H + +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(ocxl_context, + TP_PROTO(pid_t pid, void *spa, int pasid, u32 pidr, u32 tidr), + TP_ARGS(pid, spa, pasid, pidr, tidr), + + TP_STRUCT__entry( + __field(pid_t, pid) + __field(void*, spa) + __field(int, pasid) + __field(u32, pidr) + __field(u32, tidr) + ), + + TP_fast_assign( + __entry->pid = pid; + __entry->spa = spa; + __entry->pasid = pasid; + __entry->pidr = pidr; + __entry->tidr = tidr; + ), + + TP_printk("linux pid=%d spa=0x%p pasid=0x%x pidr=0x%x tidr=0x%x", + __entry->pid, + __entry->spa, + __entry->pasid, + __entry->pidr, + __entry->tidr + ) +); + +DEFINE_EVENT(ocxl_context, ocxl_context_add, + TP_PROTO(pid_t pid, void *spa, int pasid, u32 pidr, u32 tidr), + TP_ARGS(pid, spa, pasid, pidr, tidr) +); + +DEFINE_EVENT(ocxl_context, ocxl_context_remove, + TP_PROTO(pid_t pid, void *spa, int pasid, u32 pidr, u32 tidr), + TP_ARGS(pid, spa, pasid, pidr, tidr) +); + +TRACE_EVENT(ocxl_terminate_pasid, + TP_PROTO(int pasid, int rc), + TP_ARGS(pasid, rc), + + TP_STRUCT__entry( + __field(int, pasid) + __field(int, rc) + ), + + TP_fast_assign( + __entry->pasid = pasid; + __entry->rc = rc; + ), + + TP_printk("pasid=0x%x rc=%d", + __entry->pasid, + __entry->rc + ) +); + +DECLARE_EVENT_CLASS(ocxl_fault_handler, + TP_PROTO(void *spa, u64 pe, u64 dsisr, u64 dar, u64 tfc), + TP_ARGS(spa, pe, dsisr, dar, tfc), + + TP_STRUCT__entry( + __field(void *, spa) + __field(u64, pe) + __field(u64, dsisr) + __field(u64, dar) + __field(u64, tfc) + ), + + TP_fast_assign( + __entry->spa = spa; + __entry->pe = pe; + __entry->dsisr = dsisr; + __entry->dar = dar; + __entry->tfc = tfc; + ), + + TP_printk("spa=%p pe=0x%llx dsisr=0x%llx dar=0x%llx tfc=0x%llx", + __entry->spa, + __entry->pe, + __entry->dsisr, + __entry->dar, + __entry->tfc + ) +); + +DEFINE_EVENT(ocxl_fault_handler, ocxl_fault, + TP_PROTO(void *spa, u64 pe, u64 dsisr, u64 dar, u64 tfc), + TP_ARGS(spa, pe, dsisr, dar, tfc) +); + +DEFINE_EVENT(ocxl_fault_handler, ocxl_fault_ack, + TP_PROTO(void *spa, u64 pe, u64 dsisr, u64 dar, u64 tfc), + TP_ARGS(spa, pe, dsisr, dar, tfc) +); + +TRACE_EVENT(ocxl_afu_irq_alloc, + TP_PROTO(int pasid, int irq_id, unsigned int virq, int hw_irq), + TP_ARGS(pasid, irq_id, virq, hw_irq), + + TP_STRUCT__entry( + __field(int, pasid) + __field(int, irq_id) + __field(unsigned int, virq) + __field(int, hw_irq) + ), + + TP_fast_assign( + __entry->pasid = pasid; + __entry->irq_id = irq_id; + __entry->virq = virq; + __entry->hw_irq = hw_irq; + ), + + TP_printk("pasid=0x%x irq_id=%d virq=%u hw_irq=%d", + __entry->pasid, + __entry->irq_id, + __entry->virq, + __entry->hw_irq + ) +); + +TRACE_EVENT(ocxl_afu_irq_free, + TP_PROTO(int pasid, int irq_id), + TP_ARGS(pasid, irq_id), + + TP_STRUCT__entry( + __field(int, pasid) + __field(int, irq_id) + ), + + TP_fast_assign( + __entry->pasid = pasid; + __entry->irq_id = irq_id; + ), + + TP_printk("pasid=0x%x irq_id=%d", + __entry->pasid, + __entry->irq_id + ) +); + +TRACE_EVENT(ocxl_afu_irq_receive, + TP_PROTO(int virq), + TP_ARGS(virq), + + TP_STRUCT__entry( + __field(int, virq) + ), + + TP_fast_assign( + __entry->virq = virq; + ), + + TP_printk("virq=%d", + __entry->virq + ) +); + +#endif /* _TRACE_OCXL_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
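+/* this header lives next to trace.c, hence the non-standard include path */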
+#define TRACE_INCLUDE_FILE trace +#include <trace/define_trace.h> diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c new file mode 100644 index 000000000..8d2b71357 --- /dev/null +++ b/drivers/misc/pch_phub.c @@ -0,0 +1,878 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/uaccess.h> +#include <linux/string.h> +#include <linux/pci.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/if_ether.h> +#include <linux/ctype.h> +#include <linux/dmi.h> +#include <linux/of.h> + +#define PHUB_STATUS 0x00 /* Status Register offset */ +#define PHUB_CONTROL 0x04 /* Control Register offset */ +#define PHUB_TIMEOUT 0x05 /* Time out value for Status Register */ +#define PCH_PHUB_ROM_WRITE_ENABLE 0x01 /* Enabling for writing ROM */ +#define PCH_PHUB_ROM_WRITE_DISABLE 0x00 /* Disabling for writing ROM */ +#define PCH_PHUB_MAC_START_ADDR_EG20T 0x14 /* MAC data area start address + offset */ +#define PCH_PHUB_MAC_START_ADDR_ML7223 0x20C /* MAC data area start address + offset */ +#define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset + (Intel EG20T PCH)*/ +#define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address + offset(LAPIS Semicon ML7213) + */ +#define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address + offset(LAPIS Semicon ML7223) + */ + +/* MAX number of INT_REDUCE_CONTROL registers */ +#define MAX_NUM_INT_REDUCE_CONTROL_REG 128 +#define PCI_DEVICE_ID_PCH1_PHUB 0x8801 +#define PCH_MINOR_NOS 1 +#define CLKCFG_CAN_50MHZ 0x12000000 +#define CLKCFG_CANCLK_MASK 0xFF000000 +#define CLKCFG_UART_MASK 0xFFFFFF + +/* CM-iTC */ +#define CLKCFG_UART_48MHZ (1 << 16) +#define CLKCFG_UART_25MHZ (2 << 16) +#define CLKCFG_BAUDDIV (2 << 20) +#define CLKCFG_PLL2VCO (8 << 9) +#define CLKCFG_UARTCLKSEL (1 << 18) + +/* Macros for ML7213 */ +#define PCI_DEVICE_ID_ROHM_ML7213_PHUB 0x801A + +/* Macros for ML7223 */ +#define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */ +#define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */ + +/* Macros for ML7831 */ +#define PCI_DEVICE_ID_ROHM_ML7831_PHUB 0x8801 + +/* SROM ACCESS Macro */ +#define PCH_WORD_ADDR_MASK (~((1 << 2) - 1)) + +/* Registers address offset */ +#define PCH_PHUB_ID_REG 0x0000 +#define PCH_PHUB_QUEUE_PRI_VAL_REG 0x0004 +#define PCH_PHUB_RC_QUEUE_MAXSIZE_REG 0x0008 +#define PCH_PHUB_BRI_QUEUE_MAXSIZE_REG 0x000C +#define PCH_PHUB_COMP_RESP_TIMEOUT_REG 0x0010 +#define PCH_PHUB_BUS_SLAVE_CONTROL_REG 0x0014 +#define PCH_PHUB_DEADLOCK_AVOID_TYPE_REG 0x0018 +#define PCH_PHUB_INTPIN_REG_WPERMIT_REG0 0x0020 +#define PCH_PHUB_INTPIN_REG_WPERMIT_REG1 0x0024 +#define PCH_PHUB_INTPIN_REG_WPERMIT_REG2 0x0028 +#define PCH_PHUB_INTPIN_REG_WPERMIT_REG3 0x002C +#define PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE 0x0040 +#define CLKCFG_REG_OFFSET 0x500 +#define FUNCSEL_REG_OFFSET 0x508 + +#define PCH_PHUB_OROM_SIZE 15360 + +/** + * struct pch_phub_reg - PHUB register structure + * @phub_id_reg: PHUB_ID register val + * @q_pri_val_reg: QUEUE_PRI_VAL register val + * @rc_q_maxsize_reg: RC_QUEUE_MAXSIZE register val + * @bri_q_maxsize_reg: BRI_QUEUE_MAXSIZE register val + * @comp_resp_timeout_reg: COMP_RESP_TIMEOUT register val + * @bus_slave_control_reg: BUS_SLAVE_CONTROL_REG register val + * @deadlock_avoid_type_reg: DEADLOCK_AVOID_TYPE register val + * @intpin_reg_wpermit_reg0: 
INTPIN_REG_WPERMIT register 0 val + * @intpin_reg_wpermit_reg1: INTPIN_REG_WPERMIT register 1 val + * @intpin_reg_wpermit_reg2: INTPIN_REG_WPERMIT register 2 val + * @intpin_reg_wpermit_reg3: INTPIN_REG_WPERMIT register 3 val + * @int_reduce_control_reg: INT_REDUCE_CONTROL registers val + * @clkcfg_reg: CLK CFG register val + * @funcsel_reg: Function select register value + * @pch_phub_base_address: Register base address + * @pch_phub_extrom_base_address: external rom base address + * @pch_mac_start_address: MAC address area start address + * @pch_opt_rom_start_address: Option ROM start address + * @ioh_type: Save IOH type + * @pdev: pointer to pci device struct + */ +struct pch_phub_reg { + u32 phub_id_reg; + u32 q_pri_val_reg; + u32 rc_q_maxsize_reg; + u32 bri_q_maxsize_reg; + u32 comp_resp_timeout_reg; + u32 bus_slave_control_reg; + u32 deadlock_avoid_type_reg; + u32 intpin_reg_wpermit_reg0; + u32 intpin_reg_wpermit_reg1; + u32 intpin_reg_wpermit_reg2; + u32 intpin_reg_wpermit_reg3; + u32 int_reduce_control_reg[MAX_NUM_INT_REDUCE_CONTROL_REG]; + u32 clkcfg_reg; + u32 funcsel_reg; + void __iomem *pch_phub_base_address; + void __iomem *pch_phub_extrom_base_address; + u32 pch_mac_start_address; + u32 pch_opt_rom_start_address; + int ioh_type; + struct pci_dev *pdev; +}; + +/* SROM SPEC for MAC address assignment offset */ +static const int pch_phub_mac_offset[ETH_ALEN] = {0x3, 0x2, 0x1, 0x0, 0xb, 0xa}; + +static DEFINE_MUTEX(pch_phub_mutex); + +/** + * pch_phub_read_modify_write_reg() - Reading modifying and writing register + * @chip: Pointer to the PHUB register structure + * @reg_addr_offset: Register offset address value. + * @data: Writing value. + * @mask: Mask value. + */ +static void pch_phub_read_modify_write_reg(struct pch_phub_reg *chip, + unsigned int reg_addr_offset, + unsigned int data, unsigned int mask) +{ + void __iomem *reg_addr = chip->pch_phub_base_address + reg_addr_offset; + iowrite32(((ioread32(reg_addr) & ~mask)) | data, reg_addr); +} + +/* pch_phub_save_reg_conf - saves register configuration */ +static void __maybe_unused pch_phub_save_reg_conf(struct pci_dev *pdev) +{ + unsigned int i; + struct pch_phub_reg *chip = pci_get_drvdata(pdev); + + void __iomem *p = chip->pch_phub_base_address; + + chip->phub_id_reg = ioread32(p + PCH_PHUB_ID_REG); + chip->q_pri_val_reg = ioread32(p + PCH_PHUB_QUEUE_PRI_VAL_REG); + chip->rc_q_maxsize_reg = ioread32(p + PCH_PHUB_RC_QUEUE_MAXSIZE_REG); + chip->bri_q_maxsize_reg = ioread32(p + PCH_PHUB_BRI_QUEUE_MAXSIZE_REG); + chip->comp_resp_timeout_reg = + ioread32(p + PCH_PHUB_COMP_RESP_TIMEOUT_REG); + chip->bus_slave_control_reg = + ioread32(p + PCH_PHUB_BUS_SLAVE_CONTROL_REG); + chip->deadlock_avoid_type_reg = + ioread32(p + PCH_PHUB_DEADLOCK_AVOID_TYPE_REG); + chip->intpin_reg_wpermit_reg0 = + ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG0); + chip->intpin_reg_wpermit_reg1 = + ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG1); + chip->intpin_reg_wpermit_reg2 = + ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG2); + chip->intpin_reg_wpermit_reg3 = + ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG3); + dev_dbg(&pdev->dev, "%s : " + "chip->phub_id_reg=%x, " + "chip->q_pri_val_reg=%x, " + "chip->rc_q_maxsize_reg=%x, " + "chip->bri_q_maxsize_reg=%x, " + "chip->comp_resp_timeout_reg=%x, " + "chip->bus_slave_control_reg=%x, " + "chip->deadlock_avoid_type_reg=%x, " + "chip->intpin_reg_wpermit_reg0=%x, " + "chip->intpin_reg_wpermit_reg1=%x, " + "chip->intpin_reg_wpermit_reg2=%x, " + "chip->intpin_reg_wpermit_reg3=%x\n", __func__, + chip->phub_id_reg, + 
chip->q_pri_val_reg, + chip->rc_q_maxsize_reg, + chip->bri_q_maxsize_reg, + chip->comp_resp_timeout_reg, + chip->bus_slave_control_reg, + chip->deadlock_avoid_type_reg, + chip->intpin_reg_wpermit_reg0, + chip->intpin_reg_wpermit_reg1, + chip->intpin_reg_wpermit_reg2, + chip->intpin_reg_wpermit_reg3); + for (i = 0; i < MAX_NUM_INT_REDUCE_CONTROL_REG; i++) { + chip->int_reduce_control_reg[i] = + ioread32(p + PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE + 4 * i); + dev_dbg(&pdev->dev, "%s : " + "chip->int_reduce_control_reg[%d]=%x\n", + __func__, i, chip->int_reduce_control_reg[i]); + } + chip->clkcfg_reg = ioread32(p + CLKCFG_REG_OFFSET); + if ((chip->ioh_type == 2) || (chip->ioh_type == 4)) + chip->funcsel_reg = ioread32(p + FUNCSEL_REG_OFFSET); +} + +/* pch_phub_restore_reg_conf - restore register configuration */ +static void __maybe_unused pch_phub_restore_reg_conf(struct pci_dev *pdev) +{ + unsigned int i; + struct pch_phub_reg *chip = pci_get_drvdata(pdev); + void __iomem *p; + p = chip->pch_phub_base_address; + + iowrite32(chip->phub_id_reg, p + PCH_PHUB_ID_REG); + iowrite32(chip->q_pri_val_reg, p + PCH_PHUB_QUEUE_PRI_VAL_REG); + iowrite32(chip->rc_q_maxsize_reg, p + PCH_PHUB_RC_QUEUE_MAXSIZE_REG); + iowrite32(chip->bri_q_maxsize_reg, p + PCH_PHUB_BRI_QUEUE_MAXSIZE_REG); + iowrite32(chip->comp_resp_timeout_reg, + p + PCH_PHUB_COMP_RESP_TIMEOUT_REG); + iowrite32(chip->bus_slave_control_reg, + p + PCH_PHUB_BUS_SLAVE_CONTROL_REG); + iowrite32(chip->deadlock_avoid_type_reg, + p + PCH_PHUB_DEADLOCK_AVOID_TYPE_REG); + iowrite32(chip->intpin_reg_wpermit_reg0, + p + PCH_PHUB_INTPIN_REG_WPERMIT_REG0); + iowrite32(chip->intpin_reg_wpermit_reg1, + p + PCH_PHUB_INTPIN_REG_WPERMIT_REG1); + iowrite32(chip->intpin_reg_wpermit_reg2, + p + PCH_PHUB_INTPIN_REG_WPERMIT_REG2); + iowrite32(chip->intpin_reg_wpermit_reg3, + p + PCH_PHUB_INTPIN_REG_WPERMIT_REG3); + dev_dbg(&pdev->dev, "%s : " + "chip->phub_id_reg=%x, " + "chip->q_pri_val_reg=%x, " + "chip->rc_q_maxsize_reg=%x, " + "chip->bri_q_maxsize_reg=%x, " + "chip->comp_resp_timeout_reg=%x, " + "chip->bus_slave_control_reg=%x, " + "chip->deadlock_avoid_type_reg=%x, " + "chip->intpin_reg_wpermit_reg0=%x, " + "chip->intpin_reg_wpermit_reg1=%x, " + "chip->intpin_reg_wpermit_reg2=%x, " + "chip->intpin_reg_wpermit_reg3=%x\n", __func__, + chip->phub_id_reg, + chip->q_pri_val_reg, + chip->rc_q_maxsize_reg, + chip->bri_q_maxsize_reg, + chip->comp_resp_timeout_reg, + chip->bus_slave_control_reg, + chip->deadlock_avoid_type_reg, + chip->intpin_reg_wpermit_reg0, + chip->intpin_reg_wpermit_reg1, + chip->intpin_reg_wpermit_reg2, + chip->intpin_reg_wpermit_reg3); + for (i = 0; i < MAX_NUM_INT_REDUCE_CONTROL_REG; i++) { + iowrite32(chip->int_reduce_control_reg[i], + p + PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE + 4 * i); + dev_dbg(&pdev->dev, "%s : " + "chip->int_reduce_control_reg[%d]=%x\n", + __func__, i, chip->int_reduce_control_reg[i]); + } + + iowrite32(chip->clkcfg_reg, p + CLKCFG_REG_OFFSET); + if ((chip->ioh_type == 2) || (chip->ioh_type == 4)) + iowrite32(chip->funcsel_reg, p + FUNCSEL_REG_OFFSET); +} + +/** + * pch_phub_read_serial_rom() - Reading Serial ROM + * @chip: Pointer to the PHUB register structure + * @offset_address: Serial ROM offset address to read. + * @data: Read buffer for specified Serial ROM value. 
+ */ +static void pch_phub_read_serial_rom(struct pch_phub_reg *chip, + unsigned int offset_address, u8 *data) +{ + void __iomem *mem_addr = chip->pch_phub_extrom_base_address + + offset_address; + + *data = ioread8(mem_addr); +} + +/** + * pch_phub_write_serial_rom() - Writing Serial ROM + * @chip: Pointer to the PHUB register structure + * @offset_address: Serial ROM offset address. + * @data: Serial ROM value to write. + */ +static int pch_phub_write_serial_rom(struct pch_phub_reg *chip, + unsigned int offset_address, u8 data) +{ + void __iomem *mem_addr = chip->pch_phub_extrom_base_address + + (offset_address & PCH_WORD_ADDR_MASK); + int i; + unsigned int word_data; + unsigned int pos; + unsigned int mask; + pos = (offset_address % 4) * 8; + mask = ~(0xFF << pos); + + iowrite32(PCH_PHUB_ROM_WRITE_ENABLE, + chip->pch_phub_extrom_base_address + PHUB_CONTROL); + + word_data = ioread32(mem_addr); + iowrite32((word_data & mask) | (u32)data << pos, mem_addr); + + i = 0; + while (ioread8(chip->pch_phub_extrom_base_address + + PHUB_STATUS) != 0x00) { + msleep(1); + if (i == PHUB_TIMEOUT) + return -ETIMEDOUT; + i++; + } + + iowrite32(PCH_PHUB_ROM_WRITE_DISABLE, + chip->pch_phub_extrom_base_address + PHUB_CONTROL); + + return 0; +} + +/** + * pch_phub_read_serial_rom_val() - Read Serial ROM value + * @chip: Pointer to the PHUB register structure + * @offset_address: Serial ROM address offset value. + * @data: Serial ROM value to read. + */ +static void pch_phub_read_serial_rom_val(struct pch_phub_reg *chip, + unsigned int offset_address, u8 *data) +{ + unsigned int mem_addr; + + mem_addr = chip->pch_mac_start_address + + pch_phub_mac_offset[offset_address]; + + pch_phub_read_serial_rom(chip, mem_addr, data); +} + +/** + * pch_phub_write_serial_rom_val() - writing Serial ROM value + * @chip: Pointer to the PHUB register structure + * @offset_address: Serial ROM address offset value. + * @data: Serial ROM value. 
+ */ +static int pch_phub_write_serial_rom_val(struct pch_phub_reg *chip, + unsigned int offset_address, u8 data) +{ + int retval; + unsigned int mem_addr; + + mem_addr = chip->pch_mac_start_address + + pch_phub_mac_offset[offset_address]; + + retval = pch_phub_write_serial_rom(chip, mem_addr, data); + + return retval; +} + +/* pch_phub_gbe_serial_rom_conf - makes Serial ROM header format configuration + * for Gigabit Ethernet MAC address + */ +static int pch_phub_gbe_serial_rom_conf(struct pch_phub_reg *chip) +{ + int retval; + + retval = pch_phub_write_serial_rom(chip, 0x0b, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x0a, 0x10); + retval |= pch_phub_write_serial_rom(chip, 0x09, 0x01); + retval |= pch_phub_write_serial_rom(chip, 0x08, 0x02); + + retval |= pch_phub_write_serial_rom(chip, 0x0f, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x0e, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x0d, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x0c, 0x80); + + retval |= pch_phub_write_serial_rom(chip, 0x13, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x12, 0x10); + retval |= pch_phub_write_serial_rom(chip, 0x11, 0x01); + retval |= pch_phub_write_serial_rom(chip, 0x10, 0x18); + + retval |= pch_phub_write_serial_rom(chip, 0x1b, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x1a, 0x10); + retval |= pch_phub_write_serial_rom(chip, 0x19, 0x01); + retval |= pch_phub_write_serial_rom(chip, 0x18, 0x19); + + retval |= pch_phub_write_serial_rom(chip, 0x23, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x22, 0x10); + retval |= pch_phub_write_serial_rom(chip, 0x21, 0x01); + retval |= pch_phub_write_serial_rom(chip, 0x20, 0x3a); + + retval |= pch_phub_write_serial_rom(chip, 0x27, 0x01); + retval |= pch_phub_write_serial_rom(chip, 0x26, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x25, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x24, 0x00); + + return retval; +} + +/* pch_phub_gbe_serial_rom_conf_mp - makes SerialROM header format configuration + * for Gigabit Ethernet MAC address + */ +static int pch_phub_gbe_serial_rom_conf_mp(struct pch_phub_reg *chip) +{ + int retval; + u32 offset_addr; + + offset_addr = 0x200; + retval = pch_phub_write_serial_rom(chip, 0x03 + offset_addr, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x02 + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x01 + offset_addr, 0x40); + retval |= pch_phub_write_serial_rom(chip, 0x00 + offset_addr, 0x02); + + retval |= pch_phub_write_serial_rom(chip, 0x07 + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x06 + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x05 + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x04 + offset_addr, 0x80); + + retval |= pch_phub_write_serial_rom(chip, 0x0b + offset_addr, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x0a + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x09 + offset_addr, 0x40); + retval |= pch_phub_write_serial_rom(chip, 0x08 + offset_addr, 0x18); + + retval |= pch_phub_write_serial_rom(chip, 0x13 + offset_addr, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x12 + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x11 + offset_addr, 0x40); + retval |= pch_phub_write_serial_rom(chip, 0x10 + offset_addr, 0x19); + + retval |= pch_phub_write_serial_rom(chip, 0x1b + offset_addr, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x1a + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x19 + offset_addr, 0x40); + 
retval |= pch_phub_write_serial_rom(chip, 0x18 + offset_addr, 0x3a); + + retval |= pch_phub_write_serial_rom(chip, 0x1f + offset_addr, 0x01); + retval |= pch_phub_write_serial_rom(chip, 0x1e + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x1d + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x1c + offset_addr, 0x00); + + return retval; +} + +/** + * pch_phub_read_gbe_mac_addr() - Read Gigabit Ethernet MAC address + * @chip: Pointer to the PHUB register structure + * @data: Buffer of the Gigabit Ethernet MAC address value. + */ +static void pch_phub_read_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data) +{ + int i; + for (i = 0; i < ETH_ALEN; i++) + pch_phub_read_serial_rom_val(chip, i, &data[i]); +} + +/** + * pch_phub_write_gbe_mac_addr() - Write MAC address + * @chip: Pointer to the PHUB register structure + * @data: Gigabit Ethernet MAC address value. + */ +static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data) +{ + int retval; + int i; + + if ((chip->ioh_type == 1) || (chip->ioh_type == 5)) /* EG20T or ML7831*/ + retval = pch_phub_gbe_serial_rom_conf(chip); + else /* ML7223 */ + retval = pch_phub_gbe_serial_rom_conf_mp(chip); + if (retval) + return retval; + + for (i = 0; i < ETH_ALEN; i++) { + retval = pch_phub_write_serial_rom_val(chip, i, data[i]); + if (retval) + return retval; + } + + return retval; +} + +static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + unsigned int rom_signature; + unsigned char rom_length; + unsigned int tmp; + unsigned int addr_offset; + unsigned int orom_size; + int ret; + int err; + ssize_t rom_size; + + struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj)); + + ret = mutex_lock_interruptible(&pch_phub_mutex); + if (ret) { + err = -ERESTARTSYS; + goto return_err_nomutex; + } + + /* Get Rom signature */ + chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); + if (!chip->pch_phub_extrom_base_address) { + err = -ENODATA; + goto exrom_map_err; + } + + pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address, + (unsigned char *)&rom_signature); + rom_signature &= 0xff; + pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address + 1, + (unsigned char *)&tmp); + rom_signature |= (tmp & 0xff) << 8; + if (rom_signature == 0xAA55) { + pch_phub_read_serial_rom(chip, + chip->pch_opt_rom_start_address + 2, + &rom_length); + orom_size = rom_length * 512; + if (orom_size < off) { + addr_offset = 0; + goto return_ok; + } + if (orom_size < count) { + addr_offset = 0; + goto return_ok; + } + + for (addr_offset = 0; addr_offset < count; addr_offset++) { + pch_phub_read_serial_rom(chip, + chip->pch_opt_rom_start_address + addr_offset + off, + &buf[addr_offset]); + } + } else { + err = -ENODATA; + goto return_err; + } +return_ok: + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); + mutex_unlock(&pch_phub_mutex); + return addr_offset; + +return_err: + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); +exrom_map_err: + mutex_unlock(&pch_phub_mutex); +return_err_nomutex: + return err; +} + +static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + int err; + unsigned int addr_offset; + int ret; + ssize_t rom_size; + struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj)); + + ret = mutex_lock_interruptible(&pch_phub_mutex); + if (ret) + return -ERESTARTSYS; + + if 
(off > PCH_PHUB_OROM_SIZE) { + addr_offset = 0; + goto return_ok; + } + if (count > PCH_PHUB_OROM_SIZE) { + addr_offset = 0; + goto return_ok; + } + + chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); + if (!chip->pch_phub_extrom_base_address) { + err = -ENOMEM; + goto exrom_map_err; + } + + for (addr_offset = 0; addr_offset < count; addr_offset++) { + if (PCH_PHUB_OROM_SIZE < off + addr_offset) + goto return_ok; + + ret = pch_phub_write_serial_rom(chip, + chip->pch_opt_rom_start_address + addr_offset + off, + buf[addr_offset]); + if (ret) { + err = ret; + goto return_err; + } + } + +return_ok: + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); + mutex_unlock(&pch_phub_mutex); + return addr_offset; + +return_err: + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); + +exrom_map_err: + mutex_unlock(&pch_phub_mutex); + return err; +} + +static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u8 mac[8]; + struct pch_phub_reg *chip = dev_get_drvdata(dev); + ssize_t rom_size; + + chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); + if (!chip->pch_phub_extrom_base_address) + return -ENOMEM; + + pch_phub_read_gbe_mac_addr(chip, mac); + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); + + return sprintf(buf, "%pM\n", mac); +} + +static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + u8 mac[ETH_ALEN]; + ssize_t rom_size; + struct pch_phub_reg *chip = dev_get_drvdata(dev); + int ret; + + if (!mac_pton(buf, mac)) + return -EINVAL; + + chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); + if (!chip->pch_phub_extrom_base_address) + return -ENOMEM; + + ret = pch_phub_write_gbe_mac_addr(chip, mac); + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); + if (ret) + return ret; + + return count; +} + +static DEVICE_ATTR(pch_mac, S_IRUGO | S_IWUSR, show_pch_mac, store_pch_mac); + +static const struct bin_attribute pch_bin_attr = { + .attr = { + .name = "pch_firmware", + .mode = S_IRUGO | S_IWUSR, + }, + .size = PCH_PHUB_OROM_SIZE + 1, + .read = pch_phub_bin_read, + .write = pch_phub_bin_write, +}; + +static int pch_phub_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int ret; + struct pch_phub_reg *chip; + + chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL); + if (chip == NULL) + return -ENOMEM; + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, + "%s : pci_enable_device FAILED(ret=%d)", __func__, ret); + goto err_pci_enable_dev; + } + dev_dbg(&pdev->dev, "%s : pci_enable_device returns %d\n", __func__, + ret); + + ret = pci_request_regions(pdev, KBUILD_MODNAME); + if (ret) { + dev_err(&pdev->dev, + "%s : pci_request_regions FAILED(ret=%d)", __func__, ret); + goto err_req_regions; + } + dev_dbg(&pdev->dev, "%s : " + "pci_request_regions returns %d\n", __func__, ret); + + chip->pch_phub_base_address = pci_iomap(pdev, 1, 0); + + + if (chip->pch_phub_base_address == NULL) { + dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__); + ret = -ENOMEM; + goto err_pci_iomap; + } + dev_dbg(&pdev->dev, "%s : pci_iomap SUCCESS and value " + "in pch_phub_base_address variable is %p\n", __func__, + chip->pch_phub_base_address); + + chip->pdev = pdev; /* Save pci device struct */ + + if (id->driver_data == 1) { /* EG20T PCH */ + const char *board_name; + unsigned int prefetch = 0x000affaa; + + if (pdev->dev.of_node) + of_property_read_u32(pdev->dev.of_node, + 
"intel,eg20t-prefetch", + &prefetch); + + ret = sysfs_create_file(&pdev->dev.kobj, + &dev_attr_pch_mac.attr); + if (ret) + goto err_sysfs_create; + + ret = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr); + if (ret) + goto exit_bin_attr; + + pch_phub_read_modify_write_reg(chip, + (unsigned int)CLKCFG_REG_OFFSET, + CLKCFG_CAN_50MHZ, + CLKCFG_CANCLK_MASK); + + /* quirk for CM-iTC board */ + board_name = dmi_get_system_info(DMI_BOARD_NAME); + if (board_name && strstr(board_name, "CM-iTC")) + pch_phub_read_modify_write_reg(chip, + (unsigned int)CLKCFG_REG_OFFSET, + CLKCFG_UART_48MHZ | CLKCFG_BAUDDIV | + CLKCFG_PLL2VCO | CLKCFG_UARTCLKSEL, + CLKCFG_UART_MASK); + + /* set the prefech value */ + iowrite32(prefetch, chip->pch_phub_base_address + 0x14); + /* set the interrupt delay value */ + iowrite32(0x25, chip->pch_phub_base_address + 0x44); + chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T; + chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T; + + /* quirk for MIPS Boston platform */ + if (pdev->dev.of_node) { + if (of_machine_is_compatible("img,boston")) { + pch_phub_read_modify_write_reg(chip, + (unsigned int)CLKCFG_REG_OFFSET, + CLKCFG_UART_25MHZ, + CLKCFG_UART_MASK); + } + } + } else if (id->driver_data == 2) { /* ML7213 IOH */ + ret = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr); + if (ret) + goto err_sysfs_create; + /* set the prefech value + * Device2(USB OHCI #1/ USB EHCI #1/ USB Device):a + * Device4(SDIO #0,1,2):f + * Device6(SATA 2):f + * Device8(USB OHCI #0/ USB EHCI #0):a + */ + iowrite32(0x000affa0, chip->pch_phub_base_address + 0x14); + chip->pch_opt_rom_start_address =\ + PCH_PHUB_ROM_START_ADDR_ML7213; + } else if (id->driver_data == 3) { /* ML7223 IOH Bus-m*/ + /* set the prefech value + * Device8(GbE) + */ + iowrite32(0x000a0000, chip->pch_phub_base_address + 0x14); + /* set the interrupt delay value */ + iowrite32(0x25, chip->pch_phub_base_address + 0x140); + chip->pch_opt_rom_start_address =\ + PCH_PHUB_ROM_START_ADDR_ML7223; + chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223; + } else if (id->driver_data == 4) { /* ML7223 IOH Bus-n*/ + ret = sysfs_create_file(&pdev->dev.kobj, + &dev_attr_pch_mac.attr); + if (ret) + goto err_sysfs_create; + ret = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr); + if (ret) + goto exit_bin_attr; + /* set the prefech value + * Device2(USB OHCI #0,1,2,3/ USB EHCI #0):a + * Device4(SDIO #0,1):f + * Device6(SATA 2):f + */ + iowrite32(0x0000ffa0, chip->pch_phub_base_address + 0x14); + chip->pch_opt_rom_start_address =\ + PCH_PHUB_ROM_START_ADDR_ML7223; + chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223; + } else if (id->driver_data == 5) { /* ML7831 */ + ret = sysfs_create_file(&pdev->dev.kobj, + &dev_attr_pch_mac.attr); + if (ret) + goto err_sysfs_create; + + ret = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr); + if (ret) + goto exit_bin_attr; + + /* set the prefech value */ + iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14); + /* set the interrupt delay value */ + iowrite32(0x25, chip->pch_phub_base_address + 0x44); + chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T; + chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T; + } + + chip->ioh_type = id->driver_data; + pci_set_drvdata(pdev, chip); + + return 0; +exit_bin_attr: + sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); + +err_sysfs_create: + pci_iounmap(pdev, chip->pch_phub_base_address); +err_pci_iomap: + pci_release_regions(pdev); +err_req_regions: + 
pci_disable_device(pdev); +err_pci_enable_dev: + kfree(chip); + dev_err(&pdev->dev, "%s returns %d\n", __func__, ret); + return ret; +} + +static void pch_phub_remove(struct pci_dev *pdev) +{ + struct pch_phub_reg *chip = pci_get_drvdata(pdev); + + sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); + sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr); + pci_iounmap(pdev, chip->pch_phub_base_address); + pci_release_regions(pdev); + pci_disable_device(pdev); + kfree(chip); +} + +static int __maybe_unused pch_phub_suspend(struct device *dev_d) +{ + device_wakeup_disable(dev_d); + + return 0; +} + +static int __maybe_unused pch_phub_resume(struct device *dev_d) +{ + device_wakeup_disable(dev_d); + + return 0; +} + +static const struct pci_device_id pch_phub_pcidev_id[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH1_PHUB), 1, }, + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, }, + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, }, + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, }, + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7831_PHUB), 5, }, + { } +}; +MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id); + +static SIMPLE_DEV_PM_OPS(pch_phub_pm_ops, pch_phub_suspend, pch_phub_resume); + +static struct pci_driver pch_phub_driver = { + .name = "pch_phub", + .id_table = pch_phub_pcidev_id, + .probe = pch_phub_probe, + .remove = pch_phub_remove, + .driver.pm = &pch_phub_pm_ops, +}; + +module_pci_driver(pch_phub_driver); + +MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7223) PHUB"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c new file mode 100644 index 000000000..cdb9a55a2 --- /dev/null +++ b/drivers/misc/pci_endpoint_test.c @@ -0,0 +1,1029 @@ +// SPDX-License-Identifier: GPL-2.0-only +/** + * Host side test driver to test endpoint functionality + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I <kishon@ti.com> + */ + +#include <linux/crc32.h> +#include <linux/delay.h> +#include <linux/fs.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> + +#include <linux/pci_regs.h> + +#include <uapi/linux/pcitest.h> + +#define DRV_MODULE_NAME "pci-endpoint-test" + +#define IRQ_TYPE_UNDEFINED -1 +#define IRQ_TYPE_LEGACY 0 +#define IRQ_TYPE_MSI 1 +#define IRQ_TYPE_MSIX 2 + +#define PCI_ENDPOINT_TEST_MAGIC 0x0 + +#define PCI_ENDPOINT_TEST_COMMAND 0x4 +#define COMMAND_RAISE_LEGACY_IRQ BIT(0) +#define COMMAND_RAISE_MSI_IRQ BIT(1) +#define COMMAND_RAISE_MSIX_IRQ BIT(2) +#define COMMAND_READ BIT(3) +#define COMMAND_WRITE BIT(4) +#define COMMAND_COPY BIT(5) + +#define PCI_ENDPOINT_TEST_STATUS 0x8 +#define STATUS_READ_SUCCESS BIT(0) +#define STATUS_READ_FAIL BIT(1) +#define STATUS_WRITE_SUCCESS BIT(2) +#define STATUS_WRITE_FAIL BIT(3) +#define STATUS_COPY_SUCCESS BIT(4) +#define STATUS_COPY_FAIL BIT(5) +#define STATUS_IRQ_RAISED BIT(6) +#define STATUS_SRC_ADDR_INVALID BIT(7) +#define STATUS_DST_ADDR_INVALID BIT(8) + +#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c +#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10 + +#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14 +#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18 + +#define PCI_ENDPOINT_TEST_SIZE 0x1c +#define PCI_ENDPOINT_TEST_CHECKSUM 0x20 + +#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24 +#define 
PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28 + +#define PCI_ENDPOINT_TEST_FLAGS 0x2c +#define FLAG_USE_DMA BIT(0) + +#define PCI_DEVICE_ID_TI_J721E 0xb00d +#define PCI_DEVICE_ID_TI_AM654 0xb00c +#define PCI_DEVICE_ID_TI_J7200 0xb00f +#define PCI_DEVICE_ID_TI_AM64 0xb010 +#define PCI_DEVICE_ID_TI_J721S2 0xb013 +#define PCI_DEVICE_ID_LS1088A 0x80c0 + +#define is_am654_pci_dev(pdev) \ + ((pdev)->device == PCI_DEVICE_ID_TI_AM654) + +#define PCI_DEVICE_ID_RENESAS_R8A774A1 0x0028 +#define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b +#define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d +#define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025 +#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031 + +static DEFINE_IDA(pci_endpoint_test_ida); + +#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \ + miscdev) + +static bool no_msi; +module_param(no_msi, bool, 0444); +MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test"); + +static int irq_type = IRQ_TYPE_MSI; +module_param(irq_type, int, 0444); +MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)"); + +enum pci_barno { + BAR_0, + BAR_1, + BAR_2, + BAR_3, + BAR_4, + BAR_5, +}; + +struct pci_endpoint_test { + struct pci_dev *pdev; + void __iomem *base; + void __iomem *bar[PCI_STD_NUM_BARS]; + struct completion irq_raised; + int last_irq; + int num_irqs; + int irq_type; + /* mutex to protect the ioctls */ + struct mutex mutex; + struct miscdevice miscdev; + enum pci_barno test_reg_bar; + size_t alignment; + const char *name; +}; + +struct pci_endpoint_test_data { + enum pci_barno test_reg_bar; + size_t alignment; + int irq_type; +}; + +static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test, + u32 offset) +{ + return readl(test->base + offset); +} + +static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test, + u32 offset, u32 value) +{ + writel(value, test->base + offset); +} + +static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test, + int bar, int offset) +{ + return readl(test->bar[bar] + offset); +} + +static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test, + int bar, u32 offset, u32 value) +{ + writel(value, test->bar[bar] + offset); +} + +static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id) +{ + struct pci_endpoint_test *test = dev_id; + u32 reg; + + reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS); + if (reg & STATUS_IRQ_RAISED) { + test->last_irq = irq; + complete(&test->irq_raised); + reg &= ~STATUS_IRQ_RAISED; + } + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, + reg); + + return IRQ_HANDLED; +} + +static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test) +{ + struct pci_dev *pdev = test->pdev; + + pci_free_irq_vectors(pdev); + test->irq_type = IRQ_TYPE_UNDEFINED; +} + +static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test, + int type) +{ + int irq = -1; + struct pci_dev *pdev = test->pdev; + struct device *dev = &pdev->dev; + bool res = true; + + switch (type) { + case IRQ_TYPE_LEGACY: + irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY); + if (irq < 0) + dev_err(dev, "Failed to get Legacy interrupt\n"); + break; + case IRQ_TYPE_MSI: + irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI); + if (irq < 0) + dev_err(dev, "Failed to get MSI interrupts\n"); + break; + case IRQ_TYPE_MSIX: + irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX); + if (irq < 0) + dev_err(dev, "Failed to get MSI-X interrupts\n"); + break; + 
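+	/*
+	 * pci_alloc_irq_vectors() returns the number of vectors
+	 * allocated on success or a negative errno; the upper bounds
+	 * requested above (1, 32, 2048) are the architectural maximums
+	 * for legacy, MSI and MSI-X interrupts respectively.
+	 */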
default: + dev_err(dev, "Invalid IRQ type selected\n"); + } + + if (irq < 0) { + irq = 0; + res = false; + } + + test->irq_type = type; + test->num_irqs = irq; + + return res; +} + +static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test) +{ + int i; + struct pci_dev *pdev = test->pdev; + struct device *dev = &pdev->dev; + + for (i = 0; i < test->num_irqs; i++) + devm_free_irq(dev, pci_irq_vector(pdev, i), test); + + test->num_irqs = 0; +} + +static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test) +{ + int i; + int err; + struct pci_dev *pdev = test->pdev; + struct device *dev = &pdev->dev; + + for (i = 0; i < test->num_irqs; i++) { + err = devm_request_irq(dev, pci_irq_vector(pdev, i), + pci_endpoint_test_irqhandler, + IRQF_SHARED, test->name, test); + if (err) + goto fail; + } + + return true; + +fail: + switch (irq_type) { + case IRQ_TYPE_LEGACY: + dev_err(dev, "Failed to request IRQ %d for Legacy\n", + pci_irq_vector(pdev, i)); + break; + case IRQ_TYPE_MSI: + dev_err(dev, "Failed to request IRQ %d for MSI %d\n", + pci_irq_vector(pdev, i), + i + 1); + break; + case IRQ_TYPE_MSIX: + dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n", + pci_irq_vector(pdev, i), + i + 1); + break; + } + + return false; +} + +static bool pci_endpoint_test_bar(struct pci_endpoint_test *test, + enum pci_barno barno) +{ + int j; + u32 val; + int size; + struct pci_dev *pdev = test->pdev; + + if (!test->bar[barno]) + return false; + + size = pci_resource_len(pdev, barno); + + if (barno == test->test_reg_bar) + size = 0x4; + + for (j = 0; j < size; j += 4) + pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0); + + for (j = 0; j < size; j += 4) { + val = pci_endpoint_test_bar_readl(test, barno, j); + if (val != 0xA0A0A0A0) + return false; + } + + return true; +} + +static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test) +{ + u32 val; + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, + IRQ_TYPE_LEGACY); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, + COMMAND_RAISE_LEGACY_IRQ); + val = wait_for_completion_timeout(&test->irq_raised, + msecs_to_jiffies(1000)); + if (!val) + return false; + + return true; +} + +static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test, + u16 msi_num, bool msix) +{ + u32 val; + struct pci_dev *pdev = test->pdev; + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, + msix == false ? IRQ_TYPE_MSI : + IRQ_TYPE_MSIX); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, + msix == false ? 
COMMAND_RAISE_MSI_IRQ : + COMMAND_RAISE_MSIX_IRQ); + val = wait_for_completion_timeout(&test->irq_raised, + msecs_to_jiffies(1000)); + if (!val) + return false; + + if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq) + return true; + + return false; +} + +static int pci_endpoint_test_validate_xfer_params(struct device *dev, + struct pci_endpoint_test_xfer_param *param, size_t alignment) +{ + if (!param->size) { + dev_dbg(dev, "Data size is zero\n"); + return -EINVAL; + } + + if (param->size > SIZE_MAX - alignment) { + dev_dbg(dev, "Maximum transfer data size exceeded\n"); + return -EINVAL; + } + + return 0; +} + +static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, + unsigned long arg) +{ + struct pci_endpoint_test_xfer_param param; + bool ret = false; + void *src_addr; + void *dst_addr; + u32 flags = 0; + bool use_dma; + size_t size; + dma_addr_t src_phys_addr; + dma_addr_t dst_phys_addr; + struct pci_dev *pdev = test->pdev; + struct device *dev = &pdev->dev; + void *orig_src_addr; + dma_addr_t orig_src_phys_addr; + void *orig_dst_addr; + dma_addr_t orig_dst_phys_addr; + size_t offset; + size_t alignment = test->alignment; + int irq_type = test->irq_type; + u32 src_crc32; + u32 dst_crc32; + int err; + + err = copy_from_user(¶m, (void __user *)arg, sizeof(param)); + if (err) { + dev_err(dev, "Failed to get transfer param\n"); + return false; + } + + err = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment); + if (err) + return false; + + size = param.size; + + use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA); + if (use_dma) + flags |= FLAG_USE_DMA; + + if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) { + dev_err(dev, "Invalid IRQ type option\n"); + goto err; + } + + orig_src_addr = kzalloc(size + alignment, GFP_KERNEL); + if (!orig_src_addr) { + dev_err(dev, "Failed to allocate source buffer\n"); + ret = false; + goto err; + } + + get_random_bytes(orig_src_addr, size + alignment); + orig_src_phys_addr = dma_map_single(dev, orig_src_addr, + size + alignment, DMA_TO_DEVICE); + if (dma_mapping_error(dev, orig_src_phys_addr)) { + dev_err(dev, "failed to map source buffer address\n"); + ret = false; + goto err_src_phys_addr; + } + + if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) { + src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment); + offset = src_phys_addr - orig_src_phys_addr; + src_addr = orig_src_addr + offset; + } else { + src_phys_addr = orig_src_phys_addr; + src_addr = orig_src_addr; + } + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR, + lower_32_bits(src_phys_addr)); + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR, + upper_32_bits(src_phys_addr)); + + src_crc32 = crc32_le(~0, src_addr, size); + + orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL); + if (!orig_dst_addr) { + dev_err(dev, "Failed to allocate destination address\n"); + ret = false; + goto err_dst_addr; + } + + orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr, + size + alignment, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, orig_dst_phys_addr)) { + dev_err(dev, "failed to map destination buffer address\n"); + ret = false; + goto err_dst_phys_addr; + } + + if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) { + dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment); + offset = dst_phys_addr - orig_dst_phys_addr; + dst_addr = orig_dst_addr + offset; + } else { + dst_phys_addr = orig_dst_phys_addr; + dst_addr = orig_dst_addr; + } + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR, + 
lower_32_bits(dst_phys_addr)); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR, + upper_32_bits(dst_phys_addr)); + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, + size); + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, + COMMAND_COPY); + + wait_for_completion(&test->irq_raised); + + dma_unmap_single(dev, orig_dst_phys_addr, size + alignment, + DMA_FROM_DEVICE); + + dst_crc32 = crc32_le(~0, dst_addr, size); + if (dst_crc32 == src_crc32) + ret = true; + +err_dst_phys_addr: + kfree(orig_dst_addr); + +err_dst_addr: + dma_unmap_single(dev, orig_src_phys_addr, size + alignment, + DMA_TO_DEVICE); + +err_src_phys_addr: + kfree(orig_src_addr); + +err: + return ret; +} + +static bool pci_endpoint_test_write(struct pci_endpoint_test *test, + unsigned long arg) +{ + struct pci_endpoint_test_xfer_param param; + bool ret = false; + u32 flags = 0; + bool use_dma; + u32 reg; + void *addr; + dma_addr_t phys_addr; + struct pci_dev *pdev = test->pdev; + struct device *dev = &pdev->dev; + void *orig_addr; + dma_addr_t orig_phys_addr; + size_t offset; + size_t alignment = test->alignment; + int irq_type = test->irq_type; + size_t size; + u32 crc32; + int err; + + err = copy_from_user(¶m, (void __user *)arg, sizeof(param)); + if (err != 0) { + dev_err(dev, "Failed to get transfer param\n"); + return false; + } + + err = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment); + if (err) + return false; + + size = param.size; + + use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA); + if (use_dma) + flags |= FLAG_USE_DMA; + + if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) { + dev_err(dev, "Invalid IRQ type option\n"); + goto err; + } + + orig_addr = kzalloc(size + alignment, GFP_KERNEL); + if (!orig_addr) { + dev_err(dev, "Failed to allocate address\n"); + ret = false; + goto err; + } + + get_random_bytes(orig_addr, size + alignment); + + orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, orig_phys_addr)) { + dev_err(dev, "failed to map source buffer address\n"); + ret = false; + goto err_phys_addr; + } + + if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) { + phys_addr = PTR_ALIGN(orig_phys_addr, alignment); + offset = phys_addr - orig_phys_addr; + addr = orig_addr + offset; + } else { + phys_addr = orig_phys_addr; + addr = orig_addr; + } + + crc32 = crc32_le(~0, addr, size); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM, + crc32); + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR, + lower_32_bits(phys_addr)); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR, + upper_32_bits(phys_addr)); + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size); + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, + COMMAND_READ); + + wait_for_completion(&test->irq_raised); + + reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS); + if (reg & STATUS_READ_SUCCESS) + ret = true; + + dma_unmap_single(dev, orig_phys_addr, size + alignment, + DMA_TO_DEVICE); + +err_phys_addr: + kfree(orig_addr); + +err: + return ret; +} + +static bool 
pci_endpoint_test_read(struct pci_endpoint_test *test, + unsigned long arg) +{ + struct pci_endpoint_test_xfer_param param; + bool ret = false; + u32 flags = 0; + bool use_dma; + size_t size; + void *addr; + dma_addr_t phys_addr; + struct pci_dev *pdev = test->pdev; + struct device *dev = &pdev->dev; + void *orig_addr; + dma_addr_t orig_phys_addr; + size_t offset; + size_t alignment = test->alignment; + int irq_type = test->irq_type; + u32 crc32; + int err; + + err = copy_from_user(¶m, (void __user *)arg, sizeof(param)); + if (err) { + dev_err(dev, "Failed to get transfer param\n"); + return false; + } + + err = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment); + if (err) + return false; + + size = param.size; + + use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA); + if (use_dma) + flags |= FLAG_USE_DMA; + + if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) { + dev_err(dev, "Invalid IRQ type option\n"); + goto err; + } + + orig_addr = kzalloc(size + alignment, GFP_KERNEL); + if (!orig_addr) { + dev_err(dev, "Failed to allocate destination address\n"); + ret = false; + goto err; + } + + orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, orig_phys_addr)) { + dev_err(dev, "failed to map source buffer address\n"); + ret = false; + goto err_phys_addr; + } + + if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) { + phys_addr = PTR_ALIGN(orig_phys_addr, alignment); + offset = phys_addr - orig_phys_addr; + addr = orig_addr + offset; + } else { + phys_addr = orig_phys_addr; + addr = orig_addr; + } + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR, + lower_32_bits(phys_addr)); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR, + upper_32_bits(phys_addr)); + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size); + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, + COMMAND_WRITE); + + wait_for_completion(&test->irq_raised); + + dma_unmap_single(dev, orig_phys_addr, size + alignment, + DMA_FROM_DEVICE); + + crc32 = crc32_le(~0, addr, size); + if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM)) + ret = true; + +err_phys_addr: + kfree(orig_addr); +err: + return ret; +} + +static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test) +{ + pci_endpoint_test_release_irq(test); + pci_endpoint_test_free_irq_vectors(test); + return true; +} + +static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test, + int req_irq_type) +{ + struct pci_dev *pdev = test->pdev; + struct device *dev = &pdev->dev; + + if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) { + dev_err(dev, "Invalid IRQ type option\n"); + return false; + } + + if (test->irq_type == req_irq_type) + return true; + + pci_endpoint_test_release_irq(test); + pci_endpoint_test_free_irq_vectors(test); + + if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type)) + goto err; + + if (!pci_endpoint_test_request_irq(test)) + goto err; + + return true; + +err: + pci_endpoint_test_free_irq_vectors(test); + return false; +} + +static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -EINVAL; + enum pci_barno bar; + struct pci_endpoint_test *test = to_endpoint_test(file->private_data); + struct pci_dev *pdev = 
test->pdev; + + mutex_lock(&test->mutex); + + reinit_completion(&test->irq_raised); + test->last_irq = -ENODATA; + + switch (cmd) { + case PCITEST_BAR: + bar = arg; + if (bar < 0 || bar > 5) + goto ret; + if (is_am654_pci_dev(pdev) && bar == BAR_0) + goto ret; + ret = pci_endpoint_test_bar(test, bar); + break; + case PCITEST_LEGACY_IRQ: + ret = pci_endpoint_test_legacy_irq(test); + break; + case PCITEST_MSI: + case PCITEST_MSIX: + ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX); + break; + case PCITEST_WRITE: + ret = pci_endpoint_test_write(test, arg); + break; + case PCITEST_READ: + ret = pci_endpoint_test_read(test, arg); + break; + case PCITEST_COPY: + ret = pci_endpoint_test_copy(test, arg); + break; + case PCITEST_SET_IRQTYPE: + ret = pci_endpoint_test_set_irq(test, arg); + break; + case PCITEST_GET_IRQTYPE: + ret = irq_type; + break; + case PCITEST_CLEAR_IRQ: + ret = pci_endpoint_test_clear_irq(test); + break; + } + +ret: + mutex_unlock(&test->mutex); + return ret; +} + +static const struct file_operations pci_endpoint_test_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = pci_endpoint_test_ioctl, +}; + +static int pci_endpoint_test_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int err; + int id; + char name[24]; + enum pci_barno bar; + void __iomem *base; + struct device *dev = &pdev->dev; + struct pci_endpoint_test *test; + struct pci_endpoint_test_data *data; + enum pci_barno test_reg_bar = BAR_0; + struct miscdevice *misc_device; + + if (pci_is_bridge(pdev)) + return -ENODEV; + + test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL); + if (!test) + return -ENOMEM; + + test->test_reg_bar = 0; + test->alignment = 0; + test->pdev = pdev; + test->irq_type = IRQ_TYPE_UNDEFINED; + + if (no_msi) + irq_type = IRQ_TYPE_LEGACY; + + data = (struct pci_endpoint_test_data *)ent->driver_data; + if (data) { + test_reg_bar = data->test_reg_bar; + test->test_reg_bar = test_reg_bar; + test->alignment = data->alignment; + irq_type = data->irq_type; + } + + init_completion(&test->irq_raised); + mutex_init(&test->mutex); + + if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) && + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { + dev_err(dev, "Cannot set DMA mask\n"); + return -EINVAL; + } + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Cannot enable PCI device\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_MODULE_NAME); + if (err) { + dev_err(dev, "Cannot obtain PCI resources\n"); + goto err_disable_pdev; + } + + pci_set_master(pdev); + + if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) { + err = -EINVAL; + goto err_disable_irq; + } + + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { + if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { + base = pci_ioremap_bar(pdev, bar); + if (!base) { + dev_err(dev, "Failed to read BAR%d\n", bar); + WARN_ON(bar == test_reg_bar); + } + test->bar[bar] = base; + } + } + + test->base = test->bar[test_reg_bar]; + if (!test->base) { + err = -ENOMEM; + dev_err(dev, "Cannot perform PCI test without BAR%d\n", + test_reg_bar); + goto err_iounmap; + } + + pci_set_drvdata(pdev, test); + + id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL); + if (id < 0) { + err = id; + dev_err(dev, "Unable to get id\n"); + goto err_iounmap; + } + + snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id); + test->name = kstrdup(name, GFP_KERNEL); + if (!test->name) { + err = -ENOMEM; + goto err_ida_remove; + } + + if (!pci_endpoint_test_request_irq(test)) { + err = 
-EINVAL; + goto err_kfree_test_name; + } + + misc_device = &test->miscdev; + misc_device->minor = MISC_DYNAMIC_MINOR; + misc_device->name = kstrdup(name, GFP_KERNEL); + if (!misc_device->name) { + err = -ENOMEM; + goto err_release_irq; + } + misc_device->fops = &pci_endpoint_test_fops, + + err = misc_register(misc_device); + if (err) { + dev_err(dev, "Failed to register device\n"); + goto err_kfree_name; + } + + return 0; + +err_kfree_name: + kfree(misc_device->name); + +err_release_irq: + pci_endpoint_test_release_irq(test); + +err_kfree_test_name: + kfree(test->name); + +err_ida_remove: + ida_simple_remove(&pci_endpoint_test_ida, id); + +err_iounmap: + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { + if (test->bar[bar]) + pci_iounmap(pdev, test->bar[bar]); + } + +err_disable_irq: + pci_endpoint_test_free_irq_vectors(test); + pci_release_regions(pdev); + +err_disable_pdev: + pci_disable_device(pdev); + + return err; +} + +static void pci_endpoint_test_remove(struct pci_dev *pdev) +{ + int id; + enum pci_barno bar; + struct pci_endpoint_test *test = pci_get_drvdata(pdev); + struct miscdevice *misc_device = &test->miscdev; + + if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1) + return; + if (id < 0) + return; + + pci_endpoint_test_release_irq(test); + pci_endpoint_test_free_irq_vectors(test); + + misc_deregister(&test->miscdev); + kfree(misc_device->name); + kfree(test->name); + ida_simple_remove(&pci_endpoint_test_ida, id); + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { + if (test->bar[bar]) + pci_iounmap(pdev, test->bar[bar]); + } + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static const struct pci_endpoint_test_data default_data = { + .test_reg_bar = BAR_0, + .alignment = SZ_4K, + .irq_type = IRQ_TYPE_MSI, +}; + +static const struct pci_endpoint_test_data am654_data = { + .test_reg_bar = BAR_2, + .alignment = SZ_64K, + .irq_type = IRQ_TYPE_MSI, +}; + +static const struct pci_endpoint_test_data j721e_data = { + .alignment = 256, + .irq_type = IRQ_TYPE_MSI, +}; + +static const struct pci_device_id pci_endpoint_test_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x), + .driver_data = (kernel_ulong_t)&default_data, + }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x), + .driver_data = (kernel_ulong_t)&default_data, + }, + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0), + .driver_data = (kernel_ulong_t)&default_data, + }, + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A), + .driver_data = (kernel_ulong_t)&default_data, + }, + { PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654), + .driver_data = (kernel_ulong_t)&am654_data + }, + { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),}, + { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),}, + { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),}, + { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),}, + { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0), + .driver_data = (kernel_ulong_t)&default_data, + }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E), + .driver_data = (kernel_ulong_t)&j721e_data, + }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200), + .driver_data = (kernel_ulong_t)&j721e_data, + }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64), + .driver_data = (kernel_ulong_t)&j721e_data, + }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2), + .driver_data = (kernel_ulong_t)&j721e_data, + }, + { } +}; 
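The ioctl interface above is driven from user space through the misc device node this driver registers. A minimal sketch of a test client, assuming the uapi header <linux/pcitest.h> shipped with this kernel and the node name /dev/pci-endpoint-test.0 that the first probed device gets (both derived from the code above, not verified on a running system):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/pcitest.h>

	int main(void)
	{
		struct pci_endpoint_test_xfer_param param = { .size = 1024 };
		int fd = open("/dev/pci-endpoint-test.0", O_RDWR);

		if (fd < 0)
			return 1;

		/* sanity-check BAR0; the handler returns 1 (true) on success */
		if (ioctl(fd, PCITEST_BAR, 0) != 1)
			fprintf(stderr, "BAR0 test failed\n");

		/* 1 KB host-to-endpoint transfer, CRC-verified by the endpoint */
		if (ioctl(fd, PCITEST_WRITE, &param) != 1)
			fprintf(stderr, "write test failed\n");

		close(fd);
		return 0;
	}

Each test ioctl returns the handler's bool result, so 1 means the test passed; an out-of-range request comes back as -EINVAL.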
+MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl); + +static struct pci_driver pci_endpoint_test_driver = { + .name = DRV_MODULE_NAME, + .id_table = pci_endpoint_test_tbl, + .probe = pci_endpoint_test_probe, + .remove = pci_endpoint_test_remove, +}; +module_pci_driver(pci_endpoint_test_driver); + +MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER"); +MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c new file mode 100644 index 000000000..ce72e46a2 --- /dev/null +++ b/drivers/misc/phantom.c @@ -0,0 +1,563 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2005-2007 Jiri Slaby <jirislaby@gmail.com> + * + * You need a userspace library to cooperate with this driver. It (and other + * info) may be obtained here: + * http://www.fi.muni.cz/~xslaby/phantom.html + * or alternatively, you might use OpenHaptics provided by Sensable. + */ + +#include <linux/compat.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <linux/interrupt.h> +#include <linux/cdev.h> +#include <linux/slab.h> +#include <linux/phantom.h> +#include <linux/sched.h> +#include <linux/mutex.h> + +#include <linux/atomic.h> +#include <asm/io.h> + +#define PHANTOM_VERSION "n0.9.8" + +#define PHANTOM_MAX_MINORS 8 + +#define PHN_IRQCTL 0x4c /* irq control in caddr space */ + +#define PHB_RUNNING 1 +#define PHB_NOT_OH 2 + +static DEFINE_MUTEX(phantom_mutex); +static struct class *phantom_class; +static int phantom_major; + +struct phantom_device { + unsigned int opened; + void __iomem *caddr; + u32 __iomem *iaddr; + u32 __iomem *oaddr; + unsigned long status; + atomic_t counter; + + wait_queue_head_t wait; + struct cdev cdev; + + struct mutex open_lock; + spinlock_t regs_lock; + + /* used in NOT_OH mode */ + struct phm_regs oregs; + u32 ctl_reg; +}; + +static unsigned char phantom_devices[PHANTOM_MAX_MINORS]; + +static int phantom_status(struct phantom_device *dev, unsigned long newstat) +{ + pr_debug("phantom_status %lx %lx\n", dev->status, newstat); + + if (!(dev->status & PHB_RUNNING) && (newstat & PHB_RUNNING)) { + atomic_set(&dev->counter, 0); + iowrite32(PHN_CTL_IRQ, dev->iaddr + PHN_CONTROL); + iowrite32(0x43, dev->caddr + PHN_IRQCTL); + ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */ + } else if ((dev->status & PHB_RUNNING) && !(newstat & PHB_RUNNING)) { + iowrite32(0, dev->caddr + PHN_IRQCTL); + ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */ + } + + dev->status = newstat; + + return 0; +} + +/* + * File ops + */ + +static long phantom_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct phantom_device *dev = file->private_data; + struct phm_regs rs; + struct phm_reg r; + void __user *argp = (void __user *)arg; + unsigned long flags; + unsigned int i; + + switch (cmd) { + case PHN_SETREG: + case PHN_SET_REG: + if (copy_from_user(&r, argp, sizeof(r))) + return -EFAULT; + + if (r.reg > 7) + return -EINVAL; + + spin_lock_irqsave(&dev->regs_lock, flags); + if (r.reg == PHN_CONTROL && (r.value & PHN_CTL_IRQ) && + phantom_status(dev, dev->status | PHB_RUNNING)){ + spin_unlock_irqrestore(&dev->regs_lock, flags); + return -ENODEV; + } + + pr_debug("phantom: writing %x to %u\n", r.value, r.reg); + + /* preserve amp bit (don't allow to change it when in NOT_OH) */ + if (r.reg == PHN_CONTROL && (dev->status & PHB_NOT_OH)) { + r.value &= ~PHN_CTL_AMP; + r.value |= dev->ctl_reg & 
PHN_CTL_AMP; + dev->ctl_reg = r.value; + } + + iowrite32(r.value, dev->iaddr + r.reg); + ioread32(dev->iaddr); /* PCI posting */ + + if (r.reg == PHN_CONTROL && !(r.value & PHN_CTL_IRQ)) + phantom_status(dev, dev->status & ~PHB_RUNNING); + spin_unlock_irqrestore(&dev->regs_lock, flags); + break; + case PHN_SETREGS: + case PHN_SET_REGS: + if (copy_from_user(&rs, argp, sizeof(rs))) + return -EFAULT; + + pr_debug("phantom: SRS %u regs %x\n", rs.count, rs.mask); + spin_lock_irqsave(&dev->regs_lock, flags); + if (dev->status & PHB_NOT_OH) + memcpy(&dev->oregs, &rs, sizeof(rs)); + else { + u32 m = min(rs.count, 8U); + for (i = 0; i < m; i++) + if (rs.mask & BIT(i)) + iowrite32(rs.values[i], dev->oaddr + i); + ioread32(dev->iaddr); /* PCI posting */ + } + spin_unlock_irqrestore(&dev->regs_lock, flags); + break; + case PHN_GETREG: + case PHN_GET_REG: + if (copy_from_user(&r, argp, sizeof(r))) + return -EFAULT; + + if (r.reg > 7) + return -EINVAL; + + r.value = ioread32(dev->iaddr + r.reg); + + if (copy_to_user(argp, &r, sizeof(r))) + return -EFAULT; + break; + case PHN_GETREGS: + case PHN_GET_REGS: { + u32 m; + + if (copy_from_user(&rs, argp, sizeof(rs))) + return -EFAULT; + + m = min(rs.count, 8U); + + pr_debug("phantom: GRS %u regs %x\n", rs.count, rs.mask); + spin_lock_irqsave(&dev->regs_lock, flags); + for (i = 0; i < m; i++) + if (rs.mask & BIT(i)) + rs.values[i] = ioread32(dev->iaddr + i); + atomic_set(&dev->counter, 0); + spin_unlock_irqrestore(&dev->regs_lock, flags); + + if (copy_to_user(argp, &rs, sizeof(rs))) + return -EFAULT; + break; + } case PHN_NOT_OH: + spin_lock_irqsave(&dev->regs_lock, flags); + if (dev->status & PHB_RUNNING) { + printk(KERN_ERR "phantom: you need to set NOT_OH " + "before you start the device!\n"); + spin_unlock_irqrestore(&dev->regs_lock, flags); + return -EINVAL; + } + dev->status |= PHB_NOT_OH; + spin_unlock_irqrestore(&dev->regs_lock, flags); + break; + default: + return -ENOTTY; + } + + return 0; +} + +#ifdef CONFIG_COMPAT +static long phantom_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + if (_IOC_NR(cmd) <= 3 && _IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { + cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT); + cmd |= sizeof(void *) << _IOC_SIZESHIFT; + } + return phantom_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); +} +#else +#define phantom_compat_ioctl NULL +#endif + +static int phantom_open(struct inode *inode, struct file *file) +{ + struct phantom_device *dev = container_of(inode->i_cdev, + struct phantom_device, cdev); + + mutex_lock(&phantom_mutex); + nonseekable_open(inode, file); + + if (mutex_lock_interruptible(&dev->open_lock)) { + mutex_unlock(&phantom_mutex); + return -ERESTARTSYS; + } + + if (dev->opened) { + mutex_unlock(&dev->open_lock); + mutex_unlock(&phantom_mutex); + return -EINVAL; + } + + WARN_ON(dev->status & PHB_NOT_OH); + + file->private_data = dev; + + atomic_set(&dev->counter, 0); + dev->opened++; + mutex_unlock(&dev->open_lock); + mutex_unlock(&phantom_mutex); + return 0; +} + +static int phantom_release(struct inode *inode, struct file *file) +{ + struct phantom_device *dev = file->private_data; + + mutex_lock(&dev->open_lock); + + dev->opened = 0; + phantom_status(dev, dev->status & ~PHB_RUNNING); + dev->status &= ~PHB_NOT_OH; + + mutex_unlock(&dev->open_lock); + + return 0; +} + +static __poll_t phantom_poll(struct file *file, poll_table *wait) +{ + struct phantom_device *dev = file->private_data; + __poll_t mask = 0; + + pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter)); + poll_wait(file, 
&dev->wait, wait); + + if (!(dev->status & PHB_RUNNING)) + mask = EPOLLERR; + else if (atomic_read(&dev->counter)) + mask = EPOLLIN | EPOLLRDNORM; + + pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter)); + + return mask; +} + +static const struct file_operations phantom_file_ops = { + .open = phantom_open, + .release = phantom_release, + .unlocked_ioctl = phantom_ioctl, + .compat_ioctl = phantom_compat_ioctl, + .poll = phantom_poll, + .llseek = no_llseek, +}; + +static irqreturn_t phantom_isr(int irq, void *data) +{ + struct phantom_device *dev = data; + unsigned int i; + u32 ctl; + + spin_lock(&dev->regs_lock); + ctl = ioread32(dev->iaddr + PHN_CONTROL); + if (!(ctl & PHN_CTL_IRQ)) { + spin_unlock(&dev->regs_lock); + return IRQ_NONE; + } + + iowrite32(0, dev->iaddr); + iowrite32(0xc0, dev->iaddr); + + if (dev->status & PHB_NOT_OH) { + struct phm_regs *r = &dev->oregs; + u32 m = min(r->count, 8U); + + for (i = 0; i < m; i++) + if (r->mask & BIT(i)) + iowrite32(r->values[i], dev->oaddr + i); + + dev->ctl_reg ^= PHN_CTL_AMP; + iowrite32(dev->ctl_reg, dev->iaddr + PHN_CONTROL); + } + spin_unlock(&dev->regs_lock); + + ioread32(dev->iaddr); /* PCI posting */ + + atomic_inc(&dev->counter); + wake_up_interruptible(&dev->wait); + + return IRQ_HANDLED; +} + +/* + * Init and deinit driver + */ + +static unsigned int phantom_get_free(void) +{ + unsigned int i; + + for (i = 0; i < PHANTOM_MAX_MINORS; i++) + if (phantom_devices[i] == 0) + break; + + return i; +} + +static int phantom_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + struct phantom_device *pht; + unsigned int minor; + int retval; + + retval = pci_enable_device(pdev); + if (retval) { + dev_err(&pdev->dev, "pci_enable_device failed!\n"); + goto err; + } + + minor = phantom_get_free(); + if (minor == PHANTOM_MAX_MINORS) { + dev_err(&pdev->dev, "too many devices found!\n"); + retval = -EIO; + goto err_dis; + } + + phantom_devices[minor] = 1; + + retval = pci_request_regions(pdev, "phantom"); + if (retval) { + dev_err(&pdev->dev, "pci_request_regions failed!\n"); + goto err_null; + } + + retval = -ENOMEM; + pht = kzalloc(sizeof(*pht), GFP_KERNEL); + if (pht == NULL) { + dev_err(&pdev->dev, "unable to allocate device\n"); + goto err_reg; + } + + pht->caddr = pci_iomap(pdev, 0, 0); + if (pht->caddr == NULL) { + dev_err(&pdev->dev, "can't remap conf space\n"); + goto err_fr; + } + pht->iaddr = pci_iomap(pdev, 2, 0); + if (pht->iaddr == NULL) { + dev_err(&pdev->dev, "can't remap input space\n"); + goto err_unmc; + } + pht->oaddr = pci_iomap(pdev, 3, 0); + if (pht->oaddr == NULL) { + dev_err(&pdev->dev, "can't remap output space\n"); + goto err_unmi; + } + + mutex_init(&pht->open_lock); + spin_lock_init(&pht->regs_lock); + init_waitqueue_head(&pht->wait); + cdev_init(&pht->cdev, &phantom_file_ops); + pht->cdev.owner = THIS_MODULE; + + iowrite32(0, pht->caddr + PHN_IRQCTL); + ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */ + retval = request_irq(pdev->irq, phantom_isr, + IRQF_SHARED, "phantom", pht); + if (retval) { + dev_err(&pdev->dev, "can't establish ISR\n"); + goto err_unmo; + } + + retval = cdev_add(&pht->cdev, MKDEV(phantom_major, minor), 1); + if (retval) { + dev_err(&pdev->dev, "chardev registration failed\n"); + goto err_irq; + } + + if (IS_ERR(device_create(phantom_class, &pdev->dev, + MKDEV(phantom_major, minor), NULL, + "phantom%u", minor))) + dev_err(&pdev->dev, "can't create device\n"); + + pci_set_drvdata(pdev, pht); + + return 0; +err_irq: + free_irq(pdev->irq, pht); +err_unmo: + 
pci_iounmap(pdev, pht->oaddr); +err_unmi: + pci_iounmap(pdev, pht->iaddr); +err_unmc: + pci_iounmap(pdev, pht->caddr); +err_fr: + kfree(pht); +err_reg: + pci_release_regions(pdev); +err_null: + phantom_devices[minor] = 0; +err_dis: + pci_disable_device(pdev); +err: + return retval; +} + +static void phantom_remove(struct pci_dev *pdev) +{ + struct phantom_device *pht = pci_get_drvdata(pdev); + unsigned int minor = MINOR(pht->cdev.dev); + + device_destroy(phantom_class, MKDEV(phantom_major, minor)); + + cdev_del(&pht->cdev); + + iowrite32(0, pht->caddr + PHN_IRQCTL); + ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */ + free_irq(pdev->irq, pht); + + pci_iounmap(pdev, pht->oaddr); + pci_iounmap(pdev, pht->iaddr); + pci_iounmap(pdev, pht->caddr); + + kfree(pht); + + pci_release_regions(pdev); + + phantom_devices[minor] = 0; + + pci_disable_device(pdev); +} + +static int __maybe_unused phantom_suspend(struct device *dev_d) +{ + struct phantom_device *dev = dev_get_drvdata(dev_d); + + iowrite32(0, dev->caddr + PHN_IRQCTL); + ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */ + + synchronize_irq(to_pci_dev(dev_d)->irq); + + return 0; +} + +static int __maybe_unused phantom_resume(struct device *dev_d) +{ + struct phantom_device *dev = dev_get_drvdata(dev_d); + + iowrite32(0, dev->caddr + PHN_IRQCTL); + + return 0; +} + +static struct pci_device_id phantom_pci_tbl[] = { + { .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050, + .subvendor = PCI_VENDOR_ID_PLX, .subdevice = PCI_DEVICE_ID_PLX_9050, + .class = PCI_CLASS_BRIDGE_OTHER << 8, .class_mask = 0xffff00 }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, phantom_pci_tbl); + +static SIMPLE_DEV_PM_OPS(phantom_pm_ops, phantom_suspend, phantom_resume); + +static struct pci_driver phantom_pci_driver = { + .name = "phantom", + .id_table = phantom_pci_tbl, + .probe = phantom_probe, + .remove = phantom_remove, + .driver.pm = &phantom_pm_ops, +}; + +static CLASS_ATTR_STRING(version, 0444, PHANTOM_VERSION); + +static int __init phantom_init(void) +{ + int retval; + dev_t dev; + + phantom_class = class_create(THIS_MODULE, "phantom"); + if (IS_ERR(phantom_class)) { + retval = PTR_ERR(phantom_class); + printk(KERN_ERR "phantom: can't register phantom class\n"); + goto err; + } + retval = class_create_file(phantom_class, &class_attr_version.attr); + if (retval) { + printk(KERN_ERR "phantom: can't create sysfs version file\n"); + goto err_class; + } + + retval = alloc_chrdev_region(&dev, 0, PHANTOM_MAX_MINORS, "phantom"); + if (retval) { + printk(KERN_ERR "phantom: can't register character device\n"); + goto err_attr; + } + phantom_major = MAJOR(dev); + + retval = pci_register_driver(&phantom_pci_driver); + if (retval) { + printk(KERN_ERR "phantom: can't register pci driver\n"); + goto err_unchr; + } + + printk(KERN_INFO "Phantom Linux Driver, version " PHANTOM_VERSION ", " + "init OK\n"); + + return 0; +err_unchr: + unregister_chrdev_region(dev, PHANTOM_MAX_MINORS); +err_attr: + class_remove_file(phantom_class, &class_attr_version.attr); +err_class: + class_destroy(phantom_class); +err: + return retval; +} + +static void __exit phantom_exit(void) +{ + pci_unregister_driver(&phantom_pci_driver); + + unregister_chrdev_region(MKDEV(phantom_major, 0), PHANTOM_MAX_MINORS); + + class_remove_file(phantom_class, &class_attr_version.attr); + class_destroy(phantom_class); + + pr_debug("phantom: module successfully removed\n"); +} + +module_init(phantom_init); +module_exit(phantom_exit); + +MODULE_AUTHOR("Jiri Slaby <jirislaby@gmail.com>"); 
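From user space the board is driven through the phantom%u character node and the ioctls handled above. A minimal sketch, assuming the uapi header <linux/phantom.h> (which supplies struct phm_reg and the fixed-size PHN_GETREG/PHN_SETREG ioctls) and a first device appearing as /dev/phantom0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/phantom.h>

	int main(void)
	{
		struct phm_reg r = { .reg = 0 };
		int fd = open("/dev/phantom0", O_RDWR);

		if (fd < 0)
			return 1;

		/* read hardware register 0 through the input aperture */
		if (ioctl(fd, PHN_GETREG, &r) == 0)
			printf("reg 0 = 0x%x\n", r.value);

		close(fd);
		return 0;
	}

Register indices above 7 are rejected with -EINVAL, and a second open() of the same minor fails until the first descriptor is released.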
+MODULE_DESCRIPTION("Sensable Phantom driver (PCI devices)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(PHANTOM_VERSION); diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c new file mode 100644 index 000000000..7236ae527 --- /dev/null +++ b/drivers/misc/pti.c @@ -0,0 +1,978 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * pti.c - PTI driver for cJTAG data extration + * + * Copyright (C) Intel 2010 + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * The PTI (Parallel Trace Interface) driver directs trace data routed from + * various parts in the system out through the Intel Penwell PTI port and + * out of the mobile device for analysis with a debugging tool + * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7, + * compact JTAG, standard. + */ + +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/console.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/pci.h> +#include <linux/mutex.h> +#include <linux/miscdevice.h> +#include <linux/intel-pti.h> +#include <linux/slab.h> +#include <linux/uaccess.h> + +#define DRIVERNAME "pti" +#define PCINAME "pciPTI" +#define TTYNAME "ttyPTI" +#define CHARNAME "pti" +#define PTITTY_MINOR_START 0 +#define PTITTY_MINOR_NUM 2 +#define MAX_APP_IDS 16 /* 128 channel ids / u8 bit size */ +#define MAX_OS_IDS 16 /* 128 channel ids / u8 bit size */ +#define MAX_MODEM_IDS 16 /* 128 channel ids / u8 bit size */ +#define MODEM_BASE_ID 71 /* modem master ID address */ +#define CONTROL_ID 72 /* control master ID address */ +#define CONSOLE_ID 73 /* console master ID address */ +#define OS_BASE_ID 74 /* base OS master ID address */ +#define APP_BASE_ID 80 /* base App master ID address */ +#define CONTROL_FRAME_LEN 32 /* PTI control frame maximum size */ +#define USER_COPY_SIZE 8192 /* 8Kb buffer for user space copy */ +#define APERTURE_14 0x3800000 /* offset to first OS write addr */ +#define APERTURE_LEN 0x400000 /* address length */ + +struct pti_tty { + struct pti_masterchannel *mc; +}; + +struct pti_dev { + struct tty_port port[PTITTY_MINOR_NUM]; + unsigned long pti_addr; + unsigned long aperture_base; + void __iomem *pti_ioaddr; + u8 ia_app[MAX_APP_IDS]; + u8 ia_os[MAX_OS_IDS]; + u8 ia_modem[MAX_MODEM_IDS]; +}; + +/* + * This protects access to ia_app, ia_os, and ia_modem, + * which keeps track of channels allocated in + * an aperture write id. + */ +static DEFINE_MUTEX(alloclock); + +static const struct pci_device_id pci_ids[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x82B)}, + {0} +}; + +static struct tty_driver *pti_tty_driver; +static struct pti_dev *drv_data; + +static unsigned int pti_console_channel; +static unsigned int pti_control_channel; + +/** + * pti_write_to_aperture()- The private write function to PTI HW. + * + * @mc: The 'aperture'. It's part of a write address that holds + * a master and channel ID. + * @buf: Data being written to the HW that will ultimately be seen + * in a debugging tool (Fido, Lauterbach). + * @len: Size of buffer. + * + * Since each aperture is specified by a unique + * master/channel ID, no two processes will be writing + * to the same aperture at the same time so no lock is required. The + * PTI-Output agent will send these out in the order that they arrived, and + * thus, it will intermix these messages. The debug tool can then later + * regroup the appropriate message segments together reconstituting each + * message. 
+ */ +static void pti_write_to_aperture(struct pti_masterchannel *mc, + u8 *buf, + int len) +{ + int dwordcnt; + int final; + int i; + u32 ptiword; + u32 __iomem *aperture; + u8 *p = buf; + + /* + * calculate the aperture offset from the base using the master and + * channel id's. + */ + aperture = drv_data->pti_ioaddr + (mc->master << 15) + + (mc->channel << 8); + + dwordcnt = len >> 2; + final = len - (dwordcnt << 2); /* final = trailing bytes */ + if (final == 0 && dwordcnt != 0) { /* always need a final dword */ + final += 4; + dwordcnt--; + } + + for (i = 0; i < dwordcnt; i++) { + ptiword = be32_to_cpu(*(u32 *)p); + p += 4; + iowrite32(ptiword, aperture); + } + + aperture += PTI_LASTDWORD_DTS; /* adding DTS signals that is EOM */ + + ptiword = 0; + for (i = 0; i < final; i++) + ptiword |= *p++ << (24-(8*i)); + + iowrite32(ptiword, aperture); + return; +} + +/** + * pti_control_frame_built_and_sent()- control frame build and send function. + * + * @mc: The master / channel structure on which the function + * built a control frame. + * @thread_name: The thread name associated with the master / channel or + * 'NULL' if using the 'current' global variable. + * + * To be able to post process the PTI contents on host side, a control frame + * is added before sending any PTI content. So the host side knows on + * each PTI frame the name of the thread using a dedicated master / channel. + * The thread name is retrieved from 'current' global variable if 'thread_name' + * is 'NULL', else it is retrieved from 'thread_name' parameter. + * This function builds this frame and sends it to a master ID CONTROL_ID. + * The overhead is only 32 bytes since the driver only writes to HW + * in 32 byte chunks. + */ +static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc, + const char *thread_name) +{ + /* + * Since we access the comm member in current's task_struct, we only + * need to be as large as what 'comm' in that structure is. + */ + char comm[TASK_COMM_LEN]; + struct pti_masterchannel mccontrol = {.master = CONTROL_ID, + .channel = 0}; + const char *thread_name_p; + const char *control_format = "%3d %3d %s"; + u8 control_frame[CONTROL_FRAME_LEN]; + + if (!thread_name) { + if (!in_interrupt()) + get_task_comm(comm, current); + else + strncpy(comm, "Interrupt", TASK_COMM_LEN); + + /* Absolutely ensure our buffer is zero terminated. */ + comm[TASK_COMM_LEN-1] = 0; + thread_name_p = comm; + } else { + thread_name_p = thread_name; + } + + mccontrol.channel = pti_control_channel; + pti_control_channel = (pti_control_channel + 1) & 0x7f; + + snprintf(control_frame, CONTROL_FRAME_LEN, control_format, mc->master, + mc->channel, thread_name_p); + pti_write_to_aperture(&mccontrol, control_frame, strlen(control_frame)); +} + +/** + * pti_write_full_frame_to_aperture()- high level function to + * write to PTI. + * + * @mc: The 'aperture'. It's part of a write address that holds + * a master and channel ID. + * @buf: Data being written to the HW that will ultimately be seen + * in a debugging tool (Fido, Lauterbach). + * @len: Size of buffer. + * + * All threads sending data (either console, user space application, ...) + * are calling the high level function to write to PTI meaning that it is + * possible to add a control frame before sending the content. 
+ */
+static void pti_write_full_frame_to_aperture(struct pti_masterchannel *mc,
+					     const unsigned char *buf,
+					     int len)
+{
+	pti_control_frame_built_and_sent(mc, NULL);
+	pti_write_to_aperture(mc, (u8 *)buf, len);
+}
+
+/**
+ * get_id()- Allocate a master and channel ID.
+ *
+ * @id_array:    an array of bits representing what channel
+ *               id's are allocated for writing.
+ * @max_ids:     The max amount of available write IDs to use.
+ * @base_id:     The starting SW channel ID, based on the Intel
+ *               PTI arch.
+ * @thread_name: The thread name associated with the master / channel or
+ *               'NULL' if using the 'current' global variable.
+ *
+ * Returns:
+ *	pti_masterchannel struct with master, channel ID address
+ *	0 for error
+ *
+ * Each bit in the arrays ia_app and ia_os corresponds to a master and
+ * channel id. The bit is one if the id is taken and 0 if free. For
+ * every master there are 128 channel id's.
+ */
+static struct pti_masterchannel *get_id(u8 *id_array,
+					int max_ids,
+					int base_id,
+					const char *thread_name)
+{
+	struct pti_masterchannel *mc;
+	int i, j, mask;
+
+	mc = kmalloc(sizeof(struct pti_masterchannel), GFP_KERNEL);
+	if (mc == NULL)
+		return NULL;
+
+	/* look for a byte with a free bit */
+	for (i = 0; i < max_ids; i++)
+		if (id_array[i] != 0xff)
+			break;
+	if (i == max_ids) {
+		kfree(mc);
+		return NULL;
+	}
+	/* find the bit in the 128 possible channel opportunities */
+	mask = 0x80;
+	for (j = 0; j < 8; j++) {
+		if ((id_array[i] & mask) == 0)
+			break;
+		mask >>= 1;
+	}
+
+	/* grab it */
+	id_array[i] |= mask;
+	mc->master = base_id;
+	mc->channel = ((i & 0xf) << 3) + j;
+	/* write new master Id / channel Id allocation to channel control */
+	pti_control_frame_built_and_sent(mc, thread_name);
+	return mc;
+}
+
+/*
+ * The following three functions:
+ * pti_request_masterchannel(), pti_release_masterchannel()
+ * and pti_writedata() are an API for other kernel drivers to
+ * access PTI.
+ */
+
+/**
+ * pti_request_masterchannel()- Kernel API function used to allocate
+ *				a master, channel ID address
+ *				to write to PTI HW.
+ *
+ * @type:        0- request Application master, channel aperture ID
+ *                  write address.
+ *               1- request OS master, channel aperture ID write
+ *                  address.
+ *               2- request Modem master, channel aperture ID
+ *                  write address.
+ *               Other values, error.
+ * @thread_name: The thread name associated with the master / channel or
+ *               'NULL' if using the 'current' global variable.
+ *
+ * Returns:
+ *	pti_masterchannel struct
+ *	0 for error
+ */
+struct pti_masterchannel *pti_request_masterchannel(u8 type,
+						    const char *thread_name)
+{
+	struct pti_masterchannel *mc;
+
+	mutex_lock(&alloclock);
+
+	switch (type) {
+
+	case 0:
+		mc = get_id(drv_data->ia_app, MAX_APP_IDS,
+			    APP_BASE_ID, thread_name);
+		break;
+
+	case 1:
+		mc = get_id(drv_data->ia_os, MAX_OS_IDS,
+			    OS_BASE_ID, thread_name);
+		break;
+
+	case 2:
+		mc = get_id(drv_data->ia_modem, MAX_MODEM_IDS,
+			    MODEM_BASE_ID, thread_name);
+		break;
+	default:
+		mc = NULL;
+	}
+
+	mutex_unlock(&alloclock);
+	return mc;
+}
+EXPORT_SYMBOL_GPL(pti_request_masterchannel);
+
+/**
+ * pti_release_masterchannel()- Kernel API function used to release
+ *				a master, channel ID address
+ *				used to write to PTI HW.
+ *
+ * @mc: master, channel aperture ID address to be released. This
+ *      will de-allocate the structure via kfree().
+ */
+void pti_release_masterchannel(struct pti_masterchannel *mc)
+{
+	u8 master, channel, i;
+
+	mutex_lock(&alloclock);
+
+	if (mc) {
+		master = mc->master;
+		channel = mc->channel;
+
+		if (master == APP_BASE_ID) {
+			i = channel >> 3;
+			drv_data->ia_app[i] &= ~(0x80 >> (channel & 0x7));
+		} else if (master == OS_BASE_ID) {
+			i = channel >> 3;
+			drv_data->ia_os[i] &= ~(0x80 >> (channel & 0x7));
+		} else {
+			i = channel >> 3;
+			drv_data->ia_modem[i] &= ~(0x80 >> (channel & 0x7));
+		}
+
+		kfree(mc);
+	}
+
+	mutex_unlock(&alloclock);
+}
+EXPORT_SYMBOL_GPL(pti_release_masterchannel);
+
+/**
+ * pti_writedata()- Kernel API function used to write trace
+ *                  debugging data to PTI HW.
+ *
+ * @mc:    Master, channel aperture ID address to write to.
+ *         Null value will return with no write occurring.
+ * @buf:   Trace debugging data to write to the PTI HW.
+ *         Null value will return with no write occurring.
+ * @count: Size of buf. Value of 0 or a negative number will
+ *         return with no write occurring.
+ */
+void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count)
+{
+	/*
+	 * since this function is exported, this is treated like an
+	 * API function, thus, all parameters should
+	 * be checked for validity.
+	 */
+	if ((mc != NULL) && (buf != NULL) && (count > 0))
+		pti_write_to_aperture(mc, buf, count);
+	return;
+}
+EXPORT_SYMBOL_GPL(pti_writedata);
+
+/*
+ * for the tty_driver_*() basic function descriptions, see tty_driver.h.
+ * Specific header comments made for PTI-related specifics.
+ */
+
+/**
+ * pti_tty_driver_open()- Open an Application master, channel aperture
+ *                        ID to the PTI device via tty device.
+ *
+ * @tty:  tty interface.
+ * @filp: filp interface passed to tty_port_open() call.
+ *
+ * Returns:
+ *	int, 0 for success
+ *	otherwise, fail value
+ *
+ * The main purpose of using the tty device interface is for
+ * each tty port to have a unique PTI write aperture. In an
+ * example use case, ttyPTI0 gets syslogd and an APP aperture
+ * ID and ttyPTI1 is where the n_tracesink ldisc hooks to route
+ * modem messages into PTI. Modem trace data does not have to
+ * go to ttyPTI1, but ttyPTI0 and ttyPTI1 do need to be distinct
+ * master IDs. These messages go through the PTI HW and out of
+ * the handheld platform and to the Fido/Lauterbach device.
+ */
+static int pti_tty_driver_open(struct tty_struct *tty, struct file *filp)
+{
+	/*
+	 * we actually want to allocate a new channel per open, per
+	 * system arch. HW gives more than plenty channels for a single
+	 * system task to have its own channel to write trace data. This
+	 * also removes a locking requirement for the actual write
+	 * procedure.
+	 */
+	return tty_port_open(tty->port, tty, filp);
+}
+
+/**
+ * pti_tty_driver_close()- close tty device and release Application
+ *                         master, channel aperture ID to the PTI
+ *                         device via tty device.
+ *
+ * @tty:  tty interface.
+ * @filp: filp interface passed to tty_port_close() call.
+ *
+ * The main purpose of using the tty device interface is to route
+ * syslog daemon messages to the PTI HW and out of the handheld platform
+ * and to the Fido/Lauterbach device.
+ */
+static void pti_tty_driver_close(struct tty_struct *tty, struct file *filp)
+{
+	tty_port_close(tty->port, tty, filp);
+}
+
+/**
+ * pti_tty_install()- Used to set up specific master-channels
+ *                    to tty ports for organizational purposes when
+ *                    tracing viewed from debugging tools.
+ *
+ * @driver: tty driver information.
+ * @tty:    tty struct containing pti information.
+ *
+ * Returns:
+ *	0 for success
+ *	otherwise, error
+ */
+static int pti_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	int idx = tty->index;
+	struct pti_tty *pti_tty_data;
+	int ret = tty_standard_install(driver, tty);
+
+	if (ret == 0) {
+		pti_tty_data = kmalloc(sizeof(struct pti_tty), GFP_KERNEL);
+		if (pti_tty_data == NULL)
+			return -ENOMEM;
+
+		if (idx == PTITTY_MINOR_START)
+			pti_tty_data->mc = pti_request_masterchannel(0, NULL);
+		else
+			pti_tty_data->mc = pti_request_masterchannel(2, NULL);
+
+		if (pti_tty_data->mc == NULL) {
+			kfree(pti_tty_data);
+			return -ENXIO;
+		}
+		tty->driver_data = pti_tty_data;
+	}
+
+	return ret;
+}
+
+/**
+ * pti_tty_cleanup()- Used to de-allocate master-channel resources
+ *                    tied to tty's of this driver.
+ *
+ * @tty: tty struct containing pti information.
+ */
+static void pti_tty_cleanup(struct tty_struct *tty)
+{
+	struct pti_tty *pti_tty_data = tty->driver_data;
+
+	if (pti_tty_data == NULL)
+		return;
+	pti_release_masterchannel(pti_tty_data->mc);
+	kfree(pti_tty_data);
+	tty->driver_data = NULL;
+}
+
+/**
+ * pti_tty_driver_write()- Write trace debugging data through the tty
+ *                         interface to the PTI HW.
+ *
+ * @tty: tty struct containing pti information.
+ * @buf: trace data to be written.
+ * @len: # of bytes to write.
+ *
+ * Returns:
+ *	int, # of bytes written
+ *	otherwise, error
+ */
+static int pti_tty_driver_write(struct tty_struct *tty,
+				const unsigned char *buf, int len)
+{
+	struct pti_tty *pti_tty_data = tty->driver_data;
+
+	/*
+	 * we can't write to the pti hardware if the private driver_data
+	 * or the mc address is missing.
+	 */
+	if ((pti_tty_data != NULL) && (pti_tty_data->mc != NULL)) {
+		pti_write_to_aperture(pti_tty_data->mc, (u8 *)buf, len);
+		return len;
+	}
+	return -EFAULT;
+}
+
+/**
+ * pti_tty_write_room()- Always returns 2048.
+ *
+ * @tty: contains tty info of the pti driver.
+ */
+static int pti_tty_write_room(struct tty_struct *tty)
+{
+	return 2048;
+}
+
+/**
+ * pti_char_open()- Open an Application master, channel aperture
+ *                  ID to the PTI device. Part of the misc device
+ *                  implementation.
+ *
+ * @inode: not used.
+ * @filp:  Output- will have a masterchannel struct set containing
+ *         the allocated application PTI aperture write address.
+ *
+ * Returns:
+ *	int, 0 for success
+ *	otherwise, a fail value
+ */
+static int pti_char_open(struct inode *inode, struct file *filp)
+{
+	struct pti_masterchannel *mc;
+
+	/*
+	 * We really do want to fail immediately if
+	 * pti_request_masterchannel() fails,
+	 * before assigning the value to filp->private_data.
+	 * Slightly easier to debug if this driver needs debugging.
+	 */
+	mc = pti_request_masterchannel(0, NULL);
+	if (mc == NULL)
+		return -ENOMEM;
+	filp->private_data = mc;
+	return 0;
+}
+
+/**
+ * pti_char_release()- Close a char channel to the PTI device. Part
+ *                     of the misc device implementation.
+ *
+ * @inode: Not used in this implementation.
+ * @filp:  Contains private_data that contains the master, channel
+ *         ID to be released by the PTI device.
+ *
+ * Returns:
+ *	always 0
+ */
+static int pti_char_release(struct inode *inode, struct file *filp)
+{
+	pti_release_masterchannel(filp->private_data);
+	filp->private_data = NULL;
+	return 0;
+}
+
+/**
+ * pti_char_write()- Write trace debugging data through the char
+ *                   interface to the PTI HW. Part of the misc device
+ *                   implementation.
+ *
+ * @filp: Contains private data which is used to obtain
+ *        master, channel write ID.
+ * @data: trace data to be written. + * @len: # of byte to write. + * @ppose: Not used in this function implementation. + * + * Returns: + * int, # of bytes written + * otherwise, error value + * + * Notes: From side discussions with Alan Cox and experimenting + * with PTI debug HW like Nokia's Fido box and Lauterbach + * devices, 8192 byte write buffer used by USER_COPY_SIZE was + * deemed an appropriate size for this type of usage with + * debugging HW. + */ +static ssize_t pti_char_write(struct file *filp, const char __user *data, + size_t len, loff_t *ppose) +{ + struct pti_masterchannel *mc; + void *kbuf; + const char __user *tmp; + size_t size = USER_COPY_SIZE; + size_t n = 0; + + tmp = data; + mc = filp->private_data; + + kbuf = kmalloc(size, GFP_KERNEL); + if (kbuf == NULL) { + pr_err("%s(%d): buf allocation failed\n", + __func__, __LINE__); + return -ENOMEM; + } + + do { + if (len - n > USER_COPY_SIZE) + size = USER_COPY_SIZE; + else + size = len - n; + + if (copy_from_user(kbuf, tmp, size)) { + kfree(kbuf); + return n ? n : -EFAULT; + } + + pti_write_to_aperture(mc, kbuf, size); + n += size; + tmp += size; + + } while (len > n); + + kfree(kbuf); + return len; +} + +static const struct tty_operations pti_tty_driver_ops = { + .open = pti_tty_driver_open, + .close = pti_tty_driver_close, + .write = pti_tty_driver_write, + .write_room = pti_tty_write_room, + .install = pti_tty_install, + .cleanup = pti_tty_cleanup +}; + +static const struct file_operations pti_char_driver_ops = { + .owner = THIS_MODULE, + .write = pti_char_write, + .open = pti_char_open, + .release = pti_char_release, +}; + +static struct miscdevice pti_char_driver = { + .minor = MISC_DYNAMIC_MINOR, + .name = CHARNAME, + .fops = &pti_char_driver_ops +}; + +/** + * pti_console_write()- Write to the console that has been acquired. + * + * @c: Not used in this implementaiton. + * @buf: Data to be written. + * @len: Length of buf. + */ +static void pti_console_write(struct console *c, const char *buf, unsigned len) +{ + static struct pti_masterchannel mc = {.master = CONSOLE_ID, + .channel = 0}; + + mc.channel = pti_console_channel; + pti_console_channel = (pti_console_channel + 1) & 0x7f; + + pti_write_full_frame_to_aperture(&mc, buf, len); +} + +/** + * pti_console_device()- Return the driver tty structure and set the + * associated index implementation. + * + * @c: Console device of the driver. + * @index: index associated with c. + * + * Returns: + * always value of pti_tty_driver structure when this function + * is called. + */ +static struct tty_driver *pti_console_device(struct console *c, int *index) +{ + *index = c->index; + return pti_tty_driver; +} + +/** + * pti_console_setup()- Initialize console variables used by the driver. + * + * @c: Not used. + * @opts: Not used. + * + * Returns: + * always 0. + */ +static int pti_console_setup(struct console *c, char *opts) +{ + pti_console_channel = 0; + pti_control_channel = 0; + return 0; +} + +/* + * pti_console struct, used to capture OS printk()'s and shift + * out to the PTI device for debugging. This cannot be + * enabled upon boot because of the possibility of eating + * any serial console printk's (race condition discovered). + * The console should be enabled upon when the tty port is + * used for the first time. Since the primary purpose for + * the tty port is to hook up syslog to it, the tty port + * will be open for a really long time. 
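+ * (The corresponding console_start()/console_stop() calls are made
+ * from pti_port_activate() and pti_port_shutdown() below.)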
+
+/*
+ * pti_console struct, used to capture OS printk()'s and shift
+ * out to the PTI device for debugging. This cannot be
+ * enabled upon boot because of the possibility of eating
+ * any serial console printk's (race condition discovered).
+ * The console is enabled when the tty port is used for the
+ * first time. Since the primary purpose for the tty port is
+ * to hook up syslog to it, the tty port will be open for a
+ * really long time.
+ */
+static struct console pti_console = {
+	.name		= TTYNAME,
+	.write		= pti_console_write,
+	.device		= pti_console_device,
+	.setup		= pti_console_setup,
+	.flags		= CON_PRINTBUFFER,
+	.index		= 0,
+};
+
+/**
+ * pti_port_activate()- Used to start/initialize any items upon
+ * first opening of tty_port().
+ *
+ * @port: The tty port number of the PTI device.
+ * @tty:  The tty struct associated with this device.
+ *
+ * Returns:
+ *	always returns 0
+ *
+ * Notes: The primary purpose of the PTI tty port 0 is to hook
+ * the syslog daemon to it; thus this port will be open for a
+ * very long time.
+ */
+static int pti_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+	if (port->tty->index == PTITTY_MINOR_START)
+		console_start(&pti_console);
+	return 0;
+}
+
+/**
+ * pti_port_shutdown()- Used to stop/shutdown any items upon the
+ * last tty port close.
+ *
+ * @port: The tty port number of the PTI device.
+ *
+ * Notes: The primary purpose of the PTI tty port 0 is to hook
+ * the syslog daemon to it; thus this port will be open for a
+ * very long time.
+ */
+static void pti_port_shutdown(struct tty_port *port)
+{
+	if (port->tty->index == PTITTY_MINOR_START)
+		console_stop(&pti_console);
+}
+
+static const struct tty_port_operations tty_port_ops = {
+	.activate = pti_port_activate,
+	.shutdown = pti_port_shutdown,
+};
+
+/*
+ * Note the _probe() call sets everything up and ties the char and tty
+ * to successfully detecting the PTI device on the pci bus.
+ */
+
+/**
+ * pti_pci_probe()- Used to detect pti on the pci bus and set
+ * things up in the driver.
+ *
+ * @pdev: pci_dev struct values for pti.
+ * @ent:  pci_device_id struct for pti driver.
+ *
+ * Returns:
+ *	0 for success
+ *	otherwise, error
+ */
+static int pti_pci_probe(struct pci_dev *pdev,
+		const struct pci_device_id *ent)
+{
+	unsigned int a;
+	int retval;
+	int pci_bar = 1;
+
+	dev_dbg(&pdev->dev, "%s %s(%d): PTI PCI ID %04x:%04x\n", __FILE__,
+			__func__, __LINE__, pdev->vendor, pdev->device);
+
+	retval = misc_register(&pti_char_driver);
+	if (retval) {
+		pr_err("%s(%d): CHAR registration failed for pti driver\n",
+			__func__, __LINE__);
+		pr_err("%s(%d): Error value returned: %d\n",
+			__func__, __LINE__, retval);
+		goto err;
+	}
+
+	retval = pci_enable_device(pdev);
+	if (retval != 0) {
+		dev_err(&pdev->dev,
+			"%s: pci_enable_device() returned error %d\n",
+			__func__, retval);
+		goto err_unreg_misc;
+	}
+
+	drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+	if (drv_data == NULL) {
+		retval = -ENOMEM;
+		dev_err(&pdev->dev,
+			"%s(%d): kzalloc() returned NULL memory.\n",
+			__func__, __LINE__);
+		goto err_disable_pci;
+	}
+	drv_data->pti_addr = pci_resource_start(pdev, pci_bar);
+
+	retval = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
+	if (retval != 0) {
+		dev_err(&pdev->dev,
+			"%s(%d): pci_request_region() returned error %d\n",
+			__func__, __LINE__, retval);
+		goto err_free_dd;
+	}
+	drv_data->aperture_base = drv_data->pti_addr+APERTURE_14;
+	drv_data->pti_ioaddr =
+		ioremap((u32)drv_data->aperture_base,
+		APERTURE_LEN);
+	if (!drv_data->pti_ioaddr) {
+		retval = -ENOMEM;
+		goto err_rel_reg;
+	}
+
+	pci_set_drvdata(pdev, drv_data);
+
+	for (a = 0; a < PTITTY_MINOR_NUM; a++) {
+		struct tty_port *port = &drv_data->port[a];
+		tty_port_init(port);
+		port->ops = &tty_port_ops;
+
+		tty_port_register_device(port, pti_tty_driver, a, &pdev->dev);
+	}
+
+	register_console(&pti_console);
+
+	return 0;
+err_rel_reg:
+	pci_release_region(pdev, pci_bar);
+err_free_dd:
+	kfree(drv_data);
+err_disable_pci:
+	pci_disable_device(pdev);
+err_unreg_misc:
+	misc_deregister(&pti_char_driver);
+err:
+	return retval;
+}
+
+/**
+ * pti_pci_remove()- Driver exit method to remove PTI from
+ *		     PCI bus.
+ * @pdev: variable containing pci info of PTI.
+ */
+static void pti_pci_remove(struct pci_dev *pdev)
+{
+	struct pti_dev *drv_data = pci_get_drvdata(pdev);
+	unsigned int a;
+
+	unregister_console(&pti_console);
+
+	for (a = 0; a < PTITTY_MINOR_NUM; a++) {
+		tty_unregister_device(pti_tty_driver, a);
+		tty_port_destroy(&drv_data->port[a]);
+	}
+
+	iounmap(drv_data->pti_ioaddr);
+	kfree(drv_data);
+	pci_release_region(pdev, 1);
+	pci_disable_device(pdev);
+
+	misc_deregister(&pti_char_driver);
+}
+
+static struct pci_driver pti_pci_driver = {
+	.name		= PCINAME,
+	.id_table	= pci_ids,
+	.probe		= pti_pci_probe,
+	.remove		= pti_pci_remove,
+};
+
+/**
+ * pti_init()- Overall entry/init call to the pti driver.
+ *	       It starts the registration process with the kernel.
+ *
+ * Returns:
+ *	int __init, 0 for success
+ *	otherwise value is an error
+ *
+ */
+static int __init pti_init(void)
+{
+	int retval;
+
+	/* First register module as tty device */
+
+	pti_tty_driver = alloc_tty_driver(PTITTY_MINOR_NUM);
+	if (pti_tty_driver == NULL) {
+		pr_err("%s(%d): Memory allocation failed for ptiTTY driver\n",
+			__func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	pti_tty_driver->driver_name	= DRIVERNAME;
+	pti_tty_driver->name		= TTYNAME;
+	pti_tty_driver->major		= 0;
+	pti_tty_driver->minor_start	= PTITTY_MINOR_START;
+	pti_tty_driver->type		= TTY_DRIVER_TYPE_SYSTEM;
+	pti_tty_driver->subtype		= SYSTEM_TYPE_SYSCONS;
+	pti_tty_driver->flags		= TTY_DRIVER_REAL_RAW |
+					  TTY_DRIVER_DYNAMIC_DEV;
+	pti_tty_driver->init_termios	= tty_std_termios;
+
+	tty_set_operations(pti_tty_driver, &pti_tty_driver_ops);
+
+	retval = tty_register_driver(pti_tty_driver);
+	if (retval) {
+		pr_err("%s(%d): TTY registration failed for pti driver\n",
+			__func__, __LINE__);
+		pr_err("%s(%d): Error value returned: %d\n",
+			__func__, __LINE__, retval);
+
+		goto put_tty;
+	}
+
+	retval = pci_register_driver(&pti_pci_driver);
+	if (retval) {
+		pr_err("%s(%d): PCI registration failed for pti driver\n",
+			__func__, __LINE__);
+		pr_err("%s(%d): Error value returned: %d\n",
+			__func__, __LINE__, retval);
+		goto unreg_tty;
+	}
+
+	return 0;
+unreg_tty:
+	tty_unregister_driver(pti_tty_driver);
+put_tty:
+	put_tty_driver(pti_tty_driver);
+	pti_tty_driver = NULL;
+	return retval;
+}
+
+/**
+ * pti_exit()- Unregisters this module as a tty and pci driver.
+ */
+static void __exit pti_exit(void)
+{
+	tty_unregister_driver(pti_tty_driver);
+	pci_unregister_driver(&pti_pci_driver);
+	put_tty_driver(pti_tty_driver);
+}
+
+module_init(pti_init);
+module_exit(pti_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ken Mills, Jay Freyensee");
+MODULE_DESCRIPTION("PTI Driver");
+
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
new file mode 100644
index 000000000..d9140e756
--- /dev/null
+++ b/drivers/misc/pvpanic.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Pvpanic Device Support
+ *
+ * Copyright (C) 2013 Fujitsu.
+ * Copyright (C) 2018 ZTE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <uapi/misc/pvpanic.h>
+
+static void __iomem *base;
+
+MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
+MODULE_DESCRIPTION("pvpanic device driver");
+MODULE_LICENSE("GPL");
+
+static void
+pvpanic_send_event(unsigned int event)
+{
+	iowrite8(event, base);
+}
+
+static int
+pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
+		     void *unused)
+{
+	unsigned int event = PVPANIC_PANICKED;
+
+	if (kexec_crash_loaded())
+		event = PVPANIC_CRASH_LOADED;
+
+	pvpanic_send_event(event);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block pvpanic_panic_nb = {
+	.notifier_call = pvpanic_panic_notify,
+	.priority = 1, /* let this be called before the broken drm_fb_helper */
+};
+
+#ifdef CONFIG_ACPI
+static int pvpanic_add(struct acpi_device *device);
+static int pvpanic_remove(struct acpi_device *device);
+
+static const struct acpi_device_id pvpanic_device_ids[] = {
+	{ "QEMU0001", 0 },
+	{ "", 0 }
+};
+MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids);
+
+static struct acpi_driver pvpanic_driver = {
+	.name =		"pvpanic",
+	.class =	"QEMU",
+	.ids =		pvpanic_device_ids,
+	.ops =		{
+		.add =		pvpanic_add,
+		.remove =	pvpanic_remove,
+	},
+	.owner =	THIS_MODULE,
+};
+
+static acpi_status
+pvpanic_walk_resources(struct acpi_resource *res, void *context)
+{
+	struct resource r;
+
+	if (acpi_dev_resource_io(res, &r)) {
+#ifdef CONFIG_HAS_IOPORT_MAP
+		base = ioport_map(r.start, resource_size(&r));
+		return AE_OK;
+#else
+		return AE_ERROR;
+#endif
+	} else if (acpi_dev_resource_memory(res, &r)) {
+		base = ioremap(r.start, resource_size(&r));
+		return AE_OK;
+	}
+
+	return AE_ERROR;
+}
+
+static int pvpanic_add(struct acpi_device *device)
+{
+	int ret;
+
+	ret = acpi_bus_get_status(device);
+	if (ret < 0)
+		return ret;
+
+	if (!device->status.enabled || !device->status.functional)
+		return -ENODEV;
+
+	acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+			    pvpanic_walk_resources, NULL);
+
+	if (!base)
+		return -ENODEV;
+
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &pvpanic_panic_nb);
+
+	return 0;
+}
+
+static int pvpanic_remove(struct acpi_device *device)
+{
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &pvpanic_panic_nb);
+	iounmap(base);
+
+	return 0;
+}
+
+static int pvpanic_register_acpi_driver(void)
+{
+	return acpi_bus_register_driver(&pvpanic_driver);
+}
+
+static void pvpanic_unregister_acpi_driver(void)
+{
+	acpi_bus_unregister_driver(&pvpanic_driver);
+}
+#else
+static int pvpanic_register_acpi_driver(void)
+{
+	return -ENODEV;
+}
+
+static void pvpanic_unregister_acpi_driver(void) {}
+#endif
+
+static int pvpanic_mmio_probe(struct platform_device *pdev)
+{
+	base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &pvpanic_panic_nb);
+
+	return 0;
+}
+
+static int pvpanic_mmio_remove(struct platform_device *pdev)
+{
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &pvpanic_panic_nb);
+
+	return 0;
+}
+
+static const struct of_device_id pvpanic_mmio_match[] = {
+	{ .compatible = "qemu,pvpanic-mmio", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, pvpanic_mmio_match);
+
+static struct platform_driver pvpanic_mmio_driver = {
+	.driver = {
+		.name = "pvpanic-mmio",
+		.of_match_table = pvpanic_mmio_match,
+	},
+	.probe = pvpanic_mmio_probe,
+	.remove = pvpanic_mmio_remove,
+};
+
+static int __init pvpanic_mmio_init(void)
+{
+	if (acpi_disabled)
+		return platform_driver_register(&pvpanic_mmio_driver);
+	else
+		return pvpanic_register_acpi_driver();
+}
+
+static void __exit pvpanic_mmio_exit(void)
+{
+	if (acpi_disabled)
+		platform_driver_unregister(&pvpanic_mmio_driver);
+	else
+		pvpanic_unregister_acpi_driver();
+}
+
+module_init(pvpanic_mmio_init);
+module_exit(pvpanic_mmio_exit);
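
pvpanic is also a compact example of the panic-notifier pattern it relies on: register a notifier_block on panic_notifier_list and do minimal, atomic-safe work when it fires. A minimal sketch of the same hook as a standalone module (illustrative only, not part of pvpanic; on 5.10, panic_notifier_list is declared in linux/kernel.h):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int my_panic_notify(struct notifier_block *nb, unsigned long code,
			   void *unused)
{
	/* Runs in panic context: no sleeping, no allocation. */
	pr_emerg("panic observed\n");
	return NOTIFY_DONE;
}

static struct notifier_block my_panic_nb = {
	.notifier_call = my_panic_notify,
};

static int __init my_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
	return 0;
}

static void __exit my_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list, &my_panic_nb);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
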
diff --git a/drivers/misc/qcom-coincell.c b/drivers/misc/qcom-coincell.c
new file mode 100644
index 000000000..54d4f6ee8
--- /dev/null
+++ b/drivers/misc/qcom-coincell.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+struct qcom_coincell {
+	struct device	*dev;
+	struct regmap	*regmap;
+	u32		base_addr;
+};
+
+#define QCOM_COINCELL_REG_RSET		0x44
+#define QCOM_COINCELL_REG_VSET		0x45
+#define QCOM_COINCELL_REG_ENABLE	0x46
+
+#define QCOM_COINCELL_ENABLE		BIT(7)
+
+static const int qcom_rset_map[] = { 2100, 1700, 1200, 800 };
+static const int qcom_vset_map[] = { 2500, 3200, 3100, 3000 };
+/* NOTE: for pm8921 and others, voltage of 2500 is 16 (10000b), not 0 */
+
+/* if enable==0, rset and vset are ignored */
+static int qcom_coincell_chgr_config(struct qcom_coincell *chgr, int rset,
+				     int vset, bool enable)
+{
+	int i, j, rc;
+
+	/* if disabling, just do that and skip other operations */
+	if (!enable)
+		return regmap_write(chgr->regmap,
+			  chgr->base_addr + QCOM_COINCELL_REG_ENABLE, 0);
+
+	/* find index for current-limiting resistor */
+	for (i = 0; i < ARRAY_SIZE(qcom_rset_map); i++)
+		if (rset == qcom_rset_map[i])
+			break;
+
+	if (i >= ARRAY_SIZE(qcom_rset_map)) {
+		dev_err(chgr->dev, "invalid rset-ohms value %d\n", rset);
+		return -EINVAL;
+	}
+
+	/* find index for charge voltage */
+	for (j = 0; j < ARRAY_SIZE(qcom_vset_map); j++)
+		if (vset == qcom_vset_map[j])
+			break;
+
+	if (j >= ARRAY_SIZE(qcom_vset_map)) {
+		dev_err(chgr->dev, "invalid vset-millivolts value %d\n", vset);
+		return -EINVAL;
+	}
+
+	rc = regmap_write(chgr->regmap,
+			  chgr->base_addr + QCOM_COINCELL_REG_RSET, i);
+	if (rc) {
+		/*
+		 * This is mainly to flag a bad base_addr (reg) from dts.
+		 * Other failures writing to the registers should be
+		 * extremely rare, or indicative of problems that
+		 * should be reported elsewhere (e.g. an SPMI failure).
+		 */
+		dev_err(chgr->dev, "could not write to RSET register\n");
+		return rc;
+	}
+
+	rc = regmap_write(chgr->regmap,
+		chgr->base_addr + QCOM_COINCELL_REG_VSET, j);
+	if (rc)
+		return rc;
+
+	/* set 'enable' register */
+	return regmap_write(chgr->regmap,
+			    chgr->base_addr + QCOM_COINCELL_REG_ENABLE,
+			    QCOM_COINCELL_ENABLE);
+}
+
+static int qcom_coincell_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct qcom_coincell chgr;
+	u32 rset = 0;
+	u32 vset = 0;
+	bool enable;
+	int rc;
+
+	chgr.dev = &pdev->dev;
+
+	chgr.regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!chgr.regmap) {
+		dev_err(chgr.dev, "Unable to get regmap\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(node, "reg", &chgr.base_addr);
+	if (rc)
+		return rc;
+
+	enable = !of_property_read_bool(node, "qcom,charger-disable");
+
+	if (enable) {
+		rc = of_property_read_u32(node, "qcom,rset-ohms", &rset);
+		if (rc) {
+			dev_err(chgr.dev,
+				"can't find 'qcom,rset-ohms' in DT block");
+			return rc;
+		}
+
+		rc = of_property_read_u32(node, "qcom,vset-millivolts", &vset);
+		if (rc) {
+			dev_err(chgr.dev,
+				"can't find 'qcom,vset-millivolts' in DT block");
+			return rc;
+		}
+	}
+
+	return qcom_coincell_chgr_config(&chgr, rset, vset, enable);
+}
+
+static const struct of_device_id qcom_coincell_match_table[] = {
+	{ .compatible = "qcom,pm8941-coincell", },
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, qcom_coincell_match_table);
+
+static struct platform_driver qcom_coincell_driver = {
+	.driver	= {
+		.name		= "qcom-spmi-coincell",
+		.of_match_table	= qcom_coincell_match_table,
+	},
+	.probe		= qcom_coincell_probe,
+};
+
+module_platform_driver(qcom_coincell_driver);
+
+MODULE_DESCRIPTION("Qualcomm PMIC coincell charger driver");
+MODULE_LICENSE("GPL v2");
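
A worked example of the table lookups in qcom_coincell_chgr_config() above: the DT-supplied value is matched against qcom_rset_map/qcom_vset_map, and it is the index, not the value, that gets written to the RSET/VSET registers. A standalone sketch of the same lookup:

#include <stdio.h>

static const int rset_map[] = { 2100, 1700, 1200, 800 };	/* ohms */
static const int vset_map[] = { 2500, 3200, 3100, 3000 };	/* millivolts */

static int map_index(const int *map, int n, int val)
{
	int i;

	for (i = 0; i < n; i++)
		if (map[i] == val)
			return i;
	return -1;	/* mirrors the driver's -EINVAL path */
}

int main(void)
{
	/* e.g. qcom,rset-ohms = <1200>; qcom,vset-millivolts = <3000>; */
	printf("RSET index: %d\n", map_index(rset_map, 4, 1200));	/* 2 */
	printf("VSET index: %d\n", map_index(vset_map, 4, 3000));	/* 3 */
	return 0;
}
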
diff --git a/drivers/misc/sgi-gru/Makefile b/drivers/misc/sgi-gru/Makefile
new file mode 100644
index 000000000..8132116ec
--- /dev/null
+++ b/drivers/misc/sgi-gru/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-$(CONFIG_SGI_GRU_DEBUG)	:= -DDEBUG
+
+obj-$(CONFIG_SGI_GRU) := gru.o
+gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o gruhandles.o grukdump.o
+
diff --git a/drivers/misc/sgi-gru/gru.h b/drivers/misc/sgi-gru/gru.h
new file mode 100644
index 000000000..3ad76cd18
--- /dev/null
+++ b/drivers/misc/sgi-gru/gru.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __GRU_H__
+#define __GRU_H__
+
+/*
+ * GRU architectural definitions
+ */
+#define GRU_CACHE_LINE_BYTES		64
+#define GRU_HANDLE_STRIDE		256
+#define GRU_CB_BASE			0
+#define GRU_DS_BASE			0x20000
+
+/*
+ * Size used to map GRU GSeg
+ */
+#if defined(CONFIG_IA64)
+#define GRU_GSEG_PAGESIZE	(256 * 1024UL)
+#elif defined(CONFIG_X86_64)
+#define GRU_GSEG_PAGESIZE	(256 * 1024UL)		/* ZZZ 2MB ??? */
+#else
+#error "Unsupported architecture"
+#endif
+
+/*
+ * Structure for obtaining GRU resource information
+ */
+struct gru_chiplet_info {
+	int	node;
+	int	chiplet;
+	int	blade;
+	int	total_dsr_bytes;
+	int	total_cbr;
+	int	total_user_dsr_bytes;
+	int	total_user_cbr;
+	int	free_user_dsr_bytes;
+	int	free_user_cbr;
+};
+
+/*
+ * Statistics kept for each context.
+ */
+struct gru_gseg_statistics {
+	unsigned long	fmm_tlbmiss;
+	unsigned long	upm_tlbmiss;
+	unsigned long	tlbdropin;
+	unsigned long	context_stolen;
+	unsigned long	reserved[10];
+};
+
+/* Flags for GRU options on the gru_create_context() call */
+/* Select one of the following 4 options to specify how TLB misses are handled */
+#define GRU_OPT_MISS_DEFAULT	0x0000 /* Use default mode */
+#define GRU_OPT_MISS_USER_POLL	0x0001 /* User will poll CB for faults */
+#define GRU_OPT_MISS_FMM_INTR	0x0002 /* Send interrupt to cpu to
+					  handle fault */
+#define GRU_OPT_MISS_FMM_POLL	0x0003 /* Use system polling thread */
+#define GRU_OPT_MISS_MASK	0x0003 /* Mask for TLB MISS option */
+
+
+
+#endif		/* __GRU_H__ */
diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h
new file mode 100644
index 000000000..04d5170ac
--- /dev/null
+++ b/drivers/misc/sgi-gru/gru_instructions.h
@@ -0,0 +1,736 @@
+/*
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __GRU_INSTRUCTIONS_H__ +#define __GRU_INSTRUCTIONS_H__ + +extern int gru_check_status_proc(void *cb); +extern int gru_wait_proc(void *cb); +extern void gru_wait_abort_proc(void *cb); + + + +/* + * Architecture dependent functions + */ + +#if defined(CONFIG_IA64) +#include <linux/compiler.h> +#include <asm/intrinsics.h> +#define __flush_cache(p) ia64_fc((unsigned long)p) +/* Use volatile on IA64 to ensure ordering via st4.rel */ +#define gru_ordered_store_ulong(p, v) \ + do { \ + barrier(); \ + *((volatile unsigned long *)(p)) = v; /* force st.rel */ \ + } while (0) +#elif defined(CONFIG_X86_64) +#include <asm/cacheflush.h> +#define __flush_cache(p) clflush(p) +#define gru_ordered_store_ulong(p, v) \ + do { \ + barrier(); \ + *(unsigned long *)p = v; \ + } while (0) +#else +#error "Unsupported architecture" +#endif + +/* + * Control block status and exception codes + */ +#define CBS_IDLE 0 +#define CBS_EXCEPTION 1 +#define CBS_ACTIVE 2 +#define CBS_CALL_OS 3 + +/* CB substatus bitmasks */ +#define CBSS_MSG_QUEUE_MASK 7 +#define CBSS_IMPLICIT_ABORT_ACTIVE_MASK 8 + +/* CB substatus message queue values (low 3 bits of substatus) */ +#define CBSS_NO_ERROR 0 +#define CBSS_LB_OVERFLOWED 1 +#define CBSS_QLIMIT_REACHED 2 +#define CBSS_PAGE_OVERFLOW 3 +#define CBSS_AMO_NACKED 4 +#define CBSS_PUT_NACKED 5 + +/* + * Structure used to fetch exception detail for CBs that terminate with + * CBS_EXCEPTION + */ +struct control_block_extended_exc_detail { + unsigned long cb; + int opc; + int ecause; + int exopc; + long exceptdet0; + int exceptdet1; + int cbrstate; + int cbrexecstatus; +}; + +/* + * Instruction formats + */ + +/* + * Generic instruction format. + * This definition has precise bit field definitions. + */ +struct gru_instruction_bits { + /* DW 0 - low */ + unsigned int icmd: 1; + unsigned char ima: 3; /* CB_DelRep, unmapped mode */ + unsigned char reserved0: 4; + unsigned int xtype: 3; + unsigned int iaa0: 2; + unsigned int iaa1: 2; + unsigned char reserved1: 1; + unsigned char opc: 8; /* opcode */ + unsigned char exopc: 8; /* extended opcode */ + /* DW 0 - high */ + unsigned int idef2: 22; /* TRi0 */ + unsigned char reserved2: 2; + unsigned char istatus: 2; + unsigned char isubstatus:4; + unsigned char reserved3: 1; + unsigned char tlb_fault_color: 1; + /* DW 1 */ + unsigned long idef4; /* 42 bits: TRi1, BufSize */ + /* DW 2-6 */ + unsigned long idef1; /* BAddr0 */ + unsigned long idef5; /* Nelem */ + unsigned long idef6; /* Stride, Operand1 */ + unsigned long idef3; /* BAddr1, Value, Operand2 */ + unsigned long reserved4; + /* DW 7 */ + unsigned long avalue; /* AValue */ +}; + +/* + * Generic instruction with friendlier names. This format is used + * for inline instructions. 
+ */ +struct gru_instruction { + /* DW 0 */ + union { + unsigned long op64; /* icmd,xtype,iaa0,ima,opc,tri0 */ + struct { + unsigned int op32; + unsigned int tri0; + }; + }; + unsigned long tri1_bufsize; /* DW 1 */ + unsigned long baddr0; /* DW 2 */ + unsigned long nelem; /* DW 3 */ + unsigned long op1_stride; /* DW 4 */ + unsigned long op2_value_baddr1; /* DW 5 */ + unsigned long reserved0; /* DW 6 */ + unsigned long avalue; /* DW 7 */ +}; + +/* Some shifts and masks for the low 64 bits of a GRU command */ +#define GRU_CB_ICMD_SHFT 0 +#define GRU_CB_ICMD_MASK 0x1 +#define GRU_CB_XTYPE_SHFT 8 +#define GRU_CB_XTYPE_MASK 0x7 +#define GRU_CB_IAA0_SHFT 11 +#define GRU_CB_IAA0_MASK 0x3 +#define GRU_CB_IAA1_SHFT 13 +#define GRU_CB_IAA1_MASK 0x3 +#define GRU_CB_IMA_SHFT 1 +#define GRU_CB_IMA_MASK 0x3 +#define GRU_CB_OPC_SHFT 16 +#define GRU_CB_OPC_MASK 0xff +#define GRU_CB_EXOPC_SHFT 24 +#define GRU_CB_EXOPC_MASK 0xff +#define GRU_IDEF2_SHFT 32 +#define GRU_IDEF2_MASK 0x3ffff +#define GRU_ISTATUS_SHFT 56 +#define GRU_ISTATUS_MASK 0x3 + +/* GRU instruction opcodes (opc field) */ +#define OP_NOP 0x00 +#define OP_BCOPY 0x01 +#define OP_VLOAD 0x02 +#define OP_IVLOAD 0x03 +#define OP_VSTORE 0x04 +#define OP_IVSTORE 0x05 +#define OP_VSET 0x06 +#define OP_IVSET 0x07 +#define OP_MESQ 0x08 +#define OP_GAMXR 0x09 +#define OP_GAMIR 0x0a +#define OP_GAMIRR 0x0b +#define OP_GAMER 0x0c +#define OP_GAMERR 0x0d +#define OP_BSTORE 0x0e +#define OP_VFLUSH 0x0f + + +/* Extended opcodes values (exopc field) */ + +/* GAMIR - AMOs with implicit operands */ +#define EOP_IR_FETCH 0x01 /* Plain fetch of memory */ +#define EOP_IR_CLR 0x02 /* Fetch and clear */ +#define EOP_IR_INC 0x05 /* Fetch and increment */ +#define EOP_IR_DEC 0x07 /* Fetch and decrement */ +#define EOP_IR_QCHK1 0x0d /* Queue check, 64 byte msg */ +#define EOP_IR_QCHK2 0x0e /* Queue check, 128 byte msg */ + +/* GAMIRR - Registered AMOs with implicit operands */ +#define EOP_IRR_FETCH 0x01 /* Registered fetch of memory */ +#define EOP_IRR_CLR 0x02 /* Registered fetch and clear */ +#define EOP_IRR_INC 0x05 /* Registered fetch and increment */ +#define EOP_IRR_DEC 0x07 /* Registered fetch and decrement */ +#define EOP_IRR_DECZ 0x0f /* Registered fetch and decrement, update on zero*/ + +/* GAMER - AMOs with explicit operands */ +#define EOP_ER_SWAP 0x00 /* Exchange argument and memory */ +#define EOP_ER_OR 0x01 /* Logical OR with memory */ +#define EOP_ER_AND 0x02 /* Logical AND with memory */ +#define EOP_ER_XOR 0x03 /* Logical XOR with memory */ +#define EOP_ER_ADD 0x04 /* Add value to memory */ +#define EOP_ER_CSWAP 0x08 /* Compare with operand2, write operand1 if match*/ +#define EOP_ER_CADD 0x0c /* Queue check, operand1*64 byte msg */ + +/* GAMERR - Registered AMOs with explicit operands */ +#define EOP_ERR_SWAP 0x00 /* Exchange argument and memory */ +#define EOP_ERR_OR 0x01 /* Logical OR with memory */ +#define EOP_ERR_AND 0x02 /* Logical AND with memory */ +#define EOP_ERR_XOR 0x03 /* Logical XOR with memory */ +#define EOP_ERR_ADD 0x04 /* Add value to memory */ +#define EOP_ERR_CSWAP 0x08 /* Compare with operand2, write operand1 if match*/ +#define EOP_ERR_EPOLL 0x09 /* Poll for equality */ +#define EOP_ERR_NPOLL 0x0a /* Poll for inequality */ + +/* GAMXR - SGI Arithmetic unit */ +#define EOP_XR_CSWAP 0x0b /* Masked compare exchange */ + + +/* Transfer types (xtype field) */ +#define XTYPE_B 0x0 /* byte */ +#define XTYPE_S 0x1 /* short (2-byte) */ +#define XTYPE_W 0x2 /* word (4-byte) */ +#define XTYPE_DW 0x3 /* doubleword (8-byte) */ +#define 
XTYPE_CL 0x6 /* cacheline (64-byte) */ + + +/* Instruction access attributes (iaa0, iaa1 fields) */ +#define IAA_RAM 0x0 /* normal cached RAM access */ +#define IAA_NCRAM 0x2 /* noncoherent RAM access */ +#define IAA_MMIO 0x1 /* noncoherent memory-mapped I/O space */ +#define IAA_REGISTER 0x3 /* memory-mapped registers, etc. */ + + +/* Instruction mode attributes (ima field) */ +#define IMA_MAPPED 0x0 /* Virtual mode */ +#define IMA_CB_DELAY 0x1 /* hold read responses until status changes */ +#define IMA_UNMAPPED 0x2 /* bypass the TLBs (OS only) */ +#define IMA_INTERRUPT 0x4 /* Interrupt when instruction completes */ + +/* CBE ecause bits */ +#define CBE_CAUSE_RI (1 << 0) +#define CBE_CAUSE_INVALID_INSTRUCTION (1 << 1) +#define CBE_CAUSE_UNMAPPED_MODE_FORBIDDEN (1 << 2) +#define CBE_CAUSE_PE_CHECK_DATA_ERROR (1 << 3) +#define CBE_CAUSE_IAA_GAA_MISMATCH (1 << 4) +#define CBE_CAUSE_DATA_SEGMENT_LIMIT_EXCEPTION (1 << 5) +#define CBE_CAUSE_OS_FATAL_TLB_FAULT (1 << 6) +#define CBE_CAUSE_EXECUTION_HW_ERROR (1 << 7) +#define CBE_CAUSE_TLBHW_ERROR (1 << 8) +#define CBE_CAUSE_RA_REQUEST_TIMEOUT (1 << 9) +#define CBE_CAUSE_HA_REQUEST_TIMEOUT (1 << 10) +#define CBE_CAUSE_RA_RESPONSE_FATAL (1 << 11) +#define CBE_CAUSE_RA_RESPONSE_NON_FATAL (1 << 12) +#define CBE_CAUSE_HA_RESPONSE_FATAL (1 << 13) +#define CBE_CAUSE_HA_RESPONSE_NON_FATAL (1 << 14) +#define CBE_CAUSE_ADDRESS_SPACE_DECODE_ERROR (1 << 15) +#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16) +#define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17) +#define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18) +#define CBE_CAUSE_FORCED_ERROR (1 << 19) + +/* CBE cbrexecstatus bits */ +#define CBR_EXS_ABORT_OCC_BIT 0 +#define CBR_EXS_INT_OCC_BIT 1 +#define CBR_EXS_PENDING_BIT 2 +#define CBR_EXS_QUEUED_BIT 3 +#define CBR_EXS_TLB_INVAL_BIT 4 +#define CBR_EXS_EXCEPTION_BIT 5 +#define CBR_EXS_CB_INT_PENDING_BIT 6 + +#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT) +#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT) +#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT) +#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT) +#define CBR_EXS_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT) +#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT) +#define CBR_EXS_CB_INT_PENDING (1 << CBR_EXS_CB_INT_PENDING_BIT) + +/* + * Exceptions are retried for the following cases. If any OTHER bits are set + * in ecause, the exception is not retryable. 
+ */ +#define EXCEPTION_RETRY_BITS (CBE_CAUSE_EXECUTION_HW_ERROR | \ + CBE_CAUSE_TLBHW_ERROR | \ + CBE_CAUSE_RA_REQUEST_TIMEOUT | \ + CBE_CAUSE_RA_RESPONSE_NON_FATAL | \ + CBE_CAUSE_HA_RESPONSE_NON_FATAL | \ + CBE_CAUSE_RA_RESPONSE_DATA_ERROR | \ + CBE_CAUSE_HA_RESPONSE_DATA_ERROR \ + ) + +/* Message queue head structure */ +union gru_mesqhead { + unsigned long val; + struct { + unsigned int head; + unsigned int limit; + }; +}; + + +/* Generate the low word of a GRU instruction */ +static inline unsigned long +__opdword(unsigned char opcode, unsigned char exopc, unsigned char xtype, + unsigned char iaa0, unsigned char iaa1, + unsigned long idef2, unsigned char ima) +{ + return (1 << GRU_CB_ICMD_SHFT) | + ((unsigned long)CBS_ACTIVE << GRU_ISTATUS_SHFT) | + (idef2<< GRU_IDEF2_SHFT) | + (iaa0 << GRU_CB_IAA0_SHFT) | + (iaa1 << GRU_CB_IAA1_SHFT) | + (ima << GRU_CB_IMA_SHFT) | + (xtype << GRU_CB_XTYPE_SHFT) | + (opcode << GRU_CB_OPC_SHFT) | + (exopc << GRU_CB_EXOPC_SHFT); +} + +/* + * Architecture specific intrinsics + */ +static inline void gru_flush_cache(void *p) +{ + __flush_cache(p); +} + +/* + * Store the lower 64 bits of the command including the "start" bit. Then + * start the instruction executing. + */ +static inline void gru_start_instruction(struct gru_instruction *ins, unsigned long op64) +{ + gru_ordered_store_ulong(ins, op64); + mb(); + gru_flush_cache(ins); +} + + +/* Convert "hints" to IMA */ +#define CB_IMA(h) ((h) | IMA_UNMAPPED) + +/* Convert data segment cache line index into TRI0 / TRI1 value */ +#define GRU_DINDEX(i) ((i) * GRU_CACHE_LINE_BYTES) + +/* Inline functions for GRU instructions. + * Note: + * - nelem and stride are in elements + * - tri0/tri1 is in bytes for the beginning of the data segment. + */ +static inline void gru_vload_phys(void *cb, unsigned long gpa, + unsigned int tri0, int iaa, unsigned long hints) +{ + struct gru_instruction *ins = (struct gru_instruction *)cb; + + ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62); + ins->nelem = 1; + ins->op1_stride = 1; + gru_start_instruction(ins, __opdword(OP_VLOAD, 0, XTYPE_DW, iaa, 0, + (unsigned long)tri0, CB_IMA(hints))); +} + +static inline void gru_vstore_phys(void *cb, unsigned long gpa, + unsigned int tri0, int iaa, unsigned long hints) +{ + struct gru_instruction *ins = (struct gru_instruction *)cb; + + ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62); + ins->nelem = 1; + ins->op1_stride = 1; + gru_start_instruction(ins, __opdword(OP_VSTORE, 0, XTYPE_DW, iaa, 0, + (unsigned long)tri0, CB_IMA(hints))); +} + +static inline void gru_vload(void *cb, unsigned long mem_addr, + unsigned int tri0, unsigned char xtype, unsigned long nelem, + unsigned long stride, unsigned long hints) +{ + struct gru_instruction *ins = (struct gru_instruction *)cb; + + ins->baddr0 = (long)mem_addr; + ins->nelem = nelem; + ins->op1_stride = stride; + gru_start_instruction(ins, __opdword(OP_VLOAD, 0, xtype, IAA_RAM, 0, + (unsigned long)tri0, CB_IMA(hints))); +} + +static inline void gru_vstore(void *cb, unsigned long mem_addr, + unsigned int tri0, unsigned char xtype, unsigned long nelem, + unsigned long stride, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)mem_addr; + ins->nelem = nelem; + ins->op1_stride = stride; + gru_start_instruction(ins, __opdword(OP_VSTORE, 0, xtype, IAA_RAM, 0, + tri0, CB_IMA(hints))); +} + +static inline void gru_ivload(void *cb, unsigned long mem_addr, + unsigned int tri0, unsigned int tri1, unsigned char xtype, + unsigned long nelem, unsigned long 
hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)mem_addr; + ins->nelem = nelem; + ins->tri1_bufsize = tri1; + gru_start_instruction(ins, __opdword(OP_IVLOAD, 0, xtype, IAA_RAM, 0, + tri0, CB_IMA(hints))); +} + +static inline void gru_ivstore(void *cb, unsigned long mem_addr, + unsigned int tri0, unsigned int tri1, + unsigned char xtype, unsigned long nelem, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)mem_addr; + ins->nelem = nelem; + ins->tri1_bufsize = tri1; + gru_start_instruction(ins, __opdword(OP_IVSTORE, 0, xtype, IAA_RAM, 0, + tri0, CB_IMA(hints))); +} + +static inline void gru_vset(void *cb, unsigned long mem_addr, + unsigned long value, unsigned char xtype, unsigned long nelem, + unsigned long stride, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)mem_addr; + ins->op2_value_baddr1 = value; + ins->nelem = nelem; + ins->op1_stride = stride; + gru_start_instruction(ins, __opdword(OP_VSET, 0, xtype, IAA_RAM, 0, + 0, CB_IMA(hints))); +} + +static inline void gru_ivset(void *cb, unsigned long mem_addr, + unsigned int tri1, unsigned long value, unsigned char xtype, + unsigned long nelem, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)mem_addr; + ins->op2_value_baddr1 = value; + ins->nelem = nelem; + ins->tri1_bufsize = tri1; + gru_start_instruction(ins, __opdword(OP_IVSET, 0, xtype, IAA_RAM, 0, + 0, CB_IMA(hints))); +} + +static inline void gru_vflush(void *cb, unsigned long mem_addr, + unsigned long nelem, unsigned char xtype, unsigned long stride, + unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)mem_addr; + ins->op1_stride = stride; + ins->nelem = nelem; + gru_start_instruction(ins, __opdword(OP_VFLUSH, 0, xtype, IAA_RAM, 0, + 0, CB_IMA(hints))); +} + +static inline void gru_nop(void *cb, int hints) +{ + struct gru_instruction *ins = (void *)cb; + + gru_start_instruction(ins, __opdword(OP_NOP, 0, 0, 0, 0, 0, CB_IMA(hints))); +} + + +static inline void gru_bcopy(void *cb, const unsigned long src, + unsigned long dest, + unsigned int tri0, unsigned int xtype, unsigned long nelem, + unsigned int bufsize, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)src; + ins->op2_value_baddr1 = (long)dest; + ins->nelem = nelem; + ins->tri1_bufsize = bufsize; + gru_start_instruction(ins, __opdword(OP_BCOPY, 0, xtype, IAA_RAM, + IAA_RAM, tri0, CB_IMA(hints))); +} + +static inline void gru_bstore(void *cb, const unsigned long src, + unsigned long dest, unsigned int tri0, unsigned int xtype, + unsigned long nelem, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)src; + ins->op2_value_baddr1 = (long)dest; + ins->nelem = nelem; + gru_start_instruction(ins, __opdword(OP_BSTORE, 0, xtype, 0, IAA_RAM, + tri0, CB_IMA(hints))); +} + +static inline void gru_gamir(void *cb, int exopc, unsigned long src, + unsigned int xtype, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)src; + gru_start_instruction(ins, __opdword(OP_GAMIR, exopc, xtype, IAA_RAM, 0, + 0, CB_IMA(hints))); +} + +static inline void gru_gamirr(void *cb, int exopc, unsigned long src, + unsigned int xtype, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)src; + gru_start_instruction(ins, __opdword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0, + 0, CB_IMA(hints))); +} + +static 
inline void gru_gamer(void *cb, int exopc, unsigned long src, + unsigned int xtype, + unsigned long operand1, unsigned long operand2, + unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)src; + ins->op1_stride = operand1; + ins->op2_value_baddr1 = operand2; + gru_start_instruction(ins, __opdword(OP_GAMER, exopc, xtype, IAA_RAM, 0, + 0, CB_IMA(hints))); +} + +static inline void gru_gamerr(void *cb, int exopc, unsigned long src, + unsigned int xtype, unsigned long operand1, + unsigned long operand2, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)src; + ins->op1_stride = operand1; + ins->op2_value_baddr1 = operand2; + gru_start_instruction(ins, __opdword(OP_GAMERR, exopc, xtype, IAA_RAM, 0, + 0, CB_IMA(hints))); +} + +static inline void gru_gamxr(void *cb, unsigned long src, + unsigned int tri0, unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)src; + ins->nelem = 4; + gru_start_instruction(ins, __opdword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW, + IAA_RAM, 0, 0, CB_IMA(hints))); +} + +static inline void gru_mesq(void *cb, unsigned long queue, + unsigned long tri0, unsigned long nelem, + unsigned long hints) +{ + struct gru_instruction *ins = (void *)cb; + + ins->baddr0 = (long)queue; + ins->nelem = nelem; + gru_start_instruction(ins, __opdword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0, + tri0, CB_IMA(hints))); +} + +static inline unsigned long gru_get_amo_value(void *cb) +{ + struct gru_instruction *ins = (void *)cb; + + return ins->avalue; +} + +static inline int gru_get_amo_value_head(void *cb) +{ + struct gru_instruction *ins = (void *)cb; + + return ins->avalue & 0xffffffff; +} + +static inline int gru_get_amo_value_limit(void *cb) +{ + struct gru_instruction *ins = (void *)cb; + + return ins->avalue >> 32; +} + +static inline union gru_mesqhead gru_mesq_head(int head, int limit) +{ + union gru_mesqhead mqh; + + mqh.head = head; + mqh.limit = limit; + return mqh; +} + +/* + * Get struct control_block_extended_exc_detail for CB. + */ +extern int gru_get_cb_exception_detail(void *cb, + struct control_block_extended_exc_detail *excdet); + +#define GRU_EXC_STR_SIZE 256 + + +/* + * Control block definition for checking status + */ +struct gru_control_block_status { + unsigned int icmd :1; + unsigned int ima :3; + unsigned int reserved0 :4; + unsigned int unused1 :24; + unsigned int unused2 :24; + unsigned int istatus :2; + unsigned int isubstatus :4; + unsigned int unused3 :2; +}; + +/* Get CB status */ +static inline int gru_get_cb_status(void *cb) +{ + struct gru_control_block_status *cbs = (void *)cb; + + return cbs->istatus; +} + +/* Get CB message queue substatus */ +static inline int gru_get_cb_message_queue_substatus(void *cb) +{ + struct gru_control_block_status *cbs = (void *)cb; + + return cbs->isubstatus & CBSS_MSG_QUEUE_MASK; +} + +/* Get CB substatus */ +static inline int gru_get_cb_substatus(void *cb) +{ + struct gru_control_block_status *cbs = (void *)cb; + + return cbs->isubstatus; +} + +/* + * User interface to check an instruction status. UPM and exceptions + * are handled automatically. However, this function does NOT wait + * for an active instruction to complete. 
+ *
+ */
+static inline int gru_check_status(void *cb)
+{
+	struct gru_control_block_status *cbs = (void *)cb;
+	int ret;
+
+	ret = cbs->istatus;
+	if (ret != CBS_ACTIVE)
+		ret = gru_check_status_proc(cb);
+	return ret;
+}
+
+/*
+ * User interface (via inline function) to wait for an instruction
+ * to complete. Completion status (IDLE or EXCEPTION) is returned
+ * to the user. Exceptions due to hardware errors are automatically
+ * retried before returning an exception.
+ *
+ */
+static inline int gru_wait(void *cb)
+{
+	return gru_wait_proc(cb);
+}
+
+/*
+ * Wait for CB to complete. Aborts program if error. (Note: error does NOT
+ * mean TLB miss - only fatal errors such as memory parity error or user
+ * bugs will cause termination.)
+ */
+static inline void gru_wait_abort(void *cb)
+{
+	gru_wait_abort_proc(cb);
+}
+
+/*
+ * Get a pointer to the start of a gseg
+ *	p	- Any valid pointer within the gseg
+ */
+static inline void *gru_get_gseg_pointer (void *p)
+{
+	return (void *)((unsigned long)p & ~(GRU_GSEG_PAGESIZE - 1));
+}
+
+/*
+ * Get a pointer to a control block
+ *	gseg	- GSeg address returned from gru_get_thread_gru_segment()
+ *	index	- index of desired CB
+ */
+static inline void *gru_get_cb_pointer(void *gseg,
+						      int index)
+{
+	return gseg + GRU_CB_BASE + index * GRU_HANDLE_STRIDE;
+}
+
+/*
+ * Get a pointer to a cacheline in the data segment portion of a GSeg
+ *	gseg	- GSeg address returned from gru_get_thread_gru_segment()
+ *	index	- index of desired cache line
+ */
+static inline void *gru_get_data_pointer(void *gseg, int index)
+{
+	return gseg + GRU_DS_BASE + index * GRU_CACHE_LINE_BYTES;
+}
+
+/*
+ * Convert a vaddr into the tri index within the GSEG
+ *	vaddr		- virtual address of within gseg
+ */
+static inline int gru_get_tri(void *vaddr)
+{
+	return ((unsigned long)vaddr & (GRU_GSEG_PAGESIZE - 1)) - GRU_DS_BASE;
+}
+#endif		/* __GRU_INSTRUCTIONS_H__ */
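
gru_instructions.h ends here. To illustrate the status interface it exports, here is a hypothetical caller sketch (not taken from the driver) that busy-polls a CB started in user-poll mode and fetches exception detail on failure; it assumes an application context in which this header and the gru_*_proc() support routines are linked in:

#include "gru_instructions.h"

static int poll_cb(void *cb)
{
	struct control_block_extended_exc_detail excdet;
	int status;

	do {
		/* handles UPM and exception states internally */
		status = gru_check_status(cb);
	} while (status == CBS_ACTIVE);

	if (status == CBS_EXCEPTION &&
	    gru_get_cb_exception_detail(cb, &excdet) == 0)
		return -excdet.ecause;	/* hypothetical error encoding */
	return status;
}
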
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
new file mode 100644
index 000000000..9c7d475d1
--- /dev/null
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -0,0 +1,903 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SN Platform GRU Driver
+ *
+ * FAULT HANDLER FOR GRU DETECTED TLB MISSES
+ *
+ * This file contains code that handles TLB misses within the GRU.
+ * These misses are reported either via interrupts or user polling of
+ * the user CB.
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/security.h>
+#include <linux/sync_core.h>
+#include <linux/prefetch.h>
+#include "gru.h"
+#include "grutables.h"
+#include "grulib.h"
+#include "gru_instructions.h"
+#include <asm/uv/uv_hub.h>
+
+/* Return codes for vtop functions */
+#define VTOP_SUCCESS	0
+#define VTOP_INVALID	-1
+#define VTOP_RETRY	-2
+
+
+/*
+ * Test if a physical address is a valid GRU GSEG address
+ */
+static inline int is_gru_paddr(unsigned long paddr)
+{
+	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
+}
+
+/*
+ * Find the vma of a GRU segment. Caller must hold mmap_lock.
+ */
+struct vm_area_struct *gru_find_vma(unsigned long vaddr)
+{
+	struct vm_area_struct *vma;
+
+	vma = find_vma(current->mm, vaddr);
+	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
+		return vma;
+	return NULL;
+}
+
+/*
+ * Find and lock the gts that contains the specified user vaddr.
+ *
+ * Returns:
+ *	- *gts with the mmap_lock locked for read and the GTS locked.
+ *	- NULL if vaddr invalid OR is not a valid GSEG vaddr.
+ */
+
+static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	struct gru_thread_state *gts = NULL;
+
+	mmap_read_lock(mm);
+	vma = gru_find_vma(vaddr);
+	if (vma)
+		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
+	if (gts)
+		mutex_lock(&gts->ts_ctxlock);
+	else
+		mmap_read_unlock(mm);
+	return gts;
+}
+
+static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	struct gru_thread_state *gts = ERR_PTR(-EINVAL);
+
+	mmap_write_lock(mm);
+	vma = gru_find_vma(vaddr);
+	if (!vma)
+		goto err;
+
+	gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
+	if (IS_ERR(gts))
+		goto err;
+	mutex_lock(&gts->ts_ctxlock);
+	mmap_write_downgrade(mm);
+	return gts;
+
+err:
+	mmap_write_unlock(mm);
+	return gts;
+}
+
+/*
+ * Unlock a GTS that was previously locked with gru_find_lock_gts().
+ */
+static void gru_unlock_gts(struct gru_thread_state *gts)
+{
+	mutex_unlock(&gts->ts_ctxlock);
+	mmap_read_unlock(current->mm);
+}
+
+/*
+ * Set a CB.istatus to active using a user virtual address. This must be done
+ * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
+ * If the line is evicted, the status may be lost. The in-cache update
+ * is necessary to prevent the user from seeing a stale cb.istatus that will
+ * change as soon as the TFH restart is complete. Races may cause an
+ * occasional failure to clear the cb.istatus, but that is ok.
+ */
+static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
+{
+	if (cbk) {
+		cbk->istatus = CBS_ACTIVE;
+	}
+}
+
+/*
+ * Read & clear a TFM
+ *
+ * The GRU has an array of fault maps. A map is private to a cpu
+ * Only one cpu will be accessing a cpu's fault map.
+ *
+ * This function scans the cpu-private fault map & clears all bits that
+ * are set. The function returns a bitmap that indicates the bits that
+ * were cleared. Note that since the maps may be updated asynchronously by
+ * the GRU, atomic operations must be used to clear bits.
+ */
+static void get_clear_fault_map(struct gru_state *gru,
+				struct gru_tlb_fault_map *imap,
+				struct gru_tlb_fault_map *dmap)
+{
+	unsigned long i, k;
+	struct gru_tlb_fault_map *tfm;
+
+	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
+	prefetchw(tfm);		/* Helps on hardware, required for emulator */
+	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
+		k = tfm->fault_bits[i];
+		if (k)
+			k = xchg(&tfm->fault_bits[i], 0UL);
+		imap->fault_bits[i] = k;
+		k = tfm->done_bits[i];
+		if (k)
+			k = xchg(&tfm->done_bits[i], 0UL);
+		dmap->fault_bits[i] = k;
+	}
+
+	/*
+	 * Not functionally required but helps performance. (Required
+	 * on emulator)
+	 */
+	gru_flush_cache(tfm);
+}
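
get_clear_fault_map() above depends on xchg() so that bits the GRU sets concurrently are never lost: each non-zero word is atomically swapped with zero and the harvested value is kept. A standalone C11 sketch of the same harvest pattern (illustrative; a plain atomic stands in for the hardware-updated map):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	_Atomic unsigned long fault_bits = 0x90005UL;	/* set by "hardware" */
	unsigned long harvested = 0;

	/* Mirror the driver: read first, and only if non-zero swap
	 * with 0 atomically, so concurrent setters are not clobbered. */
	if (atomic_load(&fault_bits))
		harvested = atomic_exchange(&fault_bits, 0UL);

	printf("harvested 0x%lx, map now 0x%lx\n",
	       harvested, atomic_load(&fault_bits));
	return 0;
}
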
+
+/*
+ * Atomic (interrupt context) & non-atomic (user context) functions to
+ * convert a vaddr into a physical address. The size of the page
+ * is returned in pageshift.
+ *	returns:
+ *		  0 - successful
+ *		< 0 - error code
+ *		  1 - (atomic only) try again in non-atomic context
+ */
+static int non_atomic_pte_lookup(struct vm_area_struct *vma,
+				 unsigned long vaddr, int write,
+				 unsigned long *paddr, int *pageshift)
+{
+	struct page *page;
+
+#ifdef CONFIG_HUGETLB_PAGE
+	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
+#else
+	*pageshift = PAGE_SHIFT;
+#endif
+	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
+		return -EFAULT;
+	*paddr = page_to_phys(page);
+	put_page(page);
+	return 0;
+}
+
+/*
+ * atomic_pte_lookup
+ *
+ * Convert a user virtual address to a physical address
+ * Only supports Intel large pages (2MB only) on x86_64.
+ *	ZZZ - hugepage support is incomplete
+ *
+ * NOTE: mmap_lock is already held on entry to this function. This
+ * guarantees existence of the page tables.
+ */
+static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
+	int write, unsigned long *paddr, int *pageshift)
+{
+	pgd_t *pgdp;
+	p4d_t *p4dp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t pte;
+
+	pgdp = pgd_offset(vma->vm_mm, vaddr);
+	if (unlikely(pgd_none(*pgdp)))
+		goto err;
+
+	p4dp = p4d_offset(pgdp, vaddr);
+	if (unlikely(p4d_none(*p4dp)))
+		goto err;
+
+	pudp = pud_offset(p4dp, vaddr);
+	if (unlikely(pud_none(*pudp)))
+		goto err;
+
+	pmdp = pmd_offset(pudp, vaddr);
+	if (unlikely(pmd_none(*pmdp)))
+		goto err;
+#ifdef CONFIG_X86_64
+	if (unlikely(pmd_large(*pmdp)))
+		pte = *(pte_t *) pmdp;
+	else
+#endif
+		pte = *pte_offset_kernel(pmdp, vaddr);
+
+	if (unlikely(!pte_present(pte) ||
+		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
+		return 1;
+
+	*paddr = pte_pfn(pte) << PAGE_SHIFT;
+#ifdef CONFIG_HUGETLB_PAGE
+	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
+#else
+	*pageshift = PAGE_SHIFT;
+#endif
+	return 0;
+
+err:
+	return 1;
+}
+
+static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
+		    int write, int atomic, unsigned long *gpa, int *pageshift)
+{
+	struct mm_struct *mm = gts->ts_mm;
+	struct vm_area_struct *vma;
+	unsigned long paddr;
+	int ret, ps;
+
+	vma = find_vma(mm, vaddr);
+	if (!vma)
+		goto inval;
+
+	/*
+	 * Atomic lookup is faster & usually works even if called in non-atomic
+	 * context.
+	 */
+	rmb();	/* Must/check ms_range_active before loading PTEs */
+	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
+	if (ret) {
+		if (atomic)
+			goto upm;
+		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
+			goto inval;
+	}
+	if (is_gru_paddr(paddr))
+		goto inval;
+	paddr = paddr & ~((1UL << ps) - 1);
+	*gpa = uv_soc_phys_ram_to_gpa(paddr);
+	*pageshift = ps;
+	return VTOP_SUCCESS;
+
+inval:
+	return VTOP_INVALID;
+upm:
+	return VTOP_RETRY;
+}
+
+
+/*
+ * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
+ * CBE cacheline so that the line will be written back to home agent.
+ * Otherwise the line may be silently dropped. This has no impact
+ * except on performance.
+ */
+static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
+{
+	if (unlikely(cbe)) {
+		cbe->cbrexecstatus = 0;		/* make CL dirty */
+		gru_flush_cache(cbe);
+	}
+}
+
+/*
+ * Preload the TLB with entries that may be required. Currently, preloading
+ * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
+ * the end of the bcopy transfer, whichever is smaller.
+ */ +static void gru_preload_tlb(struct gru_state *gru, + struct gru_thread_state *gts, int atomic, + unsigned long fault_vaddr, int asid, int write, + unsigned char tlb_preload_count, + struct gru_tlb_fault_handle *tfh, + struct gru_control_block_extended *cbe) +{ + unsigned long vaddr = 0, gpa; + int ret, pageshift; + + if (cbe->opccpy != OP_BCOPY) + return; + + if (fault_vaddr == cbe->cbe_baddr0) + vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1; + else if (fault_vaddr == cbe->cbe_baddr1) + vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1; + + fault_vaddr &= PAGE_MASK; + vaddr &= PAGE_MASK; + vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE); + + while (vaddr > fault_vaddr) { + ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift); + if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write, + GRU_PAGESIZE(pageshift))) + return; + gru_dbg(grudev, + "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n", + atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, + vaddr, asid, write, pageshift, gpa); + vaddr -= PAGE_SIZE; + STAT(tlb_preload_page); + } +} + +/* + * Drop a TLB entry into the GRU. The fault is described by info in an TFH. + * Input: + * cb Address of user CBR. Null if not running in user context + * Return: + * 0 = dropin, exception, or switch to UPM successful + * 1 = range invalidate active + * < 0 = error code + * + */ +static int gru_try_dropin(struct gru_state *gru, + struct gru_thread_state *gts, + struct gru_tlb_fault_handle *tfh, + struct gru_instruction_bits *cbk) +{ + struct gru_control_block_extended *cbe = NULL; + unsigned char tlb_preload_count = gts->ts_tlb_preload_count; + int pageshift = 0, asid, write, ret, atomic = !cbk, indexway; + unsigned long gpa = 0, vaddr = 0; + + /* + * NOTE: The GRU contains magic hardware that eliminates races between + * TLB invalidates and TLB dropins. If an invalidate occurs + * in the window between reading the TFH and the subsequent TLB dropin, + * the dropin is ignored. This eliminates the need for additional locks. + */ + + /* + * Prefetch the CBE if doing TLB preloading + */ + if (unlikely(tlb_preload_count)) { + cbe = gru_tfh_to_cbe(tfh); + prefetchw(cbe); + } + + /* + * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call. + * Might be a hardware race OR a stupid user. Ignore FMM because FMM + * is a transient state. + */ + if (tfh->status != TFHSTATUS_EXCEPTION) { + gru_flush_cache(tfh); + sync_core(); + if (tfh->status != TFHSTATUS_EXCEPTION) + goto failnoexception; + STAT(tfh_stale_on_fault); + } + if (tfh->state == TFHSTATE_IDLE) + goto failidle; + if (tfh->state == TFHSTATE_MISS_FMM && cbk) + goto failfmm; + + write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0; + vaddr = tfh->missvaddr; + asid = tfh->missasid; + indexway = tfh->indexway; + if (asid == 0) + goto failnoasid; + + rmb(); /* TFH must be cache resident before reading ms_range_active */ + + /* + * TFH is cache resident - at least briefly. Fail the dropin + * if a range invalidate is active. 
+ */
+	if (atomic_read(&gts->ts_gms->ms_range_active))
+		goto failactive;
+
+	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
+	if (ret == VTOP_INVALID)
+		goto failinval;
+	if (ret == VTOP_RETRY)
+		goto failupm;
+
+	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
+		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
+		if (atomic || !gru_update_cch(gts)) {
+			gts->ts_force_cch_reload = 1;
+			goto failupm;
+		}
+	}
+
+	if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
+		gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
+		gru_flush_cache_cbe(cbe);
+	}
+
+	gru_cb_set_istatus_active(cbk);
+	gts->ustats.tlbdropin++;
+	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
+			  GRU_PAGESIZE(pageshift));
+	gru_dbg(grudev,
+		"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
+		" rw %d, ps %d, gpa 0x%lx\n",
+		atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
+		indexway, write, pageshift, gpa);
+	STAT(tlb_dropin);
+	return 0;
+
+failnoasid:
+	/* No asid (delayed unload). */
+	STAT(tlb_dropin_fail_no_asid);
+	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
+	if (!cbk)
+		tfh_user_polling_mode(tfh);
+	else
+		gru_flush_cache(tfh);
+	gru_flush_cache_cbe(cbe);
+	return -EAGAIN;
+
+failupm:
+	/* Atomic failure switch CBR to UPM */
+	tfh_user_polling_mode(tfh);
+	gru_flush_cache_cbe(cbe);
+	STAT(tlb_dropin_fail_upm);
+	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
+	return 1;
+
+failfmm:
+	/* FMM state on UPM call */
+	gru_flush_cache(tfh);
+	gru_flush_cache_cbe(cbe);
+	STAT(tlb_dropin_fail_fmm);
+	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
+	return 0;
+
+failnoexception:
+	/* TFH status did not show exception pending */
+	gru_flush_cache(tfh);
+	gru_flush_cache_cbe(cbe);
+	if (cbk)
+		gru_flush_cache(cbk);
+	STAT(tlb_dropin_fail_no_exception);
+	gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
+		tfh, tfh->status, tfh->state);
+	return 0;
+
+failidle:
+	/* TFH state was idle - no miss pending */
+	gru_flush_cache(tfh);
+	gru_flush_cache_cbe(cbe);
+	if (cbk)
+		gru_flush_cache(cbk);
+	STAT(tlb_dropin_fail_idle);
+	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
+	return 0;
+
+failinval:
+	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
+	tfh_exception(tfh);
+	gru_flush_cache_cbe(cbe);
+	STAT(tlb_dropin_fail_invalid);
+	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
+	return -EFAULT;
+
+failactive:
+	/* Range invalidate active. Switch to UPM iff atomic */
+	if (!cbk)
+		tfh_user_polling_mode(tfh);
+	else
+		gru_flush_cache(tfh);
+	gru_flush_cache_cbe(cbe);
+	STAT(tlb_dropin_fail_range_active);
+	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
+		tfh, vaddr);
+	return 1;
+}
+
+/*
+ * Process an external interrupt from the GRU. This interrupt is
+ * caused by a TLB miss.
+ * Note that this is the interrupt handler that is registered with the
+ * Linux interrupt subsystem.
+ */ +static irqreturn_t gru_intr(int chiplet, int blade) +{ + struct gru_state *gru; + struct gru_tlb_fault_map imap, dmap; + struct gru_thread_state *gts; + struct gru_tlb_fault_handle *tfh = NULL; + struct completion *cmp; + int cbrnum, ctxnum; + + STAT(intr); + + gru = &gru_base[blade]->bs_grus[chiplet]; + if (!gru) { + dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n", + raw_smp_processor_id(), chiplet); + return IRQ_NONE; + } + get_clear_fault_map(gru, &imap, &dmap); + gru_dbg(grudev, + "cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n", + smp_processor_id(), chiplet, gru->gs_gid, + imap.fault_bits[0], imap.fault_bits[1], + dmap.fault_bits[0], dmap.fault_bits[1]); + + for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) { + STAT(intr_cbr); + cmp = gru->gs_blade->bs_async_wq; + if (cmp) + complete(cmp); + gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n", + gru->gs_gid, cbrnum, cmp ? cmp->done : -1); + } + + for_each_cbr_in_tfm(cbrnum, imap.fault_bits) { + STAT(intr_tfh); + tfh = get_tfh_by_index(gru, cbrnum); + prefetchw(tfh); /* Helps on hdw, required for emulator */ + + /* + * When hardware sets a bit in the faultmap, it implicitly + * locks the GRU context so that it cannot be unloaded. + * The gts cannot change until a TFH start/writestart command + * is issued. + */ + ctxnum = tfh->ctxnum; + gts = gru->gs_gts[ctxnum]; + + /* Spurious interrupts can cause this. Ignore. */ + if (!gts) { + STAT(intr_spurious); + continue; + } + + /* + * This is running in interrupt context. Trylock the mmap_lock. + * If it fails, retry the fault in user context. + */ + gts->ustats.fmm_tlbmiss++; + if (!gts->ts_force_cch_reload && + mmap_read_trylock(gts->ts_mm)) { + gru_try_dropin(gru, gts, tfh, NULL); + mmap_read_unlock(gts->ts_mm); + } else { + tfh_user_polling_mode(tfh); + STAT(intr_mm_lock_failed); + } + } + return IRQ_HANDLED; +} + +irqreturn_t gru0_intr(int irq, void *dev_id) +{ + return gru_intr(0, uv_numa_blade_id()); +} + +irqreturn_t gru1_intr(int irq, void *dev_id) +{ + return gru_intr(1, uv_numa_blade_id()); +} + +irqreturn_t gru_intr_mblade(int irq, void *dev_id) +{ + int blade; + + for_each_possible_blade(blade) { + if (uv_blade_nr_possible_cpus(blade)) + continue; + gru_intr(0, blade); + gru_intr(1, blade); + } + return IRQ_HANDLED; +} + + +static int gru_user_dropin(struct gru_thread_state *gts, + struct gru_tlb_fault_handle *tfh, + void *cb) +{ + struct gru_mm_struct *gms = gts->ts_gms; + int ret; + + gts->ustats.upm_tlbmiss++; + while (1) { + wait_event(gms->ms_wait_queue, + atomic_read(&gms->ms_range_active) == 0); + prefetchw(tfh); /* Helps on hdw, required for emulator */ + ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb); + if (ret <= 0) + return ret; + STAT(call_os_wait_queue); + } +} + +/* + * This interface is called as a result of a user detecting a "call OS" bit + * in a user CB. Normally means that a TLB fault has occurred. + * cb - user virtual address of the CB + */ +int gru_handle_user_call_os(unsigned long cb) +{ + struct gru_tlb_fault_handle *tfh; + struct gru_thread_state *gts; + void *cbk; + int ucbnum, cbrnum, ret = -EINVAL; + + STAT(call_os); + + /* sanity check the cb pointer */ + ucbnum = get_cb_number((void *)cb); + if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB) + return -EINVAL; + +again: + gts = gru_find_lock_gts(cb); + if (!gts) + return -EINVAL; + gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? 
gts->ts_gru->gs_gid : -1, gts);
+
+	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
+		goto exit;
+
+	if (gru_check_context_placement(gts)) {
+		gru_unlock_gts(gts);
+		gru_unload_context(gts, 1);
+		goto again;
+	}
+
+	/*
+	 * CCH may contain stale data if ts_force_cch_reload is set.
+	 */
+	if (gts->ts_gru && gts->ts_force_cch_reload) {
+		gts->ts_force_cch_reload = 0;
+		gru_update_cch(gts);
+	}
+
+	ret = -EAGAIN;
+	cbrnum = thread_cbr_number(gts, ucbnum);
+	if (gts->ts_gru) {
+		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
+		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
+				gts->ts_ctxnum, ucbnum);
+		ret = gru_user_dropin(gts, tfh, cbk);
+	}
+exit:
+	gru_unlock_gts(gts);
+	return ret;
+}
+
+/*
+ * Fetch the exception detail information for a CB that terminated with
+ * an exception.
+ */
+int gru_get_exception_detail(unsigned long arg)
+{
+	struct control_block_extended_exc_detail excdet;
+	struct gru_control_block_extended *cbe;
+	struct gru_thread_state *gts;
+	int ucbnum, cbrnum, ret;
+
+	STAT(user_exception);
+	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
+		return -EFAULT;
+
+	gts = gru_find_lock_gts(excdet.cb);
+	if (!gts)
+		return -EINVAL;
+
+	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
+	ucbnum = get_cb_number((void *)excdet.cb);
+	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
+		ret = -EINVAL;
+	} else if (gts->ts_gru) {
+		cbrnum = thread_cbr_number(gts, ucbnum);
+		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
+		gru_flush_cache(cbe);	/* CBE not coherent */
+		sync_core();		/* make sure we have current data */
+		excdet.opc = cbe->opccpy;
+		excdet.exopc = cbe->exopccpy;
+		excdet.ecause = cbe->ecause;
+		excdet.exceptdet0 = cbe->idef1upd;
+		excdet.exceptdet1 = cbe->idef3upd;
+		excdet.cbrstate = cbe->cbrstate;
+		excdet.cbrexecstatus = cbe->cbrexecstatus;
+		gru_flush_cache_cbe(cbe);
+		ret = 0;
+	} else {
+		ret = -EAGAIN;
+	}
+	gru_unlock_gts(gts);
+
+	gru_dbg(grudev,
+		"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
+		"exdet0 0x%lx, exdet1 0x%x\n",
+		excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
+		excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
+	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
+		ret = -EFAULT;
+	return ret;
+}
+
+/*
+ * User request to unload a context. Content is saved for possible reload.
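+ *
+ * A minimal user-space sketch (editor's illustration; it assumes the
+ * GRU_USER_UNLOAD_CONTEXT ioctl number and struct gru_unload_context_req
+ * from grulib.h, and a file descriptor already open on /dev/gru):
+ *
+ *	struct gru_unload_context_req req = { .gseg = gseg_vaddr };
+ *
+ *	if (ioctl(grufd, GRU_USER_UNLOAD_CONTEXT, &req) < 0)
+ *		perror("GRU_USER_UNLOAD_CONTEXT");
+ *
+ * A zero gseg asks the driver to unload every context, which the handler
+ * below restricts to CAP_SYS_ADMIN.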
+ */
+static int gru_unload_all_contexts(void)
+{
+	struct gru_thread_state *gts;
+	struct gru_state *gru;
+	int gid, ctxnum;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	foreach_gid(gid) {
+		gru = GID_TO_GRU(gid);
+		spin_lock(&gru->gs_lock);
+		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
+			gts = gru->gs_gts[ctxnum];
+			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
+				spin_unlock(&gru->gs_lock);
+				gru_unload_context(gts, 1);
+				mutex_unlock(&gts->ts_ctxlock);
+				spin_lock(&gru->gs_lock);
+			}
+		}
+		spin_unlock(&gru->gs_lock);
+	}
+	return 0;
+}
+
+int gru_user_unload_context(unsigned long arg)
+{
+	struct gru_thread_state *gts;
+	struct gru_unload_context_req req;
+
+	STAT(user_unload_context);
+	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+		return -EFAULT;
+
+	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);
+
+	if (!req.gseg)
+		return gru_unload_all_contexts();
+
+	gts = gru_find_lock_gts(req.gseg);
+	if (!gts)
+		return -EINVAL;
+
+	if (gts->ts_gru)
+		gru_unload_context(gts, 1);
+	gru_unlock_gts(gts);
+
+	return 0;
+}
+
+/*
+ * User request to flush a range of virtual addresses from the GRU TLB
+ * (Mainly for testing).
+ */
+int gru_user_flush_tlb(unsigned long arg)
+{
+	struct gru_thread_state *gts;
+	struct gru_flush_tlb_req req;
+	struct gru_mm_struct *gms;
+
+	STAT(user_flush_tlb);
+	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+		return -EFAULT;
+
+	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
+		req.vaddr, req.len);
+
+	gts = gru_find_lock_gts(req.gseg);
+	if (!gts)
+		return -EINVAL;
+
+	gms = gts->ts_gms;
+	gru_unlock_gts(gts);
+	gru_flush_tlb_range(gms, req.vaddr, req.len);
+
+	return 0;
+}
+
+/*
+ * Fetch GSEG statistics
+ */
+long gru_get_gseg_statistics(unsigned long arg)
+{
+	struct gru_thread_state *gts;
+	struct gru_get_gseg_statistics_req req;
+
+	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+		return -EFAULT;
+
+	/*
+	 * The library creates arrays of contexts for threaded programs.
+	 * If no gts exists in the array, the context has never been used & all
+	 * statistics are implicitly 0.
+	 */
+	gts = gru_find_lock_gts(req.gseg);
+	if (gts) {
+		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
+		gru_unlock_gts(gts);
+	} else {
+		memset(&req.stats, 0, sizeof(gts->ustats));
+	}
+
+	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Register the current task as the user of the GSEG slice.
+ * Needed for TLB fault interrupt targeting.
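+ *
+ * A hedged user-space sketch (editor's illustration, assuming the
+ * GRU_SET_CONTEXT_OPTION ioctl number, struct gru_set_context_option_req
+ * and the sco_* option codes from grulib.h):
+ *
+ *	struct gru_set_context_option_req req = {
+ *		.gseg = gseg_vaddr,
+ *		.op = sco_gseg_owner,	-- register the current task
+ *	};
+ *
+ *	if (ioctl(grufd, GRU_SET_CONTEXT_OPTION, &req) < 0)
+ *		perror("GRU_SET_CONTEXT_OPTION");
+ *
+ * For this option the handler below simply records current->tgid as the
+ * GSEG owner.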
+ */
+int gru_set_context_option(unsigned long arg)
+{
+	struct gru_thread_state *gts;
+	struct gru_set_context_option_req req;
+	int ret = 0;
+
+	STAT(set_context_option);
+	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+		return -EFAULT;
+	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
+
+	gts = gru_find_lock_gts(req.gseg);
+	if (!gts) {
+		gts = gru_alloc_locked_gts(req.gseg);
+		if (IS_ERR(gts))
+			return PTR_ERR(gts);
+	}
+
+	switch (req.op) {
+	case sco_blade_chiplet:
+		/* Select blade/chiplet for GRU context */
+		if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
+		    req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
+		    (req.val1 >= 0 && !gru_base[req.val1])) {
+			ret = -EINVAL;
+		} else {
+			gts->ts_user_blade_id = req.val1;
+			gts->ts_user_chiplet_id = req.val0;
+			if (gru_check_context_placement(gts)) {
+				gru_unlock_gts(gts);
+				gru_unload_context(gts, 1);
+				return ret;
+			}
+		}
+		break;
+	case sco_gseg_owner:
+		/* Register the current task as the GSEG owner */
+		gts->ts_tgid_owner = current->tgid;
+		break;
+	case sco_cch_req_slice:
+		/* Set the CCH slice option */
+		gts->ts_cch_req_slice = req.val1 & 3;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	gru_unlock_gts(gts);
+
+	return ret;
+}
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
new file mode 100644
index 000000000..7ffcfc0bb
--- /dev/null
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SN Platform GRU Driver
+ *
+ * FILE OPERATIONS & DRIVER INITIALIZATION
+ *
+ * This file supports the user system call for file open, close, mmap, etc.
+ * This also includes the driver initialization code.
+ *
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
+ * Copyright (c) 2008-2014 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#ifdef CONFIG_X86_64
+#include <asm/uv/uv_irq.h>
+#endif
+#include <asm/uv/uv.h>
+#include "gru.h"
+#include "grulib.h"
+#include "grutables.h"
+
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/uv_mmrs.h>
+
+struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
+unsigned long gru_start_paddr __read_mostly;
+void *gru_start_vaddr __read_mostly;
+unsigned long gru_end_paddr __read_mostly;
+unsigned int gru_max_gids __read_mostly;
+struct gru_stats_s gru_stats;
+
+/* Guaranteed user available resources on each node */
+static int max_user_cbrs, max_user_dsr_bytes;
+
+static struct miscdevice gru_miscdev;
+
+static int gru_supported(void)
+{
+	return is_uv_system() &&
+		(uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE);
+}
+
+/*
+ * gru_vma_close
+ *
+ * Called when unmapping a device mapping. Frees all gru resources
+ * and tables belonging to the vma.
+ */
+static void gru_vma_close(struct vm_area_struct *vma)
+{
+	struct gru_vma_data *vdata;
+	struct gru_thread_state *gts;
+	struct list_head *entry, *next;
+
+	if (!vma->vm_private_data)
+		return;
+
+	vdata = vma->vm_private_data;
+	vma->vm_private_data = NULL;
+	gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file,
+				vdata);
+	list_for_each_safe(entry, next, &vdata->vd_head) {
+		gts =
+		    list_entry(entry, struct gru_thread_state, ts_next);
+		list_del(&gts->ts_next);
+		mutex_lock(&gts->ts_ctxlock);
+		if (gts->ts_gru)
+			gru_unload_context(gts, 0);
+		mutex_unlock(&gts->ts_ctxlock);
+		gts_drop(gts);
+	}
+	kfree(vdata);
+	STAT(vdata_free);
+}
+
+/*
+ * gru_file_mmap
+ *
+ * Called when mmapping the device. Initializes the vma with a fault handler
+ * and private data structure necessary to allocate, track, and free the
+ * underlying pages.
+ */
+static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE))
+		return -EPERM;
+
+	if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) ||
+				vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
+		return -EINVAL;
+
+	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED |
+			 VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_page_prot = PAGE_SHARED;
+	vma->vm_ops = &gru_vm_ops;
+
+	vma->vm_private_data = gru_alloc_vma_data(vma, 0);
+	if (!vma->vm_private_data)
+		return -ENOMEM;
+
+	gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n",
+		file, vma->vm_start, vma, vma->vm_private_data);
+	return 0;
+}
+
+/*
+ * Create a new GRU context
+ */
+static int gru_create_new_context(unsigned long arg)
+{
+	struct gru_create_context_req req;
+	struct vm_area_struct *vma;
+	struct gru_vma_data *vdata;
+	int ret = -EINVAL;
+
+	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+		return -EFAULT;
+
+	if (req.data_segment_bytes > max_user_dsr_bytes)
+		return -EINVAL;
+	if (req.control_blocks > max_user_cbrs || !req.maximum_thread_count)
+		return -EINVAL;
+
+	if (!(req.options & GRU_OPT_MISS_MASK))
+		req.options |= GRU_OPT_MISS_FMM_INTR;
+
+	mmap_write_lock(current->mm);
+	vma = gru_find_vma(req.gseg);
+	if (vma) {
+		vdata = vma->vm_private_data;
+		vdata->vd_user_options = req.options;
+		vdata->vd_dsr_au_count =
+		    GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
+		vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
+		vdata->vd_tlb_preload_count = req.tlb_preload_count;
+		ret = 0;
+	}
+	mmap_write_unlock(current->mm);
+
+	return ret;
+}
+
+/*
+ * Get GRU configuration info (temp - for emulator testing)
+ */
+static long gru_get_config_info(unsigned long arg)
+{
+	struct gru_config_info info;
+	int nodesperblade;
+
+	if (num_online_nodes() > 1 &&
+			(uv_node_to_blade_id(1) == uv_node_to_blade_id(0)))
+		nodesperblade = 2;
+	else
+		nodesperblade = 1;
+	memset(&info, 0, sizeof(info));
+	info.cpus = num_online_cpus();
+	info.nodes = num_online_nodes();
+	info.blades = info.nodes / nodesperblade;
+	info.chiplets = GRU_CHIPLETS_PER_BLADE * info.blades;
+
+	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+		return -EFAULT;
+	return 0;
+}
+
+/*
+ * gru_file_unlocked_ioctl
+ *
+ * Called to update file attributes via IOCTL calls.
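+ *
+ * A hedged sketch of the user-side sequence this entry point serves
+ * (editor's illustration; GRU_CREATE_CONTEXT and the request layout
+ * come from grulib.h, the sizes are made up, and a real library must
+ * arrange a GRU_GSEG_PAGESIZE-aligned mapping):
+ *
+ *	void *gseg = mmap(NULL, GRU_GSEG_PAGESIZE, PROT_READ | PROT_WRITE,
+ *			  MAP_SHARED, grufd, 0);
+ *	struct gru_create_context_req req = {
+ *		.gseg = (unsigned long)gseg,
+ *		.data_segment_bytes = 1024,
+ *		.control_blocks = 16,
+ *		.maximum_thread_count = 1,
+ *	};
+ *
+ *	if (ioctl(grufd, GRU_CREATE_CONTEXT, &req) < 0)
+ *		perror("GRU_CREATE_CONTEXT");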
+ */ +static long gru_file_unlocked_ioctl(struct file *file, unsigned int req, + unsigned long arg) +{ + int err = -EBADRQC; + + gru_dbg(grudev, "file %p, req 0x%x, 0x%lx\n", file, req, arg); + + switch (req) { + case GRU_CREATE_CONTEXT: + err = gru_create_new_context(arg); + break; + case GRU_SET_CONTEXT_OPTION: + err = gru_set_context_option(arg); + break; + case GRU_USER_GET_EXCEPTION_DETAIL: + err = gru_get_exception_detail(arg); + break; + case GRU_USER_UNLOAD_CONTEXT: + err = gru_user_unload_context(arg); + break; + case GRU_USER_FLUSH_TLB: + err = gru_user_flush_tlb(arg); + break; + case GRU_USER_CALL_OS: + err = gru_handle_user_call_os(arg); + break; + case GRU_GET_GSEG_STATISTICS: + err = gru_get_gseg_statistics(arg); + break; + case GRU_KTEST: + err = gru_ktest(arg); + break; + case GRU_GET_CONFIG_INFO: + err = gru_get_config_info(arg); + break; + case GRU_DUMP_CHIPLET_STATE: + err = gru_dump_chiplet_request(arg); + break; + } + return err; +} + +/* + * Called at init time to build tables for all GRUs that are present in the + * system. + */ +static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr, + void *vaddr, int blade_id, int chiplet_id) +{ + spin_lock_init(&gru->gs_lock); + spin_lock_init(&gru->gs_asid_lock); + gru->gs_gru_base_paddr = paddr; + gru->gs_gru_base_vaddr = vaddr; + gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id; + gru->gs_blade = gru_base[blade_id]; + gru->gs_blade_id = blade_id; + gru->gs_chiplet_id = chiplet_id; + gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1; + gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1; + gru->gs_asid_limit = MAX_ASID; + gru_tgh_flush_init(gru); + if (gru->gs_gid >= gru_max_gids) + gru_max_gids = gru->gs_gid + 1; + gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n", + blade_id, gru->gs_gid, gru->gs_gru_base_vaddr, + gru->gs_gru_base_paddr); +} + +static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) +{ + int pnode, nid, bid, chip; + int cbrs, dsrbytes, n; + int order = get_order(sizeof(struct gru_blade_state)); + struct page *page; + struct gru_state *gru; + unsigned long paddr; + void *vaddr; + + max_user_cbrs = GRU_NUM_CB; + max_user_dsr_bytes = GRU_NUM_DSR_BYTES; + for_each_possible_blade(bid) { + pnode = uv_blade_to_pnode(bid); + nid = uv_blade_to_memory_nid(bid);/* -1 if no memory on blade */ + page = alloc_pages_node(nid, GFP_KERNEL, order); + if (!page) + goto fail; + gru_base[bid] = page_address(page); + memset(gru_base[bid], 0, sizeof(struct gru_blade_state)); + gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0]; + spin_lock_init(&gru_base[bid]->bs_lock); + init_rwsem(&gru_base[bid]->bs_kgts_sema); + + dsrbytes = 0; + cbrs = 0; + for (gru = gru_base[bid]->bs_grus, chip = 0; + chip < GRU_CHIPLETS_PER_BLADE; + chip++, gru++) { + paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip); + vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); + gru_init_chiplet(gru, paddr, vaddr, bid, chip); + n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; + cbrs = max(cbrs, n); + n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; + dsrbytes = max(dsrbytes, n); + } + max_user_cbrs = min(max_user_cbrs, cbrs); + max_user_dsr_bytes = min(max_user_dsr_bytes, dsrbytes); + } + + return 0; + +fail: + for (bid--; bid >= 0; bid--) + free_pages((unsigned long)gru_base[bid], order); + return -ENOMEM; +} + +static void gru_free_tables(void) +{ + int bid; + int order = get_order(sizeof(struct gru_state) * + GRU_CHIPLETS_PER_BLADE); + + for (bid = 0; bid < GRU_MAX_BLADES; bid++) + 
free_pages((unsigned long)gru_base[bid], order); +} + +static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep) +{ + unsigned long mmr = 0; + int core; + + /* + * We target the cores of a blade and not the hyperthreads themselves. + * There is a max of 8 cores per socket and 2 sockets per blade, + * making for a max total of 16 cores (i.e., 16 CPUs without + * hyperthreading and 32 CPUs with hyperthreading). + */ + core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu); + if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu)) + return 0; + + if (chiplet == 0) { + mmr = UVH_GR0_TLB_INT0_CONFIG + + core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG); + } else if (chiplet == 1) { + mmr = UVH_GR1_TLB_INT0_CONFIG + + core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG); + } else { + BUG(); + } + + *corep = core; + return mmr; +} + +#ifdef CONFIG_IA64 + +static int gru_irq_count[GRU_CHIPLETS_PER_BLADE]; + +static void gru_noop(struct irq_data *d) +{ +} + +static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = { + [0 ... GRU_CHIPLETS_PER_BLADE - 1] { + .irq_mask = gru_noop, + .irq_unmask = gru_noop, + .irq_ack = gru_noop + } +}; + +static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name, + irq_handler_t irq_handler, int cpu, int blade) +{ + unsigned long mmr; + int irq = IRQ_GRU + chiplet; + int ret, core; + + mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core); + if (mmr == 0) + return 0; + + if (gru_irq_count[chiplet] == 0) { + gru_chip[chiplet].name = irq_name; + ret = irq_set_chip(irq, &gru_chip[chiplet]); + if (ret) { + printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n", + GRU_DRIVER_ID_STR, -ret); + return ret; + } + + ret = request_irq(irq, irq_handler, 0, irq_name, NULL); + if (ret) { + printk(KERN_ERR "%s: request_irq failed, errno=%d\n", + GRU_DRIVER_ID_STR, -ret); + return ret; + } + } + gru_irq_count[chiplet]++; + + return 0; +} + +static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade) +{ + unsigned long mmr; + int core, irq = IRQ_GRU + chiplet; + + if (gru_irq_count[chiplet] == 0) + return; + + mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core); + if (mmr == 0) + return; + + if (--gru_irq_count[chiplet] == 0) + free_irq(irq, NULL); +} + +#elif defined CONFIG_X86_64 + +static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name, + irq_handler_t irq_handler, int cpu, int blade) +{ + unsigned long mmr; + int irq, core; + int ret; + + mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core); + if (mmr == 0) + return 0; + + irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU); + if (irq < 0) { + printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n", + GRU_DRIVER_ID_STR, -irq); + return irq; + } + + ret = request_irq(irq, irq_handler, 0, irq_name, NULL); + if (ret) { + uv_teardown_irq(irq); + printk(KERN_ERR "%s: request_irq failed, errno=%d\n", + GRU_DRIVER_ID_STR, -ret); + return ret; + } + gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq; + return 0; +} + +static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade) +{ + int irq, core; + unsigned long mmr; + + mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core); + if (mmr) { + irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core]; + if (irq) { + free_irq(irq, NULL); + uv_teardown_irq(irq); + } + } +} + +#endif + +static void gru_teardown_tlb_irqs(void) +{ + int blade; + int cpu; + + for_each_online_cpu(cpu) { + blade = uv_cpu_to_blade_id(cpu); + gru_chiplet_teardown_tlb_irq(0, cpu, blade); + gru_chiplet_teardown_tlb_irq(1, cpu, 
blade); + } + for_each_possible_blade(blade) { + if (uv_blade_nr_possible_cpus(blade)) + continue; + gru_chiplet_teardown_tlb_irq(0, 0, blade); + gru_chiplet_teardown_tlb_irq(1, 0, blade); + } +} + +static int gru_setup_tlb_irqs(void) +{ + int blade; + int cpu; + int ret; + + for_each_online_cpu(cpu) { + blade = uv_cpu_to_blade_id(cpu); + ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade); + if (ret != 0) + goto exit1; + + ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade); + if (ret != 0) + goto exit1; + } + for_each_possible_blade(blade) { + if (uv_blade_nr_possible_cpus(blade)) + continue; + ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade); + if (ret != 0) + goto exit1; + + ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade); + if (ret != 0) + goto exit1; + } + + return 0; + +exit1: + gru_teardown_tlb_irqs(); + return ret; +} + +/* + * gru_init + * + * Called at boot or module load time to initialize the GRUs. + */ +static int __init gru_init(void) +{ + int ret; + + if (!gru_supported()) + return 0; + +#if defined CONFIG_IA64 + gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */ +#else + gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG) & + 0x7fffffffffffUL; +#endif + gru_start_vaddr = __va(gru_start_paddr); + gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE; + printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", + gru_start_paddr, gru_end_paddr); + ret = misc_register(&gru_miscdev); + if (ret) { + printk(KERN_ERR "%s: misc_register failed\n", + GRU_DRIVER_ID_STR); + goto exit0; + } + + ret = gru_proc_init(); + if (ret) { + printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR); + goto exit1; + } + + ret = gru_init_tables(gru_start_paddr, gru_start_vaddr); + if (ret) { + printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR); + goto exit2; + } + + ret = gru_setup_tlb_irqs(); + if (ret != 0) + goto exit3; + + gru_kservices_init(); + + printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR, + GRU_DRIVER_VERSION_STR); + return 0; + +exit3: + gru_free_tables(); +exit2: + gru_proc_exit(); +exit1: + misc_deregister(&gru_miscdev); +exit0: + return ret; + +} + +static void __exit gru_exit(void) +{ + if (!gru_supported()) + return; + + gru_teardown_tlb_irqs(); + gru_kservices_exit(); + gru_free_tables(); + misc_deregister(&gru_miscdev); + gru_proc_exit(); + mmu_notifier_synchronize(); +} + +static const struct file_operations gru_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = gru_file_unlocked_ioctl, + .mmap = gru_file_mmap, + .llseek = noop_llseek, +}; + +static struct miscdevice gru_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "gru", + .fops = &gru_fops, +}; + +const struct vm_operations_struct gru_vm_ops = { + .close = gru_vma_close, + .fault = gru_fault, +}; + +#ifndef MODULE +fs_initcall(gru_init); +#else +module_init(gru_init); +#endif +module_exit(gru_exit); + +module_param(gru_options, ulong, 0644); +MODULE_PARM_DESC(gru_options, "Various debug options"); + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION(GRU_DRIVER_ID_STR GRU_DRIVER_VERSION_STR); +MODULE_VERSION(GRU_DRIVER_VERSION_STR); + diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c new file mode 100644 index 000000000..1d75d5e54 --- /dev/null +++ b/drivers/misc/sgi-gru/gruhandles.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * GRU KERNEL MCS INSTRUCTIONS + * + * Copyright (c) 2008 Silicon 
Graphics, Inc. All Rights Reserved. + */ + +#include <linux/kernel.h> +#include "gru.h" +#include "grulib.h" +#include "grutables.h" + +/* 10 sec */ +#ifdef CONFIG_IA64 +#include <asm/processor.h> +#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10) +#define CLKS2NSEC(c) ((c) *1000000000 / local_cpu_data->itc_freq) +#else +#include <linux/sync_core.h> +#include <asm/tsc.h> +#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) +#define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz) +#endif + +/* Extract the status field from a kernel handle */ +#define GET_MSEG_HANDLE_STATUS(h) (((*(unsigned long *)(h)) >> 16) & 3) + +struct mcs_op_statistic mcs_op_statistics[mcsop_last]; + +static void update_mcs_stats(enum mcs_op op, unsigned long clks) +{ + unsigned long nsec; + + nsec = CLKS2NSEC(clks); + atomic_long_inc(&mcs_op_statistics[op].count); + atomic_long_add(nsec, &mcs_op_statistics[op].total); + if (mcs_op_statistics[op].max < nsec) + mcs_op_statistics[op].max = nsec; +} + +static void start_instruction(void *h) +{ + unsigned long *w0 = h; + + wmb(); /* setting CMD/STATUS bits must be last */ + *w0 = *w0 | 0x20001; + gru_flush_cache(h); +} + +static void report_instruction_timeout(void *h) +{ + unsigned long goff = GSEGPOFF((unsigned long)h); + char *id = "???"; + + if (TYPE_IS(CCH, goff)) + id = "CCH"; + else if (TYPE_IS(TGH, goff)) + id = "TGH"; + else if (TYPE_IS(TFH, goff)) + id = "TFH"; + + panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id); +} + +static int wait_instruction_complete(void *h, enum mcs_op opc) +{ + int status; + unsigned long start_time = get_cycles(); + + while (1) { + cpu_relax(); + status = GET_MSEG_HANDLE_STATUS(h); + if (status != CCHSTATUS_ACTIVE) + break; + if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) { + report_instruction_timeout(h); + start_time = get_cycles(); + } + } + if (gru_options & OPT_STATS) + update_mcs_stats(opc, get_cycles() - start_time); + return status; +} + +int cch_allocate(struct gru_context_configuration_handle *cch) +{ + int ret; + + cch->opc = CCHOP_ALLOCATE; + start_instruction(cch); + ret = wait_instruction_complete(cch, cchop_allocate); + + /* + * Stop speculation into the GSEG being mapped by the previous ALLOCATE. + * The GSEG memory does not exist until the ALLOCATE completes. + */ + sync_core(); + return ret; +} + +int cch_start(struct gru_context_configuration_handle *cch) +{ + cch->opc = CCHOP_START; + start_instruction(cch); + return wait_instruction_complete(cch, cchop_start); +} + +int cch_interrupt(struct gru_context_configuration_handle *cch) +{ + cch->opc = CCHOP_INTERRUPT; + start_instruction(cch); + return wait_instruction_complete(cch, cchop_interrupt); +} + +int cch_deallocate(struct gru_context_configuration_handle *cch) +{ + int ret; + + cch->opc = CCHOP_DEALLOCATE; + start_instruction(cch); + ret = wait_instruction_complete(cch, cchop_deallocate); + + /* + * Stop speculation into the GSEG being unmapped by the previous + * DEALLOCATE. 
+ */ + sync_core(); + return ret; +} + +int cch_interrupt_sync(struct gru_context_configuration_handle + *cch) +{ + cch->opc = CCHOP_INTERRUPT_SYNC; + start_instruction(cch); + return wait_instruction_complete(cch, cchop_interrupt_sync); +} + +int tgh_invalidate(struct gru_tlb_global_handle *tgh, + unsigned long vaddr, unsigned long vaddrmask, + int asid, int pagesize, int global, int n, + unsigned short ctxbitmap) +{ + tgh->vaddr = vaddr; + tgh->asid = asid; + tgh->pagesize = pagesize; + tgh->n = n; + tgh->global = global; + tgh->vaddrmask = vaddrmask; + tgh->ctxbitmap = ctxbitmap; + tgh->opc = TGHOP_TLBINV; + start_instruction(tgh); + return wait_instruction_complete(tgh, tghop_invalidate); +} + +int tfh_write_only(struct gru_tlb_fault_handle *tfh, + unsigned long paddr, int gaa, + unsigned long vaddr, int asid, int dirty, + int pagesize) +{ + tfh->fillasid = asid; + tfh->fillvaddr = vaddr; + tfh->pfn = paddr >> GRU_PADDR_SHIFT; + tfh->gaa = gaa; + tfh->dirty = dirty; + tfh->pagesize = pagesize; + tfh->opc = TFHOP_WRITE_ONLY; + start_instruction(tfh); + return wait_instruction_complete(tfh, tfhop_write_only); +} + +void tfh_write_restart(struct gru_tlb_fault_handle *tfh, + unsigned long paddr, int gaa, + unsigned long vaddr, int asid, int dirty, + int pagesize) +{ + tfh->fillasid = asid; + tfh->fillvaddr = vaddr; + tfh->pfn = paddr >> GRU_PADDR_SHIFT; + tfh->gaa = gaa; + tfh->dirty = dirty; + tfh->pagesize = pagesize; + tfh->opc = TFHOP_WRITE_RESTART; + start_instruction(tfh); +} + +void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh) +{ + tfh->opc = TFHOP_USER_POLLING_MODE; + start_instruction(tfh); +} + +void tfh_exception(struct gru_tlb_fault_handle *tfh) +{ + tfh->opc = TFHOP_EXCEPTION; + start_instruction(tfh); +} + diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h new file mode 100644 index 000000000..5a498bf8d --- /dev/null +++ b/drivers/misc/sgi-gru/gruhandles.h @@ -0,0 +1,517 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SN Platform GRU Driver + * + * GRU HANDLE DEFINITION + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +#ifndef __GRUHANDLES_H__ +#define __GRUHANDLES_H__ +#include "gru_instructions.h" + +/* + * Manifest constants for GRU Memory Map + */ +#define GRU_GSEG0_BASE 0 +#define GRU_MCS_BASE (64 * 1024 * 1024) +#define GRU_SIZE (128UL * 1024 * 1024) + +/* Handle & resource counts */ +#define GRU_NUM_CB 128 +#define GRU_NUM_DSR_BYTES (32 * 1024) +#define GRU_NUM_TFM 16 +#define GRU_NUM_TGH 24 +#define GRU_NUM_CBE 128 +#define GRU_NUM_TFH 128 +#define GRU_NUM_CCH 16 + +/* Maximum resource counts that can be reserved by user programs */ +#define GRU_NUM_USER_CBR GRU_NUM_CBE +#define GRU_NUM_USER_DSR_BYTES GRU_NUM_DSR_BYTES + +/* Bytes per handle & handle stride. 
Code assumes all cb, tfh, cbe handles + * are the same */ +#define GRU_HANDLE_BYTES 64 +#define GRU_HANDLE_STRIDE 256 + +/* Base addresses of handles */ +#define GRU_TFM_BASE (GRU_MCS_BASE + 0x00000) +#define GRU_TGH_BASE (GRU_MCS_BASE + 0x08000) +#define GRU_CBE_BASE (GRU_MCS_BASE + 0x10000) +#define GRU_TFH_BASE (GRU_MCS_BASE + 0x18000) +#define GRU_CCH_BASE (GRU_MCS_BASE + 0x20000) + +/* User gseg constants */ +#define GRU_GSEG_STRIDE (4 * 1024 * 1024) +#define GSEG_BASE(a) ((a) & ~(GRU_GSEG_PAGESIZE - 1)) + +/* Data segment constants */ +#define GRU_DSR_AU_BYTES 1024 +#define GRU_DSR_CL (GRU_NUM_DSR_BYTES / GRU_CACHE_LINE_BYTES) +#define GRU_DSR_AU_CL (GRU_DSR_AU_BYTES / GRU_CACHE_LINE_BYTES) +#define GRU_DSR_AU (GRU_NUM_DSR_BYTES / GRU_DSR_AU_BYTES) + +/* Control block constants */ +#define GRU_CBR_AU_SIZE 2 +#define GRU_CBR_AU (GRU_NUM_CBE / GRU_CBR_AU_SIZE) + +/* Convert resource counts to the number of AU */ +#define GRU_DS_BYTES_TO_AU(n) DIV_ROUND_UP(n, GRU_DSR_AU_BYTES) +#define GRU_CB_COUNT_TO_AU(n) DIV_ROUND_UP(n, GRU_CBR_AU_SIZE) + +/* UV limits */ +#define GRU_CHIPLETS_PER_HUB 2 +#define GRU_HUBS_PER_BLADE 1 +#define GRU_CHIPLETS_PER_BLADE (GRU_HUBS_PER_BLADE * GRU_CHIPLETS_PER_HUB) + +/* User GRU Gseg offsets */ +#define GRU_CB_BASE 0 +#define GRU_CB_LIMIT (GRU_CB_BASE + GRU_HANDLE_STRIDE * GRU_NUM_CBE) +#define GRU_DS_BASE 0x20000 +#define GRU_DS_LIMIT (GRU_DS_BASE + GRU_NUM_DSR_BYTES) + +/* Convert a GRU physical address to the chiplet offset */ +#define GSEGPOFF(h) ((h) & (GRU_SIZE - 1)) + +/* Convert an arbitrary handle address to the beginning of the GRU segment */ +#define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1))) + +/* Test a valid handle address to determine the type */ +#define TYPE_IS(hn, h) ((h) >= GRU_##hn##_BASE && (h) < \ + GRU_##hn##_BASE + GRU_NUM_##hn * GRU_HANDLE_STRIDE && \ + (((h) & (GRU_HANDLE_STRIDE - 1)) == 0)) + + +/* General addressing macros. 
*/ +static inline void *get_gseg_base_address(void *base, int ctxnum) +{ + return (void *)(base + GRU_GSEG0_BASE + GRU_GSEG_STRIDE * ctxnum); +} + +static inline void *get_gseg_base_address_cb(void *base, int ctxnum, int line) +{ + return (void *)(get_gseg_base_address(base, ctxnum) + + GRU_CB_BASE + GRU_HANDLE_STRIDE * line); +} + +static inline void *get_gseg_base_address_ds(void *base, int ctxnum, int line) +{ + return (void *)(get_gseg_base_address(base, ctxnum) + GRU_DS_BASE + + GRU_CACHE_LINE_BYTES * line); +} + +static inline struct gru_tlb_fault_map *get_tfm(void *base, int ctxnum) +{ + return (struct gru_tlb_fault_map *)(base + GRU_TFM_BASE + + ctxnum * GRU_HANDLE_STRIDE); +} + +static inline struct gru_tlb_global_handle *get_tgh(void *base, int ctxnum) +{ + return (struct gru_tlb_global_handle *)(base + GRU_TGH_BASE + + ctxnum * GRU_HANDLE_STRIDE); +} + +static inline struct gru_control_block_extended *get_cbe(void *base, int ctxnum) +{ + return (struct gru_control_block_extended *)(base + GRU_CBE_BASE + + ctxnum * GRU_HANDLE_STRIDE); +} + +static inline struct gru_tlb_fault_handle *get_tfh(void *base, int ctxnum) +{ + return (struct gru_tlb_fault_handle *)(base + GRU_TFH_BASE + + ctxnum * GRU_HANDLE_STRIDE); +} + +static inline struct gru_context_configuration_handle *get_cch(void *base, + int ctxnum) +{ + return (struct gru_context_configuration_handle *)(base + + GRU_CCH_BASE + ctxnum * GRU_HANDLE_STRIDE); +} + +static inline unsigned long get_cb_number(void *cb) +{ + return (((unsigned long)cb - GRU_CB_BASE) % GRU_GSEG_PAGESIZE) / + GRU_HANDLE_STRIDE; +} + +/* byte offset to a specific GRU chiplet. (p=pnode, c=chiplet (0 or 1)*/ +static inline unsigned long gru_chiplet_paddr(unsigned long paddr, int pnode, + int chiplet) +{ + return paddr + GRU_SIZE * (2 * pnode + chiplet); +} + +static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet) +{ + return vaddr + GRU_SIZE * (2 * pnode + chiplet); +} + +static inline struct gru_control_block_extended *gru_tfh_to_cbe( + struct gru_tlb_fault_handle *tfh) +{ + unsigned long cbe; + + cbe = (unsigned long)tfh - GRU_TFH_BASE + GRU_CBE_BASE; + return (struct gru_control_block_extended*)cbe; +} + + + + +/* + * Global TLB Fault Map + * Bitmap of outstanding TLB misses needing interrupt/polling service. + * + */ +struct gru_tlb_fault_map { + unsigned long fault_bits[BITS_TO_LONGS(GRU_NUM_CBE)]; + unsigned long fill0[2]; + unsigned long done_bits[BITS_TO_LONGS(GRU_NUM_CBE)]; + unsigned long fill1[2]; +}; + +/* + * TGH - TLB Global Handle + * Used for TLB flushing. 
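+ *
+ * A hedged illustration of how a flush might drive this handle
+ * (editor's sketch; the real caller is the TLB purge code, and the
+ * argument values below are only representative):
+ *
+ *	struct gru_tlb_global_handle *tgh = get_tgh(gru_base_vaddr, 0);
+ *
+ *	-- purge up to GRUMAXINVAL entries for one ASID, 4k pages
+ *	tgh_invalidate(tgh, vaddr, ~0UL, asid, GRU_PAGESIZE(PAGE_SHIFT),
+ *		       0, GRUMAXINVAL - 1, ctxbitmap);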
+ *
+ */
+struct gru_tlb_global_handle {
+	unsigned int	cmd:1;			/* DW 0 */
+	unsigned int	delresp:1;
+	unsigned int	opc:1;
+	unsigned int	fill1:5;
+
+	unsigned int	fill2:8;
+
+	unsigned int	status:2;
+	unsigned long	fill3:2;
+	unsigned int	state:3;
+	unsigned long	fill4:1;
+
+	unsigned int	cause:3;
+	unsigned long	fill5:37;
+
+	unsigned long	vaddr:64;		/* DW 1 */
+
+	unsigned int	asid:24;		/* DW 2 */
+	unsigned int	fill6:8;
+
+	unsigned int	pagesize:5;
+	unsigned int	fill7:11;
+
+	unsigned int	global:1;
+	unsigned int	fill8:15;
+
+	unsigned long	vaddrmask:39;		/* DW 3 */
+	unsigned int	fill9:9;
+	unsigned int	n:10;
+	unsigned int	fill10:6;
+
+	unsigned int	ctxbitmap:16;		/* DW4 */
+	unsigned long	fill11[3];
+};
+
+enum gru_tgh_cmd {
+	TGHCMD_START
+};
+
+enum gru_tgh_opc {
+	TGHOP_TLBNOP,
+	TGHOP_TLBINV
+};
+
+enum gru_tgh_status {
+	TGHSTATUS_IDLE,
+	TGHSTATUS_EXCEPTION,
+	TGHSTATUS_ACTIVE
+};
+
+enum gru_tgh_state {
+	TGHSTATE_IDLE,
+	TGHSTATE_PE_INVAL,
+	TGHSTATE_INTERRUPT_INVAL,
+	TGHSTATE_WAITDONE,
+	TGHSTATE_RESTART_CTX,
+};
+
+enum gru_tgh_cause {
+	TGHCAUSE_RR_ECC,
+	TGHCAUSE_TLB_ECC,
+	TGHCAUSE_LRU_ECC,
+	TGHCAUSE_PS_ECC,
+	TGHCAUSE_MUL_ERR,
+	TGHCAUSE_DATA_ERR,
+	TGHCAUSE_SW_FORCE
+};
+
+
+/*
+ * TFH - TLB Fault Handle
+ * Used for TLB dropins into the GRU TLB.
+ *
+ */
+struct gru_tlb_fault_handle {
+	unsigned int	cmd:1;			/* DW 0 - low 32*/
+	unsigned int	delresp:1;
+	unsigned int	fill0:2;
+	unsigned int	opc:3;
+	unsigned int	fill1:9;
+
+	unsigned int	status:2;
+	unsigned int	fill2:2;
+	unsigned int	state:3;
+	unsigned int	fill3:1;
+
+	unsigned int	cause:6;
+	unsigned int	cb_int:1;
+	unsigned int	fill4:1;
+
+	unsigned int	indexway:12;		/* DW 0 - high 32 */
+	unsigned int	fill5:4;
+
+	unsigned int	ctxnum:4;
+	unsigned int	fill6:12;
+
+	unsigned long	missvaddr:64;		/* DW 1 */
+
+	unsigned int	missasid:24;		/* DW 2 */
+	unsigned int	fill7:8;
+	unsigned int	fillasid:24;
+	unsigned int	dirty:1;
+	unsigned int	gaa:2;
+	unsigned long	fill8:5;
+
+	unsigned long	pfn:41;			/* DW 3 */
+	unsigned int	fill9:7;
+	unsigned int	pagesize:5;
+	unsigned int	fill10:11;
+
+	unsigned long	fillvaddr:64;		/* DW 4 */
+
+	unsigned long	fill11[3];
+};
+
+enum gru_tfh_opc {
+	TFHOP_NOOP,
+	TFHOP_RESTART,
+	TFHOP_WRITE_ONLY,
+	TFHOP_WRITE_RESTART,
+	TFHOP_EXCEPTION,
+	TFHOP_USER_POLLING_MODE = 7,
+};
+
+enum tfh_status {
+	TFHSTATUS_IDLE,
+	TFHSTATUS_EXCEPTION,
+	TFHSTATUS_ACTIVE,
+};
+
+enum tfh_state {
+	TFHSTATE_INACTIVE,
+	TFHSTATE_IDLE,
+	TFHSTATE_MISS_UPM,
+	TFHSTATE_MISS_FMM,
+	TFHSTATE_HW_ERR,
+	TFHSTATE_WRITE_TLB,
+	TFHSTATE_RESTART_CBR,
+};
+
+/* TFH cause bits */
+enum tfh_cause {
+	TFHCAUSE_NONE,
+	TFHCAUSE_TLB_MISS,
+	TFHCAUSE_TLB_MOD,
+	TFHCAUSE_HW_ERROR_RR,
+	TFHCAUSE_HW_ERROR_MAIN_ARRAY,
+	TFHCAUSE_HW_ERROR_VALID,
+	TFHCAUSE_HW_ERROR_PAGESIZE,
+	TFHCAUSE_INSTRUCTION_EXCEPTION,
+	TFHCAUSE_UNCORRECTIBLE_ERROR,
+};
+
+/* GAA values */
+#define GAA_RAM				0x0
+#define GAA_NCRAM			0x2
+#define GAA_MMIO			0x1
+#define GAA_REGISTER			0x3
+
+/* GRU paddr shift for pfn. (NOTE: shift is NOT by actual pagesize) */
+#define GRU_PADDR_SHIFT			12
+
+/*
+ * Context Configuration handle
+ * Used to allocate resources to a GSEG context.
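+ *
+ * A hedged sketch of the allocation sequence (editor's illustration;
+ * the context-loading code elsewhere in the driver also fills in the
+ * asid[] and sizeavail[] arrays before starting the context):
+ *
+ *	struct gru_context_configuration_handle *cch;
+ *
+ *	cch = get_cch(gru_base_vaddr, ctxnum);
+ *	cch->cbr_allocation_map = cbr_map;
+ *	cch->dsr_allocation_map = dsr_map;
+ *	if (cch_allocate(cch))
+ *		goto err;	-- e.g. CBR/DSR resources oversubscribed
+ *	if (cch_start(cch))
+ *		goto err;	-- context is now live on the chiplet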
+ * + */ +struct gru_context_configuration_handle { + unsigned int cmd:1; /* DW0 */ + unsigned int delresp:1; + unsigned int opc:3; + unsigned int unmap_enable:1; + unsigned int req_slice_set_enable:1; + unsigned int req_slice:2; + unsigned int cb_int_enable:1; + unsigned int tlb_int_enable:1; + unsigned int tfm_fault_bit_enable:1; + unsigned int tlb_int_select:4; + + unsigned int status:2; + unsigned int state:2; + unsigned int reserved2:4; + + unsigned int cause:4; + unsigned int tfm_done_bit_enable:1; + unsigned int unused:3; + + unsigned int dsr_allocation_map; + + unsigned long cbr_allocation_map; /* DW1 */ + + unsigned int asid[8]; /* DW 2 - 5 */ + unsigned short sizeavail[8]; /* DW 6 - 7 */ +} __attribute__ ((packed)); + +enum gru_cch_opc { + CCHOP_START = 1, + CCHOP_ALLOCATE, + CCHOP_INTERRUPT, + CCHOP_DEALLOCATE, + CCHOP_INTERRUPT_SYNC, +}; + +enum gru_cch_status { + CCHSTATUS_IDLE, + CCHSTATUS_EXCEPTION, + CCHSTATUS_ACTIVE, +}; + +enum gru_cch_state { + CCHSTATE_INACTIVE, + CCHSTATE_MAPPED, + CCHSTATE_ACTIVE, + CCHSTATE_INTERRUPTED, +}; + +/* CCH Exception cause */ +enum gru_cch_cause { + CCHCAUSE_REGION_REGISTER_WRITE_ERROR = 1, + CCHCAUSE_ILLEGAL_OPCODE = 2, + CCHCAUSE_INVALID_START_REQUEST = 3, + CCHCAUSE_INVALID_ALLOCATION_REQUEST = 4, + CCHCAUSE_INVALID_DEALLOCATION_REQUEST = 5, + CCHCAUSE_INVALID_INTERRUPT_REQUEST = 6, + CCHCAUSE_CCH_BUSY = 7, + CCHCAUSE_NO_CBRS_TO_ALLOCATE = 8, + CCHCAUSE_BAD_TFM_CONFIG = 9, + CCHCAUSE_CBR_RESOURCES_OVERSUBSCRIPED = 10, + CCHCAUSE_DSR_RESOURCES_OVERSUBSCRIPED = 11, + CCHCAUSE_CBR_DEALLOCATION_ERROR = 12, +}; +/* + * CBE - Control Block Extended + * Maintains internal GRU state for active CBs. + * + */ +struct gru_control_block_extended { + unsigned int reserved0:1; /* DW 0 - low */ + unsigned int imacpy:3; + unsigned int reserved1:4; + unsigned int xtypecpy:3; + unsigned int iaa0cpy:2; + unsigned int iaa1cpy:2; + unsigned int reserved2:1; + unsigned int opccpy:8; + unsigned int exopccpy:8; + + unsigned int idef2cpy:22; /* DW 0 - high */ + unsigned int reserved3:10; + + unsigned int idef4cpy:22; /* DW 1 */ + unsigned int reserved4:10; + unsigned int idef4upd:22; + unsigned int reserved5:10; + + unsigned long idef1upd:64; /* DW 2 */ + + unsigned long idef5cpy:64; /* DW 3 */ + + unsigned long idef6cpy:64; /* DW 4 */ + + unsigned long idef3upd:64; /* DW 5 */ + + unsigned long idef5upd:64; /* DW 6 */ + + unsigned int idef2upd:22; /* DW 7 */ + unsigned int reserved6:10; + + unsigned int ecause:20; + unsigned int cbrstate:4; + unsigned int cbrexecstatus:8; +}; + +/* CBE fields for active BCOPY instructions */ +#define cbe_baddr0 idef1upd +#define cbe_baddr1 idef3upd +#define cbe_src_cl idef6cpy +#define cbe_nelemcur idef5upd + +enum gru_cbr_state { + CBRSTATE_INACTIVE, + CBRSTATE_IDLE, + CBRSTATE_PE_CHECK, + CBRSTATE_QUEUED, + CBRSTATE_WAIT_RESPONSE, + CBRSTATE_INTERRUPTED, + CBRSTATE_INTERRUPTED_MISS_FMM, + CBRSTATE_BUSY_INTERRUPT_MISS_FMM, + CBRSTATE_INTERRUPTED_MISS_UPM, + CBRSTATE_BUSY_INTERRUPTED_MISS_UPM, + CBRSTATE_REQUEST_ISSUE, + CBRSTATE_BUSY_INTERRUPT, +}; + +/* CBE cbrexecstatus bits - defined in gru_instructions.h*/ +/* CBE ecause bits - defined in gru_instructions.h */ + +/* + * Convert a processor pagesize into the strange encoded pagesize used by the + * GRU. Processor pagesize is encoded as log of bytes per page. (or PAGE_SHIFT) + * pagesize log pagesize grupagesize + * 4k 12 0 + * 16k 14 1 + * 64k 16 2 + * 256k 18 3 + * 1m 20 4 + * 2m 21 5 + * 4m 22 6 + * 16m 24 7 + * 64m 26 8 + * ... 
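+ *
+ * As a worked check of the GRU_PAGESIZE() macro defined just below
+ * (editor's note): for 4k pages sh = 12, which is not > 20, giving
+ * (12 >> 1) - 6 = 0; for 64k pages (16 >> 1) - 6 = 2; for 2m pages
+ * sh = 21 is > 20, giving ((21 + 2) >> 1) - 6 = 5. All match the
+ * table above.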
+ */ +#define GRU_PAGESIZE(sh) ((((sh) > 20 ? (sh) + 2 : (sh)) >> 1) - 6) +#define GRU_SIZEAVAIL(sh) (1UL << GRU_PAGESIZE(sh)) + +/* minimum TLB purge count to ensure a full purge */ +#define GRUMAXINVAL 1024UL + +int cch_allocate(struct gru_context_configuration_handle *cch); +int cch_start(struct gru_context_configuration_handle *cch); +int cch_interrupt(struct gru_context_configuration_handle *cch); +int cch_deallocate(struct gru_context_configuration_handle *cch); +int cch_interrupt_sync(struct gru_context_configuration_handle *cch); +int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr, + unsigned long vaddrmask, int asid, int pagesize, int global, int n, + unsigned short ctxbitmap); +int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr, + int gaa, unsigned long vaddr, int asid, int dirty, int pagesize); +void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr, + int gaa, unsigned long vaddr, int asid, int dirty, int pagesize); +void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh); +void tfh_exception(struct gru_tlb_fault_handle *tfh); + +#endif /* __GRUHANDLES_H__ */ diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c new file mode 100644 index 000000000..9869f4f2f --- /dev/null +++ b/drivers/misc/sgi-gru/grukdump.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SN Platform GRU Driver + * + * Dump GRU State + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> +#include <linux/delay.h> +#include <linux/bitops.h> +#include <asm/uv/uv_hub.h> + +#include <linux/nospec.h> + +#include "gru.h" +#include "grutables.h" +#include "gruhandles.h" +#include "grulib.h" + +#define CCH_LOCK_ATTEMPTS 10 + +static int gru_user_copy_handle(void __user **dp, void *s) +{ + if (copy_to_user(*dp, s, GRU_HANDLE_BYTES)) + return -1; + *dp += GRU_HANDLE_BYTES; + return 0; +} + +static int gru_dump_context_data(void *grubase, + struct gru_context_configuration_handle *cch, + void __user *ubuf, int ctxnum, int dsrcnt, + int flush_cbrs) +{ + void *cb, *cbe, *tfh, *gseg; + int i, scr; + + gseg = grubase + ctxnum * GRU_GSEG_STRIDE; + cb = gseg + GRU_CB_BASE; + cbe = grubase + GRU_CBE_BASE; + tfh = grubase + GRU_TFH_BASE; + + for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) { + if (flush_cbrs) + gru_flush_cache(cb); + if (gru_user_copy_handle(&ubuf, cb)) + goto fail; + if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE)) + goto fail; + if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE)) + goto fail; + cb += GRU_HANDLE_STRIDE; + } + if (dsrcnt) + memcpy(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE); + return 0; + +fail: + return -EFAULT; +} + +static int gru_dump_tfm(struct gru_state *gru, + void __user *ubuf, void __user *ubufend) +{ + struct gru_tlb_fault_map *tfm; + int i; + + if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf) + return -EFBIG; + + for (i = 0; i < GRU_NUM_TFM; i++) { + tfm = get_tfm(gru->gs_gru_base_vaddr, i); + if (gru_user_copy_handle(&ubuf, tfm)) + goto fail; + } + return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES; + +fail: + return -EFAULT; +} + +static int gru_dump_tgh(struct gru_state *gru, + void __user *ubuf, void __user *ubufend) +{ + struct gru_tlb_global_handle *tgh; + int i; + + if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf) + return -EFBIG; + + for (i = 0; i < GRU_NUM_TGH; i++) { + tgh 
= get_tgh(gru->gs_gru_base_vaddr, i); + if (gru_user_copy_handle(&ubuf, tgh)) + goto fail; + } + return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES; + +fail: + return -EFAULT; +} + +static int gru_dump_context(struct gru_state *gru, int ctxnum, + void __user *ubuf, void __user *ubufend, char data_opt, + char lock_cch, char flush_cbrs) +{ + struct gru_dump_context_header hdr; + struct gru_dump_context_header __user *uhdr = ubuf; + struct gru_context_configuration_handle *cch, *ubufcch; + struct gru_thread_state *gts; + int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0; + void *grubase; + + memset(&hdr, 0, sizeof(hdr)); + grubase = gru->gs_gru_base_vaddr; + cch = get_cch(grubase, ctxnum); + for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) { + cch_locked = trylock_cch_handle(cch); + if (cch_locked) + break; + msleep(1); + } + + ubuf += sizeof(hdr); + ubufcch = ubuf; + if (gru_user_copy_handle(&ubuf, cch)) { + if (cch_locked) + unlock_cch_handle(cch); + return -EFAULT; + } + if (cch_locked) + ubufcch->delresp = 0; + bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES; + + if (cch_locked || !lock_cch) { + gts = gru->gs_gts[ctxnum]; + if (gts && gts->ts_vma) { + hdr.pid = gts->ts_tgid_owner; + hdr.vaddr = gts->ts_vma->vm_start; + } + if (cch->state != CCHSTATE_INACTIVE) { + cbrcnt = hweight64(cch->cbr_allocation_map) * + GRU_CBR_AU_SIZE; + dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) * + GRU_DSR_AU_CL : 0; + } + bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES; + if (bytes > ubufend - ubuf) + ret = -EFBIG; + else + ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum, + dsrcnt, flush_cbrs); + } + if (cch_locked) + unlock_cch_handle(cch); + if (ret) + return ret; + + hdr.magic = GRU_DUMP_MAGIC; + hdr.gid = gru->gs_gid; + hdr.ctxnum = ctxnum; + hdr.cbrcnt = cbrcnt; + hdr.dsrcnt = dsrcnt; + hdr.cch_locked = cch_locked; + if (copy_to_user(uhdr, &hdr, sizeof(hdr))) + return -EFAULT; + + return bytes; +} + +int gru_dump_chiplet_request(unsigned long arg) +{ + struct gru_state *gru; + struct gru_dump_chiplet_state_req req; + void __user *ubuf; + void __user *ubufend; + int ctxnum, ret, cnt = 0; + + if (copy_from_user(&req, (void __user *)arg, sizeof(req))) + return -EFAULT; + + /* Currently, only dump by gid is implemented */ + if (req.gid >= gru_max_gids) + return -EINVAL; + req.gid = array_index_nospec(req.gid, gru_max_gids); + + gru = GID_TO_GRU(req.gid); + ubuf = req.buf; + ubufend = req.buf + req.buflen; + + ret = gru_dump_tfm(gru, ubuf, ubufend); + if (ret < 0) + goto fail; + ubuf += ret; + + ret = gru_dump_tgh(gru, ubuf, ubufend); + if (ret < 0) + goto fail; + ubuf += ret; + + for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) { + if (req.ctxnum == ctxnum || req.ctxnum < 0) { + ret = gru_dump_context(gru, ctxnum, ubuf, ubufend, + req.data_opt, req.lock_cch, + req.flush_cbrs); + if (ret < 0) + goto fail; + ubuf += ret; + cnt++; + } + } + + if (copy_to_user((void __user *)arg, &req, sizeof(req))) + return -EFAULT; + return cnt; + +fail: + return ret; +} diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c new file mode 100644 index 000000000..0ea923fe6 --- /dev/null +++ b/drivers/misc/sgi-gru/grukservices.c @@ -0,0 +1,1159 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SN Platform GRU Driver + * + * KERNEL SERVICES THAT USE THE GRU + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. 
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/interrupt.h>
+#include <linux/sync_core.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <asm/io_apic.h>
+#include "gru.h"
+#include "grulib.h"
+#include "grutables.h"
+#include "grukservices.h"
+#include "gru_instructions.h"
+#include <asm/uv/uv_hub.h>
+
+/*
+ * Kernel GRU Usage
+ *
+ * The following is an interim algorithm for management of kernel GRU
+ * resources. This will likely be replaced when we better understand the
+ * kernel/user requirements.
+ *
+ * Blade percpu resources reserved for kernel use. These resources are
+ * reserved whenever the kernel context for the blade is loaded. Note
+ * that the kernel context is not guaranteed to be always available. It is
+ * loaded on demand & can be stolen by a user if the user demand exceeds the
+ * kernel demand. The kernel can always reload the kernel context but
+ * a SLEEP may be required!!!.
+ *
+ * Async Overview:
+ *
+ *	Each blade has one "kernel context" that owns GRU kernel resources
+ *	located on the blade. Kernel drivers use GRU resources in this context
+ *	for sending messages, zeroing memory, etc.
+ *
+ *	The kernel context is dynamically loaded on demand. If it is not in
+ *	use by the kernel, the kernel context can be unloaded & given to a user.
+ *	The kernel context will be reloaded when needed. This may require that
+ *	a context be stolen from a user.
+ *		NOTE: frequent unloading/reloading of the kernel context is
+ *		expensive. We are depending on batch schedulers, cpusets, sane
+ *		drivers or some other mechanism to prevent the need for frequent
+ *		stealing/reloading.
+ *
+ *	The kernel context consists of two parts:
+ *		- 1 CB & a few DSRs that are reserved for each cpu on the blade.
+ *		  Each cpu has its own private resources & does not share them
+ *		  with other cpus. These resources are used serially, ie,
+ *		  locked, used & unlocked on each call to a function in
+ *		  grukservices.
+ *			(Now that we have dynamic loading of kernel contexts, I
+ *			may rethink this & allow sharing between cpus....)
+ *
+ *		- Additional resources can be reserved long term & used directly
+ *		  by UV drivers located in the kernel. Drivers using these GRU
+ *		  resources can use asynchronous GRU instructions that send
+ *		  interrupts on completion.
+ *			- these resources must be explicitly locked/unlocked
+ *			- locked resources prevent (obviously) the kernel
+ *			  context from being unloaded.
+ *			- drivers using these resources directly issue their own
+ *			  GRU instruction and must wait/check completion.
+ *
+ *		  When these resources are reserved, the caller can optionally
+ *		  associate a wait_queue with the resources and use asynchronous
+ *		  GRU instructions. When an async GRU instruction completes, the
+ *		  driver will do a wakeup on the event.
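+ *
+ *		  A hedged sketch of that flow for a driver (editor's
+ *		  illustration; it strings together the entry points defined
+ *		  later in this file, with made-up resource counts):
+ *
+ *			struct completion cmp;
+ *			unsigned long han;
+ *			void *cb, *dsr;
+ *
+ *			init_completion(&cmp);
+ *			han = gru_reserve_async_resources(blade_id, 2, 512, &cmp);
+ *			if (!han)
+ *				return;		-- reservation already held
+ *			gru_lock_async_resource(han, &cb, &dsr);
+ *			...issue async GRU instructions on cb...
+ *			gru_wait_async_cbr(han);
+ *			gru_unlock_async_resource(han);
+ *			gru_release_async_resources(han);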
+ * + */ + + +#define ASYNC_HAN_TO_BID(h) ((h) - 1) +#define ASYNC_BID_TO_HAN(b) ((b) + 1) +#define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)] + +#define GRU_NUM_KERNEL_CBR 1 +#define GRU_NUM_KERNEL_DSR_BYTES 256 +#define GRU_NUM_KERNEL_DSR_CL (GRU_NUM_KERNEL_DSR_BYTES / \ + GRU_CACHE_LINE_BYTES) + +/* GRU instruction attributes for all instructions */ +#define IMA IMA_CB_DELAY + +/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */ +#define __gru_cacheline_aligned__ \ + __attribute__((__aligned__(GRU_CACHE_LINE_BYTES))) + +#define MAGIC 0x1234567887654321UL + +/* Default retry count for GRU errors on kernel instructions */ +#define EXCEPTION_RETRY_LIMIT 3 + +/* Status of message queue sections */ +#define MQS_EMPTY 0 +#define MQS_FULL 1 +#define MQS_NOOP 2 + +/*----------------- RESOURCE MANAGEMENT -------------------------------------*/ +/* optimized for x86_64 */ +struct message_queue { + union gru_mesqhead head __gru_cacheline_aligned__; /* CL 0 */ + int qlines; /* DW 1 */ + long hstatus[2]; + void *next __gru_cacheline_aligned__;/* CL 1 */ + void *limit; + void *start; + void *start2; + char data ____cacheline_aligned; /* CL 2 */ +}; + +/* First word in every message - used by mesq interface */ +struct message_header { + char present; + char present2; + char lines; + char fill; +}; + +#define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h])) + +/* + * Reload the blade's kernel context into a GRU chiplet. Called holding + * the bs_kgts_sema for READ. Will steal user contexts if necessary. + */ +static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id) +{ + struct gru_state *gru; + struct gru_thread_state *kgts; + void *vaddr; + int ctxnum, ncpus; + + up_read(&bs->bs_kgts_sema); + down_write(&bs->bs_kgts_sema); + + if (!bs->bs_kgts) { + do { + bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0); + if (!IS_ERR(bs->bs_kgts)) + break; + msleep(1); + } while (true); + bs->bs_kgts->ts_user_blade_id = blade_id; + } + kgts = bs->bs_kgts; + + if (!kgts->ts_gru) { + STAT(load_kernel_context); + ncpus = uv_blade_nr_possible_cpus(blade_id); + kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU( + GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs); + kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU( + GRU_NUM_KERNEL_DSR_BYTES * ncpus + + bs->bs_async_dsr_bytes); + while (!gru_assign_gru_context(kgts)) { + msleep(1); + gru_steal_context(kgts); + } + gru_load_context(kgts); + gru = bs->bs_kgts->ts_gru; + vaddr = gru->gs_gru_base_vaddr; + ctxnum = kgts->ts_ctxnum; + bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0); + bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0); + } + downgrade_write(&bs->bs_kgts_sema); +} + +/* + * Free all kernel contexts that are not currently in use. + * Returns 0 if all freed, else number of inuse context. + */ +static int gru_free_kernel_contexts(void) +{ + struct gru_blade_state *bs; + struct gru_thread_state *kgts; + int bid, ret = 0; + + for (bid = 0; bid < GRU_MAX_BLADES; bid++) { + bs = gru_base[bid]; + if (!bs) + continue; + + /* Ignore busy contexts. Don't want to block here. */ + if (down_write_trylock(&bs->bs_kgts_sema)) { + kgts = bs->bs_kgts; + if (kgts && kgts->ts_gru) + gru_unload_context(kgts, 0); + bs->bs_kgts = NULL; + up_write(&bs->bs_kgts_sema); + kfree(kgts); + } else { + ret++; + } + } + return ret; +} + +/* + * Lock & load the kernel context for the specified blade. 
+ */ +static struct gru_blade_state *gru_lock_kernel_context(int blade_id) +{ + struct gru_blade_state *bs; + int bid; + + STAT(lock_kernel_context); +again: + bid = blade_id < 0 ? uv_numa_blade_id() : blade_id; + bs = gru_base[bid]; + + /* Handle the case where migration occurred while waiting for the sema */ + down_read(&bs->bs_kgts_sema); + if (blade_id < 0 && bid != uv_numa_blade_id()) { + up_read(&bs->bs_kgts_sema); + goto again; + } + if (!bs->bs_kgts || !bs->bs_kgts->ts_gru) + gru_load_kernel_context(bs, bid); + return bs; + +} + +/* + * Unlock the kernel context for the specified blade. Context is not + * unloaded but may be stolen before next use. + */ +static void gru_unlock_kernel_context(int blade_id) +{ + struct gru_blade_state *bs; + + bs = gru_base[blade_id]; + up_read(&bs->bs_kgts_sema); + STAT(unlock_kernel_context); +} + +/* + * Reserve & get pointers to the DSR/CBRs reserved for the current cpu. + * - returns with preemption disabled + */ +static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr) +{ + struct gru_blade_state *bs; + int lcpu; + + BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES); + preempt_disable(); + bs = gru_lock_kernel_context(-1); + lcpu = uv_blade_processor_id(); + *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE; + *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES; + return 0; +} + +/* + * Free the current cpus reserved DSR/CBR resources. + */ +static void gru_free_cpu_resources(void *cb, void *dsr) +{ + gru_unlock_kernel_context(uv_numa_blade_id()); + preempt_enable(); +} + +/* + * Reserve GRU resources to be used asynchronously. + * Note: currently supports only 1 reservation per blade. + * + * input: + * blade_id - blade on which resources should be reserved + * cbrs - number of CBRs + * dsr_bytes - number of DSR bytes needed + * output: + * handle to identify resource + * (0 = async resources already reserved) + */ +unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes, + struct completion *cmp) +{ + struct gru_blade_state *bs; + struct gru_thread_state *kgts; + int ret = 0; + + bs = gru_base[blade_id]; + + down_write(&bs->bs_kgts_sema); + + /* Verify no resources already reserved */ + if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs) + goto done; + bs->bs_async_dsr_bytes = dsr_bytes; + bs->bs_async_cbrs = cbrs; + bs->bs_async_wq = cmp; + kgts = bs->bs_kgts; + + /* Resources changed. Unload context if already loaded */ + if (kgts && kgts->ts_gru) + gru_unload_context(kgts, 0); + ret = ASYNC_BID_TO_HAN(blade_id); + +done: + up_write(&bs->bs_kgts_sema); + return ret; +} + +/* + * Release async resources previously reserved. + * + * input: + * han - handle to identify resources + */ +void gru_release_async_resources(unsigned long han) +{ + struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han); + + down_write(&bs->bs_kgts_sema); + bs->bs_async_dsr_bytes = 0; + bs->bs_async_cbrs = 0; + bs->bs_async_wq = NULL; + up_write(&bs->bs_kgts_sema); +} + +/* + * Wait for async GRU instructions to complete. 
+ * + * input: + * han - handle to identify resources + */ +void gru_wait_async_cbr(unsigned long han) +{ + struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han); + + wait_for_completion(bs->bs_async_wq); + mb(); +} + +/* + * Lock previous reserved async GRU resources + * + * input: + * han - handle to identify resources + * output: + * cb - pointer to first CBR + * dsr - pointer to first DSR + */ +void gru_lock_async_resource(unsigned long han, void **cb, void **dsr) +{ + struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han); + int blade_id = ASYNC_HAN_TO_BID(han); + int ncpus; + + gru_lock_kernel_context(blade_id); + ncpus = uv_blade_nr_possible_cpus(blade_id); + if (cb) + *cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE; + if (dsr) + *dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES; +} + +/* + * Unlock previous reserved async GRU resources + * + * input: + * han - handle to identify resources + */ +void gru_unlock_async_resource(unsigned long han) +{ + int blade_id = ASYNC_HAN_TO_BID(han); + + gru_unlock_kernel_context(blade_id); +} + +/*----------------------------------------------------------------------*/ +int gru_get_cb_exception_detail(void *cb, + struct control_block_extended_exc_detail *excdet) +{ + struct gru_control_block_extended *cbe; + struct gru_thread_state *kgts = NULL; + unsigned long off; + int cbrnum, bid; + + /* + * Locate kgts for cb. This algorithm is SLOW but + * this function is rarely called (ie., almost never). + * Performance does not matter. + */ + for_each_possible_blade(bid) { + if (!gru_base[bid]) + break; + kgts = gru_base[bid]->bs_kgts; + if (!kgts || !kgts->ts_gru) + continue; + off = cb - kgts->ts_gru->gs_gru_base_vaddr; + if (off < GRU_SIZE) + break; + kgts = NULL; + } + BUG_ON(!kgts); + cbrnum = thread_cbr_number(kgts, get_cb_number(cb)); + cbe = get_cbe(GRUBASE(cb), cbrnum); + gru_flush_cache(cbe); /* CBE not coherent */ + sync_core(); + excdet->opc = cbe->opccpy; + excdet->exopc = cbe->exopccpy; + excdet->ecause = cbe->ecause; + excdet->exceptdet0 = cbe->idef1upd; + excdet->exceptdet1 = cbe->idef3upd; + gru_flush_cache(cbe); + return 0; +} + +static char *gru_get_cb_exception_detail_str(int ret, void *cb, + char *buf, int size) +{ + struct gru_control_block_status *gen = (void *)cb; + struct control_block_extended_exc_detail excdet; + + if (ret > 0 && gen->istatus == CBS_EXCEPTION) { + gru_get_cb_exception_detail(cb, &excdet); + snprintf(buf, size, + "GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x," + "excdet0 0x%lx, excdet1 0x%x", smp_processor_id(), + gen, excdet.opc, excdet.exopc, excdet.ecause, + excdet.exceptdet0, excdet.exceptdet1); + } else { + snprintf(buf, size, "No exception"); + } + return buf; +} + +static int gru_wait_idle_or_exception(struct gru_control_block_status *gen) +{ + while (gen->istatus >= CBS_ACTIVE) { + cpu_relax(); + barrier(); + } + return gen->istatus; +} + +static int gru_retry_exception(void *cb) +{ + struct gru_control_block_status *gen = (void *)cb; + struct control_block_extended_exc_detail excdet; + int retry = EXCEPTION_RETRY_LIMIT; + + while (1) { + if (gru_wait_idle_or_exception(gen) == CBS_IDLE) + return CBS_IDLE; + if (gru_get_cb_message_queue_substatus(cb)) + return CBS_EXCEPTION; + gru_get_cb_exception_detail(cb, &excdet); + if ((excdet.ecause & ~EXCEPTION_RETRY_BITS) || + (excdet.cbrexecstatus & CBR_EXS_ABORT_OCC)) + break; + if (retry-- == 0) + break; + gen->icmd = 1; + gru_flush_cache(gen); + } + return CBS_EXCEPTION; +} + +int gru_check_status_proc(void *cb) +{ + struct gru_control_block_status 
*gen = (void *)cb; + int ret; + + ret = gen->istatus; + if (ret == CBS_EXCEPTION) + ret = gru_retry_exception(cb); + rmb(); + return ret; + +} + +int gru_wait_proc(void *cb) +{ + struct gru_control_block_status *gen = (void *)cb; + int ret; + + ret = gru_wait_idle_or_exception(gen); + if (ret == CBS_EXCEPTION) + ret = gru_retry_exception(cb); + rmb(); + return ret; +} + +static void gru_abort(int ret, void *cb, char *str) +{ + char buf[GRU_EXC_STR_SIZE]; + + panic("GRU FATAL ERROR: %s - %s\n", str, + gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf))); +} + +void gru_wait_abort_proc(void *cb) +{ + int ret; + + ret = gru_wait_proc(cb); + if (ret) + gru_abort(ret, cb, "gru_wait_abort"); +} + + +/*------------------------------ MESSAGE QUEUES -----------------------------*/ + +/* Internal status . These are NOT returned to the user. */ +#define MQIE_AGAIN -1 /* try again */ + + +/* + * Save/restore the "present" flag that is in the second line of 2-line + * messages + */ +static inline int get_present2(void *p) +{ + struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES; + return mhdr->present; +} + +static inline void restore_present2(void *p, int val) +{ + struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES; + mhdr->present = val; +} + +/* + * Create a message queue. + * qlines - message queue size in cache lines. Includes 2-line header. + */ +int gru_create_message_queue(struct gru_message_queue_desc *mqd, + void *p, unsigned int bytes, int nasid, int vector, int apicid) +{ + struct message_queue *mq = p; + unsigned int qlines; + + qlines = bytes / GRU_CACHE_LINE_BYTES - 2; + memset(mq, 0, bytes); + mq->start = &mq->data; + mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES; + mq->next = &mq->data; + mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES; + mq->qlines = qlines; + mq->hstatus[0] = 0; + mq->hstatus[1] = 1; + mq->head = gru_mesq_head(2, qlines / 2 + 1); + mqd->mq = mq; + mqd->mq_gpa = uv_gpa(mq); + mqd->qlines = qlines; + mqd->interrupt_pnode = nasid >> 1; + mqd->interrupt_vector = vector; + mqd->interrupt_apicid = apicid; + return 0; +} +EXPORT_SYMBOL_GPL(gru_create_message_queue); + +/* + * Send a NOOP message to a message queue + * Returns: + * 0 - if queue is full after the send. This is the normal case + * but various races can change this. + * -1 - if mesq sent successfully but queue not full + * >0 - unexpected error. 
MQE_xxx returned + */ +static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd, + void *mesg) +{ + const struct message_header noop_header = { + .present = MQS_NOOP, .lines = 1}; + unsigned long m; + int substatus, ret; + struct message_header save_mhdr, *mhdr = mesg; + + STAT(mesq_noop); + save_mhdr = *mhdr; + *mhdr = noop_header; + gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA); + ret = gru_wait(cb); + + if (ret) { + substatus = gru_get_cb_message_queue_substatus(cb); + switch (substatus) { + case CBSS_NO_ERROR: + STAT(mesq_noop_unexpected_error); + ret = MQE_UNEXPECTED_CB_ERR; + break; + case CBSS_LB_OVERFLOWED: + STAT(mesq_noop_lb_overflow); + ret = MQE_CONGESTION; + break; + case CBSS_QLIMIT_REACHED: + STAT(mesq_noop_qlimit_reached); + ret = 0; + break; + case CBSS_AMO_NACKED: + STAT(mesq_noop_amo_nacked); + ret = MQE_CONGESTION; + break; + case CBSS_PUT_NACKED: + STAT(mesq_noop_put_nacked); + m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6); + gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1, + IMA); + if (gru_wait(cb) == CBS_IDLE) + ret = MQIE_AGAIN; + else + ret = MQE_UNEXPECTED_CB_ERR; + break; + case CBSS_PAGE_OVERFLOW: + STAT(mesq_noop_page_overflow); + fallthrough; + default: + BUG(); + } + } + *mhdr = save_mhdr; + return ret; +} + +/* + * Handle a gru_mesq full. + */ +static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd, + void *mesg, int lines) +{ + union gru_mesqhead mqh; + unsigned int limit, head; + unsigned long avalue; + int half, qlines; + + /* Determine if switching to first/second half of q */ + avalue = gru_get_amo_value(cb); + head = gru_get_amo_value_head(cb); + limit = gru_get_amo_value_limit(cb); + + qlines = mqd->qlines; + half = (limit != qlines); + + if (half) + mqh = gru_mesq_head(qlines / 2 + 1, qlines); + else + mqh = gru_mesq_head(2, qlines / 2 + 1); + + /* Try to get lock for switching head pointer */ + gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA); + if (gru_wait(cb) != CBS_IDLE) + goto cberr; + if (!gru_get_amo_value(cb)) { + STAT(mesq_qf_locked); + return MQE_QUEUE_FULL; + } + + /* Got the lock. Send optional NOP if queue not full, */ + if (head != limit) { + if (send_noop_message(cb, mqd, mesg)) { + gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), + XTYPE_DW, IMA); + if (gru_wait(cb) != CBS_IDLE) + goto cberr; + STAT(mesq_qf_noop_not_full); + return MQIE_AGAIN; + } + avalue++; + } + + /* Then flip queuehead to other half of queue. */ + gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue, + IMA); + if (gru_wait(cb) != CBS_IDLE) + goto cberr; + + /* If not successfully in swapping queue head, clear the hstatus lock */ + if (gru_get_amo_value(cb) != avalue) { + STAT(mesq_qf_switch_head_failed); + gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, + IMA); + if (gru_wait(cb) != CBS_IDLE) + goto cberr; + } + return MQIE_AGAIN; +cberr: + STAT(mesq_qf_unexpected_error); + return MQE_UNEXPECTED_CB_ERR; +} + +/* + * Handle a PUT failure. Note: if message was a 2-line message, one of the + * lines might have successfully have been written. Before sending the + * message, "present" must be cleared in BOTH lines to prevent the receiver + * from prematurely seeing the full message. 
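+ *
+ * Clearing both cachelines first (the gru_vset() below) guarantees the
+ * receiver cannot observe a stale "present" flag left by the earlier,
+ * partially successful PUT while the message data is being rewritten.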
+ */ +static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd, + void *mesg, int lines) +{ + unsigned long m; + int ret, loops = 200; /* experimentally determined */ + + m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6); + if (lines == 2) { + gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA); + if (gru_wait(cb) != CBS_IDLE) + return MQE_UNEXPECTED_CB_ERR; + } + gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA); + if (gru_wait(cb) != CBS_IDLE) + return MQE_UNEXPECTED_CB_ERR; + + if (!mqd->interrupt_vector) + return MQE_OK; + + /* + * Send a noop message in order to deliver a cross-partition interrupt + * to the SSI that contains the target message queue. Normally, the + * interrupt is automatically delivered by hardware following mesq + * operations, but some error conditions require explicit delivery. + * The noop message will trigger delivery. Otherwise partition failures + * could cause unrecovered errors. + */ + do { + ret = send_noop_message(cb, mqd, mesg); + } while ((ret == MQIE_AGAIN || ret == MQE_CONGESTION) && (loops-- > 0)); + + if (ret == MQIE_AGAIN || ret == MQE_CONGESTION) { + /* + * Don't indicate to the app to resend the message, as it's + * already been successfully sent. We simply send an OK + * (rather than fail the send with MQE_UNEXPECTED_CB_ERR), + * assuming that the other side is receiving enough + * interrupts to get this message processed anyway. + */ + ret = MQE_OK; + } + return ret; +} + +/* + * Handle a gru_mesq failure. Some of these failures are software recoverable + * or retryable. + */ +static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd, + void *mesg, int lines) +{ + int substatus, ret = 0; + + substatus = gru_get_cb_message_queue_substatus(cb); + switch (substatus) { + case CBSS_NO_ERROR: + STAT(mesq_send_unexpected_error); + ret = MQE_UNEXPECTED_CB_ERR; + break; + case CBSS_LB_OVERFLOWED: + STAT(mesq_send_lb_overflow); + ret = MQE_CONGESTION; + break; + case CBSS_QLIMIT_REACHED: + STAT(mesq_send_qlimit_reached); + ret = send_message_queue_full(cb, mqd, mesg, lines); + break; + case CBSS_AMO_NACKED: + STAT(mesq_send_amo_nacked); + ret = MQE_CONGESTION; + break; + case CBSS_PUT_NACKED: + STAT(mesq_send_put_nacked); + ret = send_message_put_nacked(cb, mqd, mesg, lines); + break; + case CBSS_PAGE_OVERFLOW: + STAT(mesq_page_overflow); + fallthrough; + default: + BUG(); + } + return ret; +} + +/* + * Send a message to a message queue + * mqd message queue descriptor + * mesg message. 
ust be vaddr within a GSEG + * bytes message size (<= 2 CL) + */ +int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg, + unsigned int bytes) +{ + struct message_header *mhdr; + void *cb; + void *dsr; + int istatus, clines, ret; + + STAT(mesq_send); + BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES); + + clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES); + if (gru_get_cpu_resources(bytes, &cb, &dsr)) + return MQE_BUG_NO_RESOURCES; + memcpy(dsr, mesg, bytes); + mhdr = dsr; + mhdr->present = MQS_FULL; + mhdr->lines = clines; + if (clines == 2) { + mhdr->present2 = get_present2(mhdr); + restore_present2(mhdr, MQS_FULL); + } + + do { + ret = MQE_OK; + gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA); + istatus = gru_wait(cb); + if (istatus != CBS_IDLE) + ret = send_message_failure(cb, mqd, dsr, clines); + } while (ret == MQIE_AGAIN); + gru_free_cpu_resources(cb, dsr); + + if (ret) + STAT(mesq_send_failed); + return ret; +} +EXPORT_SYMBOL_GPL(gru_send_message_gpa); + +/* + * Advance the receive pointer for the queue to the next message. + */ +void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg) +{ + struct message_queue *mq = mqd->mq; + struct message_header *mhdr = mq->next; + void *next, *pnext; + int half = -1; + int lines = mhdr->lines; + + if (lines == 2) + restore_present2(mhdr, MQS_EMPTY); + mhdr->present = MQS_EMPTY; + + pnext = mq->next; + next = pnext + GRU_CACHE_LINE_BYTES * lines; + if (next == mq->limit) { + next = mq->start; + half = 1; + } else if (pnext < mq->start2 && next >= mq->start2) { + half = 0; + } + + if (half >= 0) + mq->hstatus[half] = 1; + mq->next = next; +} +EXPORT_SYMBOL_GPL(gru_free_message); + +/* + * Get next message from message queue. Return NULL if no message + * present. User must call next_message() to move to next message. + * rmq message queue + */ +void *gru_get_next_message(struct gru_message_queue_desc *mqd) +{ + struct message_queue *mq = mqd->mq; + struct message_header *mhdr = mq->next; + int present = mhdr->present; + + /* skip NOOP messages */ + while (present == MQS_NOOP) { + gru_free_message(mqd, mhdr); + mhdr = mq->next; + present = mhdr->present; + } + + /* Wait for both halves of 2 line messages */ + if (present == MQS_FULL && mhdr->lines == 2 && + get_present2(mhdr) == MQS_EMPTY) + present = MQS_EMPTY; + + if (!present) { + STAT(mesq_receive_none); + return NULL; + } + + if (mhdr->lines == 2) + restore_present2(mhdr, mhdr->present2); + + STAT(mesq_receive); + return mhdr; +} +EXPORT_SYMBOL_GPL(gru_get_next_message); + +/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/ + +/* + * Load a DW from a global GPA. The GPA can be a memory or MMR address. 
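+ *
+ * Illustrative sketch only ("val" and "some_dword" are hypothetical):
+ *
+ *	unsigned long val;
+ *
+ *	if (gru_read_gpa(&val, uv_gpa(&some_dword)) == 0)
+ *		... val now holds the doubleword at that GPA ...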
+ */ +int gru_read_gpa(unsigned long *value, unsigned long gpa) +{ + void *cb; + void *dsr; + int ret, iaa; + + STAT(read_gpa); + if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr)) + return MQE_BUG_NO_RESOURCES; + iaa = gpa >> 62; + gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA); + ret = gru_wait(cb); + if (ret == CBS_IDLE) + *value = *(unsigned long *)dsr; + gru_free_cpu_resources(cb, dsr); + return ret; +} +EXPORT_SYMBOL_GPL(gru_read_gpa); + + +/* + * Copy a block of data using the GRU resources + */ +int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa, + unsigned int bytes) +{ + void *cb; + void *dsr; + int ret; + + STAT(copy_gpa); + if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr)) + return MQE_BUG_NO_RESOURCES; + gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr), + XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA); + ret = gru_wait(cb); + gru_free_cpu_resources(cb, dsr); + return ret; +} +EXPORT_SYMBOL_GPL(gru_copy_gpa); + +/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/ +/* Temp - will delete after we gain confidence in the GRU */ + +static int quicktest0(unsigned long arg) +{ + unsigned long word0; + unsigned long word1; + void *cb; + void *dsr; + unsigned long *p; + int ret = -EIO; + + if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr)) + return MQE_BUG_NO_RESOURCES; + p = dsr; + word0 = MAGIC; + word1 = 0; + + gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA); + if (gru_wait(cb) != CBS_IDLE) { + printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id()); + goto done; + } + + if (*p != MAGIC) { + printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p); + goto done; + } + gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA); + if (gru_wait(cb) != CBS_IDLE) { + printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id()); + goto done; + } + + if (word0 != word1 || word1 != MAGIC) { + printk(KERN_DEBUG + "GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n", + smp_processor_id(), word1, MAGIC); + goto done; + } + ret = 0; + +done: + gru_free_cpu_resources(cb, dsr); + return ret; +} + +#define ALIGNUP(p, q) ((void *)(((unsigned long)(p) + (q) - 1) & ~(q - 1))) + +static int quicktest1(unsigned long arg) +{ + struct gru_message_queue_desc mqd; + void *p, *mq; + int i, ret = -EIO; + char mes[GRU_CACHE_LINE_BYTES], *m; + + /* Need 1K cacheline aligned that does not cross page boundary */ + p = kmalloc(4096, 0); + if (p == NULL) + return -ENOMEM; + mq = ALIGNUP(p, 1024); + memset(mes, 0xee, sizeof(mes)); + + gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0); + for (i = 0; i < 6; i++) { + mes[8] = i; + do { + ret = gru_send_message_gpa(&mqd, mes, sizeof(mes)); + } while (ret == MQE_CONGESTION); + if (ret) + break; + } + if (ret != MQE_QUEUE_FULL || i != 4) { + printk(KERN_DEBUG "GRU:%d quicktest1: unexpect status %d, i %d\n", + smp_processor_id(), ret, i); + goto done; + } + + for (i = 0; i < 6; i++) { + m = gru_get_next_message(&mqd); + if (!m || m[8] != i) + break; + gru_free_message(&mqd, m); + } + if (i != 4) { + printk(KERN_DEBUG "GRU:%d quicktest2: bad message, i %d, m %p, m8 %d\n", + smp_processor_id(), i, m, m ? 
m[8] : -1); + goto done; + } + ret = 0; + +done: + kfree(p); + return ret; +} + +static int quicktest2(unsigned long arg) +{ + static DECLARE_COMPLETION(cmp); + unsigned long han; + int blade_id = 0; + int numcb = 4; + int ret = 0; + unsigned long *buf; + void *cb0, *cb; + struct gru_control_block_status *gen; + int i, k, istatus, bytes; + + bytes = numcb * 4 * 8; + buf = kmalloc(bytes, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = -EBUSY; + han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp); + if (!han) + goto done; + + gru_lock_async_resource(han, &cb0, NULL); + memset(buf, 0xee, bytes); + for (i = 0; i < numcb; i++) + gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0, + XTYPE_DW, 4, 1, IMA_INTERRUPT); + + ret = 0; + k = numcb; + do { + gru_wait_async_cbr(han); + for (i = 0; i < numcb; i++) { + cb = cb0 + i * GRU_HANDLE_STRIDE; + istatus = gru_check_status(cb); + if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS) + break; + } + if (i == numcb) + continue; + if (istatus != CBS_IDLE) { + printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i); + ret = -EFAULT; + } else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] || + buf[4 * i + 3]) { + printk(KERN_DEBUG "GRU:%d quicktest2:cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n", + smp_processor_id(), i, buf[4 * i], buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]); + ret = -EIO; + } + k--; + gen = cb; + gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */ + } while (k); + BUG_ON(cmp.done); + + gru_unlock_async_resource(han); + gru_release_async_resources(han); +done: + kfree(buf); + return ret; +} + +#define BUFSIZE 200 +static int quicktest3(unsigned long arg) +{ + char buf1[BUFSIZE], buf2[BUFSIZE]; + int ret = 0; + + memset(buf2, 0, sizeof(buf2)); + memset(buf1, get_cycles() & 255, sizeof(buf1)); + gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE); + if (memcmp(buf1, buf2, BUFSIZE)) { + printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id()); + ret = -EIO; + } + return ret; +} + +/* + * Debugging only. User hook for various kernel tests + * of driver & gru. + */ +int gru_ktest(unsigned long arg) +{ + int ret = -EINVAL; + + switch (arg & 0xff) { + case 0: + ret = quicktest0(arg); + break; + case 1: + ret = quicktest1(arg); + break; + case 2: + ret = quicktest2(arg); + break; + case 3: + ret = quicktest3(arg); + break; + case 99: + ret = gru_free_kernel_contexts(); + break; + } + return ret; + +} + +int gru_kservices_init(void) +{ + return 0; +} + +void gru_kservices_exit(void) +{ + if (gru_free_kernel_contexts()) + BUG(); +} + diff --git a/drivers/misc/sgi-gru/grukservices.h b/drivers/misc/sgi-gru/grukservices.h new file mode 100644 index 000000000..510e45e97 --- /dev/null +++ b/drivers/misc/sgi-gru/grukservices.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + */ +#ifndef __GRU_KSERVICES_H_ +#define __GRU_KSERVICES_H_ + + +/* + * Message queues using the GRU to send/receive messages. + * + * These function allow the user to create a message queue for + * sending/receiving 1 or 2 cacheline messages using the GRU. + * + * Processes SENDING messages will use a kernel CBR/DSR to send + * the message. This is transparent to the caller. + * + * The receiver does not use any GRU resources. + * + * The functions support: + * - single receiver + * - multiple senders + * - cross partition message + * + * Missing features ZZZ: + * - user options for dealing with timeouts, queue full, etc. 
+ * - gru_create_message_queue() needs interrupt vector info + */ + +struct gru_message_queue_desc { + void *mq; /* message queue vaddress */ + unsigned long mq_gpa; /* global address of mq */ + int qlines; /* queue size in CL */ + int interrupt_vector; /* interrupt vector */ + int interrupt_pnode; /* pnode for interrupt */ + int interrupt_apicid; /* lapicid for interrupt */ +}; + +/* + * Initialize a user allocated chunk of memory to be used as + * a message queue. The caller must ensure that the queue is + * in contiguous physical memory and is cacheline aligned. + * + * Message queue size is the total number of bytes allocated + * to the queue including a 2 cacheline header that is used + * to manage the queue. + * + * Input: + * mqd pointer to message queue descriptor + * p pointer to user allocated mesq memory. + * bytes size of message queue in bytes + * vector interrupt vector (zero if no interrupts) + * nasid nasid of blade where interrupt is delivered + * apicid apicid of cpu for interrupt + * + * Errors: + * 0 OK + * >0 error + */ +extern int gru_create_message_queue(struct gru_message_queue_desc *mqd, + void *p, unsigned int bytes, int nasid, int vector, int apicid); + +/* + * Send a message to a message queue. + * + * Note: The message queue transport mechanism uses the first 32 + * bits of the message. Users should avoid using these bits. + * + * + * Input: + * mqd pointer to message queue descriptor + * mesg pointer to message. Must be 64-bit aligned + * bytes size of message in bytes + * + * Output: + * 0 message sent + * >0 Send failure - see error codes below + * + */ +extern int gru_send_message_gpa(struct gru_message_queue_desc *mqd, + void *mesg, unsigned int bytes); + +/* Status values for gru_send_message() */ +#define MQE_OK 0 /* message sent successfully */ +#define MQE_CONGESTION 1 /* temporary congestion, try again */ +#define MQE_QUEUE_FULL 2 /* queue is full */ +#define MQE_UNEXPECTED_CB_ERR 3 /* unexpected CB error */ +#define MQE_PAGE_OVERFLOW 10 /* BUG - queue overflowed a page */ +#define MQE_BUG_NO_RESOURCES 11 /* BUG - could not alloc GRU cb/dsr */ + +/* + * Advance the receive pointer for the message queue to the next message. + * Note: current API requires messages to be gotten & freed in order. Future + * API extensions may allow for out-of-order freeing. + * + * Input + * mqd pointer to message queue descriptor + * mesq message being freed + */ +extern void gru_free_message(struct gru_message_queue_desc *mqd, + void *mesq); + +/* + * Get next message from message queue. Returns pointer to + * message OR NULL if no message present. + * User must call gru_free_message() after message is processed + * in order to move the queue pointers to next message. + * + * Input + * mqd pointer to message queue descriptor + * + * Output: + * p pointer to message + * NULL no message available + */ +extern void *gru_get_next_message(struct gru_message_queue_desc *mqd); + + +/* + * Read a GRU global GPA. Source can be located in a remote partition. + * + * Input: + * value memory address where MMR value is returned + * gpa source numalink physical address of GPA + * + * Output: + * 0 OK + * >0 error + */ +int gru_read_gpa(unsigned long *value, unsigned long gpa); + + +/* + * Copy data using the GRU. Source or destination can be located in a remote + * partition. 
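+ *
+ * Addresses are global physical addresses (e.g. from uv_gpa()). A
+ * minimal sketch, mirroring quicktest3() in grukservices.c:
+ *
+ *	char src[200], dst[200];
+ *
+ *	gru_copy_gpa(uv_gpa(dst), uv_gpa(src), sizeof(src));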
+ * + * Input: + * dest_gpa destination global physical address + * src_gpa source global physical address + * bytes number of bytes to copy + * + * Output: + * 0 OK + * >0 error + */ +extern int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa, + unsigned int bytes); + +/* + * Reserve GRU resources to be used asynchronously. + * + * input: + * blade_id - blade on which resources should be reserved + * cbrs - number of CBRs + * dsr_bytes - number of DSR bytes needed + * cmp - completion structure for waiting for + * async completions + * output: + * handle to identify resource + * (0 = no resources) + */ +extern unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes, + struct completion *cmp); + +/* + * Release async resources previously reserved. + * + * input: + * han - handle to identify resources + */ +extern void gru_release_async_resources(unsigned long han); + +/* + * Wait for async GRU instructions to complete. + * + * input: + * han - handle to identify resources + */ +extern void gru_wait_async_cbr(unsigned long han); + +/* + * Lock previous reserved async GRU resources + * + * input: + * han - handle to identify resources + * output: + * cb - pointer to first CBR + * dsr - pointer to first DSR + */ +extern void gru_lock_async_resource(unsigned long han, void **cb, void **dsr); + +/* + * Unlock previous reserved async GRU resources + * + * input: + * han - handle to identify resources + */ +extern void gru_unlock_async_resource(unsigned long han); + +#endif /* __GRU_KSERVICES_H_ */ diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h new file mode 100644 index 000000000..85c103923 --- /dev/null +++ b/drivers/misc/sgi-gru/grulib.h @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __GRULIB_H__ +#define __GRULIB_H__ + +#define GRU_BASENAME "gru" +#define GRU_FULLNAME "/dev/gru" +#define GRU_IOCTL_NUM 'G' + +/* + * Maximum number of GRU segments that a user can have open + * ZZZ temp - set high for testing. Revisit. 
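+ *
+ * The ioctls defined below are issued against the GRU device node; a
+ * hypothetical user-space sketch (error handling elided):
+ *
+ *	int fd = open(GRU_FULLNAME, O_RDWR);
+ *	ioctl(fd, GRU_KTEST, 0);	... runs quicktest0 ...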
+ */ +#define GRU_MAX_OPEN_CONTEXTS 32 + +/* Set Number of Request Blocks */ +#define GRU_CREATE_CONTEXT _IOWR(GRU_IOCTL_NUM, 1, void *) + +/* Set Context Options */ +#define GRU_SET_CONTEXT_OPTION _IOWR(GRU_IOCTL_NUM, 4, void *) + +/* Fetch exception detail */ +#define GRU_USER_GET_EXCEPTION_DETAIL _IOWR(GRU_IOCTL_NUM, 6, void *) + +/* For user call_os handling - normally a TLB fault */ +#define GRU_USER_CALL_OS _IOWR(GRU_IOCTL_NUM, 8, void *) + +/* For user unload context */ +#define GRU_USER_UNLOAD_CONTEXT _IOWR(GRU_IOCTL_NUM, 9, void *) + +/* For dumpping GRU chiplet state */ +#define GRU_DUMP_CHIPLET_STATE _IOWR(GRU_IOCTL_NUM, 11, void *) + +/* For getting gseg statistics */ +#define GRU_GET_GSEG_STATISTICS _IOWR(GRU_IOCTL_NUM, 12, void *) + +/* For user TLB flushing (primarily for tests) */ +#define GRU_USER_FLUSH_TLB _IOWR(GRU_IOCTL_NUM, 50, void *) + +/* Get some config options (primarily for tests & emulator) */ +#define GRU_GET_CONFIG_INFO _IOWR(GRU_IOCTL_NUM, 51, void *) + +/* Various kernel self-tests */ +#define GRU_KTEST _IOWR(GRU_IOCTL_NUM, 52, void *) + +#define CONTEXT_WINDOW_BYTES(th) (GRU_GSEG_PAGESIZE * (th)) +#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th)) +#define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1))) + +struct gru_get_gseg_statistics_req { + unsigned long gseg; + struct gru_gseg_statistics stats; +}; + +/* + * Structure used to pass TLB flush parameters to the driver + */ +struct gru_create_context_req { + unsigned long gseg; + unsigned int data_segment_bytes; + unsigned int control_blocks; + unsigned int maximum_thread_count; + unsigned int options; + unsigned char tlb_preload_count; +}; + +/* + * Structure used to pass unload context parameters to the driver + */ +struct gru_unload_context_req { + unsigned long gseg; +}; + +/* + * Structure used to set context options + */ +enum {sco_gseg_owner, sco_cch_req_slice, sco_blade_chiplet}; +struct gru_set_context_option_req { + unsigned long gseg; + int op; + int val0; + long val1; +}; + +/* + * Structure used to pass TLB flush parameters to the driver + */ +struct gru_flush_tlb_req { + unsigned long gseg; + unsigned long vaddr; + size_t len; +}; + +/* + * Structure used to pass TLB flush parameters to the driver + */ +enum {dcs_pid, dcs_gid}; +struct gru_dump_chiplet_state_req { + unsigned int op; + unsigned int gid; + int ctxnum; + char data_opt; + char lock_cch; + char flush_cbrs; + char fill[10]; + pid_t pid; + void *buf; + size_t buflen; + /* ---- output --- */ + unsigned int num_contexts; +}; + +#define GRU_DUMP_MAGIC 0x3474ab6c +struct gru_dump_context_header { + unsigned int magic; + unsigned int gid; + unsigned char ctxnum; + unsigned char cbrcnt; + unsigned char dsrcnt; + pid_t pid; + unsigned long vaddr; + int cch_locked; + unsigned long data[]; +}; + +/* + * GRU configuration info (temp - for testing) + */ +struct gru_config_info { + int cpus; + int blades; + int nodes; + int chiplets; + int fill[16]; +}; + +#endif /* __GRULIB_H__ */ diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c new file mode 100644 index 000000000..e2325e3d0 --- /dev/null +++ b/drivers/misc/sgi-gru/grumain.c @@ -0,0 +1,977 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SN Platform GRU Driver + * + * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. 
+ */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/spinlock.h> +#include <linux/sched.h> +#include <linux/device.h> +#include <linux/list.h> +#include <linux/err.h> +#include <linux/prefetch.h> +#include <asm/uv/uv_hub.h> +#include "gru.h" +#include "grutables.h" +#include "gruhandles.h" + +unsigned long gru_options __read_mostly; + +static struct device_driver gru_driver = { + .name = "gru" +}; + +static struct device gru_device = { + .init_name = "", + .driver = &gru_driver, +}; + +struct device *grudev = &gru_device; + +/* + * Select a gru fault map to be used by the current cpu. Note that + * multiple cpus may be using the same map. + * ZZZ should be inline but did not work on emulator + */ +int gru_cpu_fault_map_id(void) +{ +#ifdef CONFIG_IA64 + return uv_blade_processor_id() % GRU_NUM_TFM; +#else + int cpu = smp_processor_id(); + int id, core; + + core = uv_cpu_core_number(cpu); + id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu); + return id; +#endif +} + +/*--------- ASID Management ------------------------------------------- + * + * Initially, assign asids sequentially from MIN_ASID .. MAX_ASID. + * Once MAX is reached, flush the TLB & start over. However, + * some asids may still be in use. There won't be many (percentage wise) still + * in use. Search active contexts & determine the value of the first + * asid in use ("x"s below). Set "limit" to this value. + * This defines a block of assignable asids. + * + * When "limit" is reached, search forward from limit+1 and determine the + * next block of assignable asids. + * + * Repeat until MAX_ASID is reached, then start over again. + * + * Each time MAX_ASID is reached, increment the asid generation. Since + * the search for in-use asids only checks contexts with GRUs currently + * assigned, asids in some contexts will be missed. Prior to loading + * a context, the asid generation of the GTS asid is rechecked. If it + * doesn't match the current generation, a new asid will be assigned. + * + * 0---------------x------------x---------------------x----| + * ^-next ^-limit ^-MAX_ASID + * + * All asid manipulation & context loading/unloading is protected by the + * gs_lock. + */ + +/* Hit the asid limit. 
Start over */ +static int gru_wrap_asid(struct gru_state *gru) +{ + gru_dbg(grudev, "gid %d\n", gru->gs_gid); + STAT(asid_wrap); + gru->gs_asid_gen++; + return MIN_ASID; +} + +/* Find the next chunk of unused asids */ +static int gru_reset_asid_limit(struct gru_state *gru, int asid) +{ + int i, gid, inuse_asid, limit; + + gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid); + STAT(asid_next); + limit = MAX_ASID; + if (asid >= limit) + asid = gru_wrap_asid(gru); + gru_flush_all_tlb(gru); + gid = gru->gs_gid; +again: + for (i = 0; i < GRU_NUM_CCH; i++) { + if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i])) + continue; + inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid; + gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n", + gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms, + inuse_asid, i); + if (inuse_asid == asid) { + asid += ASID_INC; + if (asid >= limit) { + /* + * empty range: reset the range limit and + * start over + */ + limit = MAX_ASID; + if (asid >= MAX_ASID) + asid = gru_wrap_asid(gru); + goto again; + } + } + + if ((inuse_asid > asid) && (inuse_asid < limit)) + limit = inuse_asid; + } + gru->gs_asid_limit = limit; + gru->gs_asid = asid; + gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid, + asid, limit); + return asid; +} + +/* Assign a new ASID to a thread context. */ +static int gru_assign_asid(struct gru_state *gru) +{ + int asid; + + gru->gs_asid += ASID_INC; + asid = gru->gs_asid; + if (asid >= gru->gs_asid_limit) + asid = gru_reset_asid_limit(gru, asid); + + gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid); + return asid; +} + +/* + * Clear n bits in a word. Return a word indicating the bits that were cleared. + * Optionally, build an array of chars that contain the bit numbers allocated. + */ +static unsigned long reserve_resources(unsigned long *p, int n, int mmax, + char *idx) +{ + unsigned long bits = 0; + int i; + + while (n--) { + i = find_first_bit(p, mmax); + if (i == mmax) + BUG(); + __clear_bit(i, p); + __set_bit(i, &bits); + if (idx) + *idx++ = i; + } + return bits; +} + +unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count, + char *cbmap) +{ + return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU, + cbmap); +} + +unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count, + char *dsmap) +{ + return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU, + dsmap); +} + +static void reserve_gru_resources(struct gru_state *gru, + struct gru_thread_state *gts) +{ + gru->gs_active_contexts++; + gts->ts_cbr_map = + gru_reserve_cb_resources(gru, gts->ts_cbr_au_count, + gts->ts_cbr_idx); + gts->ts_dsr_map = + gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL); +} + +static void free_gru_resources(struct gru_state *gru, + struct gru_thread_state *gts) +{ + gru->gs_active_contexts--; + gru->gs_cbr_map |= gts->ts_cbr_map; + gru->gs_dsr_map |= gts->ts_dsr_map; +} + +/* + * Check if a GRU has sufficient free resources to satisfy an allocation + * request. Note: GRU locks may or may not be held when this is called. If + * not held, recheck after acquiring the appropriate locks. 
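+ *
+ * The optimistic check/recheck idiom, as used by gru_assign_gru_context()
+ * below (cbrs/dsrs stand for the requested allocation unit counts):
+ *
+ *	if (check_gru_resources(gru, cbrs, dsrs, GRU_NUM_CCH)) {
+ *		spin_lock(&gru->gs_lock);
+ *		if (!check_gru_resources(gru, cbrs, dsrs, GRU_NUM_CCH)) {
+ *			spin_unlock(&gru->gs_lock);
+ *			... retry on another chiplet ...
+ *		}
+ *		reserve_gru_resources(gru, gts);
+ *		spin_unlock(&gru->gs_lock);
+ *	}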
+ * + * Returns 1 if sufficient resources, 0 if not + */ +static int check_gru_resources(struct gru_state *gru, int cbr_au_count, + int dsr_au_count, int max_active_contexts) +{ + return hweight64(gru->gs_cbr_map) >= cbr_au_count + && hweight64(gru->gs_dsr_map) >= dsr_au_count + && gru->gs_active_contexts < max_active_contexts; +} + +/* + * TLB manangment requires tracking all GRU chiplets that have loaded a GSEG + * context. + */ +static int gru_load_mm_tracker(struct gru_state *gru, + struct gru_thread_state *gts) +{ + struct gru_mm_struct *gms = gts->ts_gms; + struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid]; + unsigned short ctxbitmap = (1 << gts->ts_ctxnum); + int asid; + + spin_lock(&gms->ms_asid_lock); + asid = asids->mt_asid; + + spin_lock(&gru->gs_asid_lock); + if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen != + gru->gs_asid_gen)) { + asid = gru_assign_asid(gru); + asids->mt_asid = asid; + asids->mt_asid_gen = gru->gs_asid_gen; + STAT(asid_new); + } else { + STAT(asid_reuse); + } + spin_unlock(&gru->gs_asid_lock); + + BUG_ON(asids->mt_ctxbitmap & ctxbitmap); + asids->mt_ctxbitmap |= ctxbitmap; + if (!test_bit(gru->gs_gid, gms->ms_asidmap)) + __set_bit(gru->gs_gid, gms->ms_asidmap); + spin_unlock(&gms->ms_asid_lock); + + gru_dbg(grudev, + "gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n", + gru->gs_gid, gts, gms, gts->ts_ctxnum, asid, + gms->ms_asidmap[0]); + return asid; +} + +static void gru_unload_mm_tracker(struct gru_state *gru, + struct gru_thread_state *gts) +{ + struct gru_mm_struct *gms = gts->ts_gms; + struct gru_mm_tracker *asids; + unsigned short ctxbitmap; + + asids = &gms->ms_asids[gru->gs_gid]; + ctxbitmap = (1 << gts->ts_ctxnum); + spin_lock(&gms->ms_asid_lock); + spin_lock(&gru->gs_asid_lock); + BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap); + asids->mt_ctxbitmap ^= ctxbitmap; + gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n", + gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]); + spin_unlock(&gru->gs_asid_lock); + spin_unlock(&gms->ms_asid_lock); +} + +/* + * Decrement the reference count on a GTS structure. Free the structure + * if the reference count goes to zero. + */ +void gts_drop(struct gru_thread_state *gts) +{ + if (gts && atomic_dec_return(>s->ts_refcnt) == 0) { + if (gts->ts_gms) + gru_drop_mmu_notifier(gts->ts_gms); + kfree(gts); + STAT(gts_free); + } +} + +/* + * Locate the GTS structure for the current thread. + */ +static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data + *vdata, int tsid) +{ + struct gru_thread_state *gts; + + list_for_each_entry(gts, &vdata->vd_head, ts_next) + if (gts->ts_tsid == tsid) + return gts; + return NULL; +} + +/* + * Allocate a thread state structure. 
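+ *
+ * The header and the context save area are allocated as a single block:
+ *
+ *	bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count)
+ *		+ sizeof(struct gru_thread_state);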
+ */ +struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, + int cbr_au_count, int dsr_au_count, + unsigned char tlb_preload_count, int options, int tsid) +{ + struct gru_thread_state *gts; + struct gru_mm_struct *gms; + int bytes; + + bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count); + bytes += sizeof(struct gru_thread_state); + gts = kmalloc(bytes, GFP_KERNEL); + if (!gts) + return ERR_PTR(-ENOMEM); + + STAT(gts_alloc); + memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */ + atomic_set(>s->ts_refcnt, 1); + mutex_init(>s->ts_ctxlock); + gts->ts_cbr_au_count = cbr_au_count; + gts->ts_dsr_au_count = dsr_au_count; + gts->ts_tlb_preload_count = tlb_preload_count; + gts->ts_user_options = options; + gts->ts_user_blade_id = -1; + gts->ts_user_chiplet_id = -1; + gts->ts_tsid = tsid; + gts->ts_ctxnum = NULLCTX; + gts->ts_tlb_int_select = -1; + gts->ts_cch_req_slice = -1; + gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT); + if (vma) { + gts->ts_mm = current->mm; + gts->ts_vma = vma; + gms = gru_register_mmu_notifier(); + if (IS_ERR(gms)) + goto err; + gts->ts_gms = gms; + } + + gru_dbg(grudev, "alloc gts %p\n", gts); + return gts; + +err: + gts_drop(gts); + return ERR_CAST(gms); +} + +/* + * Allocate a vma private data structure. + */ +struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid) +{ + struct gru_vma_data *vdata = NULL; + + vdata = kmalloc(sizeof(*vdata), GFP_KERNEL); + if (!vdata) + return NULL; + + STAT(vdata_alloc); + INIT_LIST_HEAD(&vdata->vd_head); + spin_lock_init(&vdata->vd_lock); + gru_dbg(grudev, "alloc vdata %p\n", vdata); + return vdata; +} + +/* + * Find the thread state structure for the current thread. + */ +struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma, + int tsid) +{ + struct gru_vma_data *vdata = vma->vm_private_data; + struct gru_thread_state *gts; + + spin_lock(&vdata->vd_lock); + gts = gru_find_current_gts_nolock(vdata, tsid); + spin_unlock(&vdata->vd_lock); + gru_dbg(grudev, "vma %p, gts %p\n", vma, gts); + return gts; +} + +/* + * Allocate a new thread state for a GSEG. Note that races may allow + * another thread to race to create a gts. + */ +struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma, + int tsid) +{ + struct gru_vma_data *vdata = vma->vm_private_data; + struct gru_thread_state *gts, *ngts; + + gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, + vdata->vd_dsr_au_count, + vdata->vd_tlb_preload_count, + vdata->vd_user_options, tsid); + if (IS_ERR(gts)) + return gts; + + spin_lock(&vdata->vd_lock); + ngts = gru_find_current_gts_nolock(vdata, tsid); + if (ngts) { + gts_drop(gts); + gts = ngts; + STAT(gts_double_allocate); + } else { + list_add(>s->ts_next, &vdata->vd_head); + } + spin_unlock(&vdata->vd_lock); + gru_dbg(grudev, "vma %p, gts %p\n", vma, gts); + return gts; +} + +/* + * Free the GRU context assigned to the thread state. + */ +static void gru_free_gru_context(struct gru_thread_state *gts) +{ + struct gru_state *gru; + + gru = gts->ts_gru; + gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid); + + spin_lock(&gru->gs_lock); + gru->gs_gts[gts->ts_ctxnum] = NULL; + free_gru_resources(gru, gts); + BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0); + __clear_bit(gts->ts_ctxnum, &gru->gs_context_map); + gts->ts_ctxnum = NULLCTX; + gts->ts_gru = NULL; + gts->ts_blade = -1; + spin_unlock(&gru->gs_lock); + + gts_drop(gts); + STAT(free_context); +} + +/* + * Prefetching cachelines help hardware performance. 
+ * (Strictly a performance enhancement. Not functionally required). + */ +static void prefetch_data(void *p, int num, int stride) +{ + while (num-- > 0) { + prefetchw(p); + p += stride; + } +} + +static inline long gru_copy_handle(void *d, void *s) +{ + memcpy(d, s, GRU_HANDLE_BYTES); + return GRU_HANDLE_BYTES; +} + +static void gru_prefetch_context(void *gseg, void *cb, void *cbe, + unsigned long cbrmap, unsigned long length) +{ + int i, scr; + + prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES, + GRU_CACHE_LINE_BYTES); + + for_each_cbr_in_allocation_map(i, &cbrmap, scr) { + prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES); + prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1, + GRU_CACHE_LINE_BYTES); + cb += GRU_HANDLE_STRIDE; + } +} + +static void gru_load_context_data(void *save, void *grubase, int ctxnum, + unsigned long cbrmap, unsigned long dsrmap, + int data_valid) +{ + void *gseg, *cb, *cbe; + unsigned long length; + int i, scr; + + gseg = grubase + ctxnum * GRU_GSEG_STRIDE; + cb = gseg + GRU_CB_BASE; + cbe = grubase + GRU_CBE_BASE; + length = hweight64(dsrmap) * GRU_DSR_AU_BYTES; + gru_prefetch_context(gseg, cb, cbe, cbrmap, length); + + for_each_cbr_in_allocation_map(i, &cbrmap, scr) { + if (data_valid) { + save += gru_copy_handle(cb, save); + save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, + save); + } else { + memset(cb, 0, GRU_CACHE_LINE_BYTES); + memset(cbe + i * GRU_HANDLE_STRIDE, 0, + GRU_CACHE_LINE_BYTES); + } + /* Flush CBE to hide race in context restart */ + mb(); + gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE); + cb += GRU_HANDLE_STRIDE; + } + + if (data_valid) + memcpy(gseg + GRU_DS_BASE, save, length); + else + memset(gseg + GRU_DS_BASE, 0, length); +} + +static void gru_unload_context_data(void *save, void *grubase, int ctxnum, + unsigned long cbrmap, unsigned long dsrmap) +{ + void *gseg, *cb, *cbe; + unsigned long length; + int i, scr; + + gseg = grubase + ctxnum * GRU_GSEG_STRIDE; + cb = gseg + GRU_CB_BASE; + cbe = grubase + GRU_CBE_BASE; + length = hweight64(dsrmap) * GRU_DSR_AU_BYTES; + + /* CBEs may not be coherent. Flush them from cache */ + for_each_cbr_in_allocation_map(i, &cbrmap, scr) + gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE); + mb(); /* Let the CL flush complete */ + + gru_prefetch_context(gseg, cb, cbe, cbrmap, length); + + for_each_cbr_in_allocation_map(i, &cbrmap, scr) { + save += gru_copy_handle(save, cb); + save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE); + cb += GRU_HANDLE_STRIDE; + } + memcpy(save, gseg + GRU_DS_BASE, length); +} + +void gru_unload_context(struct gru_thread_state *gts, int savestate) +{ + struct gru_state *gru = gts->ts_gru; + struct gru_context_configuration_handle *cch; + int ctxnum = gts->ts_ctxnum; + + if (!is_kernel_context(gts)) + zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE); + cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); + + gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n", + gts, gts->ts_cbr_map, gts->ts_dsr_map); + lock_cch_handle(cch); + if (cch_interrupt_sync(cch)) + BUG(); + + if (!is_kernel_context(gts)) + gru_unload_mm_tracker(gru, gts); + if (savestate) { + gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, + ctxnum, gts->ts_cbr_map, + gts->ts_dsr_map); + gts->ts_data_valid = 1; + } + + if (cch_deallocate(cch)) + BUG(); + unlock_cch_handle(cch); + + gru_free_gru_context(gts); +} + +/* + * Load a GRU context by copying it from the thread data structure in memory + * to the GRU. 
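+ *
+ * The sequence implemented below is: lock_cch_handle(), program the
+ * interrupt and allocation fields of the CCH, cch_allocate(), copy the
+ * saved CBR/DSR state in with gru_load_context_data(), cch_start(),
+ * unlock_cch_handle().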
+ */ +void gru_load_context(struct gru_thread_state *gts) +{ + struct gru_state *gru = gts->ts_gru; + struct gru_context_configuration_handle *cch; + int i, err, asid, ctxnum = gts->ts_ctxnum; + + cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); + lock_cch_handle(cch); + cch->tfm_fault_bit_enable = + (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL + || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); + cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); + if (cch->tlb_int_enable) { + gts->ts_tlb_int_select = gru_cpu_fault_map_id(); + cch->tlb_int_select = gts->ts_tlb_int_select; + } + if (gts->ts_cch_req_slice >= 0) { + cch->req_slice_set_enable = 1; + cch->req_slice = gts->ts_cch_req_slice; + } else { + cch->req_slice_set_enable =0; + } + cch->tfm_done_bit_enable = 0; + cch->dsr_allocation_map = gts->ts_dsr_map; + cch->cbr_allocation_map = gts->ts_cbr_map; + + if (is_kernel_context(gts)) { + cch->unmap_enable = 1; + cch->tfm_done_bit_enable = 1; + cch->cb_int_enable = 1; + cch->tlb_int_select = 0; /* For now, ints go to cpu 0 */ + } else { + cch->unmap_enable = 0; + cch->tfm_done_bit_enable = 0; + cch->cb_int_enable = 0; + asid = gru_load_mm_tracker(gru, gts); + for (i = 0; i < 8; i++) { + cch->asid[i] = asid + i; + cch->sizeavail[i] = gts->ts_sizeavail; + } + } + + err = cch_allocate(cch); + if (err) { + gru_dbg(grudev, + "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n", + err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map); + BUG(); + } + + gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum, + gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid); + + if (cch_start(cch)) + BUG(); + unlock_cch_handle(cch); + + gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n", + gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map, + (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select); +} + +/* + * Update fields in an active CCH: + * - retarget interrupts on local blade + * - update sizeavail mask + */ +int gru_update_cch(struct gru_thread_state *gts) +{ + struct gru_context_configuration_handle *cch; + struct gru_state *gru = gts->ts_gru; + int i, ctxnum = gts->ts_ctxnum, ret = 0; + + cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); + + lock_cch_handle(cch); + if (cch->state == CCHSTATE_ACTIVE) { + if (gru->gs_gts[gts->ts_ctxnum] != gts) + goto exit; + if (cch_interrupt(cch)) + BUG(); + for (i = 0; i < 8; i++) + cch->sizeavail[i] = gts->ts_sizeavail; + gts->ts_tlb_int_select = gru_cpu_fault_map_id(); + cch->tlb_int_select = gru_cpu_fault_map_id(); + cch->tfm_fault_bit_enable = + (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL + || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); + if (cch_start(cch)) + BUG(); + ret = 1; + } +exit: + unlock_cch_handle(cch); + return ret; +} + +/* + * Update CCH tlb interrupt select. Required when all the following is true: + * - task's GRU context is loaded into a GRU + * - task is using interrupt notification for TLB faults + * - task has migrated to a different cpu on the same blade where + * it was previously running. + */ +static int gru_retarget_intr(struct gru_thread_state *gts) +{ + if (gts->ts_tlb_int_select < 0 + || gts->ts_tlb_int_select == gru_cpu_fault_map_id()) + return 0; + + gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select, + gru_cpu_fault_map_id()); + return gru_update_cch(gts); +} + +/* + * Check if a GRU context is allowed to use a specific chiplet. By default + * a context is assigned to any blade-local chiplet. However, users can + * override this. 
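+ * (A ts_user_blade_id/ts_user_chiplet_id of -1 means "no preference":
+ * the current blade is assumed and any chiplet on it is acceptable.)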
+ * Returns 1 if assignment allowed, 0 otherwise + */ +static int gru_check_chiplet_assignment(struct gru_state *gru, + struct gru_thread_state *gts) +{ + int blade_id; + int chiplet_id; + + blade_id = gts->ts_user_blade_id; + if (blade_id < 0) + blade_id = uv_numa_blade_id(); + + chiplet_id = gts->ts_user_chiplet_id; + return gru->gs_blade_id == blade_id && + (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id); +} + +/* + * Unload the gru context if it is not assigned to the correct blade or + * chiplet. Misassignment can occur if the process migrates to a different + * blade or if the user changes the selected blade/chiplet. + */ +int gru_check_context_placement(struct gru_thread_state *gts) +{ + struct gru_state *gru; + int ret = 0; + + /* + * If the current task is the context owner, verify that the + * context is correctly placed. This test is skipped for non-owner + * references. Pthread apps use non-owner references to the CBRs. + */ + gru = gts->ts_gru; + /* + * If gru or gts->ts_tgid_owner isn't initialized properly, return + * success to indicate that the caller does not need to unload the + * gru context.The caller is responsible for their inspection and + * reinitialization if needed. + */ + if (!gru || gts->ts_tgid_owner != current->tgid) + return ret; + + if (!gru_check_chiplet_assignment(gru, gts)) { + STAT(check_context_unload); + ret = -EINVAL; + } else if (gru_retarget_intr(gts)) { + STAT(check_context_retarget_intr); + } + + return ret; +} + + +/* + * Insufficient GRU resources available on the local blade. Steal a context from + * a process. This is a hack until a _real_ resource scheduler is written.... + */ +#define next_ctxnum(n) ((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0) +#define next_gru(b, g) (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \ + ((g)+1) : &(b)->bs_grus[0]) + +static int is_gts_stealable(struct gru_thread_state *gts, + struct gru_blade_state *bs) +{ + if (is_kernel_context(gts)) + return down_write_trylock(&bs->bs_kgts_sema); + else + return mutex_trylock(>s->ts_ctxlock); +} + +static void gts_stolen(struct gru_thread_state *gts, + struct gru_blade_state *bs) +{ + if (is_kernel_context(gts)) { + up_write(&bs->bs_kgts_sema); + STAT(steal_kernel_context); + } else { + mutex_unlock(>s->ts_ctxlock); + STAT(steal_user_context); + } +} + +void gru_steal_context(struct gru_thread_state *gts) +{ + struct gru_blade_state *blade; + struct gru_state *gru, *gru0; + struct gru_thread_state *ngts = NULL; + int ctxnum, ctxnum0, flag = 0, cbr, dsr; + int blade_id; + + blade_id = gts->ts_user_blade_id; + if (blade_id < 0) + blade_id = uv_numa_blade_id(); + cbr = gts->ts_cbr_au_count; + dsr = gts->ts_dsr_au_count; + + blade = gru_base[blade_id]; + spin_lock(&blade->bs_lock); + + ctxnum = next_ctxnum(blade->bs_lru_ctxnum); + gru = blade->bs_lru_gru; + if (ctxnum == 0) + gru = next_gru(blade, gru); + blade->bs_lru_gru = gru; + blade->bs_lru_ctxnum = ctxnum; + ctxnum0 = ctxnum; + gru0 = gru; + while (1) { + if (gru_check_chiplet_assignment(gru, gts)) { + if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH)) + break; + spin_lock(&gru->gs_lock); + for (; ctxnum < GRU_NUM_CCH; ctxnum++) { + if (flag && gru == gru0 && ctxnum == ctxnum0) + break; + ngts = gru->gs_gts[ctxnum]; + /* + * We are grabbing locks out of order, so trylock is + * needed. GTSs are usually not locked, so the odds of + * success are high. If trylock fails, try to steal a + * different GSEG. 
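+ * (The usual order, e.g. on the fault path, is the gts lock first,
+ * then gs_lock; here gs_lock is already held, hence the trylock.)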
+ */ + if (ngts && is_gts_stealable(ngts, blade)) + break; + ngts = NULL; + } + spin_unlock(&gru->gs_lock); + if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0)) + break; + } + if (flag && gru == gru0) + break; + flag = 1; + ctxnum = 0; + gru = next_gru(blade, gru); + } + spin_unlock(&blade->bs_lock); + + if (ngts) { + gts->ustats.context_stolen++; + ngts->ts_steal_jiffies = jiffies; + gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1); + gts_stolen(ngts, blade); + } else { + STAT(steal_context_failed); + } + gru_dbg(grudev, + "stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;" + " avail cb %ld, ds %ld\n", + gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map), + hweight64(gru->gs_dsr_map)); +} + +/* + * Assign a gru context. + */ +static int gru_assign_context_number(struct gru_state *gru) +{ + int ctxnum; + + ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH); + __set_bit(ctxnum, &gru->gs_context_map); + return ctxnum; +} + +/* + * Scan the GRUs on the local blade & assign a GRU context. + */ +struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts) +{ + struct gru_state *gru, *grux; + int i, max_active_contexts; + int blade_id = gts->ts_user_blade_id; + + if (blade_id < 0) + blade_id = uv_numa_blade_id(); +again: + gru = NULL; + max_active_contexts = GRU_NUM_CCH; + for_each_gru_on_blade(grux, blade_id, i) { + if (!gru_check_chiplet_assignment(grux, gts)) + continue; + if (check_gru_resources(grux, gts->ts_cbr_au_count, + gts->ts_dsr_au_count, + max_active_contexts)) { + gru = grux; + max_active_contexts = grux->gs_active_contexts; + if (max_active_contexts == 0) + break; + } + } + + if (gru) { + spin_lock(&gru->gs_lock); + if (!check_gru_resources(gru, gts->ts_cbr_au_count, + gts->ts_dsr_au_count, GRU_NUM_CCH)) { + spin_unlock(&gru->gs_lock); + goto again; + } + reserve_gru_resources(gru, gts); + gts->ts_gru = gru; + gts->ts_blade = gru->gs_blade_id; + gts->ts_ctxnum = gru_assign_context_number(gru); + atomic_inc(>s->ts_refcnt); + gru->gs_gts[gts->ts_ctxnum] = gts; + spin_unlock(&gru->gs_lock); + + STAT(assign_context); + gru_dbg(grudev, + "gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n", + gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts, + gts->ts_gru->gs_gid, gts->ts_ctxnum, + gts->ts_cbr_au_count, gts->ts_dsr_au_count); + } else { + gru_dbg(grudev, "failed to allocate a GTS %s\n", ""); + STAT(assign_context_failed); + } + + return gru; +} + +/* + * gru_nopage + * + * Map the user's GRU segment + * + * Note: gru segments alway mmaped on GRU_GSEG_PAGESIZE boundaries. 
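+ *
+ * Flow of the handler below: look up the gts for the faulting address;
+ * if no GRU is assigned, try to assign one (sleeping briefly and, once
+ * GRU_STEAL_DELAY has passed since the last steal, stealing a context
+ * if none is free), then map the gseg with remap_pfn_range() and return
+ * VM_FAULT_NOPAGE.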
+ */ +vm_fault_t gru_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct gru_thread_state *gts; + unsigned long paddr, vaddr; + unsigned long expires; + + vaddr = vmf->address; + gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n", + vma, vaddr, GSEG_BASE(vaddr)); + STAT(nopfn); + + /* The following check ensures vaddr is a valid address in the VMA */ + gts = gru_find_thread_state(vma, TSID(vaddr, vma)); + if (!gts) + return VM_FAULT_SIGBUS; + +again: + mutex_lock(>s->ts_ctxlock); + preempt_disable(); + + if (gru_check_context_placement(gts)) { + preempt_enable(); + mutex_unlock(>s->ts_ctxlock); + gru_unload_context(gts, 1); + return VM_FAULT_NOPAGE; + } + + if (!gts->ts_gru) { + STAT(load_user_context); + if (!gru_assign_gru_context(gts)) { + preempt_enable(); + mutex_unlock(>s->ts_ctxlock); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */ + expires = gts->ts_steal_jiffies + GRU_STEAL_DELAY; + if (time_before(expires, jiffies)) + gru_steal_context(gts); + goto again; + } + gru_load_context(gts); + paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum); + remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1), + paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE, + vma->vm_page_prot); + } + + preempt_enable(); + mutex_unlock(>s->ts_ctxlock); + + return VM_FAULT_NOPAGE; +} + diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c new file mode 100644 index 000000000..97b8b38ab --- /dev/null +++ b/drivers/misc/sgi-gru/gruprocfs.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SN Platform GRU Driver + * + * PROC INTERFACES + * + * This file supports the /proc interfaces for the GRU driver + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. 
+ */ + +#include <linux/proc_fs.h> +#include <linux/device.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> +#include "gru.h" +#include "grulib.h" +#include "grutables.h" + +#define printstat(s, f) printstat_val(s, &gru_stats.f, #f) + +static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id) +{ + unsigned long val = atomic_long_read(v); + + seq_printf(s, "%16lu %s\n", val, id); +} + +static int statistics_show(struct seq_file *s, void *p) +{ + printstat(s, vdata_alloc); + printstat(s, vdata_free); + printstat(s, gts_alloc); + printstat(s, gts_free); + printstat(s, gms_alloc); + printstat(s, gms_free); + printstat(s, gts_double_allocate); + printstat(s, assign_context); + printstat(s, assign_context_failed); + printstat(s, free_context); + printstat(s, load_user_context); + printstat(s, load_kernel_context); + printstat(s, lock_kernel_context); + printstat(s, unlock_kernel_context); + printstat(s, steal_user_context); + printstat(s, steal_kernel_context); + printstat(s, steal_context_failed); + printstat(s, nopfn); + printstat(s, asid_new); + printstat(s, asid_next); + printstat(s, asid_wrap); + printstat(s, asid_reuse); + printstat(s, intr); + printstat(s, intr_cbr); + printstat(s, intr_tfh); + printstat(s, intr_spurious); + printstat(s, intr_mm_lock_failed); + printstat(s, call_os); + printstat(s, call_os_wait_queue); + printstat(s, user_flush_tlb); + printstat(s, user_unload_context); + printstat(s, user_exception); + printstat(s, set_context_option); + printstat(s, check_context_retarget_intr); + printstat(s, check_context_unload); + printstat(s, tlb_dropin); + printstat(s, tlb_preload_page); + printstat(s, tlb_dropin_fail_no_asid); + printstat(s, tlb_dropin_fail_upm); + printstat(s, tlb_dropin_fail_invalid); + printstat(s, tlb_dropin_fail_range_active); + printstat(s, tlb_dropin_fail_idle); + printstat(s, tlb_dropin_fail_fmm); + printstat(s, tlb_dropin_fail_no_exception); + printstat(s, tfh_stale_on_fault); + printstat(s, mmu_invalidate_range); + printstat(s, mmu_invalidate_page); + printstat(s, flush_tlb); + printstat(s, flush_tlb_gru); + printstat(s, flush_tlb_gru_tgh); + printstat(s, flush_tlb_gru_zero_asid); + printstat(s, copy_gpa); + printstat(s, read_gpa); + printstat(s, mesq_receive); + printstat(s, mesq_receive_none); + printstat(s, mesq_send); + printstat(s, mesq_send_failed); + printstat(s, mesq_noop); + printstat(s, mesq_send_unexpected_error); + printstat(s, mesq_send_lb_overflow); + printstat(s, mesq_send_qlimit_reached); + printstat(s, mesq_send_amo_nacked); + printstat(s, mesq_send_put_nacked); + printstat(s, mesq_qf_locked); + printstat(s, mesq_qf_noop_not_full); + printstat(s, mesq_qf_switch_head_failed); + printstat(s, mesq_qf_unexpected_error); + printstat(s, mesq_noop_unexpected_error); + printstat(s, mesq_noop_lb_overflow); + printstat(s, mesq_noop_qlimit_reached); + printstat(s, mesq_noop_amo_nacked); + printstat(s, mesq_noop_put_nacked); + printstat(s, mesq_noop_page_overflow); + return 0; +} + +static ssize_t statistics_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *data) +{ + memset(&gru_stats, 0, sizeof(gru_stats)); + return count; +} + +static int mcs_statistics_show(struct seq_file *s, void *p) +{ + int op; + unsigned long total, count, max; + static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt", + "cch_interrupt_sync", "cch_deallocate", "tfh_write_only", + "tfh_write_restart", "tgh_invalidate"}; + + seq_puts(s, "#id count aver-clks max-clks\n"); + for (op = 0; op < mcsop_last; op++) { + 
count = atomic_long_read(&mcs_op_statistics[op].count);
+		total = atomic_long_read(&mcs_op_statistics[op].total);
+		max = mcs_op_statistics[op].max;
+		seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
+			   count ? total / count : 0, max);
+	}
+	return 0;
+}
+
+static ssize_t mcs_statistics_write(struct file *file,
+			const char __user *userbuf, size_t count, loff_t *data)
+{
+	memset(mcs_op_statistics, 0, sizeof(mcs_op_statistics));
+	return count;
+}
+
+static int options_show(struct seq_file *s, void *p)
+{
+	seq_printf(s, "#bitmask: 1=trace, 2=statistics\n");
+	seq_printf(s, "0x%lx\n", gru_options);
+	return 0;
+}
+
+static ssize_t options_write(struct file *file, const char __user *userbuf,
+			     size_t count, loff_t *data)
+{
+	int ret;
+
+	ret = kstrtoul_from_user(userbuf, count, 0, &gru_options);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static int cch_seq_show(struct seq_file *file, void *data)
+{
+	long gid = *(long *)data;
+	int i;
+	struct gru_state *gru = GID_TO_GRU(gid);
+	struct gru_thread_state *ts;
+	const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
+
+	if (gid == 0)
+		seq_puts(file, "#  gid  bid  ctx#   asid      pid  cbrs dsbytes mode\n");
+	if (gru)
+		for (i = 0; i < GRU_NUM_CCH; i++) {
+			ts = gru->gs_gts[i];
+			if (!ts)
+				continue;
+			seq_printf(file, " %5d%5d%6d%7d%9d%6d%8d%8s\n",
+				   gru->gs_gid, gru->gs_blade_id, i,
+				   is_kernel_context(ts) ? 0 : ts->ts_gms->ms_asids[gid].mt_asid,
+				   is_kernel_context(ts) ? 0 : ts->ts_tgid_owner,
+				   ts->ts_cbr_au_count * GRU_CBR_AU_SIZE,
+				   ts->ts_dsr_au_count * GRU_DSR_AU_BYTES,
+				   mode[ts->ts_user_options &
+					GRU_OPT_MISS_MASK]);
+		}
+
+	return 0;
+}
+
+static int gru_seq_show(struct seq_file *file, void *data)
+{
+	long gid = *(long *)data, ctxfree, cbrfree, dsrfree;
+	struct gru_state *gru = GID_TO_GRU(gid);
+
+	if (gid == 0) {
+		seq_puts(file, "#  gid  nid    ctx   cbr   dsr     ctx   cbr   dsr\n");
+		seq_puts(file, "#           busy  busy  busy    free  free  free\n");
+	}
+	if (gru) {
+		ctxfree = GRU_NUM_CCH - gru->gs_active_contexts;
+		cbrfree = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
+		dsrfree = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
+		seq_printf(file, " %5d%5d%7ld%6ld%6ld%8ld%6ld%6ld\n",
+			   gru->gs_gid, gru->gs_blade_id, GRU_NUM_CCH - ctxfree,
+			   GRU_NUM_CBE - cbrfree, GRU_NUM_DSR_BYTES - dsrfree,
+			   ctxfree, cbrfree, dsrfree);
+	}
+
+	return 0;
+}
+
+static void seq_stop(struct seq_file *file, void *data)
+{
+}
+
+static void *seq_start(struct seq_file *file, loff_t *gid)
+{
+	if (*gid < gru_max_gids)
+		return gid;
+	return NULL;
+}
+
+static void *seq_next(struct seq_file *file, void *data, loff_t *gid)
+{
+	(*gid)++;
+	if (*gid < gru_max_gids)
+		return gid;
+	return NULL;
+}
+
+static const struct seq_operations cch_seq_ops = {
+	.start	= seq_start,
+	.next	= seq_next,
+	.stop	= seq_stop,
+	.show	= cch_seq_show
+};
+
+static const struct seq_operations gru_seq_ops = {
+	.start	= seq_start,
+	.next	= seq_next,
+	.stop	= seq_stop,
+	.show	= gru_seq_show
+};
+
+static int statistics_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, statistics_show, NULL);
+}
+
+static int mcs_statistics_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mcs_statistics_show, NULL);
+}
+
+static int options_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, options_show, NULL);
+}
+
+/* *INDENT-OFF* */
+static const struct proc_ops statistics_proc_ops = {
+	.proc_open	= statistics_open,
+	.proc_read	= seq_read,
+	.proc_write	= statistics_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+};
+
+static const struct proc_ops mcs_statistics_proc_ops = {
+	.proc_open	= mcs_statistics_open,
+	.proc_read	= seq_read,
+	.proc_write	= mcs_statistics_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+};
+
+static const struct proc_ops options_proc_ops = {
+	.proc_open	= options_open,
+	.proc_read	= seq_read,
+	.proc_write	= options_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+};
+
+static struct proc_dir_entry *proc_gru __read_mostly;
+
+int gru_proc_init(void)
+{
+	proc_gru = proc_mkdir("sgi_uv/gru", NULL);
+	if (!proc_gru)
+		return -1;
+	if (!proc_create("statistics", 0644, proc_gru, &statistics_proc_ops))
+		goto err;
+	if (!proc_create("mcs_statistics", 0644, proc_gru, &mcs_statistics_proc_ops))
+		goto err;
+	if (!proc_create("debug_options", 0644, proc_gru, &options_proc_ops))
+		goto err;
+	if (!proc_create_seq("cch_status", 0444, proc_gru, &cch_seq_ops))
+		goto err;
+	if (!proc_create_seq("gru_status", 0444, proc_gru, &gru_seq_ops))
+		goto err;
+	return 0;
+err:
+	remove_proc_subtree("sgi_uv/gru", NULL);
+	return -1;
+}
+
+void gru_proc_exit(void)
+{
+	remove_proc_subtree("sgi_uv/gru", NULL);
+}
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
new file mode 100644
index 000000000..10f0a083b
--- /dev/null
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -0,0 +1,664 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SN Platform GRU Driver
+ *
+ *            GRU DRIVER TABLES, MACROS, externs, etc
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#ifndef __GRUTABLES_H__
+#define __GRUTABLES_H__
+
+/*
+ * GRU Chiplet:
+ *	The GRU is a user addressable memory accelerator. It provides
+ * several forms of load, store, memset, bcopy instructions. In addition, it
+ * contains special instructions for AMOs, sending messages to message
+ * queues, etc.
+ *
+ *	The GRU is an integral part of the node controller. It connects
+ * directly to the cpu socket. In its current implementation, there are 2
+ * GRU chiplets in the node controller on each blade (~node).
+ *
+ *	The entire GRU memory space is fully coherent and cacheable by the cpus.
+ *
+ *	Each GRU chiplet has a physical memory map that looks like the following:
+ *
+ *		+-----------------+
+ *		|/////////////////|
+ *		|/////////////////|
+ *		|/////////////////|
+ *		|/////////////////|
+ *		|/////////////////|
+ *		|/////////////////|
+ *		|/////////////////|
+ *		|/////////////////|
+ *		+-----------------+
+ *		|  system control |
+ *		+-----------------+        _______ +-------------+
+ *		|/////////////////|       /        |             |
+ *		|/////////////////|      /         |             |
+ *		|/////////////////|     /          | instructions|
+ *		|/////////////////|    /           |             |
+ *		|/////////////////|   /            |             |
+ *		|/////////////////|  /             |-------------|
+ *		|/////////////////| /              |             |
+ *		+-----------------+                |             |
+ *		|   context 15    |                |    data     |
+ *		+-----------------+                |             |
+ *		|     ......      | \              |             |
+ *		+-----------------+  \____________ +-------------+
+ *		|    context 1    |
+ *		+-----------------+
+ *		|    context 0    |
+ *		+-----------------+
+ *
+ * Each of the "contexts" is a chunk of memory that can be mmaped into user
+ * space. The context consists of 2 parts:
+ *
+ *	- an instruction space that can be directly accessed by the user
+ *	  to issue GRU instructions and to check instruction status.
+ *
+ *	- a data area that acts as normal RAM.
+ *
+ * User instructions contain virtual addresses of data to be accessed by the
+ * GRU.
The GRU contains a TLB that is used to convert these user virtual + * addresses to physical addresses. + * + * The "system control" area of the GRU chiplet is used by the kernel driver + * to manage user contexts and to perform functions such as TLB dropin and + * purging. + * + * One context may be reserved for the kernel and used for cross-partition + * communication. The GRU will also be used to asynchronously zero out + * large blocks of memory (not currently implemented). + * + * + * Tables: + * + * VDATA-VMA Data - Holds a few parameters. Head of linked list of + * GTS tables for threads using the GSEG + * GTS - Gru Thread State - contains info for managing a GSEG context. A + * GTS is allocated for each thread accessing a + * GSEG. + * GTD - GRU Thread Data - contains shadow copy of GRU data when GSEG is + * not loaded into a GRU + * GMS - GRU Memory Struct - Used to manage TLB shootdowns. Tracks GRUs + * where a GSEG has been loaded. Similar to + * an mm_struct but for GRU. + * + * GS - GRU State - Used to manage the state of a GRU chiplet + * BS - Blade State - Used to manage state of all GRU chiplets + * on a blade + * + * + * Normal task tables for task using GRU. + * - 2 threads in process + * - 2 GSEGs open in process + * - GSEG1 is being used by both threads + * - GSEG2 is used only by thread 2 + * + * task -->| + * task ---+---> mm ->------ (notifier) -------+-> gms + * | | + * |--> vma -> vdata ---> gts--->| GSEG1 (thread1) + * | | | + * | +-> gts--->| GSEG1 (thread2) + * | | + * |--> vma -> vdata ---> gts--->| GSEG2 (thread2) + * . + * . + * + * GSEGs are marked DONTCOPY on fork + * + * At open + * file.private_data -> NULL + * + * At mmap, + * vma -> vdata + * + * After gseg reference + * vma -> vdata ->gts + * + * After fork + * parent + * vma -> vdata -> gts + * child + * (vma is not copied) + * + */ + +#include <linux/rmap.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/wait.h> +#include <linux/mmu_notifier.h> +#include <linux/mm_types.h> +#include "gru.h" +#include "grulib.h" +#include "gruhandles.h" + +extern struct gru_stats_s gru_stats; +extern struct gru_blade_state *gru_base[]; +extern unsigned long gru_start_paddr, gru_end_paddr; +extern void *gru_start_vaddr; +extern unsigned int gru_max_gids; + +#define GRU_MAX_BLADES MAX_NUMNODES +#define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE) + +#define GRU_DRIVER_ID_STR "SGI GRU Device Driver" +#define GRU_DRIVER_VERSION_STR "0.85" + +/* + * GRU statistics. 
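+ * Each counter below is bumped by the STAT() macro (only when OPT_STATS
+ * is set in gru_options) and is reported as one line of
+ * /proc/sgi_uv/gru/statistics; writing anything to that file zeroes
+ * every counter.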
+ */
+struct gru_stats_s {
+	atomic_long_t vdata_alloc;
+	atomic_long_t vdata_free;
+	atomic_long_t gts_alloc;
+	atomic_long_t gts_free;
+	atomic_long_t gms_alloc;
+	atomic_long_t gms_free;
+	atomic_long_t gts_double_allocate;
+	atomic_long_t assign_context;
+	atomic_long_t assign_context_failed;
+	atomic_long_t free_context;
+	atomic_long_t load_user_context;
+	atomic_long_t load_kernel_context;
+	atomic_long_t lock_kernel_context;
+	atomic_long_t unlock_kernel_context;
+	atomic_long_t steal_user_context;
+	atomic_long_t steal_kernel_context;
+	atomic_long_t steal_context_failed;
+	atomic_long_t nopfn;
+	atomic_long_t asid_new;
+	atomic_long_t asid_next;
+	atomic_long_t asid_wrap;
+	atomic_long_t asid_reuse;
+	atomic_long_t intr;
+	atomic_long_t intr_cbr;
+	atomic_long_t intr_tfh;
+	atomic_long_t intr_spurious;
+	atomic_long_t intr_mm_lock_failed;
+	atomic_long_t call_os;
+	atomic_long_t call_os_wait_queue;
+	atomic_long_t user_flush_tlb;
+	atomic_long_t user_unload_context;
+	atomic_long_t user_exception;
+	atomic_long_t set_context_option;
+	atomic_long_t check_context_retarget_intr;
+	atomic_long_t check_context_unload;
+	atomic_long_t tlb_dropin;
+	atomic_long_t tlb_preload_page;
+	atomic_long_t tlb_dropin_fail_no_asid;
+	atomic_long_t tlb_dropin_fail_upm;
+	atomic_long_t tlb_dropin_fail_invalid;
+	atomic_long_t tlb_dropin_fail_range_active;
+	atomic_long_t tlb_dropin_fail_idle;
+	atomic_long_t tlb_dropin_fail_fmm;
+	atomic_long_t tlb_dropin_fail_no_exception;
+	atomic_long_t tfh_stale_on_fault;
+	atomic_long_t mmu_invalidate_range;
+	atomic_long_t mmu_invalidate_page;
+	atomic_long_t flush_tlb;
+	atomic_long_t flush_tlb_gru;
+	atomic_long_t flush_tlb_gru_tgh;
+	atomic_long_t flush_tlb_gru_zero_asid;
+
+	atomic_long_t copy_gpa;
+	atomic_long_t read_gpa;
+
+	atomic_long_t mesq_receive;
+	atomic_long_t mesq_receive_none;
+	atomic_long_t mesq_send;
+	atomic_long_t mesq_send_failed;
+	atomic_long_t mesq_noop;
+	atomic_long_t mesq_send_unexpected_error;
+	atomic_long_t mesq_send_lb_overflow;
+	atomic_long_t mesq_send_qlimit_reached;
+	atomic_long_t mesq_send_amo_nacked;
+	atomic_long_t mesq_send_put_nacked;
+	atomic_long_t mesq_page_overflow;
+	atomic_long_t mesq_qf_locked;
+	atomic_long_t mesq_qf_noop_not_full;
+	atomic_long_t mesq_qf_switch_head_failed;
+	atomic_long_t mesq_qf_unexpected_error;
+	atomic_long_t mesq_noop_unexpected_error;
+	atomic_long_t mesq_noop_lb_overflow;
+	atomic_long_t mesq_noop_qlimit_reached;
+	atomic_long_t mesq_noop_amo_nacked;
+	atomic_long_t mesq_noop_put_nacked;
+	atomic_long_t mesq_noop_page_overflow;
+
+};
+
+enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
+	cchop_deallocate, tfhop_write_only, tfhop_write_restart,
+	tghop_invalidate, mcsop_last};
+
+struct mcs_op_statistic {
+	atomic_long_t	count;
+	atomic_long_t	total;
+	unsigned long	max;
+};
+
+extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
+
+#define OPT_DPRINT	1
+#define OPT_STATS	2
+
+
+#define IRQ_GRU			110	/* Starting IRQ number for interrupts */
+
+/* Delay in jiffies between attempts to assign a GRU context */
+#define GRU_ASSIGN_DELAY	((HZ * 20) / 1000)
+
+/*
+ * If a process has its context stolen, min delay in jiffies before trying to
+ * steal a context from another process.
+ */
+#define GRU_STEAL_DELAY		((HZ * 200) / 1000)
+
+#define STAT(id)	do {						\
+				if (gru_options & OPT_STATS)		\
+					atomic_long_inc(&gru_stats.id);	\
+			} while (0)
+
+#ifdef CONFIG_SGI_GRU_DEBUG
+#define gru_dbg(dev, fmt, x...)						\
+	do {								\
+		if (gru_options & OPT_DPRINT)				\
+			printk(KERN_DEBUG "GRU:%d %s: " fmt, smp_processor_id(), __func__, x);\
+	} while (0)
+#else
+#define gru_dbg(x...)
+#endif
+
+/*-----------------------------------------------------------------------------
+ * ASID management
+ */
+#define MAX_ASID	0xfffff0
+#define MIN_ASID	8
+#define ASID_INC	8	/* number of regions */
+
+/* Generate a GRU asid value from a GRU base asid & a virtual address. */
+#define VADDR_HI_BIT		64
+#define GRUREGION(addr)		((addr) >> (VADDR_HI_BIT - 3) & 3)
+#define GRUASID(asid, addr)	((asid) + GRUREGION(addr))
+
+/*------------------------------------------------------------------------------
+ *  File & VMS Tables
+ */
+
+struct gru_state;
+
+/*
+ * This structure is pointed to from the mm_struct via the notifier pointer.
+ * There is one of these per address space.
+ */
+struct gru_mm_tracker {				/* pack to reduce size */
+	unsigned int		mt_asid_gen:24;	/* ASID wrap count */
+	unsigned int		mt_asid:24;	/* current base ASID for gru */
+	unsigned short		mt_ctxbitmap:16;/* bitmap of contexts using
+						   asid */
+} __attribute__ ((packed));
+
+struct gru_mm_struct {
+	struct mmu_notifier	ms_notifier;
+	spinlock_t		ms_asid_lock;	/* protects ASID assignment */
+	atomic_t		ms_range_active;/* num range_invals active */
+	wait_queue_head_t	ms_wait_queue;
+	DECLARE_BITMAP(ms_asidmap, GRU_MAX_GRUS);
+	struct gru_mm_tracker	ms_asids[GRU_MAX_GRUS];
+};
+
+/*
+ * One of these structures is allocated when a GSEG is mmaped. The
+ * structure is pointed to by the vma->vm_private_data field in the vma struct.
+ */
+struct gru_vma_data {
+	spinlock_t		vd_lock;	/* Serialize access to vma */
+	struct list_head	vd_head;	/* head of linked list of gts */
+	long			vd_user_options;/* misc user option flags */
+	int			vd_cbr_au_count;
+	int			vd_dsr_au_count;
+	unsigned char		vd_tlb_preload_count;
+};
+
+/*
+ * One of these is allocated for each thread accessing a mmaped GRU. A linked
+ * list of these structures is hung off the struct gru_vma_data in the mm_struct.
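+ * A GTS is reference counted via ts_refcnt; see gts_drop().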
+ */
+struct gru_thread_state {
+	struct list_head	ts_next;	/* list - head at vma-private */
+	struct mutex		ts_ctxlock;	/* load/unload CTX lock */
+	struct mm_struct	*ts_mm;		/* mm currently mapped to
+						   context */
+	struct vm_area_struct	*ts_vma;	/* vma of GRU context */
+	struct gru_state	*ts_gru;	/* GRU where the context is
+						   loaded */
+	struct gru_mm_struct	*ts_gms;	/* asid & ioproc struct */
+	unsigned char		ts_tlb_preload_count; /* TLB preload pages */
+	unsigned long		ts_cbr_map;	/* map of allocated CBRs */
+	unsigned long		ts_dsr_map;	/* map of allocated DATA
+						   resources */
+	unsigned long		ts_steal_jiffies;/* jiffies when context last
+						    stolen */
+	long			ts_user_options;/* misc user option flags */
+	pid_t			ts_tgid_owner;	/* task that is using the
+						   context - for migration */
+	short			ts_user_blade_id;/* user selected blade */
+	char			ts_user_chiplet_id;/* user selected chiplet */
+	unsigned short		ts_sizeavail;	/* Pagesizes in use */
+	int			ts_tsid;	/* thread that owns the
+						   structure */
+	int			ts_tlb_int_select;/* target cpu if interrupts
+						     enabled */
+	int			ts_ctxnum;	/* context number where the
+						   context is loaded */
+	atomic_t		ts_refcnt;	/* reference count GTS */
+	unsigned char		ts_dsr_au_count;/* Number of DSR resources
+						   required for context */
+	unsigned char		ts_cbr_au_count;/* Number of CBR resources
+						   required for context */
+	char			ts_cch_req_slice;/* CCH packet slice */
+	char			ts_blade;	/* If >= 0, migrate context if
+						   ref from different blade */
+	char			ts_force_cch_reload;
+	char			ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
+							  allocated CB */
+	int			ts_data_valid;	/* Indicates if ts_gdata has
+						   valid data */
+	struct gru_gseg_statistics ustats;	/* User statistics */
+	unsigned long		ts_gdata[];	/* save area for GRU data (CB,
+						   DS, CBE) */
+};
+
+/*
+ * Threaded programs actually allocate an array of GSEGs when a context is
+ * created. Each thread uses a separate GSEG. TSID is the index into the GSEG
+ * array.
+ */
+#define TSID(a, v)		(((a) - (v)->vm_start) / GRU_GSEG_PAGESIZE)
+#define UGRUADDR(gts)		((gts)->ts_vma->vm_start +		\
+					(gts)->ts_tsid * GRU_GSEG_PAGESIZE)
+
+#define NULLCTX			(-1)	/* if context not loaded into GRU */
+
+/*-----------------------------------------------------------------------------
+ *  GRU State Tables
+ */
+
+/*
+ * One of these exists for each GRU chiplet.
+ */
+struct gru_state {
+	struct gru_blade_state	*gs_blade;		/* GRU state for entire
+							   blade */
+	unsigned long		gs_gru_base_paddr;	/* Physical address of
+							   gru segments (64) */
+	void			*gs_gru_base_vaddr;	/* Virtual address of
+							   gru segments (64) */
+	unsigned short		gs_gid;			/* unique GRU number */
+	unsigned short		gs_blade_id;		/* blade of GRU */
+	unsigned char		gs_chiplet_id;		/* blade chiplet of GRU */
+	unsigned char		gs_tgh_local_shift;	/* used to pick TGH for
+							   local flush */
+	unsigned char		gs_tgh_first_remote;	/* starting TGH# for
+							   remote flush */
+	spinlock_t		gs_asid_lock;		/* lock used for
+							   assigning asids */
+	spinlock_t		gs_lock;		/* lock used for
+							   assigning contexts */
+
+	/* -- the following are protected by the gs_asid_lock spinlock ---- */
+	unsigned int		gs_asid;		/* Next available ASID */
+	unsigned int		gs_asid_limit;		/* Limit of available
+							   ASIDs */
+	unsigned int		gs_asid_gen;		/* asid generation.
+ Inc on wrap */ + + /* --- the following fields are protected by the gs_lock spinlock --- */ + unsigned long gs_context_map; /* bitmap to manage + contexts in use */ + unsigned long gs_cbr_map; /* bitmap to manage CB + resources */ + unsigned long gs_dsr_map; /* bitmap used to manage + DATA resources */ + unsigned int gs_reserved_cbrs; /* Number of kernel- + reserved cbrs */ + unsigned int gs_reserved_dsr_bytes; /* Bytes of kernel- + reserved dsrs */ + unsigned short gs_active_contexts; /* number of contexts + in use */ + struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using + the context */ + int gs_irq[GRU_NUM_TFM]; /* Interrupt irqs */ +}; + +/* + * This structure contains the GRU state for all the GRUs on a blade. + */ +struct gru_blade_state { + void *kernel_cb; /* First kernel + reserved cb */ + void *kernel_dsr; /* First kernel + reserved DSR */ + struct rw_semaphore bs_kgts_sema; /* lock for kgts */ + struct gru_thread_state *bs_kgts; /* GTS for kernel use */ + + /* ---- the following are used for managing kernel async GRU CBRs --- */ + int bs_async_dsr_bytes; /* DSRs for async */ + int bs_async_cbrs; /* CBRs AU for async */ + struct completion *bs_async_wq; + + /* ---- the following are protected by the bs_lock spinlock ---- */ + spinlock_t bs_lock; /* lock used for + stealing contexts */ + int bs_lru_ctxnum; /* STEAL - last context + stolen */ + struct gru_state *bs_lru_gru; /* STEAL - last gru + stolen */ + + struct gru_state bs_grus[GRU_CHIPLETS_PER_BLADE]; +}; + +/*----------------------------------------------------------------------------- + * Address Primitives + */ +#define get_tfm_for_cpu(g, c) \ + ((struct gru_tlb_fault_map *)get_tfm((g)->gs_gru_base_vaddr, (c))) +#define get_tfh_by_index(g, i) \ + ((struct gru_tlb_fault_handle *)get_tfh((g)->gs_gru_base_vaddr, (i))) +#define get_tgh_by_index(g, i) \ + ((struct gru_tlb_global_handle *)get_tgh((g)->gs_gru_base_vaddr, (i))) +#define get_cbe_by_index(g, i) \ + ((struct gru_control_block_extended *)get_cbe((g)->gs_gru_base_vaddr,\ + (i))) + +/*----------------------------------------------------------------------------- + * Useful Macros + */ + +/* Given a blade# & chiplet#, get a pointer to the GRU */ +#define get_gru(b, c) (&gru_base[b]->bs_grus[c]) + +/* Number of bytes to save/restore when unloading/loading GRU contexts */ +#define DSR_BYTES(dsr) ((dsr) * GRU_DSR_AU_BYTES) +#define CBR_BYTES(cbr) ((cbr) * GRU_HANDLE_BYTES * GRU_CBR_AU_SIZE * 2) + +/* Convert a user CB number to the actual CBRNUM */ +#define thread_cbr_number(gts, n) ((gts)->ts_cbr_idx[(n) / GRU_CBR_AU_SIZE] \ + * GRU_CBR_AU_SIZE + (n) % GRU_CBR_AU_SIZE) + +/* Convert a gid to a pointer to the GRU */ +#define GID_TO_GRU(gid) \ + (gru_base[(gid) / GRU_CHIPLETS_PER_BLADE] ? \ + (&gru_base[(gid) / GRU_CHIPLETS_PER_BLADE]-> \ + bs_grus[(gid) % GRU_CHIPLETS_PER_BLADE]) : \ + NULL) + +/* Scan all active GRUs in a GRU bitmap */ +#define for_each_gru_in_bitmap(gid, map) \ + for_each_set_bit((gid), (map), GRU_MAX_GRUS) + +/* Scan all active GRUs on a specific blade */ +#define for_each_gru_on_blade(gru, nid, i) \ + for ((gru) = gru_base[nid]->bs_grus, (i) = 0; \ + (i) < GRU_CHIPLETS_PER_BLADE; \ + (i)++, (gru)++) + +/* Scan all GRUs */ +#define foreach_gid(gid) \ + for ((gid) = 0; (gid) < gru_max_gids; (gid)++) + +/* Scan all active GTSs on a gru. Note: must hold ss_lock to use this macro. 
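+ * Illustrative use (with the required lock held; process_gts() is a
+ * hypothetical callee):
+ *	for_each_gts_on_gru(gts, gru, ctxnum)
+ *		process_gts(gts);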
*/
+#define for_each_gts_on_gru(gts, gru, ctxnum)				\
+	for ((ctxnum) = 0; (ctxnum) < GRU_NUM_CCH; (ctxnum)++)		\
+		if (((gts) = (gru)->gs_gts[ctxnum]))
+
+/* Scan each CBR whose bit is set in a TFM (or copy of) */
+#define for_each_cbr_in_tfm(i, map)					\
+	for_each_set_bit((i), (map), GRU_NUM_CBE)
+
+/* Scan each CBR in a CBR bitmap. Note: multiple CBRs in an allocation unit */
+#define for_each_cbr_in_allocation_map(i, map, k)			\
+	for_each_set_bit((k), (map), GRU_CBR_AU)			\
+		for ((i) = (k)*GRU_CBR_AU_SIZE;				\
+		     (i) < ((k) + 1) * GRU_CBR_AU_SIZE; (i)++)
+
+/* Scan each DSR in a DSR bitmap. Note: multiple DSRs in an allocation unit */
+#define for_each_dsr_in_allocation_map(i, map, k)			\
+	for_each_set_bit((k), (const unsigned long *)(map), GRU_DSR_AU)	\
+		for ((i) = (k) * GRU_DSR_AU_CL;				\
+		     (i) < ((k) + 1) * GRU_DSR_AU_CL; (i)++)
+
+#define gseg_physical_address(gru, ctxnum)				\
+		((gru)->gs_gru_base_paddr + ctxnum * GRU_GSEG_STRIDE)
+#define gseg_virtual_address(gru, ctxnum)				\
+		((gru)->gs_gru_base_vaddr + ctxnum * GRU_GSEG_STRIDE)
+
+/*-----------------------------------------------------------------------------
+ * Lock / Unlock GRU handles
+ *	Use the "delresp" bit in the handle as a "lock" bit.
+ */
+
+/* Lock hierarchy checking enabled only in emulator */
+
+/* 0 = lock failed, 1 = locked */
+static inline int __trylock_handle(void *h)
+{
+	return !test_and_set_bit(1, h);
+}
+
+static inline void __lock_handle(void *h)
+{
+	while (test_and_set_bit(1, h))
+		cpu_relax();
+}
+
+static inline void __unlock_handle(void *h)
+{
+	clear_bit(1, h);
+}
+
+static inline int trylock_cch_handle(struct gru_context_configuration_handle *cch)
+{
+	return __trylock_handle(cch);
+}
+
+static inline void lock_cch_handle(struct gru_context_configuration_handle *cch)
+{
+	__lock_handle(cch);
+}
+
+static inline void unlock_cch_handle(struct gru_context_configuration_handle
+				     *cch)
+{
+	__unlock_handle(cch);
+}
+
+static inline void lock_tgh_handle(struct gru_tlb_global_handle *tgh)
+{
+	__lock_handle(tgh);
+}
+
+static inline void unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
+{
+	__unlock_handle(tgh);
+}
+
+static inline int is_kernel_context(struct gru_thread_state *gts)
+{
+	return !gts->ts_mm;
+}
+
+/*
+ * The following are for Nehalem-EX. A more general scheme is needed for
+ * future processors.
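+ * The macros below decode the x86 APIC id reported by cpu_physical_id():
+ * bit 0 selects the hyperthread, bits 1-2 and 4 select the core, and
+ * bit 5 selects the socket.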
+ */
+#define UV_MAX_INT_CORES		8
+#define uv_cpu_socket_number(p)		((cpu_physical_id(p) >> 5) & 1)
+#define uv_cpu_ht_number(p)		(cpu_physical_id(p) & 1)
+#define uv_cpu_core_number(p)		(((cpu_physical_id(p) >> 2) & 4) |	\
+					((cpu_physical_id(p) >> 1) & 3))
+/*-----------------------------------------------------------------------------
+ * Function prototypes & externs
+ */
+struct gru_unload_context_req;
+
+extern const struct vm_operations_struct gru_vm_ops;
+extern struct device *grudev;
+
+extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma,
+				int tsid);
+extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
+				*vma, int tsid);
+extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
+				*vma, int tsid);
+extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts);
+extern void gru_load_context(struct gru_thread_state *gts);
+extern void gru_steal_context(struct gru_thread_state *gts);
+extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
+extern int gru_update_cch(struct gru_thread_state *gts);
+extern void gts_drop(struct gru_thread_state *gts);
+extern void gru_tgh_flush_init(struct gru_state *gru);
+extern int gru_kservices_init(void);
+extern void gru_kservices_exit(void);
+extern irqreturn_t gru0_intr(int irq, void *dev_id);
+extern irqreturn_t gru1_intr(int irq, void *dev_id);
+extern irqreturn_t gru_intr_mblade(int irq, void *dev_id);
+extern int gru_dump_chiplet_request(unsigned long arg);
+extern long gru_get_gseg_statistics(unsigned long arg);
+extern int gru_handle_user_call_os(unsigned long address);
+extern int gru_user_flush_tlb(unsigned long arg);
+extern int gru_user_unload_context(unsigned long arg);
+extern int gru_get_exception_detail(unsigned long arg);
+extern int gru_set_context_option(unsigned long address);
+extern int gru_check_context_placement(struct gru_thread_state *gts);
+extern int gru_cpu_fault_map_id(void);
+extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
+extern void gru_flush_all_tlb(struct gru_state *gru);
+extern int gru_proc_init(void);
+extern void gru_proc_exit(void);
+
+extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
+		int cbr_au_count, int dsr_au_count,
+		unsigned char tlb_preload_count, int options, int tsid);
+extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
+		int cbr_au_count, char *cbmap);
+extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
+		int dsr_au_count, char *dsmap);
+extern vm_fault_t gru_fault(struct vm_fault *vmf);
+extern struct gru_mm_struct *gru_register_mmu_notifier(void);
+extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms);
+
+extern int gru_ktest(unsigned long arg);
+extern void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
+					unsigned long len);
+
+extern unsigned long gru_options;
+
+#endif /* __GRUTABLES_H__ */
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
new file mode 100644
index 000000000..10921cd26
--- /dev/null
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SN Platform GRU Driver
+ *
+ *		MMUOPS callbacks + TLB flushing
+ *
+ * This file handles mmu notifier callbacks from the core kernel. The callbacks
+ * are used to update the TLB in the GRU as a result of changes in the
+ * state of a process address space. This file also handles TLB invalidates
+ * from the GRU driver.
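+ * The callbacks are wired into the core kernel through the
+ * mmu_notifier_ops table (gru_mmuops) defined at the bottom of this file.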
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/hugetlb.h>
+#include <linux/delay.h>
+#include <linux/timex.h>
+#include <linux/srcu.h>
+#include <asm/processor.h>
+#include "gru.h"
+#include "grutables.h"
+#include <asm/uv/uv_hub.h>
+
+#define gru_random()	get_cycles()
+
+/* ---------------------------------- TLB Invalidation functions --------
+ * get_tgh_handle
+ *
+ * Find a TGH to use for issuing a TLB invalidate. For GRUs that are on the
+ * local blade, use a fixed TGH that is a function of the blade-local cpu
+ * number. Normally, this TGH is private to the cpu & no contention occurs for
+ * the TGH. For offblade GRUs, select a random TGH in the range above the
+ * private TGHs. A spinlock is required to access this TGH & the lock must be
+ * released when the invalidate completes. This sucks, but it is the best we
+ * can do.
+ *
+ * Note that the spinlock is IN the TGH handle so locking does not involve
+ * additional cache lines.
+ *
+ */
+static inline int get_off_blade_tgh(struct gru_state *gru)
+{
+	int n;
+
+	n = GRU_NUM_TGH - gru->gs_tgh_first_remote;
+	n = gru_random() % n;
+	n += gru->gs_tgh_first_remote;
+	return n;
+}
+
+static inline int get_on_blade_tgh(struct gru_state *gru)
+{
+	return uv_blade_processor_id() >> gru->gs_tgh_local_shift;
+}
+
+static struct gru_tlb_global_handle *get_lock_tgh_handle(struct gru_state
+							 *gru)
+{
+	struct gru_tlb_global_handle *tgh;
+	int n;
+
+	preempt_disable();
+	if (uv_numa_blade_id() == gru->gs_blade_id)
+		n = get_on_blade_tgh(gru);
+	else
+		n = get_off_blade_tgh(gru);
+	tgh = get_tgh_by_index(gru, n);
+	lock_tgh_handle(tgh);
+
+	return tgh;
+}
+
+static void get_unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
+{
+	unlock_tgh_handle(tgh);
+	preempt_enable();
+}
+
+/*
+ * gru_flush_tlb_range
+ *
+ * General purpose TLB invalidation function. This function scans every GRU in
+ * the ENTIRE system (partition) looking for GRUs where the specified MM has
+ * been accessed by the GRU. For each GRU found, the TLB must be invalidated OR
+ * the ASID invalidated. Invalidating an ASID causes a new ASID to be assigned
+ * on the next fault. This effectively flushes the ENTIRE TLB for the MM at the
+ * cost of (possibly) a large number of future TLBmisses.
+ *
+ * The current algorithm is optimized based on the following (somewhat true)
+ * assumptions:
+ *	- GRU contexts are not loaded into a GRU unless a reference is made to
+ *	  the data segment or control block (this is true, not an assumption).
+ *	  If a DS/CB is referenced, the user will also issue instructions that
+ *	  cause TLBmisses. It is not necessary to optimize for the case where
+ *	  contexts are loaded but no instructions cause TLB misses. (I know
+ *	  this will happen but I'm not optimizing for it).
+ *	- GRU instructions to invalidate TLB entries are SLOOOOWWW - normally
+ *	  a few usec but in unusual cases, it could be longer. Avoid if
+ *	  possible.
+ *	- intrablade process migration between cpus is not frequent, but common
+ *	  enough to matter.
+ *	- a GRU context is not typically migrated to a different GRU on the
+ *	  blade because of intrablade migration
+ *	- interblade migration is rare. Processes migrate their GRU context to
+ *	  the new blade.
+ *	- if interblade migration occurs, migration back to the original blade
+ *	  is very very rare (i.e., no optimization for this case)
+ *	- most GRU instructions operate on a subset of the user REGIONS. Code
+ *	  & shared library regions are not likely targets of GRU instructions.
+ *
+ * To help improve the efficiency of TLB invalidation, the GMS data
+ * structure is maintained for EACH address space (MM struct). The GMS is
+ * also the structure that contains the pointer to the mmu callout
+ * functions. This structure is linked to the mm_struct for the address space
+ * using the mmu "register" function. The mmu interfaces are used to
+ * provide the callbacks for TLB invalidation. The GMS contains:
+ *
+ *	- asid[maxgrus] array. ASIDs are assigned to a GRU when a context is
+ *	  loaded into the GRU.
+ *	- asidmap[maxgrus]. bitmap to make it easier to find non-zero asids in
+ *	  the above array
+ *	- ctxbitmap[maxgrus]. Indicates the contexts that are currently active
+ *	  in the GRU for the address space. This bitmap must be passed to the
+ *	  GRU to do an invalidate.
+ *
+ * The current algorithm for invalidating TLBs is:
+ *	- scan the asidmap for GRUs where the context has been loaded, i.e.,
+ *	  asid is non-zero.
+ *	- for each gru found:
+ *		- if the ctxtmap is non-zero, there are active contexts in the
+ *		  GRU. TLB invalidate instructions must be issued to the GRU.
+ *		- if the ctxtmap is zero, no context is active. Set the ASID to
+ *		  zero to force a full TLB invalidation. This is fast but will
+ *		  cause a lot of TLB misses if the context is reloaded onto the
+ *		  GRU
+ *
+ */
+
+void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
+			 unsigned long len)
+{
+	struct gru_state *gru;
+	struct gru_mm_tracker *asids;
+	struct gru_tlb_global_handle *tgh;
+	unsigned long num;
+	int grupagesize, pagesize, pageshift, gid, asid;
+
+	/* ZZZ TODO - handle huge pages */
+	pageshift = PAGE_SHIFT;
+	pagesize = (1UL << pageshift);
+	grupagesize = GRU_PAGESIZE(pageshift);
+	num = min(((len + pagesize - 1) >> pageshift), GRUMAXINVAL);
+
+	STAT(flush_tlb);
+	gru_dbg(grudev, "gms %p, start 0x%lx, len 0x%lx, asidmap 0x%lx\n", gms,
+		start, len, gms->ms_asidmap[0]);
+
+	spin_lock(&gms->ms_asid_lock);
+	for_each_gru_in_bitmap(gid, gms->ms_asidmap) {
+		STAT(flush_tlb_gru);
+		gru = GID_TO_GRU(gid);
+		asids = gms->ms_asids + gid;
+		asid = asids->mt_asid;
+		if (asids->mt_ctxbitmap && asid) {
+			STAT(flush_tlb_gru_tgh);
+			asid = GRUASID(asid, start);
+			gru_dbg(grudev,
+				"  FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n",
+				gid, asid, start, grupagesize, num, asids->mt_ctxbitmap);
+			tgh = get_lock_tgh_handle(gru);
+			tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
+				       num - 1, asids->mt_ctxbitmap);
+			get_unlock_tgh_handle(tgh);
+		} else {
+			STAT(flush_tlb_gru_zero_asid);
+			asids->mt_asid = 0;
+			__clear_bit(gru->gs_gid, gms->ms_asidmap);
+			gru_dbg(grudev,
+				"  CLEARASID gruid %d, asid 0x%x, cbtmap 0x%x, asidmap 0x%lx\n",
+				gid, asid, asids->mt_ctxbitmap,
+				gms->ms_asidmap[0]);
+		}
+	}
+	spin_unlock(&gms->ms_asid_lock);
+}
+
+/*
+ * Flush the entire TLB on a chiplet.
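+ * All contexts (ctxbitmap 0xffff) and the full address range are
+ * invalidated with a single tgh_invalidate() call.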
+ */
+void gru_flush_all_tlb(struct gru_state *gru)
+{
+	struct gru_tlb_global_handle *tgh;
+
+	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
+	tgh = get_lock_tgh_handle(gru);
+	tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0xffff);
+	get_unlock_tgh_handle(tgh);
+}
+
+/*
+ * MMUOPS notifier callout functions
+ */
+static int gru_invalidate_range_start(struct mmu_notifier *mn,
+			const struct mmu_notifier_range *range)
+{
+	struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
+						 ms_notifier);
+
+	STAT(mmu_invalidate_range);
+	atomic_inc(&gms->ms_range_active);
+	gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx, act %d\n", gms,
+		range->start, range->end, atomic_read(&gms->ms_range_active));
+	gru_flush_tlb_range(gms, range->start, range->end - range->start);
+
+	return 0;
+}
+
+static void gru_invalidate_range_end(struct mmu_notifier *mn,
+			const struct mmu_notifier_range *range)
+{
+	struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
+						 ms_notifier);
+
+	/* ..._and_test() provides needed barrier */
+	(void)atomic_dec_and_test(&gms->ms_range_active);
+
+	wake_up_all(&gms->ms_wait_queue);
+	gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n",
+		gms, range->start, range->end);
+}
+
+static struct mmu_notifier *gru_alloc_notifier(struct mm_struct *mm)
+{
+	struct gru_mm_struct *gms;
+
+	gms = kzalloc(sizeof(*gms), GFP_KERNEL);
+	if (!gms)
+		return ERR_PTR(-ENOMEM);
+	STAT(gms_alloc);
+	spin_lock_init(&gms->ms_asid_lock);
+	init_waitqueue_head(&gms->ms_wait_queue);
+
+	return &gms->ms_notifier;
+}
+
+static void gru_free_notifier(struct mmu_notifier *mn)
+{
+	kfree(container_of(mn, struct gru_mm_struct, ms_notifier));
+	STAT(gms_free);
+}
+
+static const struct mmu_notifier_ops gru_mmuops = {
+	.invalidate_range_start	= gru_invalidate_range_start,
+	.invalidate_range_end	= gru_invalidate_range_end,
+	.alloc_notifier		= gru_alloc_notifier,
+	.free_notifier		= gru_free_notifier,
+};
+
+struct gru_mm_struct *gru_register_mmu_notifier(void)
+{
+	struct mmu_notifier *mn;
+
+	mn = mmu_notifier_get_locked(&gru_mmuops, current->mm);
+	if (IS_ERR(mn))
+		return ERR_CAST(mn);
+
+	return container_of(mn, struct gru_mm_struct, ms_notifier);
+}
+
+void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
+{
+	mmu_notifier_put(&gms->ms_notifier);
+}
+
+/*
+ * Setup TGH parameters. There are:
+ *	- 24 TGH handles per GRU chiplet
+ *	- a portion (MAX_LOCAL_TGH) of the handles is reserved for
+ *	  use by blade-local cpus
+ *	- the rest are used by off-blade cpus. This usage is
+ *	  less frequent than blade-local usage.
+ *
+ * For now, use 16 handles for local flushes, 8 for remote flushes. If the blade
+ * has less than or equal to 16 cpus, each cpu has a unique handle that it can
+ * use.
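+ * Worked example (assuming a blade with 32 possible cpus): n = 32 and
+ * shift = 1, so blade-local cpus 2k and 2k+1 share TGH k (0-15), and
+ * gs_tgh_first_remote = 16, leaving TGHs 16-23 for off-blade purges.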
+ */ +#define MAX_LOCAL_TGH 16 + +void gru_tgh_flush_init(struct gru_state *gru) +{ + int cpus, shift = 0, n; + + cpus = uv_blade_nr_possible_cpus(gru->gs_blade_id); + + /* n = cpus rounded up to next power of 2 */ + if (cpus) { + n = 1 << fls(cpus - 1); + + /* + * shift count for converting local cpu# to TGH index + * 0 if cpus <= MAX_LOCAL_TGH, + * 1 if cpus <= 2*MAX_LOCAL_TGH, + * etc + */ + shift = max(0, fls(n - 1) - fls(MAX_LOCAL_TGH - 1)); + } + gru->gs_tgh_local_shift = shift; + + /* first starting TGH index to use for remote purges */ + gru->gs_tgh_first_remote = (cpus + (1 << shift) - 1) >> shift; + +} diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile new file mode 100644 index 000000000..34c55a404 --- /dev/null +++ b/drivers/misc/sgi-xp/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for SGI's XP devices. +# + +obj-$(CONFIG_SGI_XP) += xp.o +xp-y := xp_main.o xp_uv.o + +obj-$(CONFIG_SGI_XP) += xpc.o +xpc-y := xpc_main.o xpc_channel.o xpc_partition.o \ + xpc_uv.o + +obj-$(CONFIG_SGI_XP) += xpnet.o diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h new file mode 100644 index 000000000..9f9af77f8 --- /dev/null +++ b/drivers/misc/sgi-xp/xp.h @@ -0,0 +1,345 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * (C) Copyright 2020 Hewlett Packard Enterprise Development LP + * Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved. + */ + +/* + * External Cross Partition (XP) structures and defines. + */ + +#ifndef _DRIVERS_MISC_SGIXP_XP_H +#define _DRIVERS_MISC_SGIXP_XP_H + +#include <linux/mutex.h> + +#if defined CONFIG_X86_UV || defined CONFIG_IA64_SGI_UV +#include <asm/uv/uv.h> +#endif + +#ifdef USE_DBUG_ON +#define DBUG_ON(condition) BUG_ON(condition) +#else +#define DBUG_ON(condition) +#endif + +/* + * Define the maximum number of partitions the system can possibly support. + * It is based on the maximum number of hardware partitionable regions. The + * term 'region' in this context refers to the minimum number of nodes that + * can comprise an access protection grouping. The access protection is in + * regards to memory, IPI and IOI. + * + * The maximum number of hardware partitionable regions is equal to the + * maximum number of nodes in the entire system divided by the minimum number + * of nodes that comprise an access protection grouping. + */ +#define XP_MAX_NPARTITIONS_SN2 64 +#define XP_MAX_NPARTITIONS_UV 256 + +/* + * XPC establishes channel connections between the local partition and any + * other partition that is currently up. Over these channels, kernel-level + * `users' can communicate with their counterparts on the other partitions. + * + * If the need for additional channels arises, one can simply increase + * XPC_MAX_NCHANNELS accordingly. If the day should come where that number + * exceeds the absolute MAXIMUM number of channels possible (eight), then one + * will need to make changes to the XPC code to accommodate for this. + * + * The absolute maximum number of channels possible is limited to eight for + * performance reasons on sn2 hardware. The internal cross partition structures + * require sixteen bytes per channel, and eight allows all of this + * interface-shared info to fit in one 128-byte cacheline. 
+ */
+#define XPC_MEM_CHANNEL		0	/* memory channel number */
+#define	XPC_NET_CHANNEL		1	/* network channel number */
+
+#define XPC_MAX_NCHANNELS	2	/* max #of channels allowed */
+
+#if XPC_MAX_NCHANNELS > 8
+#error	XPC_MAX_NCHANNELS exceeds absolute MAXIMUM possible.
+#endif
+
+/*
+ * The macro XPC_MSG_SIZE() is provided for a user
+ * that wants to fit as many msg entries as possible in a given memory size
+ * (e.g. a memory page).
+ */
+#define XPC_MSG_MAX_SIZE	128
+#define XPC_MSG_HDR_MAX_SIZE	16
+#define XPC_MSG_PAYLOAD_MAX_SIZE (XPC_MSG_MAX_SIZE - XPC_MSG_HDR_MAX_SIZE)
+
+#define XPC_MSG_SIZE(_payload_size) \
+				ALIGN(XPC_MSG_HDR_MAX_SIZE + (_payload_size), \
+				      is_uv_system() ? 64 : 128)
+
+
+/*
+ * Define the return values and values passed to user's callout functions.
+ * (It is important to add new value codes at the end just preceding
+ * xpUnknownReason, which must have the highest numerical value.)
+ */
+enum xp_retval {
+	xpSuccess = 0,
+
+	xpNotConnected,		/*  1: channel is not connected */
+	xpConnected,		/*  2: channel connected (opened) */
+	xpRETIRED1,		/*  3: (formerly xpDisconnected) */
+
+	xpMsgReceived,		/*  4: message received */
+	xpMsgDelivered,		/*  5: message delivered and acknowledged */
+
+	xpRETIRED2,		/*  6: (formerly xpTransferFailed) */
+
+	xpNoWait,		/*  7: operation would require wait */
+	xpRetry,		/*  8: retry operation */
+	xpTimeout,		/*  9: timeout in xpc_allocate_msg_wait() */
+	xpInterrupted,		/* 10: interrupted wait */
+
+	xpUnequalMsgSizes,	/* 11: message size disparity between sides */
+	xpInvalidAddress,	/* 12: invalid address */
+
+	xpNoMemory,		/* 13: no memory available for XPC structures */
+	xpLackOfResources,	/* 14: insufficient resources for operation */
+	xpUnregistered,		/* 15: channel is not registered */
+	xpAlreadyRegistered,	/* 16: channel is already registered */
+
+	xpPartitionDown,	/* 17: remote partition is down */
+	xpNotLoaded,		/* 18: XPC module is not loaded */
+	xpUnloading,		/* 19: this side is unloading XPC module */
+
+	xpBadMagic,		/* 20: XPC MAGIC string not found */
+
+	xpReactivating,		/* 21: remote partition was reactivated */
+
+	xpUnregistering,	/* 22: this side is unregistering channel */
+	xpOtherUnregistering,	/* 23: other side is unregistering channel */
+
+	xpCloneKThread,		/* 24: cloning kernel thread */
+	xpCloneKThreadFailed,	/* 25: cloning kernel thread failed */
+
+	xpNoHeartbeat,		/* 26: remote partition has no heartbeat */
+
+	xpPioReadError,		/* 27: PIO read error */
+	xpPhysAddrRegFailed,	/* 28: registration of phys addr range failed */
+
+	xpRETIRED3,		/* 29: (formerly xpBteDirectoryError) */
+	xpRETIRED4,		/* 30: (formerly xpBtePoisonError) */
+	xpRETIRED5,		/* 31: (formerly xpBteWriteError) */
+	xpRETIRED6,		/* 32: (formerly xpBteAccessError) */
+	xpRETIRED7,		/* 33: (formerly xpBtePWriteError) */
+	xpRETIRED8,		/* 34: (formerly xpBtePReadError) */
+	xpRETIRED9,		/* 35: (formerly xpBteTimeOutError) */
+	xpRETIRED10,		/* 36: (formerly xpBteXtalkError) */
+	xpRETIRED11,		/* 37: (formerly xpBteNotAvailable) */
+	xpRETIRED12,		/* 38: (formerly xpBteUnmappedError) */
+
+	xpBadVersion,		/* 39: bad version number */
+	xpVarsNotSet,		/* 40: the XPC variables are not set up */
+	xpNoRsvdPageAddr,	/* 41: unable to get rsvd page's phys addr */
+	xpInvalidPartid,	/* 42: invalid partition ID */
+	xpLocalPartid,		/* 43: local partition ID */
+
+	xpOtherGoingDown,	/* 44: other side going down, reason unknown */
+	xpSystemGoingDown,	/* 45: system is going down, reason unknown */
+	xpSystemHalt,		/* 46: system is being halted */
+	xpSystemReboot,		/* 47: system is being rebooted */
+	xpSystemPoweroff,	/* 48: system is being powered off */
+
+	xpDisconnecting,	/* 49: channel disconnecting (closing) */
+
+	xpOpenCloseError,	/* 50: channel open/close protocol error */
+
+	xpDisconnected,		/* 51: channel disconnected (closed) */
+
+	xpBteCopyError,		/* 52: bte_copy() returned error */
+	xpSalError,		/* 53: sn SAL error */
+	xpRsvdPageNotSet,	/* 54: the reserved page is not set up */
+	xpPayloadTooBig,	/* 55: payload too large for message slot */
+
+	xpUnsupported,		/* 56: unsupported functionality or resource */
+	xpNeedMoreInfo,		/* 57: more info is needed by SAL */
+
+	xpGruCopyError,		/* 58: gru_copy_gru() returned error */
+	xpGruSendMqError,	/* 59: gru send message queue related error */
+
+	xpBadChannelNumber,	/* 60: invalid channel number */
+	xpBadMsgType,		/* 61: invalid message type */
+	xpBiosError,		/* 62: BIOS error */
+
+	xpUnknownReason		/* 63: unknown reason - must be last in enum */
+};
+
+/*
+ * Define the callout function type used by XPC to update the user on
+ * connection activity and state changes via the user function registered
+ * by xpc_connect().
+ *
+ * Arguments:
+ *
+ *	reason - reason code.
+ *	partid - partition ID associated with condition.
+ *	ch_number - channel # associated with condition.
+ *	data - pointer to optional data.
+ *	key - pointer to optional user-defined value provided as the "key"
+ *	      argument to xpc_connect().
+ *
+ * A reason code of xpConnected indicates that a connection has been
+ * established to the specified partition on the specified channel. The data
+ * argument indicates the max number of entries allowed in the message queue.
+ *
+ * A reason code of xpMsgReceived indicates that an XPC message arrived from
+ * the specified partition on the specified channel. The data argument
+ * specifies the address of the message's payload. The user must call
+ * xpc_received() when finished with the payload.
+ *
+ * All other reason codes indicate failure. The data argument is NULL.
+ * When a failure reason code is received, one can assume that the channel
+ * is not connected.
+ */
+typedef void (*xpc_channel_func) (enum xp_retval reason, short partid,
+				  int ch_number, void *data, void *key);
+
+/*
+ * Define the callout function type used by XPC to notify the user of
+ * messages received and delivered via the user function registered by
+ * xpc_send_notify().
+ *
+ * Arguments:
+ *
+ *	reason - reason code.
+ *	partid - partition ID associated with condition.
+ *	ch_number - channel # associated with condition.
+ *	key - pointer to optional user-defined value provided as the "key"
+ *	      argument to xpc_send_notify().
+ *
+ * A reason code of xpMsgDelivered indicates that the message was delivered
+ * to the intended recipient and that they have acknowledged its receipt by
+ * calling xpc_received().
+ *
+ * All other reason codes indicate failure.
+ *
+ * NOTE: The user defined function must be callable by an interrupt handler
+ *       and thus cannot block.
+ */
+typedef void (*xpc_notify_func) (enum xp_retval reason, short partid,
+				 int ch_number, void *key);
+
+/*
+ * The following is a registration entry. There is a global array of these,
+ * one per channel. It is used to record the connection registration made
+ * by the users of XPC. As long as a registration entry exists, for any
+ * partition that comes up, XPC will attempt to establish a connection on
+ * that channel. Notification that a connection has been made will occur via
+ * the xpc_channel_func function.
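+ * A registration is normally made through xpc_connect(); an
+ * illustrative call (names are hypothetical) is:
+ *
+ *	ret = xpc_connect(XPC_NET_CHANNEL, my_channel_func, NULL,
+ *			  my_payload_size, my_nentries, 1, 1);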
+ *
+ * The 'func' field points to the function to call when asynchronous
+ * notification is required for such events as: a connection established/lost,
+ * or an incoming message received, or an error condition encountered. A
+ * non-NULL 'func' field indicates that there is an active registration for
+ * the channel.
+ */
+struct xpc_registration {
+	struct mutex mutex;
+	xpc_channel_func func;	/* function to call */
+	void *key;		/* pointer to user's key */
+	u16 nentries;		/* #of msg entries in local msg queue */
+	u16 entry_size;		/* message queue's message entry size */
+	u32 assigned_limit;	/* limit on #of assigned kthreads */
+	u32 idle_limit;		/* limit on #of idle kthreads */
+} ____cacheline_aligned;
+
+#define XPC_CHANNEL_REGISTERED(_c)	(xpc_registrations[_c].func != NULL)
+
+/* the following are valid xpc_send() or xpc_send_notify() flags */
+#define XPC_WAIT	0	/* wait flag */
+#define XPC_NOWAIT	1	/* no wait flag */
+
+struct xpc_interface {
+	void (*connect) (int);
+	void (*disconnect) (int);
+	enum xp_retval (*send) (short, int, u32, void *, u16);
+	enum xp_retval (*send_notify) (short, int, u32, void *, u16,
+				       xpc_notify_func, void *);
+	void (*received) (short, int, void *);
+	enum xp_retval (*partid_to_nasids) (short, void *);
+};
+
+extern struct xpc_interface xpc_interface;
+
+extern void xpc_set_interface(void (*)(int),
+			      void (*)(int),
+			      enum xp_retval (*)(short, int, u32, void *, u16),
+			      enum xp_retval (*)(short, int, u32, void *, u16,
+						 xpc_notify_func, void *),
+			      void (*)(short, int, void *),
+			      enum xp_retval (*)(short, void *));
+extern void xpc_clear_interface(void);
+
+extern enum xp_retval xpc_connect(int, xpc_channel_func, void *, u16,
+				   u16, u32, u32);
+extern void xpc_disconnect(int);
+
+static inline enum xp_retval
+xpc_send(short partid, int ch_number, u32 flags, void *payload,
+	 u16 payload_size)
+{
+	if (!xpc_interface.send)
+		return xpNotLoaded;
+
+	return xpc_interface.send(partid, ch_number, flags, payload,
+				  payload_size);
+}
+
+static inline enum xp_retval
+xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
+		u16 payload_size, xpc_notify_func func, void *key)
+{
+	if (!xpc_interface.send_notify)
+		return xpNotLoaded;
+
+	return xpc_interface.send_notify(partid, ch_number, flags, payload,
+					 payload_size, func, key);
+}
+
+static inline void
+xpc_received(short partid, int ch_number, void *payload)
+{
+	if (xpc_interface.received)
+		xpc_interface.received(partid, ch_number, payload);
+}
+
+static inline enum xp_retval
+xpc_partid_to_nasids(short partid, void *nasids)
+{
+	if (!xpc_interface.partid_to_nasids)
+		return xpNotLoaded;
+
+	return xpc_interface.partid_to_nasids(partid, nasids);
+}
+
+extern short xp_max_npartitions;
+extern short xp_partition_id;
+extern u8 xp_region_size;
+
+extern unsigned long (*xp_pa) (void *);
+extern unsigned long (*xp_socket_pa) (unsigned long);
+extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
+		       size_t);
+extern int (*xp_cpu_to_nasid) (int);
+extern enum xp_retval (*xp_expand_memprotect) (unsigned long, unsigned long);
+extern enum xp_retval (*xp_restrict_memprotect) (unsigned long, unsigned long);
+
+extern u64 xp_nofault_PIOR_target;
+extern int xp_nofault_PIOR(void *);
+extern int xp_error_PIOR(void);
+
+extern struct device *xp;
+extern enum xp_retval xp_init_uv(void);
+extern void xp_exit_uv(void);
+
+#endif /* _DRIVERS_MISC_SGIXP_XP_H */
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
new file mode 100644
index 000000000..cf2965aa5
---
/dev/null +++ b/drivers/misc/sgi-xp/xp_main.c @@ -0,0 +1,261 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * (C) Copyright 2020 Hewlett Packard Enterprise Development LP + * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition (XP) base. + * + * XP provides a base from which its users can interact + * with XPC, yet not be dependent on XPC. + * + */ + +#include <linux/module.h> +#include <linux/device.h> +#include "xp.h" + +/* define the XP debug device structures to be used with dev_dbg() et al */ + +struct device_driver xp_dbg_name = { + .name = "xp" +}; + +struct device xp_dbg_subname = { + .init_name = "", /* set to "" */ + .driver = &xp_dbg_name +}; + +struct device *xp = &xp_dbg_subname; + +/* max #of partitions possible */ +short xp_max_npartitions; +EXPORT_SYMBOL_GPL(xp_max_npartitions); + +short xp_partition_id; +EXPORT_SYMBOL_GPL(xp_partition_id); + +u8 xp_region_size; +EXPORT_SYMBOL_GPL(xp_region_size); + +unsigned long (*xp_pa) (void *addr); +EXPORT_SYMBOL_GPL(xp_pa); + +unsigned long (*xp_socket_pa) (unsigned long gpa); +EXPORT_SYMBOL_GPL(xp_socket_pa); + +enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa, + const unsigned long src_gpa, size_t len); +EXPORT_SYMBOL_GPL(xp_remote_memcpy); + +int (*xp_cpu_to_nasid) (int cpuid); +EXPORT_SYMBOL_GPL(xp_cpu_to_nasid); + +enum xp_retval (*xp_expand_memprotect) (unsigned long phys_addr, + unsigned long size); +EXPORT_SYMBOL_GPL(xp_expand_memprotect); +enum xp_retval (*xp_restrict_memprotect) (unsigned long phys_addr, + unsigned long size); +EXPORT_SYMBOL_GPL(xp_restrict_memprotect); + +/* + * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level + * users of XPC. + */ +struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS]; +EXPORT_SYMBOL_GPL(xpc_registrations); + +/* + * Initialize the XPC interface to NULL to indicate that XPC isn't loaded. + */ +struct xpc_interface xpc_interface = { }; +EXPORT_SYMBOL_GPL(xpc_interface); + +/* + * XPC calls this when it (the XPC module) has been loaded. + */ +void +xpc_set_interface(void (*connect) (int), + void (*disconnect) (int), + enum xp_retval (*send) (short, int, u32, void *, u16), + enum xp_retval (*send_notify) (short, int, u32, void *, u16, + xpc_notify_func, void *), + void (*received) (short, int, void *), + enum xp_retval (*partid_to_nasids) (short, void *)) +{ + xpc_interface.connect = connect; + xpc_interface.disconnect = disconnect; + xpc_interface.send = send; + xpc_interface.send_notify = send_notify; + xpc_interface.received = received; + xpc_interface.partid_to_nasids = partid_to_nasids; +} +EXPORT_SYMBOL_GPL(xpc_set_interface); + +/* + * XPC calls this when it (the XPC module) is being unloaded. + */ +void +xpc_clear_interface(void) +{ + memset(&xpc_interface, 0, sizeof(xpc_interface)); +} +EXPORT_SYMBOL_GPL(xpc_clear_interface); + +/* + * Register for automatic establishment of a channel connection whenever + * a partition comes up. + * + * Arguments: + * + * ch_number - channel # to register for connection. + * func - function to call for asynchronous notification of channel + * state changes (i.e., connection, disconnection, error) and + * the arrival of incoming messages. + * key - pointer to optional user-defined value that gets passed back + * to the user on any callouts made to func. 
+ *	payload_size - size in bytes of the XPC message's payload area which
+ *		       contains a user-defined message. The user should make
+ *		       this large enough to hold their largest message.
+ *	nentries - max #of XPC message entries a message queue can contain.
+ *		   The actual number, which is determined when a connection
+ *		   is established and may be less than requested, will be
+ *		   passed to the user via the xpConnected callout.
+ *	assigned_limit - max number of kthreads allowed to be processing
+ *			 messages (per connection) at any given instant.
+ *	idle_limit - max number of kthreads allowed to be idle at any given
+ *		     instant.
+ */
+enum xp_retval
+xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
+	    u16 nentries, u32 assigned_limit, u32 idle_limit)
+{
+	struct xpc_registration *registration;
+
+	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
+	DBUG_ON(payload_size == 0 || nentries == 0);
+	DBUG_ON(func == NULL);
+	DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);
+
+	if (XPC_MSG_SIZE(payload_size) > XPC_MSG_MAX_SIZE)
+		return xpPayloadTooBig;
+
+	registration = &xpc_registrations[ch_number];
+
+	if (mutex_lock_interruptible(&registration->mutex) != 0)
+		return xpInterrupted;
+
+	/* if XPC_CHANNEL_REGISTERED(ch_number) */
+	if (registration->func != NULL) {
+		mutex_unlock(&registration->mutex);
+		return xpAlreadyRegistered;
+	}
+
+	/* register the channel for connection */
+	registration->entry_size = XPC_MSG_SIZE(payload_size);
+	registration->nentries = nentries;
+	registration->assigned_limit = assigned_limit;
+	registration->idle_limit = idle_limit;
+	registration->key = key;
+	registration->func = func;
+
+	mutex_unlock(&registration->mutex);
+
+	if (xpc_interface.connect)
+		xpc_interface.connect(ch_number);
+
+	return xpSuccess;
+}
+EXPORT_SYMBOL_GPL(xpc_connect);
+
+/*
+ * Remove the registration for automatic connection of the specified channel
+ * when a partition comes up.
+ *
+ * Before returning, xpc_disconnect() will wait until all connections on the
+ * specified channel have been closed/torn down. So the caller can be assured
+ * that they will not be receiving any more callouts from XPC to their
+ * function registered via xpc_connect().
+ *
+ * Arguments:
+ *
+ *	ch_number - channel # to unregister.
+ */
+void
+xpc_disconnect(int ch_number)
+{
+	struct xpc_registration *registration;
+
+	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
+
+	registration = &xpc_registrations[ch_number];
+
+	/*
+	 * We've decided not to make this a down_interruptible(), since we
+	 * figured XPC's users will just turn around and call xpc_disconnect()
+	 * again anyways, so we might as well wait, if need be.
+ */ + mutex_lock(®istration->mutex); + + /* if !XPC_CHANNEL_REGISTERED(ch_number) */ + if (registration->func == NULL) { + mutex_unlock(®istration->mutex); + return; + } + + /* remove the connection registration for the specified channel */ + registration->func = NULL; + registration->key = NULL; + registration->nentries = 0; + registration->entry_size = 0; + registration->assigned_limit = 0; + registration->idle_limit = 0; + + if (xpc_interface.disconnect) + xpc_interface.disconnect(ch_number); + + mutex_unlock(®istration->mutex); + + return; +} +EXPORT_SYMBOL_GPL(xpc_disconnect); + +static int __init +xp_init(void) +{ + enum xp_retval ret; + int ch_number; + + /* initialize the connection registration mutex */ + for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) + mutex_init(&xpc_registrations[ch_number].mutex); + + if (is_uv_system()) + ret = xp_init_uv(); + else + ret = 0; + + if (ret != xpSuccess) + return ret; + + return 0; +} + +module_init(xp_init); + +static void __exit +xp_exit(void) +{ + if (is_uv_system()) + xp_exit_uv(); +} + +module_exit(xp_exit); + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Cross Partition (XP) base"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c new file mode 100644 index 000000000..19fc7076a --- /dev/null +++ b/drivers/misc/sgi-xp/xp_uv.c @@ -0,0 +1,175 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * (C) Copyright 2020 Hewlett Packard Enterprise Development LP + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition (XP) uv-based functions. + * + * Architecture specific implementation of common functions. + * + */ + +#include <linux/device.h> +#include <asm/uv/uv_hub.h> +#if defined CONFIG_X86_64 +#include <asm/uv/bios.h> +#elif defined CONFIG_IA64_SGI_UV +#include <asm/sn/sn_sal.h> +#endif +#include "../sgi-gru/grukservices.h" +#include "xp.h" + +/* + * Convert a virtual memory address to a physical memory address. + */ +static unsigned long +xp_pa_uv(void *addr) +{ + return uv_gpa(addr); +} + +/* + * Convert a global physical to socket physical address. + */ +static unsigned long +xp_socket_pa_uv(unsigned long gpa) +{ + return uv_gpa_to_soc_phys_ram(gpa); +} + +static enum xp_retval +xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa, + size_t len) +{ + int ret; + unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa)); + + BUG_ON(!uv_gpa_in_mmr_space(src_gpa)); + BUG_ON(len != 8); + + ret = gru_read_gpa(dst_va, src_gpa); + if (ret == 0) + return xpSuccess; + + dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx " + "len=%ld\n", dst_gpa, src_gpa, len); + return xpGruCopyError; +} + + +static enum xp_retval +xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa, + size_t len) +{ + int ret; + + if (uv_gpa_in_mmr_space(src_gpa)) + return xp_remote_mmr_read(dst_gpa, src_gpa, len); + + ret = gru_copy_gpa(dst_gpa, src_gpa, len); + if (ret == 0) + return xpSuccess; + + dev_err(xp, "gru_copy_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx " + "len=%ld\n", dst_gpa, src_gpa, len); + return xpGruCopyError; +} + +static int +xp_cpu_to_nasid_uv(int cpuid) +{ + /* ??? Is this same as sn2 nasid in mach/part bitmaps set up by SAL? 
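The split between xp_main.c and xp_uv.c is worth noting: the base module only publishes function-pointer hooks (xp_pa, xp_remote_memcpy, and so on), and xp_init_uv() points them at the GRU/BIOS-backed implementations above. Generic code then goes through the hooks without caring what sits underneath. A sketch of such a caller follows; remote_gpa is assumed to come from some prior handshake.

/* Sketch: pull 'len' bytes from a remote partition through the XP hooks. */
static enum xp_retval
my_pull(void *local_buf, unsigned long remote_gpa, size_t len)
{
	/* translate our virtual buffer to a global physical address */
	unsigned long dst_gpa = xp_pa(local_buf);

	/* dispatches to xp_remote_memcpy_uv() on uv systems */
	return xp_remote_memcpy(dst_gpa, remote_gpa, len);
}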
*/ + return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid)); +} + +static enum xp_retval +xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size) +{ + int ret; + +#if defined CONFIG_X86_64 + ret = uv_bios_change_memprotect(phys_addr, size, UV_MEMPROT_ALLOW_RW); + if (ret != BIOS_STATUS_SUCCESS) { + dev_err(xp, "uv_bios_change_memprotect(,, " + "UV_MEMPROT_ALLOW_RW) failed, ret=%d\n", ret); + return xpBiosError; + } + +#elif defined CONFIG_IA64_SGI_UV + u64 nasid_array; + + ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1, + &nasid_array); + if (ret != 0) { + dev_err(xp, "sn_change_memprotect(,, " + "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret); + return xpSalError; + } +#else + #error not a supported configuration +#endif + return xpSuccess; +} + +static enum xp_retval +xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size) +{ + int ret; + +#if defined CONFIG_X86_64 + ret = uv_bios_change_memprotect(phys_addr, size, + UV_MEMPROT_RESTRICT_ACCESS); + if (ret != BIOS_STATUS_SUCCESS) { + dev_err(xp, "uv_bios_change_memprotect(,, " + "UV_MEMPROT_RESTRICT_ACCESS) failed, ret=%d\n", ret); + return xpBiosError; + } + +#elif defined CONFIG_IA64_SGI_UV + u64 nasid_array; + + ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0, + &nasid_array); + if (ret != 0) { + dev_err(xp, "sn_change_memprotect(,, " + "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret); + return xpSalError; + } +#else + #error not a supported configuration +#endif + return xpSuccess; +} + +enum xp_retval +xp_init_uv(void) +{ + WARN_ON(!is_uv_system()); + if (!is_uv_system()) + return xpUnsupported; + + xp_max_npartitions = XP_MAX_NPARTITIONS_UV; +#ifdef CONFIG_X86 + xp_partition_id = sn_partition_id; + xp_region_size = sn_region_size; +#endif + xp_pa = xp_pa_uv; + xp_socket_pa = xp_socket_pa_uv; + xp_remote_memcpy = xp_remote_memcpy_uv; + xp_cpu_to_nasid = xp_cpu_to_nasid_uv; + xp_expand_memprotect = xp_expand_memprotect_uv; + xp_restrict_memprotect = xp_restrict_memprotect_uv; + + return xpSuccess; +} + +void +xp_exit_uv(void) +{ + WARN_ON(!is_uv_system()); +} diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h new file mode 100644 index 000000000..71db60edf --- /dev/null +++ b/drivers/misc/sgi-xp/xpc.h @@ -0,0 +1,731 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) structures and macros. + */ + +#ifndef _DRIVERS_MISC_SGIXP_XPC_H +#define _DRIVERS_MISC_SGIXP_XPC_H + +#include <linux/wait.h> +#include <linux/completion.h> +#include <linux/timer.h> +#include <linux/sched.h> +#include "xp.h" + +/* + * XPC Version numbers consist of a major and minor number. XPC can always + * talk to versions with same major #, and never talk to versions with a + * different major #. 
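To make the version rule concrete: the _XPC_VERSION() macros defined just below pack the major and minor numbers into the high and low nibbles of one byte, so a compatibility test reduces to comparing the high nibble. A small illustration; the helper itself is a sketch, not part of XPC.

/* _XPC_VERSION(3, 1) == 0x31; major 3, minor 1 */
static inline int
xpc_versions_compatible(u8 local, u8 remote)
{
	/* same major# -> can talk; differing minor#s are tolerated */
	return XPC_VERSION_MAJOR(local) == XPC_VERSION_MAJOR(remote);
}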
+ */
+#define _XPC_VERSION(_maj, _min) (((_maj) << 4) | ((_min) & 0xf))
+#define XPC_VERSION_MAJOR(_v) ((_v) >> 4)
+#define XPC_VERSION_MINOR(_v) ((_v) & 0xf)
+
+/* define the frequency of the heartbeat and how often it is checked */
+#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */
+#define XPC_HB_CHECK_DEFAULT_INTERVAL 20 /* check HB every x secs */
+
+/* define the process name of HB checker and the CPU it is pinned to */
+#define XPC_HB_CHECK_THREAD_NAME "xpc_hb"
+#define XPC_HB_CHECK_CPU 0
+
+/* define the process name of the discovery thread */
+#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery"
+
+/*
+ * the reserved page
+ *
+ * SAL reserves one page of memory per partition for XPC. Though a full page
+ * in length (16384 bytes), its starting address is not page aligned, but it
+ * is cacheline aligned. The reserved page consists of the following:
+ *
+ * reserved page header
+ *
+ * The first two 64-byte cachelines of the reserved page contain the
+ * header (struct xpc_rsvd_page). Before SAL initialization has completed,
+ * SAL has set up the following fields of the reserved page header:
+ * SAL_signature, SAL_version, SAL_partid, and SAL_nasids_size. The
+ * other fields are set up by XPC. (xpc_rsvd_page points to the local
+ * partition's reserved page.)
+ *
+ * part_nasids mask
+ * mach_nasids mask
+ *
+ * SAL also sets up two bitmaps (or masks), one that reflects the actual
+ * nasids in this partition (part_nasids), and the other that reflects
+ * the actual nasids in the entire machine (mach_nasids). We're only
+ * interested in the even numbered nasids (which contain the processors
+ * and/or memory), so we only need half as many bits to represent the
+ * nasids. When mapping nasid to bit in a mask (or bit to nasid) be sure
+ * to either divide or multiply by 2. The part_nasids mask is located
+ * starting at the first cacheline following the reserved page header. The
+ * mach_nasids mask follows right after the part_nasids mask. The size in
+ * bytes of each mask is reflected by the reserved page header field
+ * 'SAL_nasids_size'. (Local partition's mask pointers are xpc_part_nasids
+ * and xpc_mach_nasids.)
+ *
+ * Immediately following the mach_nasids mask are the XPC variables
+ * required by other partitions. First are those that are generic to all
+ * partitions (vars), followed on the next available cacheline by those
+ * which are partition specific (vars part). These are set up by XPC.
+ *
+ * Note: Until 'ts_jiffies' is set non-zero, the partition XPC code has not been
+ * initialized.
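Because only the even-numbered nasids are represented, converting between a nasid and its bit position always involves a factor of two, as the comment above stresses. A sketch of the helpers that mapping implies; the names are illustrative, and the driver open-codes this arithmetic where needed.

#include <linux/bitops.h>

/* an (even) nasid occupies bit nasid/2 in a nasid mask */
static inline int nasid_to_bit(int nasid)	{ return nasid / 2; }
static inline int bit_to_nasid(int bit)		{ return bit * 2; }

static inline int
nasid_present(unsigned long *nasid_mask, int nasid)
{
	return test_bit(nasid_to_bit(nasid), nasid_mask);
}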
+ */
+struct xpc_rsvd_page {
+ u64 SAL_signature; /* SAL: unique signature */
+ u64 SAL_version; /* SAL: version */
+ short SAL_partid; /* SAL: partition ID */
+ short max_npartitions; /* value of XPC_MAX_PARTITIONS */
+ u8 version;
+ u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */
+ unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
+ union {
+ struct {
+ unsigned long heartbeat_gpa; /* phys addr */
+ unsigned long activate_gru_mq_desc_gpa; /* phys addr */
+ } uv;
+ } sn;
+ u64 pad2[9]; /* align to last u64 in 2nd 64-byte cacheline */
+ u64 SAL_nasids_size; /* SAL: size of each nasid mask in bytes */
+};
+
+#define XPC_RP_VERSION _XPC_VERSION(3, 0) /* version 3.0 of the reserved page */
+
+/* the reserved page sizes and offsets */
+
+#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
+
+#define XPC_RP_PART_NASIDS(_rp) ((unsigned long *)((u8 *)(_rp) + \
+ XPC_RP_HEADER_SIZE))
+#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + \
+ xpc_nasid_mask_nlongs)
+
+
+/*
+ * The following structure describes the partition's heartbeat info which
+ * will be periodically read by other partitions to determine whether this
+ * XPC is still 'alive'.
+ */
+struct xpc_heartbeat_uv {
+ unsigned long value;
+ unsigned long offline; /* if 0, heartbeat should be changing */
+};
+
+/*
+ * Info pertinent to a GRU message queue using a watch list for irq generation.
+ */
+struct xpc_gru_mq_uv {
+ void *address; /* address of GRU message queue */
+ unsigned int order; /* size of GRU message queue as a power of 2 */
+ int irq; /* irq raised when message is received in mq */
+ int mmr_blade; /* blade where watchlist was allocated from */
+ unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */
+ unsigned long mmr_value; /* value of irq mmr located on mmr_blade */
+ int watchlist_num; /* number of watchlist allocated by BIOS */
+ void *gru_mq_desc; /* opaque structure used by the GRU driver */
+};
+
+/*
+ * The activate_mq is used to send/receive GRU messages that affect XPC's
+ * partition active state and channel state. This is uv only.
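A peer decides liveness from struct xpc_heartbeat_uv above by re-reading it periodically: the partition is considered healthy if the counter advanced since the last reading, or if it has deliberately marked itself offline. A sketch of that test against a cached prior reading; the real check lives in the uv heartbeat code.

static int
heartbeat_looks_alive(struct xpc_heartbeat_uv *cur,
		      struct xpc_heartbeat_uv *last)
{
	if (cur->offline)
		return 1;	/* deliberately offline, not dead */

	return cur->value != last->value;	/* counter must keep moving */
}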
+ */ +struct xpc_activate_mq_msghdr_uv { + unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */ + short partid; /* sender's partid */ + u8 act_state; /* sender's act_state at time msg sent */ + u8 type; /* message's type */ + unsigned long rp_ts_jiffies; /* timestamp of sender's rp setup by XPC */ +}; + +/* activate_mq defined message types */ +#define XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV 0 + +#define XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV 1 +#define XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV 2 + +#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV 3 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV 4 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV 5 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV 6 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV 7 + +#define XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV 8 +#define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 9 + +struct xpc_activate_mq_msg_uv { + struct xpc_activate_mq_msghdr_uv hdr; +}; + +struct xpc_activate_mq_msg_activate_req_uv { + struct xpc_activate_mq_msghdr_uv hdr; + unsigned long rp_gpa; + unsigned long heartbeat_gpa; + unsigned long activate_gru_mq_desc_gpa; +}; + +struct xpc_activate_mq_msg_deactivate_req_uv { + struct xpc_activate_mq_msghdr_uv hdr; + enum xp_retval reason; +}; + +struct xpc_activate_mq_msg_chctl_closerequest_uv { + struct xpc_activate_mq_msghdr_uv hdr; + short ch_number; + enum xp_retval reason; +}; + +struct xpc_activate_mq_msg_chctl_closereply_uv { + struct xpc_activate_mq_msghdr_uv hdr; + short ch_number; +}; + +struct xpc_activate_mq_msg_chctl_openrequest_uv { + struct xpc_activate_mq_msghdr_uv hdr; + short ch_number; + short entry_size; /* size of notify_mq's GRU messages */ + short local_nentries; /* ??? Is this needed? What is? */ +}; + +struct xpc_activate_mq_msg_chctl_openreply_uv { + struct xpc_activate_mq_msghdr_uv hdr; + short ch_number; + short remote_nentries; /* ??? Is this needed? What is? */ + short local_nentries; /* ??? Is this needed? What is? */ + unsigned long notify_gru_mq_desc_gpa; +}; + +struct xpc_activate_mq_msg_chctl_opencomplete_uv { + struct xpc_activate_mq_msghdr_uv hdr; + short ch_number; +}; + +/* + * Functions registered by add_timer() or called by kernel_thread() only + * allow for a single 64-bit argument. The following macros can be used to + * pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from + * the passed argument. + */ +#define XPC_PACK_ARGS(_arg1, _arg2) \ + ((((u64)_arg1) & 0xffffffff) | \ + ((((u64)_arg2) & 0xffffffff) << 32)) + +#define XPC_UNPACK_ARG1(_args) (((u64)_args) & 0xffffffff) +#define XPC_UNPACK_ARG2(_args) ((((u64)_args) >> 32) & 0xffffffff) + +/* + * Define a structure that contains arguments associated with opening and + * closing a channel. + */ +struct xpc_openclose_args { + u16 reason; /* reason why channel is closing */ + u16 entry_size; /* sizeof each message entry */ + u16 remote_nentries; /* #of message entries in remote msg queue */ + u16 local_nentries; /* #of message entries in local msg queue */ + unsigned long local_msgqueue_pa; /* phys addr of local message queue */ +}; + +#define XPC_OPENCLOSE_ARGS_SIZE \ + L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * \ + XPC_MAX_NCHANNELS) + + +/* + * Structures to define a fifo singly-linked list. 
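The list defined just below is intrusive: each entry embeds its own next pointer, and the head tracks first and last under a spinlock so producers and consumers can run concurrently. A sketch of the tail-insert such a head implies; the uv code supplies its own put/get helpers, so this one is purely illustrative.

static void
my_fifo_push(struct xpc_fifo_head_uv *head, struct xpc_fifo_entry_uv *entry)
{
	unsigned long irq_flags;

	entry->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = entry;
	else
		head->first = entry;	/* list was empty */
	head->last = entry;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}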
+ */ + +struct xpc_fifo_entry_uv { + struct xpc_fifo_entry_uv *next; +}; + +struct xpc_fifo_head_uv { + struct xpc_fifo_entry_uv *first; + struct xpc_fifo_entry_uv *last; + spinlock_t lock; + int n_entries; +}; + +/* + * The format of a uv XPC notify_mq GRU message is as follows: + * + * A user-defined message resides in the payload area. The max size of the + * payload is defined by the user via xpc_connect(). + * + * The size of a message (payload and header) sent via the GRU must be either 1 + * or 2 GRU_CACHE_LINE_BYTES in length. + */ + +struct xpc_notify_mq_msghdr_uv { + union { + unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */ + struct xpc_fifo_entry_uv next; /* FOR XPC INTERNAL USE ONLY */ + } u; + short partid; /* FOR XPC INTERNAL USE ONLY */ + u8 ch_number; /* FOR XPC INTERNAL USE ONLY */ + u8 size; /* FOR XPC INTERNAL USE ONLY */ + unsigned int msg_slot_number; /* FOR XPC INTERNAL USE ONLY */ +}; + +struct xpc_notify_mq_msg_uv { + struct xpc_notify_mq_msghdr_uv hdr; + unsigned long payload; +}; + +/* struct xpc_notify_sn2 type of notification */ + +#define XPC_N_CALL 0x01 /* notify function provided by user */ + +/* + * Define uv's version of the notify entry. It additionally is used to allocate + * a msg slot on the remote partition into which is copied a sent message. + */ +struct xpc_send_msg_slot_uv { + struct xpc_fifo_entry_uv next; + unsigned int msg_slot_number; + xpc_notify_func func; /* user's notify function */ + void *key; /* pointer to user's key */ +}; + +/* + * Define the structure that manages all the stuff required by a channel. In + * particular, they are used to manage the messages sent across the channel. + * + * This structure is private to a partition, and is NOT shared across the + * partition boundary. + * + * There is an array of these structures for each remote partition. It is + * allocated at the time a partition becomes active. The array contains one + * of these structures for each potential channel connection to that partition. 
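As the notify_mq comment above says, a GRU message must occupy exactly one or two GRU cachelines, header included, so the entry size negotiated at connect time is the payload size rounded up accordingly. A sketch of that rounding; it is illustrative only, as the real computation is the XPC_MSG_SIZE() macro in xp.h.

static inline int
my_entry_size(int payload_size)
{
	int raw = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;

	/* messages are exactly 1 or 2 GRU cachelines long */
	return raw <= GRU_CACHE_LINE_BYTES ? GRU_CACHE_LINE_BYTES
					   : 2 * GRU_CACHE_LINE_BYTES;
}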
+ */ + +struct xpc_channel_uv { + void *cached_notify_gru_mq_desc; /* remote partition's notify mq's */ + /* gru mq descriptor */ + + struct xpc_send_msg_slot_uv *send_msg_slots; + void *recv_msg_slots; /* each slot will hold a xpc_notify_mq_msg_uv */ + /* structure plus the user's payload */ + + struct xpc_fifo_head_uv msg_slot_free_list; + struct xpc_fifo_head_uv recv_msg_list; /* deliverable payloads */ +}; + +struct xpc_channel { + short partid; /* ID of remote partition connected */ + spinlock_t lock; /* lock for updating this structure */ + unsigned int flags; /* general flags */ + + enum xp_retval reason; /* reason why channel is disconnect'g */ + int reason_line; /* line# disconnect initiated from */ + + u16 number; /* channel # */ + + u16 entry_size; /* sizeof each msg entry */ + u16 local_nentries; /* #of msg entries in local msg queue */ + u16 remote_nentries; /* #of msg entries in remote msg queue */ + + atomic_t references; /* #of external references to queues */ + + atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */ + wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */ + + u8 delayed_chctl_flags; /* chctl flags received, but delayed */ + /* action until channel disconnected */ + + atomic_t n_to_notify; /* #of msg senders to notify */ + + xpc_channel_func func; /* user's channel function */ + void *key; /* pointer to user's key */ + + struct completion wdisconnect_wait; /* wait for channel disconnect */ + + /* kthread management related fields */ + + atomic_t kthreads_assigned; /* #of kthreads assigned to channel */ + u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */ + atomic_t kthreads_idle; /* #of kthreads idle waiting for work */ + u32 kthreads_idle_limit; /* limit on #of kthreads idle */ + atomic_t kthreads_active; /* #of kthreads actively working */ + + wait_queue_head_t idle_wq; /* idle kthread wait queue */ + + union { + struct xpc_channel_uv uv; + } sn; + +} ____cacheline_aligned; + +/* struct xpc_channel flags */ + +#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */ + +#define XPC_C_ROPENCOMPLETE 0x00000002 /* remote open channel complete */ +#define XPC_C_OPENCOMPLETE 0x00000004 /* local open channel complete */ +#define XPC_C_ROPENREPLY 0x00000008 /* remote open channel reply */ +#define XPC_C_OPENREPLY 0x00000010 /* local open channel reply */ +#define XPC_C_ROPENREQUEST 0x00000020 /* remote open channel request */ +#define XPC_C_OPENREQUEST 0x00000040 /* local open channel request */ + +#define XPC_C_SETUP 0x00000080 /* channel's msgqueues are alloc'd */ +#define XPC_C_CONNECTEDCALLOUT 0x00000100 /* connected callout initiated */ +#define XPC_C_CONNECTEDCALLOUT_MADE \ + 0x00000200 /* connected callout completed */ +#define XPC_C_CONNECTED 0x00000400 /* local channel is connected */ +#define XPC_C_CONNECTING 0x00000800 /* channel is being connected */ + +#define XPC_C_RCLOSEREPLY 0x00001000 /* remote close channel reply */ +#define XPC_C_CLOSEREPLY 0x00002000 /* local close channel reply */ +#define XPC_C_RCLOSEREQUEST 0x00004000 /* remote close channel request */ +#define XPC_C_CLOSEREQUEST 0x00008000 /* local close channel request */ + +#define XPC_C_DISCONNECTED 0x00010000 /* channel is disconnected */ +#define XPC_C_DISCONNECTING 0x00020000 /* channel is being disconnected */ +#define XPC_C_DISCONNECTINGCALLOUT \ + 0x00040000 /* disconnecting callout initiated */ +#define XPC_C_DISCONNECTINGCALLOUT_MADE \ + 0x00080000 /* disconnecting callout completed */ +#define XPC_C_WDISCONNECT 0x00100000 /* 
waiting for channel disconnect */
+
+/*
+ * The channel control flags (chctl) union consists of a 64-bit variable which
+ * is divided up into eight bytes, ordered from right to left. Byte zero
+ * pertains to channel 0, byte one to channel 1, and so on. Each channel's byte
+ * can have one or more of the chctl flags set in it.
+ */
+
+union xpc_channel_ctl_flags {
+ u64 all_flags;
+ u8 flags[XPC_MAX_NCHANNELS];
+};
+
+/* chctl flags */
+#define XPC_CHCTL_CLOSEREQUEST 0x01
+#define XPC_CHCTL_CLOSEREPLY 0x02
+#define XPC_CHCTL_OPENREQUEST 0x04
+#define XPC_CHCTL_OPENREPLY 0x08
+#define XPC_CHCTL_OPENCOMPLETE 0x10
+#define XPC_CHCTL_MSGREQUEST 0x20
+
+#define XPC_OPENCLOSE_CHCTL_FLAGS \
+ (XPC_CHCTL_CLOSEREQUEST | XPC_CHCTL_CLOSEREPLY | \
+ XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY | \
+ XPC_CHCTL_OPENCOMPLETE)
+#define XPC_MSG_CHCTL_FLAGS XPC_CHCTL_MSGREQUEST
+
+static inline int
+xpc_any_openclose_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
+{
+ int ch_number;
+
+ for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) {
+ if (chctl->flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS)
+ return 1;
+ }
+ return 0;
+}
+
+static inline int
+xpc_any_msg_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
+{
+ int ch_number;
+
+ for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) {
+ if (chctl->flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
+ return 1;
+ }
+ return 0;
+}
+
+struct xpc_partition_uv {
+ unsigned long heartbeat_gpa; /* phys addr of partition's heartbeat */
+ struct xpc_heartbeat_uv cached_heartbeat; /* cached copy of */
+ /* partition's heartbeat */
+ unsigned long activate_gru_mq_desc_gpa; /* phys addr of partition's */
+ /* activate mq's gru mq */
+ /* descriptor */
+ void *cached_activate_gru_mq_desc; /* cached copy of partition's */
+ /* activate mq's gru mq descriptor */
+ struct mutex cached_activate_gru_mq_desc_mutex;
+ spinlock_t flags_lock; /* protect updating of flags */
+ unsigned int flags; /* general flags */
+ u8 remote_act_state; /* remote partition's act_state */
+ u8 act_state_req; /* act_state request from remote partition */
+ enum xp_retval reason; /* reason for deactivate act_state request */
+};
+
+/* struct xpc_partition_uv flags */
+
+#define XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV 0x00000001
+#define XPC_P_ENGAGED_UV 0x00000002
+
+/* struct xpc_partition_uv act_state change requests */
+
+#define XPC_P_ASR_ACTIVATE_UV 0x01
+#define XPC_P_ASR_REACTIVATE_UV 0x02
+#define XPC_P_ASR_DEACTIVATE_UV 0x03
+
+struct xpc_partition {
+
+ /* XPC HB infrastructure */
+
+ u8 remote_rp_version; /* version# of partition's rsvd pg */
+ unsigned long remote_rp_ts_jiffies; /* timestamp when rsvd pg setup */
+ unsigned long remote_rp_pa; /* phys addr of partition's rsvd pg */
+ u64 last_heartbeat; /* HB at last read */
+ u32 activate_IRQ_rcvd; /* IRQs since activation */
+ spinlock_t act_lock; /* protect updating of act_state */
+ u8 act_state; /* from XPC HB viewpoint */
+ enum xp_retval reason; /* reason partition is deactivating */
+ int reason_line; /* line# deactivation initiated from */
+
+ unsigned long disengage_timeout; /* timeout in jiffies */
+ struct timer_list disengage_timer;
+
+ /* XPC infrastructure referencing and teardown control */
+
+ u8 setup_state; /* infrastructure setup state */
+ wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
+ atomic_t references; /* #of references to infrastructure */
+
+ u8 nchannels; /* #of defined channels supported */
+ atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
+ atomic_t nchannels_engaged; /* #of channels engaged with remote part */
+ struct xpc_channel *channels; /* array of channel structures */
+
+ /* fields used for managing channel availability and activity */
+
+ union xpc_channel_ctl_flags chctl; /* chctl flags yet to be processed */
+ spinlock_t chctl_lock; /* chctl flags lock */
+
+ void *remote_openclose_args_base; /* base address of kmalloc'd space */
+ struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
+ /* args */
+
+ /* channel manager related fields */
+
+ atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */
+ wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
+
+ union {
+ struct xpc_partition_uv uv;
+ } sn;
+
+} ____cacheline_aligned;
+
+struct xpc_arch_operations {
+ int (*setup_partitions) (void);
+ void (*teardown_partitions) (void);
+ void (*process_activate_IRQ_rcvd) (void);
+ enum xp_retval (*get_partition_rsvd_page_pa)
+ (void *, u64 *, unsigned long *, size_t *);
+ int (*setup_rsvd_page) (struct xpc_rsvd_page *);
+
+ void (*allow_hb) (short);
+ void (*disallow_hb) (short);
+ void (*disallow_all_hbs) (void);
+ void (*increment_heartbeat) (void);
+ void (*offline_heartbeat) (void);
+ void (*online_heartbeat) (void);
+ void (*heartbeat_init) (void);
+ void (*heartbeat_exit) (void);
+ enum xp_retval (*get_remote_heartbeat) (struct xpc_partition *);
+
+ void (*request_partition_activation) (struct xpc_rsvd_page *,
+ unsigned long, int);
+ void (*request_partition_reactivation) (struct xpc_partition *);
+ void (*request_partition_deactivation) (struct xpc_partition *);
+ void (*cancel_partition_deactivation_request) (struct xpc_partition *);
+ enum xp_retval (*setup_ch_structures) (struct xpc_partition *);
+ void (*teardown_ch_structures) (struct xpc_partition *);
+
+ enum xp_retval (*make_first_contact) (struct xpc_partition *);
+
+ u64 (*get_chctl_all_flags) (struct xpc_partition *);
+ void (*send_chctl_closerequest) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_closereply) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_openrequest) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_openreply) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_opencomplete) (struct xpc_channel *, unsigned long *);
+ void (*process_msg_chctl_flags) (struct xpc_partition *, int);
+
+ enum xp_retval (*save_remote_msgqueue_pa) (struct xpc_channel *,
+ unsigned long);
+
+ enum xp_retval (*setup_msg_structures) (struct xpc_channel *);
+ void (*teardown_msg_structures) (struct xpc_channel *);
+
+ void (*indicate_partition_engaged) (struct xpc_partition *);
+ void (*indicate_partition_disengaged) (struct xpc_partition *);
+ void (*assume_partition_disengaged) (short);
+ int (*partition_engaged) (short);
+ int (*any_partition_engaged) (void);
+
+ int (*n_of_deliverable_payloads) (struct xpc_channel *);
+ enum xp_retval (*send_payload) (struct xpc_channel *, u32, void *,
+ u16, u8, xpc_notify_func, void *);
+ void *(*get_deliverable_payload) (struct xpc_channel *);
+ void (*received_payload) (struct xpc_channel *, void *);
+ void (*notify_senders_of_disconnect) (struct xpc_channel *);
+};
+
+/* struct xpc_partition act_state values (for XPC HB) */
+
+#define XPC_P_AS_INACTIVE 0x00 /* partition is not active */
+#define XPC_P_AS_ACTIVATION_REQ 0x01 /* created thread to activate */
+#define XPC_P_AS_ACTIVATING 0x02 /* activation thread started */
+#define XPC_P_AS_ACTIVE 0x03 /* xpc_partition_up() was called */
+#define XPC_P_AS_DEACTIVATING 0x04 /*
partition deactivation initiated */ + +#define XPC_DEACTIVATE_PARTITION(_p, _reason) \ + xpc_deactivate_partition(__LINE__, (_p), (_reason)) + +/* struct xpc_partition setup_state values */ + +#define XPC_P_SS_UNSET 0x00 /* infrastructure was never setup */ +#define XPC_P_SS_SETUP 0x01 /* infrastructure is setup */ +#define XPC_P_SS_WTEARDOWN 0x02 /* waiting to teardown infrastructure */ +#define XPC_P_SS_TORNDOWN 0x03 /* infrastructure is torndown */ + +/* number of seconds to wait for other partitions to disengage */ +#define XPC_DISENGAGE_DEFAULT_TIMELIMIT 90 + +/* interval in seconds to print 'waiting deactivation' messages */ +#define XPC_DEACTIVATE_PRINTMSG_INTERVAL 10 + +#define XPC_PARTID(_p) ((short)((_p) - &xpc_partitions[0])) + +/* found in xp_main.c */ +extern struct xpc_registration xpc_registrations[]; + +/* found in xpc_main.c */ +extern struct device *xpc_part; +extern struct device *xpc_chan; +extern struct xpc_arch_operations xpc_arch_ops; +extern int xpc_disengage_timelimit; +extern int xpc_disengage_timedout; +extern int xpc_activate_IRQ_rcvd; +extern spinlock_t xpc_activate_IRQ_rcvd_lock; +extern wait_queue_head_t xpc_activate_IRQ_wq; +extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **); +extern void xpc_activate_partition(struct xpc_partition *); +extern void xpc_activate_kthreads(struct xpc_channel *, int); +extern void xpc_create_kthreads(struct xpc_channel *, int, int); +extern void xpc_disconnect_wait(int); + +/* found in xpc_uv.c */ +extern int xpc_init_uv(void); +extern void xpc_exit_uv(void); + +/* found in xpc_partition.c */ +extern int xpc_exiting; +extern int xpc_nasid_mask_nlongs; +extern struct xpc_rsvd_page *xpc_rsvd_page; +extern unsigned long *xpc_mach_nasids; +extern struct xpc_partition *xpc_partitions; +extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **); +extern int xpc_setup_rsvd_page(void); +extern void xpc_teardown_rsvd_page(void); +extern int xpc_identify_activate_IRQ_sender(void); +extern int xpc_partition_disengaged(struct xpc_partition *); +extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *); +extern void xpc_mark_partition_inactive(struct xpc_partition *); +extern void xpc_discovery(void); +extern enum xp_retval xpc_get_remote_rp(int, unsigned long *, + struct xpc_rsvd_page *, + unsigned long *); +extern void xpc_deactivate_partition(const int, struct xpc_partition *, + enum xp_retval); +extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *); + +/* found in xpc_channel.c */ +extern void xpc_initiate_connect(int); +extern void xpc_initiate_disconnect(int); +extern enum xp_retval xpc_allocate_msg_wait(struct xpc_channel *); +extern enum xp_retval xpc_initiate_send(short, int, u32, void *, u16); +extern enum xp_retval xpc_initiate_send_notify(short, int, u32, void *, u16, + xpc_notify_func, void *); +extern void xpc_initiate_received(short, int, void *); +extern void xpc_process_sent_chctl_flags(struct xpc_partition *); +extern void xpc_connected_callout(struct xpc_channel *); +extern void xpc_deliver_payload(struct xpc_channel *); +extern void xpc_disconnect_channel(const int, struct xpc_channel *, + enum xp_retval, unsigned long *); +extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); +extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval); + +static inline void +xpc_wakeup_channel_mgr(struct xpc_partition *part) +{ + if (atomic_inc_return(&part->channel_mgr_requests) == 1) + wake_up(&part->channel_mgr_wq); +} + +/* + * These next two 
inlines are used to keep us from tearing down a channel's + * msg queues while a thread may be referencing them. + */ +static inline void +xpc_msgqueue_ref(struct xpc_channel *ch) +{ + atomic_inc(&ch->references); +} + +static inline void +xpc_msgqueue_deref(struct xpc_channel *ch) +{ + s32 refs = atomic_dec_return(&ch->references); + + DBUG_ON(refs < 0); + if (refs == 0) + xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]); +} + +#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \ + xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs) + +/* + * These two inlines are used to keep us from tearing down a partition's + * setup infrastructure while a thread may be referencing it. + */ +static inline void +xpc_part_deref(struct xpc_partition *part) +{ + s32 refs = atomic_dec_return(&part->references); + + DBUG_ON(refs < 0); + if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN) + wake_up(&part->teardown_wq); +} + +static inline int +xpc_part_ref(struct xpc_partition *part) +{ + int setup; + + atomic_inc(&part->references); + setup = (part->setup_state == XPC_P_SS_SETUP); + if (!setup) + xpc_part_deref(part); + + return setup; +} + +/* + * The following macro is to be used for the setting of the reason and + * reason_line fields in both the struct xpc_channel and struct xpc_partition + * structures. + */ +#define XPC_SET_REASON(_p, _reason, _line) \ + { \ + (_p)->reason = _reason; \ + (_p)->reason_line = _line; \ + } + +#endif /* _DRIVERS_MISC_SGIXP_XPC_H */ diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c new file mode 100644 index 000000000..8e6607fc8 --- /dev/null +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -0,0 +1,1011 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) channel support. + * + * This is the part of XPC that manages the channels and + * sends/receives messages across them to/from other partitions. + * + */ + +#include <linux/device.h> +#include "xpc.h" + +/* + * Process a connect message from a remote partition. + * + * Note: xpc_process_connect() is expecting to be called with the + * spin_lock_irqsave held and will leave it locked upon return. 
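The ref/deref pairs above are conditional guards rather than plain counters: xpc_part_ref() only grants access while the infrastructure is in the SETUP state, and the final deref may wake a thread waiting in teardown. The canonical shape of a guarded access, as a sketch mirroring how the channel code uses it:

static void
my_guarded_access(struct xpc_partition *part)
{
	if (!xpc_part_ref(part))
		return;		/* infrastructure not set up; bail out */

	/* ... part->channels[] may be used safely here ... */

	xpc_part_deref(part);	/* may wake a waiting teardown thread */
}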
+ */ +static void +xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) +{ + enum xp_retval ret; + + lockdep_assert_held(&ch->lock); + + if (!(ch->flags & XPC_C_OPENREQUEST) || + !(ch->flags & XPC_C_ROPENREQUEST)) { + /* nothing more to do for now */ + return; + } + DBUG_ON(!(ch->flags & XPC_C_CONNECTING)); + + if (!(ch->flags & XPC_C_SETUP)) { + spin_unlock_irqrestore(&ch->lock, *irq_flags); + ret = xpc_arch_ops.setup_msg_structures(ch); + spin_lock_irqsave(&ch->lock, *irq_flags); + + if (ret != xpSuccess) + XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); + else + ch->flags |= XPC_C_SETUP; + + if (ch->flags & XPC_C_DISCONNECTING) + return; + } + + if (!(ch->flags & XPC_C_OPENREPLY)) { + ch->flags |= XPC_C_OPENREPLY; + xpc_arch_ops.send_chctl_openreply(ch, irq_flags); + } + + if (!(ch->flags & XPC_C_ROPENREPLY)) + return; + + if (!(ch->flags & XPC_C_OPENCOMPLETE)) { + ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED); + xpc_arch_ops.send_chctl_opencomplete(ch, irq_flags); + } + + if (!(ch->flags & XPC_C_ROPENCOMPLETE)) + return; + + dev_info(xpc_chan, "channel %d to partition %d connected\n", + ch->number, ch->partid); + + ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ +} + +/* + * spin_lock_irqsave() is expected to be held on entry. + */ +static void +xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED); + + lockdep_assert_held(&ch->lock); + + if (!(ch->flags & XPC_C_DISCONNECTING)) + return; + + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + + /* make sure all activity has settled down first */ + + if (atomic_read(&ch->kthreads_assigned) > 0 || + atomic_read(&ch->references) > 0) { + return; + } + DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && + !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE)); + + if (part->act_state == XPC_P_AS_DEACTIVATING) { + /* can't proceed until the other side disengages from us */ + if (xpc_arch_ops.partition_engaged(ch->partid)) + return; + + } else { + + /* as long as the other side is up do the full protocol */ + + if (!(ch->flags & XPC_C_RCLOSEREQUEST)) + return; + + if (!(ch->flags & XPC_C_CLOSEREPLY)) { + ch->flags |= XPC_C_CLOSEREPLY; + xpc_arch_ops.send_chctl_closereply(ch, irq_flags); + } + + if (!(ch->flags & XPC_C_RCLOSEREPLY)) + return; + } + + /* wake those waiting for notify completion */ + if (atomic_read(&ch->n_to_notify) > 0) { + /* we do callout while holding ch->lock, callout can't block */ + xpc_arch_ops.notify_senders_of_disconnect(ch); + } + + /* both sides are disconnected now */ + + if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) { + spin_unlock_irqrestore(&ch->lock, *irq_flags); + xpc_disconnect_callout(ch, xpDisconnected); + spin_lock_irqsave(&ch->lock, *irq_flags); + } + + DBUG_ON(atomic_read(&ch->n_to_notify) != 0); + + /* it's now safe to free the channel's message queues */ + xpc_arch_ops.teardown_msg_structures(ch); + + ch->func = NULL; + ch->key = NULL; + ch->entry_size = 0; + ch->local_nentries = 0; + ch->remote_nentries = 0; + ch->kthreads_assigned_limit = 0; + ch->kthreads_idle_limit = 0; + + /* + * Mark the channel disconnected and clear all other flags, including + * XPC_C_SETUP (because of call to + * xpc_arch_ops.teardown_msg_structures()) but not including + * XPC_C_WDISCONNECT (if it was set). 
+ */ + ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT)); + + atomic_dec(&part->nchannels_active); + + if (channel_was_connected) { + dev_info(xpc_chan, "channel %d to partition %d disconnected, " + "reason=%d\n", ch->number, ch->partid, ch->reason); + } + + if (ch->flags & XPC_C_WDISCONNECT) { + /* we won't lose the CPU since we're holding ch->lock */ + complete(&ch->wdisconnect_wait); + } else if (ch->delayed_chctl_flags) { + if (part->act_state != XPC_P_AS_DEACTIVATING) { + /* time to take action on any delayed chctl flags */ + spin_lock(&part->chctl_lock); + part->chctl.flags[ch->number] |= + ch->delayed_chctl_flags; + spin_unlock(&part->chctl_lock); + } + ch->delayed_chctl_flags = 0; + } +} + +/* + * Process a change in the channel's remote connection state. + */ +static void +xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number, + u8 chctl_flags) +{ + unsigned long irq_flags; + struct xpc_openclose_args *args = + &part->remote_openclose_args[ch_number]; + struct xpc_channel *ch = &part->channels[ch_number]; + enum xp_retval reason; + enum xp_retval ret; + int create_kthread = 0; + + spin_lock_irqsave(&ch->lock, irq_flags); + +again: + + if ((ch->flags & XPC_C_DISCONNECTED) && + (ch->flags & XPC_C_WDISCONNECT)) { + /* + * Delay processing chctl flags until thread waiting disconnect + * has had a chance to see that the channel is disconnected. + */ + ch->delayed_chctl_flags |= chctl_flags; + goto out; + } + + if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) { + + dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received " + "from partid=%d, channel=%d\n", args->reason, + ch->partid, ch->number); + + /* + * If RCLOSEREQUEST is set, we're probably waiting for + * RCLOSEREPLY. We should find it and a ROPENREQUEST packed + * with this RCLOSEREQUEST in the chctl_flags. 
+ */ + + if (ch->flags & XPC_C_RCLOSEREQUEST) { + DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING)); + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY)); + DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY); + + DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY)); + chctl_flags &= ~XPC_CHCTL_CLOSEREPLY; + ch->flags |= XPC_C_RCLOSEREPLY; + + /* both sides have finished disconnecting */ + xpc_process_disconnect(ch, &irq_flags); + DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); + goto again; + } + + if (ch->flags & XPC_C_DISCONNECTED) { + if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) { + if (part->chctl.flags[ch_number] & + XPC_CHCTL_OPENREQUEST) { + + DBUG_ON(ch->delayed_chctl_flags != 0); + spin_lock(&part->chctl_lock); + part->chctl.flags[ch_number] |= + XPC_CHCTL_CLOSEREQUEST; + spin_unlock(&part->chctl_lock); + } + goto out; + } + + XPC_SET_REASON(ch, 0, 0); + ch->flags &= ~XPC_C_DISCONNECTED; + + atomic_inc(&part->nchannels_active); + ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST); + } + + chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY | + XPC_CHCTL_OPENCOMPLETE); + + /* + * The meaningful CLOSEREQUEST connection state fields are: + * reason = reason connection is to be closed + */ + + ch->flags |= XPC_C_RCLOSEREQUEST; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + reason = args->reason; + if (reason <= xpSuccess || reason > xpUnknownReason) + reason = xpUnknownReason; + else if (reason == xpUnregistering) + reason = xpOtherUnregistering; + + XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); + + DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY); + goto out; + } + + xpc_process_disconnect(ch, &irq_flags); + } + + if (chctl_flags & XPC_CHCTL_CLOSEREPLY) { + + dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid=" + "%d, channel=%d\n", ch->partid, ch->number); + + if (ch->flags & XPC_C_DISCONNECTED) { + DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING); + goto out; + } + + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + + if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { + if (part->chctl.flags[ch_number] & + XPC_CHCTL_CLOSEREQUEST) { + + DBUG_ON(ch->delayed_chctl_flags != 0); + spin_lock(&part->chctl_lock); + part->chctl.flags[ch_number] |= + XPC_CHCTL_CLOSEREPLY; + spin_unlock(&part->chctl_lock); + } + goto out; + } + + ch->flags |= XPC_C_RCLOSEREPLY; + + if (ch->flags & XPC_C_CLOSEREPLY) { + /* both sides have finished disconnecting */ + xpc_process_disconnect(ch, &irq_flags); + } + } + + if (chctl_flags & XPC_CHCTL_OPENREQUEST) { + + dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, " + "local_nentries=%d) received from partid=%d, " + "channel=%d\n", args->entry_size, args->local_nentries, + ch->partid, ch->number); + + if (part->act_state == XPC_P_AS_DEACTIVATING || + (ch->flags & XPC_C_ROPENREQUEST)) { + goto out; + } + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) { + ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST; + goto out; + } + DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED | + XPC_C_OPENREQUEST))); + DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | + XPC_C_OPENREPLY | XPC_C_CONNECTED)); + + /* + * The meaningful OPENREQUEST connection state fields are: + * entry_size = size of channel's messages in bytes + * local_nentries = remote partition's local_nentries + */ + if (args->entry_size == 0 || args->local_nentries == 0) { + /* assume OPENREQUEST was delayed by mistake */ + goto out; + } + + ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING); + ch->remote_nentries = args->local_nentries; + + if (ch->flags & XPC_C_OPENREQUEST) 
{ + if (args->entry_size != ch->entry_size) { + XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes, + &irq_flags); + goto out; + } + } else { + ch->entry_size = args->entry_size; + + XPC_SET_REASON(ch, 0, 0); + ch->flags &= ~XPC_C_DISCONNECTED; + + atomic_inc(&part->nchannels_active); + } + + xpc_process_connect(ch, &irq_flags); + } + + if (chctl_flags & XPC_CHCTL_OPENREPLY) { + + dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa=" + "0x%lx, local_nentries=%d, remote_nentries=%d) " + "received from partid=%d, channel=%d\n", + args->local_msgqueue_pa, args->local_nentries, + args->remote_nentries, ch->partid, ch->number); + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) + goto out; + + if (!(ch->flags & XPC_C_OPENREQUEST)) { + XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError, + &irq_flags); + goto out; + } + + DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); + DBUG_ON(ch->flags & XPC_C_CONNECTED); + + /* + * The meaningful OPENREPLY connection state fields are: + * local_msgqueue_pa = physical address of remote + * partition's local_msgqueue + * local_nentries = remote partition's local_nentries + * remote_nentries = remote partition's remote_nentries + */ + DBUG_ON(args->local_msgqueue_pa == 0); + DBUG_ON(args->local_nentries == 0); + DBUG_ON(args->remote_nentries == 0); + + ret = xpc_arch_ops.save_remote_msgqueue_pa(ch, + args->local_msgqueue_pa); + if (ret != xpSuccess) { + XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags); + goto out; + } + ch->flags |= XPC_C_ROPENREPLY; + + if (args->local_nentries < ch->remote_nentries) { + dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new " + "remote_nentries=%d, old remote_nentries=%d, " + "partid=%d, channel=%d\n", + args->local_nentries, ch->remote_nentries, + ch->partid, ch->number); + + ch->remote_nentries = args->local_nentries; + } + if (args->remote_nentries < ch->local_nentries) { + dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new " + "local_nentries=%d, old local_nentries=%d, " + "partid=%d, channel=%d\n", + args->remote_nentries, ch->local_nentries, + ch->partid, ch->number); + + ch->local_nentries = args->remote_nentries; + } + + xpc_process_connect(ch, &irq_flags); + } + + if (chctl_flags & XPC_CHCTL_OPENCOMPLETE) { + + dev_dbg(xpc_chan, "XPC_CHCTL_OPENCOMPLETE received from " + "partid=%d, channel=%d\n", ch->partid, ch->number); + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) + goto out; + + if (!(ch->flags & XPC_C_OPENREQUEST) || + !(ch->flags & XPC_C_OPENREPLY)) { + XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError, + &irq_flags); + goto out; + } + + DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); + DBUG_ON(!(ch->flags & XPC_C_ROPENREPLY)); + DBUG_ON(!(ch->flags & XPC_C_CONNECTED)); + + ch->flags |= XPC_C_ROPENCOMPLETE; + + xpc_process_connect(ch, &irq_flags); + create_kthread = 1; + } + +out: + spin_unlock_irqrestore(&ch->lock, irq_flags); + + if (create_kthread) + xpc_create_kthreads(ch, 1, 0); +} + +/* + * Attempt to establish a channel connection to a remote partition. 
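xpc_connect_channel() below runs from the channel manager, so it takes the registration mutex with mutex_trylock() rather than blocking on a busy registerer, reporting contention as xpRetry so the attempt can simply be made again later. The shape of that idiom, as a sketch:

static enum xp_retval
my_try_configure(struct xpc_registration *reg)
{
	if (mutex_trylock(&reg->mutex) == 0)
		return xpRetry;	/* registerer busy; caller retries later */

	/* ... registration fields are stable while the mutex is held ... */

	mutex_unlock(&reg->mutex);
	return xpSuccess;
}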
+ */
+static enum xp_retval
+xpc_connect_channel(struct xpc_channel *ch)
+{
+ unsigned long irq_flags;
+ struct xpc_registration *registration = &xpc_registrations[ch->number];
+
+ if (mutex_trylock(&registration->mutex) == 0)
+ return xpRetry;
+
+ if (!XPC_CHANNEL_REGISTERED(ch->number)) {
+ mutex_unlock(&registration->mutex);
+ return xpUnregistered;
+ }
+
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
+ DBUG_ON(ch->flags & XPC_C_CONNECTED);
+ DBUG_ON(ch->flags & XPC_C_OPENREQUEST);
+
+ if (ch->flags & XPC_C_DISCONNECTING) {
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ mutex_unlock(&registration->mutex);
+ return ch->reason;
+ }
+
+ /* add info from the channel connect registration to the channel */
+
+ ch->kthreads_assigned_limit = registration->assigned_limit;
+ ch->kthreads_idle_limit = registration->idle_limit;
+ DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
+ DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
+ DBUG_ON(atomic_read(&ch->kthreads_active) != 0);
+
+ ch->func = registration->func;
+ DBUG_ON(registration->func == NULL);
+ ch->key = registration->key;
+
+ ch->local_nentries = registration->nentries;
+
+ if (ch->flags & XPC_C_ROPENREQUEST) {
+ if (registration->entry_size != ch->entry_size) {
+ /* the local and remote sides aren't the same */
+
+ /*
+ * Because XPC_DISCONNECT_CHANNEL() can block we're
+ * forced to up the registration sema before we unlock
+ * the channel lock. But that's okay here because we're
+ * done with the part that required the registration
+ * sema. XPC_DISCONNECT_CHANNEL() requires that the
+ * channel lock be locked and will unlock and relock
+ * the channel lock as needed.
+ */
+ mutex_unlock(&registration->mutex);
+ XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
+ &irq_flags);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ return xpUnequalMsgSizes;
+ }
+ } else {
+ ch->entry_size = registration->entry_size;
+
+ XPC_SET_REASON(ch, 0, 0);
+ ch->flags &= ~XPC_C_DISCONNECTED;
+
+ atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
+ }
+
+ mutex_unlock(&registration->mutex);
+
+ /* initiate the connection */
+
+ ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
+ xpc_arch_ops.send_chctl_openrequest(ch, &irq_flags);
+
+ xpc_process_connect(ch, &irq_flags);
+
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ return xpSuccess;
+}
+
+void
+xpc_process_sent_chctl_flags(struct xpc_partition *part)
+{
+ unsigned long irq_flags;
+ union xpc_channel_ctl_flags chctl;
+ struct xpc_channel *ch;
+ int ch_number;
+ u32 ch_flags;
+
+ chctl.all_flags = xpc_arch_ops.get_chctl_all_flags(part);
+
+ /*
+ * Initiate channel connections for registered channels.
+ *
+ * For each connected channel that has pending messages activate idle
+ * kthreads and/or create new kthreads as needed.
+ */
+
+ for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
+ ch = &part->channels[ch_number];
+
+ /*
+ * Process any open or close related chctl flags, and then deal
+ * with connecting or disconnecting the channel as required.
+ */
+
+ if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
+ xpc_process_openclose_chctl_flags(part, ch_number,
+ chctl.flags[ch_number]);
+ }
+
+ ch_flags = ch->flags; /* need an atomic snapshot of flags */
+
+ if (ch_flags & XPC_C_DISCONNECTING) {
+ spin_lock_irqsave(&ch->lock, irq_flags);
+ xpc_process_disconnect(ch, &irq_flags);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ continue;
+ }
+
+ if (part->act_state == XPC_P_AS_DEACTIVATING)
+ continue;
+
+ if (!(ch_flags & XPC_C_CONNECTED)) {
+ if (!(ch_flags & XPC_C_OPENREQUEST)) {
+ DBUG_ON(ch_flags & XPC_C_SETUP);
+ (void)xpc_connect_channel(ch);
+ }
+ continue;
+ }
+
+ /*
+ * Process any message related chctl flags, this may involve
+ * the activation of kthreads to deliver any pending messages
+ * sent from the other partition.
+ */
+
+ if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
+ xpc_arch_ops.process_msg_chctl_flags(part, ch_number);
+ }
+}
+
+/*
+ * XPC's heartbeat code calls this function to inform XPC that a partition is
+ * going down. XPC responds by tearing down the XPartition Communication
+ * infrastructure used for the just downed partition.
+ *
+ * XPC's heartbeat code will never call this function and xpc_partition_up()
+ * at the same time. Nor will it ever make multiple calls to either function
+ * at the same time.
+ */
+void
+xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
+{
+ unsigned long irq_flags;
+ int ch_number;
+ struct xpc_channel *ch;
+
+ dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
+ XPC_PARTID(part), reason);
+
+ if (!xpc_part_ref(part)) {
+ /* infrastructure for this partition isn't currently set up */
+ return;
+ }
+
+ /* disconnect channels associated with the partition going down */
+
+ for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
+ ch = &part->channels[ch_number];
+
+ xpc_msgqueue_ref(ch);
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
+ XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
+
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
+ xpc_msgqueue_deref(ch);
+ }
+
+ xpc_wakeup_channel_mgr(part);
+
+ xpc_part_deref(part);
+}
+
+/*
+ * Called by XP at the time of channel connection registration to cause
+ * XPC to establish connections to all currently active partitions.
+ */
+void
+xpc_initiate_connect(int ch_number)
+{
+ short partid;
+ struct xpc_partition *part;
+
+ DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
+
+ for (partid = 0; partid < xp_max_npartitions; partid++) {
+ part = &xpc_partitions[partid];
+
+ if (xpc_part_ref(part)) {
+ /*
+ * Initiate the establishment of a connection on the
+ * newly registered channel to the remote partition.
+ */
+ xpc_wakeup_channel_mgr(part);
+ xpc_part_deref(part);
+ }
+ }
+}
+
+void
+xpc_connected_callout(struct xpc_channel *ch)
+{
+ /* let the registerer know that a connection has been established */
+
+ if (ch->func != NULL) {
+ dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
+ "partid=%d, channel=%d\n", ch->partid, ch->number);
+
+ ch->func(xpConnected, ch->partid, ch->number,
+ (void *)(u64)ch->local_nentries, ch->key);
+
+ dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
+ "partid=%d, channel=%d\n", ch->partid, ch->number);
+ }
+}
+
+/*
+ * Called by XP at the time of channel connection unregistration to cause
+ * XPC to tear down all current connections for the specified channel.
+ *
+ * Before returning, xpc_initiate_disconnect() will wait until all connections
+ * on the specified channel have been closed/torn down.
So the caller can be + * assured that they will not be receiving any more callouts from XPC to the + * function they registered via xpc_connect(). + * + * Arguments: + * + * ch_number - channel # to unregister. + */ +void +xpc_initiate_disconnect(int ch_number) +{ + unsigned long irq_flags; + short partid; + struct xpc_partition *part; + struct xpc_channel *ch; + + DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS); + + /* initiate the channel disconnect for every active partition */ + for (partid = 0; partid < xp_max_npartitions; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_part_ref(part)) { + ch = &part->channels[ch_number]; + xpc_msgqueue_ref(ch); + + spin_lock_irqsave(&ch->lock, irq_flags); + + if (!(ch->flags & XPC_C_DISCONNECTED)) { + ch->flags |= XPC_C_WDISCONNECT; + + XPC_DISCONNECT_CHANNEL(ch, xpUnregistering, + &irq_flags); + } + + spin_unlock_irqrestore(&ch->lock, irq_flags); + + xpc_msgqueue_deref(ch); + xpc_part_deref(part); + } + } + + xpc_disconnect_wait(ch_number); +} + +/* + * To disconnect a channel, and reflect it back to all who may be waiting. + * + * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by + * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by + * xpc_disconnect_wait(). + * + * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN. + */ +void +xpc_disconnect_channel(const int line, struct xpc_channel *ch, + enum xp_retval reason, unsigned long *irq_flags) +{ + u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED); + + lockdep_assert_held(&ch->lock); + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) + return; + + DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED))); + + dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n", + reason, line, ch->partid, ch->number); + + XPC_SET_REASON(ch, reason, line); + + ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING); + /* some of these may not have been set */ + ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY | + XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | + XPC_C_CONNECTING | XPC_C_CONNECTED); + + xpc_arch_ops.send_chctl_closerequest(ch, irq_flags); + + if (channel_was_connected) + ch->flags |= XPC_C_WASCONNECTED; + + spin_unlock_irqrestore(&ch->lock, *irq_flags); + + /* wake all idle kthreads so they can exit */ + if (atomic_read(&ch->kthreads_idle) > 0) { + wake_up_all(&ch->idle_wq); + + } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && + !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { + /* start a kthread that will do the xpDisconnecting callout */ + xpc_create_kthreads(ch, 1, 1); + } + + /* wake those waiting to allocate an entry from the local msg queue */ + if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) + wake_up(&ch->msg_allocate_wq); + + spin_lock_irqsave(&ch->lock, *irq_flags); +} + +void +xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason) +{ + /* + * Let the channel's registerer know that the channel is being + * disconnected. We don't want to do this if the registerer was never + * informed of a connection being made. + */ + + if (ch->func != NULL) { + dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, " + "channel=%d\n", reason, ch->partid, ch->number); + + ch->func(reason, ch->partid, ch->number, NULL, ch->key); + + dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, " + "channel=%d\n", reason, ch->partid, ch->number); + } +} + +/* + * Wait for a message entry to become available for the specified channel, + * but don't wait any longer than 1 jiffy. 
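Continuing the earlier hypothetical consumer: unregistration is the mirror image of xpc_connect(), and because the disconnect path described above waits for the teardown to finish, module exit needs no further synchronization. MY_CH and my_channel_func are the illustrative names from the earlier sketch.

static void __exit my_exit(void)
{
	/*
	 * Blocks until every connection on MY_CH is closed; afterwards
	 * no more callouts to my_channel_func() will occur.
	 */
	xpc_disconnect(MY_CH);
}
module_exit(my_exit);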
+ */ +enum xp_retval +xpc_allocate_msg_wait(struct xpc_channel *ch) +{ + enum xp_retval ret; + DEFINE_WAIT(wait); + + if (ch->flags & XPC_C_DISCONNECTING) { + DBUG_ON(ch->reason == xpInterrupted); + return ch->reason; + } + + atomic_inc(&ch->n_on_msg_allocate_wq); + prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE); + ret = schedule_timeout(1); + finish_wait(&ch->msg_allocate_wq, &wait); + atomic_dec(&ch->n_on_msg_allocate_wq); + + if (ch->flags & XPC_C_DISCONNECTING) { + ret = ch->reason; + DBUG_ON(ch->reason == xpInterrupted); + } else if (ret == 0) { + ret = xpTimeout; + } else { + ret = xpInterrupted; + } + + return ret; +} + +/* + * Send a message that contains the user's payload on the specified channel + * connected to the specified partition. + * + * NOTE that this routine can sleep waiting for a message entry to become + * available. To not sleep, pass in the XPC_NOWAIT flag. + * + * Once sent, this routine will not wait for the message to be received, nor + * will notification be given when it does happen. + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel # to send message on. + * flags - see xp.h for valid flags. + * payload - pointer to the payload which is to be sent. + * payload_size - size of the payload in bytes. + */ +enum xp_retval +xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload, + u16 payload_size) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + enum xp_retval ret = xpUnknownReason; + + dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload, + partid, ch_number); + + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + DBUG_ON(payload == NULL); + + if (xpc_part_ref(part)) { + ret = xpc_arch_ops.send_payload(&part->channels[ch_number], + flags, payload, payload_size, 0, NULL, NULL); + xpc_part_deref(part); + } + + return ret; +} + +/* + * Send a message that contains the user's payload on the specified channel + * connected to the specified partition. + * + * NOTE that this routine can sleep waiting for a message entry to become + * available. To not sleep, pass in the XPC_NOWAIT flag. + * + * This routine will not wait for the message to be sent or received. + * + * Once the remote end of the channel has received the message, the function + * passed as an argument to xpc_initiate_send_notify() will be called. This + * allows the sender to free up or re-use any buffers referenced by the + * message, but does NOT mean the message has been processed at the remote + * end by a receiver. + * + * If this routine returns an error, the caller's function will NOT be called. + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel # to send message on. + * flags - see xp.h for valid flags. + * payload - pointer to the payload which is to be sent. + * payload_size - size of the payload in bytes. + * func - function to call with asynchronous notification of message + * receipt. THIS FUNCTION MUST BE NON-BLOCKING. + * key - user-defined key to be passed to the function when it's called. 
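A sketch of how a sender might use the notify variant documented above: the buffer is handed to XPC along with a completion callback and is reclaimed only once the remote side has pulled the message. xpc_send_notify() is the xp.h wrapper around this entry point; my_msg and MY_CH are the illustrative names used earlier, and XPC_WAIT permits sleeping for a free message slot.

#include <linux/slab.h>

static void
my_sent(enum xp_retval reason, short partid, int ch_number, void *key)
{
	/* remote side has received the message, or the channel went down */
	kfree(key);
}

static enum xp_retval
my_send(short partid, struct my_msg *msg)
{
	/* on an error return my_sent() is never called; caller keeps msg */
	return xpc_send_notify(partid, MY_CH, XPC_WAIT, msg, sizeof(*msg),
			       my_sent, msg);
}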
+ */ +enum xp_retval +xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload, + u16 payload_size, xpc_notify_func func, void *key) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + enum xp_retval ret = xpUnknownReason; + + dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload, + partid, ch_number); + + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + DBUG_ON(payload == NULL); + DBUG_ON(func == NULL); + + if (xpc_part_ref(part)) { + ret = xpc_arch_ops.send_payload(&part->channels[ch_number], + flags, payload, payload_size, XPC_N_CALL, func, key); + xpc_part_deref(part); + } + return ret; +} + +/* + * Deliver a message's payload to its intended recipient. + */ +void +xpc_deliver_payload(struct xpc_channel *ch) +{ + void *payload; + + payload = xpc_arch_ops.get_deliverable_payload(ch); + if (payload != NULL) { + + /* + * This ref is taken to protect the payload itself from being + * freed before the user is finished with it, which the user + * indicates by calling xpc_initiate_received(). + */ + xpc_msgqueue_ref(ch); + + atomic_inc(&ch->kthreads_active); + + if (ch->func != NULL) { + dev_dbg(xpc_chan, "ch->func() called, payload=0x%p " + "partid=%d channel=%d\n", payload, ch->partid, + ch->number); + + /* deliver the message to its intended recipient */ + ch->func(xpMsgReceived, ch->partid, ch->number, payload, + ch->key); + + dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p " + "partid=%d channel=%d\n", payload, ch->partid, + ch->number); + } + + atomic_dec(&ch->kthreads_active); + } +} + +/* + * Acknowledge receipt of a delivered message's payload. + * + * This function, although called by users, does not call xpc_part_ref() to + * ensure that the partition infrastructure is in place. It relies on the + * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload(). + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel # message received on. + * payload - pointer to the payload area allocated via + * xpc_initiate_send() or xpc_initiate_send_notify(). + */ +void +xpc_initiate_received(short partid, int ch_number, void *payload) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_channel *ch; + + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + + ch = &part->channels[ch_number]; + xpc_arch_ops.received_payload(ch, payload); + + /* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */ + xpc_msgqueue_deref(ch); +} diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c new file mode 100644 index 000000000..e5244fc1d --- /dev/null +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -0,0 +1,1349 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * (C) Copyright 2020 Hewlett Packard Enterprise Development LP + * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) support - standard version. + * + * XPC provides a message passing capability that crosses partition + * boundaries. This module is made up of two parts: + * + * partition This part detects the presence/absence of other + * partitions. It provides a heartbeat and monitors + * the heartbeats of other partitions. 
+ * + * channel This part manages the channels and sends/receives + * messages across them to/from other partitions. + * + * There are a couple of additional functions residing in XP, which + * provide an interface to XPC for its users. + * + * + * Caveats: + * + * . Currently on sn2, we have no way to determine which nasid an IRQ + * came from. Thus, xpc_send_IRQ_sn2() does a remote amo write + * followed by an IPI. The amo indicates where data is to be pulled + * from, so after the IPI arrives, the remote partition checks the amo + * word. The IPI can actually arrive before the amo however, so other + * code must periodically check for this case. Also, remote amo + * operations do not reliably time out. Thus we do a remote PIO read + * solely to know whether the remote partition is down and whether we + * should stop sending IPIs to it. This remote PIO read operation is + * set up in a special nofault region so SAL knows to ignore (and + * cleanup) any errors due to the remote amo write, PIO read, and/or + * PIO write operations. + * + * If/when new hardware solves this IPI problem, we should abandon + * the current approach. + * + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/sysctl.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/reboot.h> +#include <linux/kdebug.h> +#include <linux/kthread.h> +#include "xpc.h" + +#ifdef CONFIG_X86_64 +#include <asm/traps.h> +#endif + +/* define two XPC debug device structures to be used with dev_dbg() et al */ + +static struct device_driver xpc_dbg_name = { + .name = "xpc" +}; + +static struct device xpc_part_dbg_subname = { + .init_name = "", /* set to "part" at xpc_init() time */ + .driver = &xpc_dbg_name +}; + +static struct device xpc_chan_dbg_subname = { + .init_name = "", /* set to "chan" at xpc_init() time */ + .driver = &xpc_dbg_name +}; + +struct device *xpc_part = &xpc_part_dbg_subname; +struct device *xpc_chan = &xpc_chan_dbg_subname; + +static int xpc_kdebug_ignore; + +/* systune related variables for /proc/sys directories */ + +static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL; +static int xpc_hb_min_interval = 1; +static int xpc_hb_max_interval = 10; + +static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL; +static int xpc_hb_check_min_interval = 10; +static int xpc_hb_check_max_interval = 120; + +int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT; +static int xpc_disengage_min_timelimit; /* = 0 */ +static int xpc_disengage_max_timelimit = 120; + +static struct ctl_table xpc_sys_xpc_hb_dir[] = { + { + .procname = "hb_interval", + .data = &xpc_hb_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xpc_hb_min_interval, + .extra2 = &xpc_hb_max_interval}, + { + .procname = "hb_check_interval", + .data = &xpc_hb_check_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xpc_hb_check_min_interval, + .extra2 = &xpc_hb_check_max_interval}, + {} +}; +static struct ctl_table xpc_sys_xpc_dir[] = { + { + .procname = "hb", + .mode = 0555, + .child = xpc_sys_xpc_hb_dir}, + { + .procname = "disengage_timelimit", + .data = &xpc_disengage_timelimit, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xpc_disengage_min_timelimit, + .extra2 = &xpc_disengage_max_timelimit}, + {} +}; +static struct ctl_table xpc_sys_dir[] = { + { + .procname = "xpc", + .mode = 0555, + .child = xpc_sys_xpc_dir}, + {} +}; +static struct ctl_table_header 
*xpc_sysctl;
+
+/* non-zero if any remote partition disengage was timed out */
+int xpc_disengage_timedout;
+
+/* #of activate IRQs received and not yet processed */
+int xpc_activate_IRQ_rcvd;
+DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock);
+
+/* IRQ handler notifies this wait queue on receipt of an IRQ */
+DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);
+
+static unsigned long xpc_hb_check_timeout;
+static struct timer_list xpc_hb_timer;
+
+/* notification that the xpc_hb_checker thread has exited */
+static DECLARE_COMPLETION(xpc_hb_checker_exited);
+
+/* notification that the xpc_discovery thread has exited */
+static DECLARE_COMPLETION(xpc_discovery_exited);
+
+static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
+
+static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
+static struct notifier_block xpc_reboot_notifier = {
+ .notifier_call = xpc_system_reboot,
+};
+
+static int xpc_system_die(struct notifier_block *, unsigned long, void *);
+static struct notifier_block xpc_die_notifier = {
+ .notifier_call = xpc_system_die,
+};
+
+struct xpc_arch_operations xpc_arch_ops;
+
+/*
+ * Timer function to enforce the timelimit on the partition disengage.
+ */
+static void
+xpc_timeout_partition_disengage(struct timer_list *t)
+{
+ struct xpc_partition *part = from_timer(part, t, disengage_timer);
+
+ DBUG_ON(time_is_after_jiffies(part->disengage_timeout));
+
+ (void)xpc_partition_disengaged(part);
+
+ DBUG_ON(part->disengage_timeout != 0);
+ DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
+}
+
+/*
+ * Timer to produce the heartbeat. The timer structure's function is
+ * already set when this is initially called. A tunable is used to
+ * specify when the next timeout should occur.
+ */
+static void
+xpc_hb_beater(struct timer_list *unused)
+{
+ xpc_arch_ops.increment_heartbeat();
+
+ if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
+ wake_up_interruptible(&xpc_activate_IRQ_wq);
+
+ xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
+ add_timer(&xpc_hb_timer);
+}
+
+static void
+xpc_start_hb_beater(void)
+{
+ xpc_arch_ops.heartbeat_init();
+ timer_setup(&xpc_hb_timer, xpc_hb_beater, 0);
+ xpc_hb_beater(0);
+}
+
+static void
+xpc_stop_hb_beater(void)
+{
+ del_timer_sync(&xpc_hb_timer);
+ xpc_arch_ops.heartbeat_exit();
+}
+
+/*
+ * At periodic intervals, scan through all active partitions and ensure
+ * their heartbeat is still active. If not, the partition is deactivated.
+ */
+static void
+xpc_check_remote_hb(void)
+{
+ struct xpc_partition *part;
+ short partid;
+ enum xp_retval ret;
+
+ for (partid = 0; partid < xp_max_npartitions; partid++) {
+
+ if (xpc_exiting)
+ break;
+
+ if (partid == xp_partition_id)
+ continue;
+
+ part = &xpc_partitions[partid];
+
+ if (part->act_state == XPC_P_AS_INACTIVE ||
+ part->act_state == XPC_P_AS_DEACTIVATING) {
+ continue;
+ }
+
+ ret = xpc_arch_ops.get_remote_heartbeat(part);
+ if (ret != xpSuccess)
+ XPC_DEACTIVATE_PARTITION(part, ret);
+ }
+}
+
+/*
+ * This thread is responsible for nearly all of the partition
+ * activation/deactivation.
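+ *
+ * Annotation (editorial, not in the original source): the cadence comes
+ * from the two sysctl tunables declared above -- xpc_hb_beater() bumps
+ * the local heartbeat every xpc_hb_interval seconds, while this thread
+ * samples remote heartbeats every xpc_hb_check_interval seconds, waking
+ * early whenever an activate IRQ arrives.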
+ */ +static int +xpc_hb_checker(void *ignore) +{ + int force_IRQ = 0; + + /* this thread was marked active by xpc_hb_init() */ + + set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU)); + + /* set our heartbeating to other partitions into motion */ + xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); + xpc_start_hb_beater(); + + while (!xpc_exiting) { + + dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " + "been received\n", + (int)(xpc_hb_check_timeout - jiffies), + xpc_activate_IRQ_rcvd); + + /* checking of remote heartbeats is skewed by IRQ handling */ + if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) { + xpc_hb_check_timeout = jiffies + + (xpc_hb_check_interval * HZ); + + dev_dbg(xpc_part, "checking remote heartbeats\n"); + xpc_check_remote_hb(); + } + + /* check for outstanding IRQs */ + if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) { + force_IRQ = 0; + dev_dbg(xpc_part, "processing activate IRQs " + "received\n"); + xpc_arch_ops.process_activate_IRQ_rcvd(); + } + + /* wait for IRQ or timeout */ + (void)wait_event_interruptible(xpc_activate_IRQ_wq, + (time_is_before_eq_jiffies( + xpc_hb_check_timeout) || + xpc_activate_IRQ_rcvd > 0 || + xpc_exiting)); + } + + xpc_stop_hb_beater(); + + dev_dbg(xpc_part, "heartbeat checker is exiting\n"); + + /* mark this thread as having exited */ + complete(&xpc_hb_checker_exited); + return 0; +} + +/* + * This thread will attempt to discover other partitions to activate + * based on info provided by SAL. This new thread is short lived and + * will exit once discovery is complete. + */ +static int +xpc_initiate_discovery(void *ignore) +{ + xpc_discovery(); + + dev_dbg(xpc_part, "discovery thread is exiting\n"); + + /* mark this thread as having exited */ + complete(&xpc_discovery_exited); + return 0; +} + +/* + * The first kthread assigned to a newly activated partition is the one + * created by XPC HB with which it calls xpc_activating(). XPC hangs on to + * that kthread until the partition is brought down, at which time that kthread + * returns back to XPC HB. (The return of that kthread will signify to XPC HB + * that XPC has dismantled all communication infrastructure for the associated + * partition.) This kthread becomes the channel manager for that partition. + * + * Each active partition has a channel manager, who, besides connecting and + * disconnecting channels, will ensure that each of the partition's connected + * channels has the required number of assigned kthreads to get the work done. + */ +static void +xpc_channel_mgr(struct xpc_partition *part) +{ + while (part->act_state != XPC_P_AS_DEACTIVATING || + atomic_read(&part->nchannels_active) > 0 || + !xpc_partition_disengaged(part)) { + + xpc_process_sent_chctl_flags(part); + + /* + * Wait until we've been requested to activate kthreads or + * all of the channel's message queues have been torn down or + * a signal is pending. + * + * The channel_mgr_requests is set to 1 after being awakened, + * This is done to prevent the channel mgr from making one pass + * through the loop for each request, since he will + * be servicing all the requests in one pass. The reason it's + * set to 1 instead of 0 is so that other kthreads will know + * that the channel mgr is running and won't bother trying to + * wake him up. 
+ */ + atomic_dec(&part->channel_mgr_requests); + (void)wait_event_interruptible(part->channel_mgr_wq, + (atomic_read(&part->channel_mgr_requests) > 0 || + part->chctl.all_flags != 0 || + (part->act_state == XPC_P_AS_DEACTIVATING && + atomic_read(&part->nchannels_active) == 0 && + xpc_partition_disengaged(part)))); + atomic_set(&part->channel_mgr_requests, 1); + } +} + +/* + * Guarantee that the kzalloc'd memory is cacheline aligned. + */ +void * +xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) +{ + /* see if kzalloc will give us cachline aligned memory by default */ + *base = kzalloc(size, flags); + if (*base == NULL) + return NULL; + + if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) + return *base; + + kfree(*base); + + /* nope, we'll have to do it ourselves */ + *base = kzalloc(size + L1_CACHE_BYTES, flags); + if (*base == NULL) + return NULL; + + return (void *)L1_CACHE_ALIGN((u64)*base); +} + +/* + * Setup the channel structures necessary to support XPartition Communication + * between the specified remote partition and the local one. + */ +static enum xp_retval +xpc_setup_ch_structures(struct xpc_partition *part) +{ + enum xp_retval ret; + int ch_number; + struct xpc_channel *ch; + short partid = XPC_PARTID(part); + + /* + * Allocate all of the channel structures as a contiguous chunk of + * memory. + */ + DBUG_ON(part->channels != NULL); + part->channels = kcalloc(XPC_MAX_NCHANNELS, + sizeof(struct xpc_channel), + GFP_KERNEL); + if (part->channels == NULL) { + dev_err(xpc_chan, "can't get memory for channels\n"); + return xpNoMemory; + } + + /* allocate the remote open and close args */ + + part->remote_openclose_args = + xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, + GFP_KERNEL, &part-> + remote_openclose_args_base); + if (part->remote_openclose_args == NULL) { + dev_err(xpc_chan, "can't get memory for remote connect args\n"); + ret = xpNoMemory; + goto out_1; + } + + part->chctl.all_flags = 0; + spin_lock_init(&part->chctl_lock); + + atomic_set(&part->channel_mgr_requests, 1); + init_waitqueue_head(&part->channel_mgr_wq); + + part->nchannels = XPC_MAX_NCHANNELS; + + atomic_set(&part->nchannels_active, 0); + atomic_set(&part->nchannels_engaged, 0); + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch = &part->channels[ch_number]; + + ch->partid = partid; + ch->number = ch_number; + ch->flags = XPC_C_DISCONNECTED; + + atomic_set(&ch->kthreads_assigned, 0); + atomic_set(&ch->kthreads_idle, 0); + atomic_set(&ch->kthreads_active, 0); + + atomic_set(&ch->references, 0); + atomic_set(&ch->n_to_notify, 0); + + spin_lock_init(&ch->lock); + init_completion(&ch->wdisconnect_wait); + + atomic_set(&ch->n_on_msg_allocate_wq, 0); + init_waitqueue_head(&ch->msg_allocate_wq); + init_waitqueue_head(&ch->idle_wq); + } + + ret = xpc_arch_ops.setup_ch_structures(part); + if (ret != xpSuccess) + goto out_2; + + /* + * With the setting of the partition setup_state to XPC_P_SS_SETUP, + * we're declaring that this partition is ready to go. + */ + part->setup_state = XPC_P_SS_SETUP; + + return xpSuccess; + + /* setup of ch structures failed */ +out_2: + kfree(part->remote_openclose_args_base); + part->remote_openclose_args = NULL; +out_1: + kfree(part->channels); + part->channels = NULL; + return ret; +} + +/* + * Teardown the channel structures necessary to support XPartition Communication + * between the specified remote partition and the local one. 
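+ *
+ * Editorial note (not in the original source): teardown mirrors setup
+ * in reverse -- setup_state moves from XPC_P_SS_SETUP to
+ * XPC_P_SS_WTEARDOWN, the function then waits for all references to
+ * the partition to drain before freeing the channel structures and
+ * ending in XPC_P_SS_TORNDOWN.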
+ */ +static void +xpc_teardown_ch_structures(struct xpc_partition *part) +{ + DBUG_ON(atomic_read(&part->nchannels_engaged) != 0); + DBUG_ON(atomic_read(&part->nchannels_active) != 0); + + /* + * Make this partition inaccessible to local processes by marking it + * as no longer setup. Then wait before proceeding with the teardown + * until all existing references cease. + */ + DBUG_ON(part->setup_state != XPC_P_SS_SETUP); + part->setup_state = XPC_P_SS_WTEARDOWN; + + wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); + + /* now we can begin tearing down the infrastructure */ + + xpc_arch_ops.teardown_ch_structures(part); + + kfree(part->remote_openclose_args_base); + part->remote_openclose_args = NULL; + kfree(part->channels); + part->channels = NULL; + + part->setup_state = XPC_P_SS_TORNDOWN; +} + +/* + * When XPC HB determines that a partition has come up, it will create a new + * kthread and that kthread will call this function to attempt to set up the + * basic infrastructure used for Cross Partition Communication with the newly + * upped partition. + * + * The kthread that was created by XPC HB and which setup the XPC + * infrastructure will remain assigned to the partition becoming the channel + * manager for that partition until the partition is deactivating, at which + * time the kthread will teardown the XPC infrastructure and then exit. + */ +static int +xpc_activating(void *__partid) +{ + short partid = (u64)__partid; + struct xpc_partition *part = &xpc_partitions[partid]; + unsigned long irq_flags; + + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); + + spin_lock_irqsave(&part->act_lock, irq_flags); + + if (part->act_state == XPC_P_AS_DEACTIVATING) { + part->act_state = XPC_P_AS_INACTIVE; + spin_unlock_irqrestore(&part->act_lock, irq_flags); + part->remote_rp_pa = 0; + return 0; + } + + /* indicate the thread is activating */ + DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ); + part->act_state = XPC_P_AS_ACTIVATING; + + XPC_SET_REASON(part, 0, 0); + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + dev_dbg(xpc_part, "activating partition %d\n", partid); + + xpc_arch_ops.allow_hb(partid); + + if (xpc_setup_ch_structures(part) == xpSuccess) { + (void)xpc_part_ref(part); /* this will always succeed */ + + if (xpc_arch_ops.make_first_contact(part) == xpSuccess) { + xpc_mark_partition_active(part); + xpc_channel_mgr(part); + /* won't return until partition is deactivating */ + } + + xpc_part_deref(part); + xpc_teardown_ch_structures(part); + } + + xpc_arch_ops.disallow_hb(partid); + xpc_mark_partition_inactive(part); + + if (part->reason == xpReactivating) { + /* interrupting ourselves results in activating partition */ + xpc_arch_ops.request_partition_reactivation(part); + } + + return 0; +} + +void +xpc_activate_partition(struct xpc_partition *part) +{ + short partid = XPC_PARTID(part); + unsigned long irq_flags; + struct task_struct *kthread; + + spin_lock_irqsave(&part->act_lock, irq_flags); + + DBUG_ON(part->act_state != XPC_P_AS_INACTIVE); + + part->act_state = XPC_P_AS_ACTIVATION_REQ; + XPC_SET_REASON(part, xpCloneKThread, __LINE__); + + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d", + partid); + if (IS_ERR(kthread)) { + spin_lock_irqsave(&part->act_lock, irq_flags); + part->act_state = XPC_P_AS_INACTIVE; + XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__); + spin_unlock_irqrestore(&part->act_lock, irq_flags); + } +} + +void +xpc_activate_kthreads(struct 
xpc_channel *ch, int needed) +{ + int idle = atomic_read(&ch->kthreads_idle); + int assigned = atomic_read(&ch->kthreads_assigned); + int wakeup; + + DBUG_ON(needed <= 0); + + if (idle > 0) { + wakeup = (needed > idle) ? idle : needed; + needed -= wakeup; + + dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, " + "channel=%d\n", wakeup, ch->partid, ch->number); + + /* only wakeup the requested number of kthreads */ + wake_up_nr(&ch->idle_wq, wakeup); + } + + if (needed <= 0) + return; + + if (needed + assigned > ch->kthreads_assigned_limit) { + needed = ch->kthreads_assigned_limit - assigned; + if (needed <= 0) + return; + } + + dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n", + needed, ch->partid, ch->number); + + xpc_create_kthreads(ch, needed, 0); +} + +/* + * This function is where XPC's kthreads wait for messages to deliver. + */ +static void +xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) +{ + int (*n_of_deliverable_payloads) (struct xpc_channel *) = + xpc_arch_ops.n_of_deliverable_payloads; + + do { + /* deliver messages to their intended recipients */ + + while (n_of_deliverable_payloads(ch) > 0 && + !(ch->flags & XPC_C_DISCONNECTING)) { + xpc_deliver_payload(ch); + } + + if (atomic_inc_return(&ch->kthreads_idle) > + ch->kthreads_idle_limit) { + /* too many idle kthreads on this channel */ + atomic_dec(&ch->kthreads_idle); + break; + } + + dev_dbg(xpc_chan, "idle kthread calling " + "wait_event_interruptible_exclusive()\n"); + + (void)wait_event_interruptible_exclusive(ch->idle_wq, + (n_of_deliverable_payloads(ch) > 0 || + (ch->flags & XPC_C_DISCONNECTING))); + + atomic_dec(&ch->kthreads_idle); + + } while (!(ch->flags & XPC_C_DISCONNECTING)); +} + +static int +xpc_kthread_start(void *args) +{ + short partid = XPC_UNPACK_ARG1(args); + u16 ch_number = XPC_UNPACK_ARG2(args); + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_channel *ch; + int n_needed; + unsigned long irq_flags; + int (*n_of_deliverable_payloads) (struct xpc_channel *) = + xpc_arch_ops.n_of_deliverable_payloads; + + dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", + partid, ch_number); + + ch = &part->channels[ch_number]; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + + /* let registerer know that connection has been established */ + + spin_lock_irqsave(&ch->lock, irq_flags); + if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) { + ch->flags |= XPC_C_CONNECTEDCALLOUT; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + xpc_connected_callout(ch); + + spin_lock_irqsave(&ch->lock, irq_flags); + ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + /* + * It is possible that while the callout was being + * made that the remote partition sent some messages. + * If that is the case, we may need to activate + * additional kthreads to help deliver them. We only + * need one less than total #of messages to deliver. 
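+ *
+ * Worked example (editorial, not in the original source): if
+ * three payloads became deliverable during the callout, this
+ * kthread will consume one itself in xpc_kthread_waitmsgs(),
+ * so only two additional kthreads need to be woken or created.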
+ */ + n_needed = n_of_deliverable_payloads(ch) - 1; + if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) + xpc_activate_kthreads(ch, n_needed); + + } else { + spin_unlock_irqrestore(&ch->lock, irq_flags); + } + + xpc_kthread_waitmsgs(part, ch); + } + + /* let registerer know that connection is disconnecting */ + + spin_lock_irqsave(&ch->lock, irq_flags); + if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && + !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { + ch->flags |= XPC_C_DISCONNECTINGCALLOUT; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + xpc_disconnect_callout(ch, xpDisconnecting); + + spin_lock_irqsave(&ch->lock, irq_flags); + ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE; + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + + if (atomic_dec_return(&ch->kthreads_assigned) == 0 && + atomic_dec_return(&part->nchannels_engaged) == 0) { + xpc_arch_ops.indicate_partition_disengaged(part); + } + + xpc_msgqueue_deref(ch); + + dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n", + partid, ch_number); + + xpc_part_deref(part); + return 0; +} + +/* + * For each partition that XPC has established communications with, there is + * a minimum of one kernel thread assigned to perform any operation that + * may potentially sleep or block (basically the callouts to the asynchronous + * functions registered via xpc_connect()). + * + * Additional kthreads are created and destroyed by XPC as the workload + * demands. + * + * A kthread is assigned to one of the active channels that exists for a given + * partition. + */ +void +xpc_create_kthreads(struct xpc_channel *ch, int needed, + int ignore_disconnecting) +{ + unsigned long irq_flags; + u64 args = XPC_PACK_ARGS(ch->partid, ch->number); + struct xpc_partition *part = &xpc_partitions[ch->partid]; + struct task_struct *kthread; + void (*indicate_partition_disengaged) (struct xpc_partition *) = + xpc_arch_ops.indicate_partition_disengaged; + + while (needed-- > 0) { + + /* + * The following is done on behalf of the newly created + * kthread. That kthread is responsible for doing the + * counterpart to the following before it exits. + */ + if (ignore_disconnecting) { + if (!atomic_inc_not_zero(&ch->kthreads_assigned)) { + /* kthreads assigned had gone to zero */ + BUG_ON(!(ch->flags & + XPC_C_DISCONNECTINGCALLOUT_MADE)); + break; + } + + } else if (ch->flags & XPC_C_DISCONNECTING) { + break; + + } else if (atomic_inc_return(&ch->kthreads_assigned) == 1 && + atomic_inc_return(&part->nchannels_engaged) == 1) { + xpc_arch_ops.indicate_partition_engaged(part); + } + (void)xpc_part_ref(part); + xpc_msgqueue_ref(ch); + + kthread = kthread_run(xpc_kthread_start, (void *)args, + "xpc%02dc%d", ch->partid, ch->number); + if (IS_ERR(kthread)) { + /* the fork failed */ + + /* + * NOTE: if (ignore_disconnecting && + * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true, + * then we'll deadlock if all other kthreads assigned + * to this channel are blocked in the channel's + * registerer, because the only thing that will unblock + * them is the xpDisconnecting callout that this + * failed kthread_run() would have made. + */ + + if (atomic_dec_return(&ch->kthreads_assigned) == 0 && + atomic_dec_return(&part->nchannels_engaged) == 0) { + indicate_partition_disengaged(part); + } + xpc_msgqueue_deref(ch); + xpc_part_deref(part); + + if (atomic_read(&ch->kthreads_assigned) < + ch->kthreads_idle_limit) { + /* + * Flag this as an error only if we have an + * insufficient #of kthreads for the channel + * to function. 
+ */ + spin_lock_irqsave(&ch->lock, irq_flags); + XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + } + break; + } + } +} + +void +xpc_disconnect_wait(int ch_number) +{ + unsigned long irq_flags; + short partid; + struct xpc_partition *part; + struct xpc_channel *ch; + int wakeup_channel_mgr; + + /* now wait for all callouts to the caller's function to cease */ + for (partid = 0; partid < xp_max_npartitions; partid++) { + part = &xpc_partitions[partid]; + + if (!xpc_part_ref(part)) + continue; + + ch = &part->channels[ch_number]; + + if (!(ch->flags & XPC_C_WDISCONNECT)) { + xpc_part_deref(part); + continue; + } + + wait_for_completion(&ch->wdisconnect_wait); + + spin_lock_irqsave(&ch->lock, irq_flags); + DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); + wakeup_channel_mgr = 0; + + if (ch->delayed_chctl_flags) { + if (part->act_state != XPC_P_AS_DEACTIVATING) { + spin_lock(&part->chctl_lock); + part->chctl.flags[ch->number] |= + ch->delayed_chctl_flags; + spin_unlock(&part->chctl_lock); + wakeup_channel_mgr = 1; + } + ch->delayed_chctl_flags = 0; + } + + ch->flags &= ~XPC_C_WDISCONNECT; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + if (wakeup_channel_mgr) + xpc_wakeup_channel_mgr(part); + + xpc_part_deref(part); + } +} + +static int +xpc_setup_partitions(void) +{ + short partid; + struct xpc_partition *part; + + xpc_partitions = kcalloc(xp_max_npartitions, + sizeof(struct xpc_partition), + GFP_KERNEL); + if (xpc_partitions == NULL) { + dev_err(xpc_part, "can't get memory for partition structure\n"); + return -ENOMEM; + } + + /* + * The first few fields of each entry of xpc_partitions[] need to + * be initialized now so that calls to xpc_connect() and + * xpc_disconnect() can be made prior to the activation of any remote + * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE + * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING + * PARTITION HAS BEEN ACTIVATED. + */ + for (partid = 0; partid < xp_max_npartitions; partid++) { + part = &xpc_partitions[partid]; + + DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part)); + + part->activate_IRQ_rcvd = 0; + spin_lock_init(&part->act_lock); + part->act_state = XPC_P_AS_INACTIVE; + XPC_SET_REASON(part, 0, 0); + + timer_setup(&part->disengage_timer, + xpc_timeout_partition_disengage, 0); + + part->setup_state = XPC_P_SS_UNSET; + init_waitqueue_head(&part->teardown_wq); + atomic_set(&part->references, 0); + } + + return xpc_arch_ops.setup_partitions(); +} + +static void +xpc_teardown_partitions(void) +{ + xpc_arch_ops.teardown_partitions(); + kfree(xpc_partitions); +} + +static void +xpc_do_exit(enum xp_retval reason) +{ + short partid; + int active_part_count, printed_waiting_msg = 0; + struct xpc_partition *part; + unsigned long printmsg_time, disengage_timeout = 0; + + /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ + DBUG_ON(xpc_exiting == 1); + + /* + * Let the heartbeat checker thread and the discovery thread + * (if one is running) know that they should exit. Also wake up + * the heartbeat checker thread in case it's sleeping. 
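+ *
+ * Annotation (editorial, not in the original source): the overall
+ * shutdown order below is: flag xpc_exiting, reap the discovery and
+ * heartbeat-checker threads, poll until every remote partition has
+ * disengaged (or the disengage timelimit fires), and only then tear
+ * down the reserved page, notifiers, sysctl entries and partition
+ * structures.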
+ */ + xpc_exiting = 1; + wake_up_interruptible(&xpc_activate_IRQ_wq); + + /* wait for the discovery thread to exit */ + wait_for_completion(&xpc_discovery_exited); + + /* wait for the heartbeat checker thread to exit */ + wait_for_completion(&xpc_hb_checker_exited); + + /* sleep for a 1/3 of a second or so */ + (void)msleep_interruptible(300); + + /* wait for all partitions to become inactive */ + + printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ); + xpc_disengage_timedout = 0; + + do { + active_part_count = 0; + + for (partid = 0; partid < xp_max_npartitions; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_partition_disengaged(part) && + part->act_state == XPC_P_AS_INACTIVE) { + continue; + } + + active_part_count++; + + XPC_DEACTIVATE_PARTITION(part, reason); + + if (part->disengage_timeout > disengage_timeout) + disengage_timeout = part->disengage_timeout; + } + + if (xpc_arch_ops.any_partition_engaged()) { + if (time_is_before_jiffies(printmsg_time)) { + dev_info(xpc_part, "waiting for remote " + "partitions to deactivate, timeout in " + "%ld seconds\n", (disengage_timeout - + jiffies) / HZ); + printmsg_time = jiffies + + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ); + printed_waiting_msg = 1; + } + + } else if (active_part_count > 0) { + if (printed_waiting_msg) { + dev_info(xpc_part, "waiting for local partition" + " to deactivate\n"); + printed_waiting_msg = 0; + } + + } else { + if (!xpc_disengage_timedout) { + dev_info(xpc_part, "all partitions have " + "deactivated\n"); + } + break; + } + + /* sleep for a 1/3 of a second or so */ + (void)msleep_interruptible(300); + + } while (1); + + DBUG_ON(xpc_arch_ops.any_partition_engaged()); + + xpc_teardown_rsvd_page(); + + if (reason == xpUnloading) { + (void)unregister_die_notifier(&xpc_die_notifier); + (void)unregister_reboot_notifier(&xpc_reboot_notifier); + } + + /* clear the interface to XPC's functions */ + xpc_clear_interface(); + + if (xpc_sysctl) + unregister_sysctl_table(xpc_sysctl); + + xpc_teardown_partitions(); + + if (is_uv_system()) + xpc_exit_uv(); +} + +/* + * This function is called when the system is being rebooted. + */ +static int +xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) +{ + enum xp_retval reason; + + switch (event) { + case SYS_RESTART: + reason = xpSystemReboot; + break; + case SYS_HALT: + reason = xpSystemHalt; + break; + case SYS_POWER_OFF: + reason = xpSystemPoweroff; + break; + default: + reason = xpSystemGoingDown; + } + + xpc_do_exit(reason); + return NOTIFY_DONE; +} + +/* Used to only allow one cpu to complete disconnect */ +static unsigned int xpc_die_disconnecting; + +/* + * Notify other partitions to deactivate from us by first disengaging from all + * references to our memory. 
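+ *
+ * Worked arithmetic (editorial, not in the original source): the wait
+ * loop below polls in ~200us steps, i.e. 5 steps per millisecond, which
+ * is where the "* 1000 * 5" conversions from seconds to iteration
+ * counts come from; a 90 second xpc_disengage_timelimit, for example,
+ * becomes 450000 iterations.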
+ */ +static void +xpc_die_deactivate(void) +{ + struct xpc_partition *part; + short partid; + int any_engaged; + long keep_waiting; + long wait_to_print; + + if (cmpxchg(&xpc_die_disconnecting, 0, 1)) + return; + + /* keep xpc_hb_checker thread from doing anything (just in case) */ + xpc_exiting = 1; + + xpc_arch_ops.disallow_all_hbs(); /*indicate we're deactivated */ + + for (partid = 0; partid < xp_max_npartitions; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_arch_ops.partition_engaged(partid) || + part->act_state != XPC_P_AS_INACTIVE) { + xpc_arch_ops.request_partition_deactivation(part); + xpc_arch_ops.indicate_partition_disengaged(part); + } + } + + /* + * Though we requested that all other partitions deactivate from us, + * we only wait until they've all disengaged or we've reached the + * defined timelimit. + * + * Given that one iteration through the following while-loop takes + * approximately 200 microseconds, calculate the #of loops to take + * before bailing and the #of loops before printing a waiting message. + */ + keep_waiting = xpc_disengage_timelimit * 1000 * 5; + wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5; + + while (1) { + any_engaged = xpc_arch_ops.any_partition_engaged(); + if (!any_engaged) { + dev_info(xpc_part, "all partitions have deactivated\n"); + break; + } + + if (!keep_waiting--) { + for (partid = 0; partid < xp_max_npartitions; + partid++) { + if (xpc_arch_ops.partition_engaged(partid)) { + dev_info(xpc_part, "deactivate from " + "remote partition %d timed " + "out\n", partid); + } + } + break; + } + + if (!wait_to_print--) { + dev_info(xpc_part, "waiting for remote partitions to " + "deactivate, timeout in %ld seconds\n", + keep_waiting / (1000 * 5)); + wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * + 1000 * 5; + } + + udelay(200); + } +} + +/* + * This function is called when the system is being restarted or halted due + * to some sort of system failure. If this is the case we need to notify the + * other partitions to disengage from all references to our memory. + * This function can also be called when our heartbeater could be offlined + * for a time. In this case we need to notify other partitions to not worry + * about the lack of a heartbeat. + */ +static int +xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) +{ +#ifdef CONFIG_IA64 /* !!! temporary kludge */ + switch (event) { + case DIE_MACHINE_RESTART: + case DIE_MACHINE_HALT: + xpc_die_deactivate(); + break; + + case DIE_KDEBUG_ENTER: + /* Should lack of heartbeat be ignored by other partitions? */ + if (!xpc_kdebug_ignore) + break; + + fallthrough; + case DIE_MCA_MONARCH_ENTER: + case DIE_INIT_MONARCH_ENTER: + xpc_arch_ops.offline_heartbeat(); + break; + + case DIE_KDEBUG_LEAVE: + /* Is lack of heartbeat being ignored by other partitions? 
*/ + if (!xpc_kdebug_ignore) + break; + + fallthrough; + case DIE_MCA_MONARCH_LEAVE: + case DIE_INIT_MONARCH_LEAVE: + xpc_arch_ops.online_heartbeat(); + break; + } +#else + struct die_args *die_args = _die_args; + + switch (event) { + case DIE_TRAP: + if (die_args->trapnr == X86_TRAP_DF) + xpc_die_deactivate(); + + if (((die_args->trapnr == X86_TRAP_MF) || + (die_args->trapnr == X86_TRAP_XF)) && + !user_mode(die_args->regs)) + xpc_die_deactivate(); + + break; + case DIE_INT3: + case DIE_DEBUG: + break; + case DIE_OOPS: + case DIE_GPF: + default: + xpc_die_deactivate(); + } +#endif + + return NOTIFY_DONE; +} + +static int __init +xpc_init(void) +{ + int ret; + struct task_struct *kthread; + + dev_set_name(xpc_part, "part"); + dev_set_name(xpc_chan, "chan"); + + if (is_uv_system()) { + ret = xpc_init_uv(); + + } else { + ret = -ENODEV; + } + + if (ret != 0) + return ret; + + ret = xpc_setup_partitions(); + if (ret != 0) { + dev_err(xpc_part, "can't get memory for partition structure\n"); + goto out_1; + } + + xpc_sysctl = register_sysctl_table(xpc_sys_dir); + + /* + * Fill the partition reserved page with the information needed by + * other partitions to discover we are alive and establish initial + * communications. + */ + ret = xpc_setup_rsvd_page(); + if (ret != 0) { + dev_err(xpc_part, "can't setup our reserved page\n"); + goto out_2; + } + + /* add ourselves to the reboot_notifier_list */ + ret = register_reboot_notifier(&xpc_reboot_notifier); + if (ret != 0) + dev_warn(xpc_part, "can't register reboot notifier\n"); + + /* add ourselves to the die_notifier list */ + ret = register_die_notifier(&xpc_die_notifier); + if (ret != 0) + dev_warn(xpc_part, "can't register die notifier\n"); + + /* + * The real work-horse behind xpc. This processes incoming + * interrupts and monitors remote heartbeats. + */ + kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME); + if (IS_ERR(kthread)) { + dev_err(xpc_part, "failed while forking hb check thread\n"); + ret = -EBUSY; + goto out_3; + } + + /* + * Startup a thread that will attempt to discover other partitions to + * activate based on info provided by SAL. This new thread is short + * lived and will exit once discovery is complete. 
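+ *
+ * Editorial note (not in the original source): if the kthread_run()
+ * below fails, xpc_discovery_exited is completed by hand so that the
+ * xpc_do_exit() path will not block waiting on a thread that never
+ * started.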
+ */ + kthread = kthread_run(xpc_initiate_discovery, NULL, + XPC_DISCOVERY_THREAD_NAME); + if (IS_ERR(kthread)) { + dev_err(xpc_part, "failed while forking discovery thread\n"); + + /* mark this new thread as a non-starter */ + complete(&xpc_discovery_exited); + + xpc_do_exit(xpUnloading); + return -EBUSY; + } + + /* set the interface to point at XPC's functions */ + xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, + xpc_initiate_send, xpc_initiate_send_notify, + xpc_initiate_received, xpc_initiate_partid_to_nasids); + + return 0; + + /* initialization was not successful */ +out_3: + xpc_teardown_rsvd_page(); + + (void)unregister_die_notifier(&xpc_die_notifier); + (void)unregister_reboot_notifier(&xpc_reboot_notifier); +out_2: + if (xpc_sysctl) + unregister_sysctl_table(xpc_sysctl); + + xpc_teardown_partitions(); +out_1: + if (is_uv_system()) + xpc_exit_uv(); + return ret; +} + +module_init(xpc_init); + +static void __exit +xpc_exit(void) +{ + xpc_do_exit(xpUnloading); +} + +module_exit(xpc_exit); + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Cross Partition Communication (XPC) support"); +MODULE_LICENSE("GPL"); + +module_param(xpc_hb_interval, int, 0); +MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between " + "heartbeat increments."); + +module_param(xpc_hb_check_interval, int, 0); +MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " + "heartbeat checks."); + +module_param(xpc_disengage_timelimit, int, 0); +MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait " + "for disengage to complete."); + +module_param(xpc_kdebug_ignore, int, 0); +MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by " + "other partitions when dropping into kdebug."); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c new file mode 100644 index 000000000..57df06820 --- /dev/null +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -0,0 +1,535 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * (C) Copyright 2020 Hewlett Packard Enterprise Development LP + * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) partition support. + * + * This is the part of XPC that detects the presence/absence of + * other partitions. It provides a heartbeat and monitors the + * heartbeats of other partitions. + * + */ + +#include <linux/device.h> +#include <linux/hardirq.h> +#include <linux/slab.h> +#include "xpc.h" +#include <asm/uv/uv_hub.h> + +/* XPC is exiting flag */ +int xpc_exiting; + +/* this partition's reserved page pointers */ +struct xpc_rsvd_page *xpc_rsvd_page; +static unsigned long *xpc_part_nasids; +unsigned long *xpc_mach_nasids; + +static int xpc_nasid_mask_nbytes; /* #of bytes in nasid mask */ +int xpc_nasid_mask_nlongs; /* #of longs in nasid mask */ + +struct xpc_partition *xpc_partitions; + +/* + * Guarantee that the kmalloc'd memory is cacheline aligned. 
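+ *
+ * Annotation (editorial, not in the original source): this is the usual
+ * over-allocate-and-round-up trick -- try a plain kmalloc() first and,
+ * only if the result happens to be misaligned, re-allocate with
+ * L1_CACHE_BYTES of slack and return the pointer rounded up by
+ * L1_CACHE_ALIGN(); *base keeps the address that must later be handed
+ * to kfree().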
+ */
+void *
+xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
+{
+ /* see if kmalloc will give us cacheline aligned memory by default */
+ *base = kmalloc(size, flags);
+ if (*base == NULL)
+ return NULL;
+
+ if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
+ return *base;
+
+ kfree(*base);
+
+ /* nope, we'll have to do it ourselves */
+ *base = kmalloc(size + L1_CACHE_BYTES, flags);
+ if (*base == NULL)
+ return NULL;
+
+ return (void *)L1_CACHE_ALIGN((u64)*base);
+}
+
+/*
+ * Given a nasid, get the physical address of the partition's reserved page
+ * for that nasid. This function returns 0 on any error.
+ */
+static unsigned long
+xpc_get_rsvd_page_pa(int nasid)
+{
+ enum xp_retval ret;
+ u64 cookie = 0;
+ unsigned long rp_pa = nasid; /* seed with nasid */
+ size_t len = 0;
+ size_t buf_len = 0;
+ void *buf = NULL;
+ void *buf_base = NULL;
+ enum xp_retval (*get_partition_rsvd_page_pa)
+ (void *, u64 *, unsigned long *, size_t *) =
+ xpc_arch_ops.get_partition_rsvd_page_pa;
+
+ while (1) {
+
+ /* !!! rp_pa will need to be _gpa on UV.
+ * ??? So do we save it into the architecture specific parts
+ * ??? of the xpc_partition structure? Do we rename this
+ * ??? function or have two versions? Rename rp_pa for UV to
+ * ??? rp_gpa?
+ */
+ ret = get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len);
+
+ dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
+ "address=0x%016lx, len=0x%016lx\n", ret,
+ (unsigned long)cookie, rp_pa, len);
+
+ if (ret != xpNeedMoreInfo)
+ break;
+
+ if (len > buf_len) {
+ kfree(buf_base);
+ buf_len = L1_CACHE_ALIGN(len);
+ buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
+ &buf_base);
+ if (buf_base == NULL) {
+ dev_err(xpc_part, "unable to kmalloc "
+ "len=0x%016lx\n", buf_len);
+ ret = xpNoMemory;
+ break;
+ }
+ }
+
+ ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
+ if (ret != xpSuccess) {
+ dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
+ break;
+ }
+ }
+
+ kfree(buf_base);
+
+ if (ret != xpSuccess)
+ rp_pa = 0;
+
+ dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
+ return rp_pa;
+}
+
+/*
+ * Fill the partition reserved page with the information needed by
+ * other partitions to discover we are alive and establish initial
+ * communications.
+ */ +int +xpc_setup_rsvd_page(void) +{ + int ret; + struct xpc_rsvd_page *rp; + unsigned long rp_pa; + unsigned long new_ts_jiffies; + + /* get the local reserved page's address */ + + preempt_disable(); + rp_pa = xpc_get_rsvd_page_pa(xp_cpu_to_nasid(smp_processor_id())); + preempt_enable(); + if (rp_pa == 0) { + dev_err(xpc_part, "SAL failed to locate the reserved page\n"); + return -ESRCH; + } + rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa)); + + if (rp->SAL_version < 3) { + /* SAL_versions < 3 had a SAL_partid defined as a u8 */ + rp->SAL_partid &= 0xff; + } + BUG_ON(rp->SAL_partid != xp_partition_id); + + if (rp->SAL_partid < 0 || rp->SAL_partid >= xp_max_npartitions) { + dev_err(xpc_part, "the reserved page's partid of %d is outside " + "supported range (< 0 || >= %d)\n", rp->SAL_partid, + xp_max_npartitions); + return -EINVAL; + } + + rp->version = XPC_RP_VERSION; + rp->max_npartitions = xp_max_npartitions; + + /* establish the actual sizes of the nasid masks */ + if (rp->SAL_version == 1) { + /* SAL_version 1 didn't set the nasids_size field */ + rp->SAL_nasids_size = 128; + } + xpc_nasid_mask_nbytes = rp->SAL_nasids_size; + xpc_nasid_mask_nlongs = BITS_TO_LONGS(rp->SAL_nasids_size * + BITS_PER_BYTE); + + /* setup the pointers to the various items in the reserved page */ + xpc_part_nasids = XPC_RP_PART_NASIDS(rp); + xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp); + + ret = xpc_arch_ops.setup_rsvd_page(rp); + if (ret != 0) + return ret; + + /* + * Set timestamp of when reserved page was setup by XPC. + * This signifies to the remote partition that our reserved + * page is initialized. + */ + new_ts_jiffies = jiffies; + if (new_ts_jiffies == 0 || new_ts_jiffies == rp->ts_jiffies) + new_ts_jiffies++; + rp->ts_jiffies = new_ts_jiffies; + + xpc_rsvd_page = rp; + return 0; +} + +void +xpc_teardown_rsvd_page(void) +{ + /* a zero timestamp indicates our rsvd page is not initialized */ + xpc_rsvd_page->ts_jiffies = 0; +} + +/* + * Get a copy of a portion of the remote partition's rsvd page. + * + * remote_rp points to a buffer that is cacheline aligned for BTE copies and + * is large enough to contain a copy of their reserved page header and + * part_nasids mask. 
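+ *
+ * Annotation (editorial, not in the original source): "large enough"
+ * concretely means XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes, which
+ * is exactly what the xp_remote_memcpy() below pulls over and what
+ * xpc_discovery() allocates for this purpose.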
+ */ +enum xp_retval +xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids, + struct xpc_rsvd_page *remote_rp, unsigned long *remote_rp_pa) +{ + int l; + enum xp_retval ret; + + /* get the reserved page's physical address */ + + *remote_rp_pa = xpc_get_rsvd_page_pa(nasid); + if (*remote_rp_pa == 0) + return xpNoRsvdPageAddr; + + /* pull over the reserved page header and part_nasids mask */ + ret = xp_remote_memcpy(xp_pa(remote_rp), *remote_rp_pa, + XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes); + if (ret != xpSuccess) + return ret; + + if (discovered_nasids != NULL) { + unsigned long *remote_part_nasids = + XPC_RP_PART_NASIDS(remote_rp); + + for (l = 0; l < xpc_nasid_mask_nlongs; l++) + discovered_nasids[l] |= remote_part_nasids[l]; + } + + /* zero timestamp indicates the reserved page has not been setup */ + if (remote_rp->ts_jiffies == 0) + return xpRsvdPageNotSet; + + if (XPC_VERSION_MAJOR(remote_rp->version) != + XPC_VERSION_MAJOR(XPC_RP_VERSION)) { + return xpBadVersion; + } + + /* check that both remote and local partids are valid for each side */ + if (remote_rp->SAL_partid < 0 || + remote_rp->SAL_partid >= xp_max_npartitions || + remote_rp->max_npartitions <= xp_partition_id) { + return xpInvalidPartid; + } + + if (remote_rp->SAL_partid == xp_partition_id) + return xpLocalPartid; + + return xpSuccess; +} + +/* + * See if the other side has responded to a partition deactivate request + * from us. Though we requested the remote partition to deactivate with regard + * to us, we really only need to wait for the other side to disengage from us. + */ +int +xpc_partition_disengaged(struct xpc_partition *part) +{ + short partid = XPC_PARTID(part); + int disengaged; + + disengaged = !xpc_arch_ops.partition_engaged(partid); + if (part->disengage_timeout) { + if (!disengaged) { + if (time_is_after_jiffies(part->disengage_timeout)) { + /* timelimit hasn't been reached yet */ + return 0; + } + + /* + * Other side hasn't responded to our deactivate + * request in a timely fashion, so assume it's dead. + */ + + dev_info(xpc_part, "deactivate request to remote " + "partition %d timed out\n", partid); + xpc_disengage_timedout = 1; + xpc_arch_ops.assume_partition_disengaged(partid); + disengaged = 1; + } + part->disengage_timeout = 0; + + /* cancel the timer function, provided it's not us */ + if (!in_interrupt()) + del_singleshot_timer_sync(&part->disengage_timer); + + DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING && + part->act_state != XPC_P_AS_INACTIVE); + if (part->act_state != XPC_P_AS_INACTIVE) + xpc_wakeup_channel_mgr(part); + + xpc_arch_ops.cancel_partition_deactivation_request(part); + } + return disengaged; +} + +/* + * Mark specified partition as active. + */ +enum xp_retval +xpc_mark_partition_active(struct xpc_partition *part) +{ + unsigned long irq_flags; + enum xp_retval ret; + + dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); + + spin_lock_irqsave(&part->act_lock, irq_flags); + if (part->act_state == XPC_P_AS_ACTIVATING) { + part->act_state = XPC_P_AS_ACTIVE; + ret = xpSuccess; + } else { + DBUG_ON(part->reason == xpSuccess); + ret = part->reason; + } + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + return ret; +} + +/* + * Start the process of deactivating the specified partition. 
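+ *
+ * Editorial note (not in the original source): repeated requests are
+ * collapsed below -- once the partition is already deactivating, the
+ * recorded reason is only overwritten when it refines a generic
+ * xpUnloading or when it is xpReactivating, which ensures the
+ * partition is later brought back up.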
+ */ +void +xpc_deactivate_partition(const int line, struct xpc_partition *part, + enum xp_retval reason) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&part->act_lock, irq_flags); + + if (part->act_state == XPC_P_AS_INACTIVE) { + XPC_SET_REASON(part, reason, line); + spin_unlock_irqrestore(&part->act_lock, irq_flags); + if (reason == xpReactivating) { + /* we interrupt ourselves to reactivate partition */ + xpc_arch_ops.request_partition_reactivation(part); + } + return; + } + if (part->act_state == XPC_P_AS_DEACTIVATING) { + if ((part->reason == xpUnloading && reason != xpUnloading) || + reason == xpReactivating) { + XPC_SET_REASON(part, reason, line); + } + spin_unlock_irqrestore(&part->act_lock, irq_flags); + return; + } + + part->act_state = XPC_P_AS_DEACTIVATING; + XPC_SET_REASON(part, reason, line); + + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + /* ask remote partition to deactivate with regard to us */ + xpc_arch_ops.request_partition_deactivation(part); + + /* set a timelimit on the disengage phase of the deactivation request */ + part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ); + part->disengage_timer.expires = part->disengage_timeout; + add_timer(&part->disengage_timer); + + dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n", + XPC_PARTID(part), reason); + + xpc_partition_going_down(part, reason); +} + +/* + * Mark specified partition as inactive. + */ +void +xpc_mark_partition_inactive(struct xpc_partition *part) +{ + unsigned long irq_flags; + + dev_dbg(xpc_part, "setting partition %d to INACTIVE\n", + XPC_PARTID(part)); + + spin_lock_irqsave(&part->act_lock, irq_flags); + part->act_state = XPC_P_AS_INACTIVE; + spin_unlock_irqrestore(&part->act_lock, irq_flags); + part->remote_rp_pa = 0; +} + +/* + * SAL has provided a partition and machine mask. The partition mask + * contains a bit for each even nasid in our partition. The machine + * mask contains a bit for each even nasid in the entire machine. + * + * Using those two bit arrays, we can determine which nasids are + * known in the machine. Each should also have a reserved page + * initialized if they are available for partitioning. + */ +void +xpc_discovery(void) +{ + void *remote_rp_base; + struct xpc_rsvd_page *remote_rp; + unsigned long remote_rp_pa; + int region; + int region_size; + int max_regions; + int nasid; + unsigned long *discovered_nasids; + enum xp_retval ret; + + remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE + + xpc_nasid_mask_nbytes, + GFP_KERNEL, &remote_rp_base); + if (remote_rp == NULL) + return; + + discovered_nasids = kcalloc(xpc_nasid_mask_nlongs, sizeof(long), + GFP_KERNEL); + if (discovered_nasids == NULL) { + kfree(remote_rp_base); + return; + } + + /* + * The term 'region' in this context refers to the minimum number of + * nodes that can comprise an access protection grouping. The access + * protection is in regards to memory, IOI and IPI. 
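+ *
+ * Worked example (editorial, not in the original source): the switch
+ * below widens the search as region_size shrinks so the same nasid
+ * space is always covered; e.g. a region_size of 128 doubles
+ * max_regions three times (64 -> 512) and the scan granularity drops
+ * to 16, giving 512 * 16 * 2 = 16384 nasids either way.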
+ */ + region_size = xp_region_size; + + if (is_uv_system()) + max_regions = 256; + else { + max_regions = 64; + + switch (region_size) { + case 128: + max_regions *= 2; + fallthrough; + case 64: + max_regions *= 2; + fallthrough; + case 32: + max_regions *= 2; + region_size = 16; + } + } + + for (region = 0; region < max_regions; region++) { + + if (xpc_exiting) + break; + + dev_dbg(xpc_part, "searching region %d\n", region); + + for (nasid = (region * region_size * 2); + nasid < ((region + 1) * region_size * 2); nasid += 2) { + + if (xpc_exiting) + break; + + dev_dbg(xpc_part, "checking nasid %d\n", nasid); + + if (test_bit(nasid / 2, xpc_part_nasids)) { + dev_dbg(xpc_part, "PROM indicates Nasid %d is " + "part of the local partition; skipping " + "region\n", nasid); + break; + } + + if (!(test_bit(nasid / 2, xpc_mach_nasids))) { + dev_dbg(xpc_part, "PROM indicates Nasid %d was " + "not on Numa-Link network at reset\n", + nasid); + continue; + } + + if (test_bit(nasid / 2, discovered_nasids)) { + dev_dbg(xpc_part, "Nasid %d is part of a " + "partition which was previously " + "discovered\n", nasid); + continue; + } + + /* pull over the rsvd page header & part_nasids mask */ + + ret = xpc_get_remote_rp(nasid, discovered_nasids, + remote_rp, &remote_rp_pa); + if (ret != xpSuccess) { + dev_dbg(xpc_part, "unable to get reserved page " + "from nasid %d, reason=%d\n", nasid, + ret); + + if (ret == xpLocalPartid) + break; + + continue; + } + + xpc_arch_ops.request_partition_activation(remote_rp, + remote_rp_pa, nasid); + } + } + + kfree(discovered_nasids); + kfree(remote_rp_base); +} + +/* + * Given a partid, get the nasids owned by that partition from the + * remote partition's reserved page. + */ +enum xp_retval +xpc_initiate_partid_to_nasids(short partid, void *nasid_mask) +{ + struct xpc_partition *part; + unsigned long part_nasid_pa; + + part = &xpc_partitions[partid]; + if (part->remote_rp_pa == 0) + return xpPartitionDown; + + memset(nasid_mask, 0, xpc_nasid_mask_nbytes); + + part_nasid_pa = (unsigned long)XPC_RP_PART_NASIDS(part->remote_rp_pa); + + return xp_remote_memcpy(xp_pa(nasid_mask), part_nasid_pa, + xpc_nasid_mask_nbytes); +} diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c new file mode 100644 index 000000000..7791bde81 --- /dev/null +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -0,0 +1,1817 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) uv-based functions. + * + * Architecture specific implementation of common functions. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/cpu.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/numa.h> +#include <asm/uv/uv_hub.h> +#if defined CONFIG_X86_64 +#include <asm/uv/bios.h> +#include <asm/uv/uv_irq.h> +#elif defined CONFIG_IA64_SGI_UV +#include <asm/sn/intr.h> +#include <asm/sn/sn_sal.h> +#endif +#include "../sgi-gru/gru.h" +#include "../sgi-gru/grukservices.h" +#include "xpc.h" + +#if defined CONFIG_IA64_SGI_UV +struct uv_IO_APIC_route_entry { + __u64 vector : 8, + delivery_mode : 3, + dest_mode : 1, + delivery_status : 1, + polarity : 1, + __reserved_1 : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15, + dest : 32; +}; + +#define sn_partition_id 0 +#endif + +static struct xpc_heartbeat_uv *xpc_heartbeat_uv; + +#define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES) +#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ + XPC_ACTIVATE_MSG_SIZE_UV) +#define XPC_ACTIVATE_IRQ_NAME "xpc_activate" + +#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES) +#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ + XPC_NOTIFY_MSG_SIZE_UV) +#define XPC_NOTIFY_IRQ_NAME "xpc_notify" + +static int xpc_mq_node = NUMA_NO_NODE; + +static struct xpc_gru_mq_uv *xpc_activate_mq_uv; +static struct xpc_gru_mq_uv *xpc_notify_mq_uv; + +static int +xpc_setup_partitions_uv(void) +{ + short partid; + struct xpc_partition_uv *part_uv; + + for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { + part_uv = &xpc_partitions[partid].sn.uv; + + mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex); + spin_lock_init(&part_uv->flags_lock); + part_uv->remote_act_state = XPC_P_AS_INACTIVE; + } + return 0; +} + +static void +xpc_teardown_partitions_uv(void) +{ + short partid; + struct xpc_partition_uv *part_uv; + unsigned long irq_flags; + + for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { + part_uv = &xpc_partitions[partid].sn.uv; + + if (part_uv->cached_activate_gru_mq_desc != NULL) { + mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex); + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + kfree(part_uv->cached_activate_gru_mq_desc); + part_uv->cached_activate_gru_mq_desc = NULL; + mutex_unlock(&part_uv-> + cached_activate_gru_mq_desc_mutex); + } + } +} + +static int +xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) +{ + int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); + +#if defined CONFIG_X86_64 + mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, + UV_AFFINITY_CPU); + if (mq->irq < 0) + return mq->irq; + + mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); + +#elif defined CONFIG_IA64_SGI_UV + if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0) + mq->irq = SGI_XPC_ACTIVATE; + else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0) + mq->irq = SGI_XPC_NOTIFY; + else + return -EINVAL; + + mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq; + uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value); +#else + #error not a supported configuration +#endif + + return 0; +} + +static void +xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) +{ +#if defined CONFIG_X86_64 + uv_teardown_irq(mq->irq); + +#elif defined CONFIG_IA64_SGI_UV + int mmr_pnode; + unsigned long mmr_value; + + mmr_pnode = 
uv_blade_to_pnode(mq->mmr_blade); + mmr_value = 1UL << 16; + + uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value); +#else + #error not a supported configuration +#endif +} + +static int +xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq) +{ + int ret; + +#if defined CONFIG_IA64_SGI_UV + int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); + + ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address), + mq->order, &mq->mmr_offset); + if (ret < 0) { + dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n", + ret); + return -EBUSY; + } +#elif defined CONFIG_X86_64 + ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address), + mq->order, &mq->mmr_offset); + if (ret < 0) { + dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, " + "ret=%d\n", ret); + return ret; + } +#else + #error not a supported configuration +#endif + + mq->watchlist_num = ret; + return 0; +} + +static void +xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq) +{ + int ret; + int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); + +#if defined CONFIG_X86_64 + ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num); + BUG_ON(ret != BIOS_STATUS_SUCCESS); +#elif defined CONFIG_IA64_SGI_UV + ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num); + BUG_ON(ret != SALRET_OK); +#else + #error not a supported configuration +#endif +} + +static struct xpc_gru_mq_uv * +xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, + irq_handler_t irq_handler) +{ + enum xp_retval xp_ret; + int ret; + int nid; + int nasid; + int pg_order; + struct page *page; + struct xpc_gru_mq_uv *mq; + struct uv_IO_APIC_route_entry *mmr_value; + + mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL); + if (mq == NULL) { + dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() " + "a xpc_gru_mq_uv structure\n"); + ret = -ENOMEM; + goto out_0; + } + + mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc), + GFP_KERNEL); + if (mq->gru_mq_desc == NULL) { + dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() " + "a gru_message_queue_desc structure\n"); + ret = -ENOMEM; + goto out_1; + } + + pg_order = get_order(mq_size); + mq->order = pg_order + PAGE_SHIFT; + mq_size = 1UL << mq->order; + + mq->mmr_blade = uv_cpu_to_blade_id(cpu); + + nid = cpu_to_node(cpu); + page = __alloc_pages_node(nid, + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, + pg_order); + if (page == NULL) { + dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " + "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); + ret = -ENOMEM; + goto out_2; + } + mq->address = page_address(page); + + /* enable generation of irq when GRU mq operation occurs to this mq */ + ret = xpc_gru_mq_watchlist_alloc_uv(mq); + if (ret != 0) + goto out_3; + + ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name); + if (ret != 0) + goto out_4; + + ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL); + if (ret != 0) { + dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n", + mq->irq, -ret); + goto out_5; + } + + nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu)); + + mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value; + ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size, + nasid, mmr_value->vector, mmr_value->dest); + if (ret != 0) { + dev_err(xpc_part, "gru_create_message_queue() returned " + "error=%d\n", ret); + ret = -EINVAL; + goto out_6; + } + + /* allow other partitions to access this GRU mq */ + xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size); + if (xp_ret != xpSuccess) { + ret = -EACCES; + goto 
out_6; + } + + return mq; + + /* something went wrong */ +out_6: + free_irq(mq->irq, NULL); +out_5: + xpc_release_gru_mq_irq_uv(mq); +out_4: + xpc_gru_mq_watchlist_free_uv(mq); +out_3: + free_pages((unsigned long)mq->address, pg_order); +out_2: + kfree(mq->gru_mq_desc); +out_1: + kfree(mq); +out_0: + return ERR_PTR(ret); +} + +static void +xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq) +{ + unsigned int mq_size; + int pg_order; + int ret; + + /* disallow other partitions to access GRU mq */ + mq_size = 1UL << mq->order; + ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size); + BUG_ON(ret != xpSuccess); + + /* unregister irq handler and release mq irq/vector mapping */ + free_irq(mq->irq, NULL); + xpc_release_gru_mq_irq_uv(mq); + + /* disable generation of irq when GRU mq op occurs to this mq */ + xpc_gru_mq_watchlist_free_uv(mq); + + pg_order = mq->order - PAGE_SHIFT; + free_pages((unsigned long)mq->address, pg_order); + + kfree(mq); +} + +static enum xp_retval +xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg, + size_t msg_size) +{ + enum xp_retval xp_ret; + int ret; + + while (1) { + ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size); + if (ret == MQE_OK) { + xp_ret = xpSuccess; + break; + } + + if (ret == MQE_QUEUE_FULL) { + dev_dbg(xpc_chan, "gru_send_message_gpa() returned " + "error=MQE_QUEUE_FULL\n"); + /* !!! handle QLimit reached; delay & try again */ + /* ??? Do we add a limit to the number of retries? */ + (void)msleep_interruptible(10); + } else if (ret == MQE_CONGESTION) { + dev_dbg(xpc_chan, "gru_send_message_gpa() returned " + "error=MQE_CONGESTION\n"); + /* !!! handle LB Overflow; simply try again */ + /* ??? Do we add a limit to the number of retries? */ + } else { + /* !!! Currently this is MQE_UNEXPECTED_CB_ERR */ + dev_err(xpc_chan, "gru_send_message_gpa() returned " + "error=%d\n", ret); + xp_ret = xpGruSendMqError; + break; + } + } + return xp_ret; +} + +static void +xpc_process_activate_IRQ_rcvd_uv(void) +{ + unsigned long irq_flags; + short partid; + struct xpc_partition *part; + u8 act_state_req; + + DBUG_ON(xpc_activate_IRQ_rcvd == 0); + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { + part = &xpc_partitions[partid]; + + if (part->sn.uv.act_state_req == 0) + continue; + + xpc_activate_IRQ_rcvd--; + BUG_ON(xpc_activate_IRQ_rcvd < 0); + + act_state_req = part->sn.uv.act_state_req; + part->sn.uv.act_state_req = 0; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + if (act_state_req == XPC_P_ASR_ACTIVATE_UV) { + if (part->act_state == XPC_P_AS_INACTIVE) + xpc_activate_partition(part); + else if (part->act_state == XPC_P_AS_DEACTIVATING) + XPC_DEACTIVATE_PARTITION(part, xpReactivating); + + } else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) { + if (part->act_state == XPC_P_AS_INACTIVE) + xpc_activate_partition(part); + else + XPC_DEACTIVATE_PARTITION(part, xpReactivating); + + } else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) { + XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason); + + } else { + BUG(); + } + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (xpc_activate_IRQ_rcvd == 0) + break; + } + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + +} + +static void +xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, + struct xpc_activate_mq_msghdr_uv *msg_hdr, + int part_setup, + int *wakeup_hb_checker) +{ + unsigned long irq_flags; + struct xpc_partition_uv *part_uv = &part->sn.uv; + struct 
xpc_openclose_args *args; + + part_uv->remote_act_state = msg_hdr->act_state; + + switch (msg_hdr->type) { + case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV: + /* syncing of remote_act_state was just done above */ + break; + + case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: { + struct xpc_activate_mq_msg_activate_req_uv *msg; + + /* + * ??? Do we deal here with ts_jiffies being different + * ??? if act_state != XPC_P_AS_INACTIVE instead of + * ??? below? + */ + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_activate_req_uv, hdr); + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV; + part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */ + part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies; + part_uv->heartbeat_gpa = msg->heartbeat_gpa; + + if (msg->activate_gru_mq_desc_gpa != + part_uv->activate_gru_mq_desc_gpa) { + spin_lock(&part_uv->flags_lock); + part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV; + spin_unlock(&part_uv->flags_lock); + part_uv->activate_gru_mq_desc_gpa = + msg->activate_gru_mq_desc_gpa; + } + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + (*wakeup_hb_checker)++; + break; + } + case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: { + struct xpc_activate_mq_msg_deactivate_req_uv *msg; + + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_deactivate_req_uv, hdr); + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; + part_uv->reason = msg->reason; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + (*wakeup_hb_checker)++; + return; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: { + struct xpc_activate_mq_msg_chctl_closerequest_uv *msg; + + if (!part_setup) + break; + + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_chctl_closerequest_uv, + hdr); + args = &part->remote_openclose_args[msg->ch_number]; + args->reason = msg->reason; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: { + struct xpc_activate_mq_msg_chctl_closereply_uv *msg; + + if (!part_setup) + break; + + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_chctl_closereply_uv, + hdr); + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: { + struct xpc_activate_mq_msg_chctl_openrequest_uv *msg; + + if (!part_setup) + break; + + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_chctl_openrequest_uv, + hdr); + args = &part->remote_openclose_args[msg->ch_number]; + args->entry_size = msg->entry_size; + args->local_nentries = msg->local_nentries; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: { + struct xpc_activate_mq_msg_chctl_openreply_uv *msg; + + if (!part_setup) + break; + + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_chctl_openreply_uv, hdr); + 
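/*
+		 * Record the remote side's openreply parameters; the channel
+		 * manager consumes them when it processes the OPENREPLY flag
+		 * set below.
+		 */
+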
args = &part->remote_openclose_args[msg->ch_number]; + args->remote_nentries = msg->remote_nentries; + args->local_nentries = msg->local_nentries; + args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: { + struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg; + + if (!part_setup) + break; + + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_chctl_opencomplete_uv, hdr); + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + } + fallthrough; + case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV: + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags |= XPC_P_ENGAGED_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + break; + + case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV: + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags &= ~XPC_P_ENGAGED_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + break; + + default: + dev_err(xpc_part, "received unknown activate_mq msg type=%d " + "from partition=%d\n", msg_hdr->type, XPC_PARTID(part)); + + /* get hb checker to deactivate from the remote partition */ + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; + part_uv->reason = xpBadMsgType; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + (*wakeup_hb_checker)++; + return; + } + + if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies && + part->remote_rp_ts_jiffies != 0) { + /* + * ??? Does what we do here need to be sensitive to + * ??? act_state or remote_act_state? 
+ */ + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + (*wakeup_hb_checker)++; + } +} + +static irqreturn_t +xpc_handle_activate_IRQ_uv(int irq, void *dev_id) +{ + struct xpc_activate_mq_msghdr_uv *msg_hdr; + short partid; + struct xpc_partition *part; + int wakeup_hb_checker = 0; + int part_referenced; + + while (1) { + msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc); + if (msg_hdr == NULL) + break; + + partid = msg_hdr->partid; + if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { + dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() " + "received invalid partid=0x%x in message\n", + partid); + } else { + part = &xpc_partitions[partid]; + + part_referenced = xpc_part_ref(part); + xpc_handle_activate_mq_msg_uv(part, msg_hdr, + part_referenced, + &wakeup_hb_checker); + if (part_referenced) + xpc_part_deref(part); + } + + gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr); + } + + if (wakeup_hb_checker) + wake_up_interruptible(&xpc_activate_IRQ_wq); + + return IRQ_HANDLED; +} + +static enum xp_retval +xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc, + unsigned long gru_mq_desc_gpa) +{ + enum xp_retval ret; + + ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa, + sizeof(struct gru_message_queue_desc)); + if (ret == xpSuccess) + gru_mq_desc->mq = NULL; + + return ret; +} + +static enum xp_retval +xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size, + int msg_type) +{ + struct xpc_activate_mq_msghdr_uv *msg_hdr = msg; + struct xpc_partition_uv *part_uv = &part->sn.uv; + struct gru_message_queue_desc *gru_mq_desc; + unsigned long irq_flags; + enum xp_retval ret; + + DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV); + + msg_hdr->type = msg_type; + msg_hdr->partid = xp_partition_id; + msg_hdr->act_state = part->act_state; + msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies; + + mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex); +again: + if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) { + gru_mq_desc = part_uv->cached_activate_gru_mq_desc; + if (gru_mq_desc == NULL) { + gru_mq_desc = kmalloc(sizeof(struct + gru_message_queue_desc), + GFP_ATOMIC); + if (gru_mq_desc == NULL) { + ret = xpNoMemory; + goto done; + } + part_uv->cached_activate_gru_mq_desc = gru_mq_desc; + } + + ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc, + part_uv-> + activate_gru_mq_desc_gpa); + if (ret != xpSuccess) + goto done; + + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + } + + /* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? 
*/ + ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg, + msg_size); + if (ret != xpSuccess) { + smp_rmb(); /* ensure a fresh copy of part_uv->flags */ + if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) + goto again; + } +done: + mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex); + return ret; +} + +static void +xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg, + size_t msg_size, int msg_type) +{ + enum xp_retval ret; + + ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type); + if (unlikely(ret != xpSuccess)) + XPC_DEACTIVATE_PARTITION(part, ret); +} + +static void +xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags, + void *msg, size_t msg_size, int msg_type) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + enum xp_retval ret; + + ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type); + if (unlikely(ret != xpSuccess)) { + if (irq_flags != NULL) + spin_unlock_irqrestore(&ch->lock, *irq_flags); + + XPC_DEACTIVATE_PARTITION(part, ret); + + if (irq_flags != NULL) + spin_lock_irqsave(&ch->lock, *irq_flags); + } +} + +static void +xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req) +{ + unsigned long irq_flags; + struct xpc_partition_uv *part_uv = &part->sn.uv; + + /* + * !!! Make our side think that the remote partition sent an activate + * !!! mq message our way by doing what the activate IRQ handler would + * !!! do had one really been sent. + */ + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = act_state_req; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + wake_up_interruptible(&xpc_activate_IRQ_wq); +} + +static enum xp_retval +xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa, + size_t *len) +{ + s64 status; + enum xp_retval ret; + +#if defined CONFIG_X86_64 + status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa, + (u64 *)len); + if (status == BIOS_STATUS_SUCCESS) + ret = xpSuccess; + else if (status == BIOS_STATUS_MORE_PASSES) + ret = xpNeedMoreInfo; + else + ret = xpBiosError; + +#elif defined CONFIG_IA64_SGI_UV + status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len); + if (status == SALRET_OK) + ret = xpSuccess; + else if (status == SALRET_MORE_PASSES) + ret = xpNeedMoreInfo; + else + ret = xpSalError; + +#else + #error not a supported configuration +#endif + + return ret; +} + +static int +xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp) +{ + xpc_heartbeat_uv = + &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat; + rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv); + rp->sn.uv.activate_gru_mq_desc_gpa = + uv_gpa(xpc_activate_mq_uv->gru_mq_desc); + return 0; +} + +static void +xpc_allow_hb_uv(short partid) +{ +} + +static void +xpc_disallow_hb_uv(short partid) +{ +} + +static void +xpc_disallow_all_hbs_uv(void) +{ +} + +static void +xpc_increment_heartbeat_uv(void) +{ + xpc_heartbeat_uv->value++; +} + +static void +xpc_offline_heartbeat_uv(void) +{ + xpc_increment_heartbeat_uv(); + xpc_heartbeat_uv->offline = 1; +} + +static void +xpc_online_heartbeat_uv(void) +{ + xpc_increment_heartbeat_uv(); + xpc_heartbeat_uv->offline = 0; +} + +static void +xpc_heartbeat_init_uv(void) +{ + xpc_heartbeat_uv->value = 1; + xpc_heartbeat_uv->offline = 0; +} + +static void +xpc_heartbeat_exit_uv(void) +{ + xpc_offline_heartbeat_uv(); +} + +static enum xp_retval 
+xpc_get_remote_heartbeat_uv(struct xpc_partition *part) +{ + struct xpc_partition_uv *part_uv = &part->sn.uv; + enum xp_retval ret; + + ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat), + part_uv->heartbeat_gpa, + sizeof(struct xpc_heartbeat_uv)); + if (ret != xpSuccess) + return ret; + + if (part_uv->cached_heartbeat.value == part->last_heartbeat && + !part_uv->cached_heartbeat.offline) { + + ret = xpNoHeartbeat; + } else { + part->last_heartbeat = part_uv->cached_heartbeat.value; + } + return ret; +} + +static void +xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp, + unsigned long remote_rp_gpa, int nasid) +{ + short partid = remote_rp->SAL_partid; + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_activate_mq_msg_activate_req_uv msg; + + part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */ + part->remote_rp_ts_jiffies = remote_rp->ts_jiffies; + part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa; + part->sn.uv.activate_gru_mq_desc_gpa = + remote_rp->sn.uv.activate_gru_mq_desc_gpa; + + /* + * ??? Is it a good idea to make this conditional on what is + * ??? potentially stale state information? + */ + if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) { + msg.rp_gpa = uv_gpa(xpc_rsvd_page); + msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa; + msg.activate_gru_mq_desc_gpa = + xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa; + xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV); + } + + if (part->act_state == XPC_P_AS_INACTIVE) + xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV); +} + +static void +xpc_request_partition_reactivation_uv(struct xpc_partition *part) +{ + xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV); +} + +static void +xpc_request_partition_deactivation_uv(struct xpc_partition *part) +{ + struct xpc_activate_mq_msg_deactivate_req_uv msg; + + /* + * ??? Is it a good idea to make this conditional on what is + * ??? potentially stale state information? 
+ */ + if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING && + part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) { + + msg.reason = part->reason; + xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV); + } +} + +static void +xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part) +{ + /* nothing needs to be done */ + return; +} + +static void +xpc_init_fifo_uv(struct xpc_fifo_head_uv *head) +{ + head->first = NULL; + head->last = NULL; + spin_lock_init(&head->lock); + head->n_entries = 0; +} + +static void * +xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head) +{ + unsigned long irq_flags; + struct xpc_fifo_entry_uv *first; + + spin_lock_irqsave(&head->lock, irq_flags); + first = head->first; + if (head->first != NULL) { + head->first = first->next; + if (head->first == NULL) + head->last = NULL; + + head->n_entries--; + BUG_ON(head->n_entries < 0); + + first->next = NULL; + } + spin_unlock_irqrestore(&head->lock, irq_flags); + return first; +} + +static void +xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head, + struct xpc_fifo_entry_uv *last) +{ + unsigned long irq_flags; + + last->next = NULL; + spin_lock_irqsave(&head->lock, irq_flags); + if (head->last != NULL) + head->last->next = last; + else + head->first = last; + head->last = last; + head->n_entries++; + spin_unlock_irqrestore(&head->lock, irq_flags); +} + +static int +xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head) +{ + return head->n_entries; +} + +/* + * Setup the channel structures that are uv specific. + */ +static enum xp_retval +xpc_setup_ch_structures_uv(struct xpc_partition *part) +{ + struct xpc_channel_uv *ch_uv; + int ch_number; + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch_uv = &part->channels[ch_number].sn.uv; + + xpc_init_fifo_uv(&ch_uv->msg_slot_free_list); + xpc_init_fifo_uv(&ch_uv->recv_msg_list); + } + + return xpSuccess; +} + +/* + * Teardown the channel structures that are uv specific. + */ +static void +xpc_teardown_ch_structures_uv(struct xpc_partition *part) +{ + /* nothing needs to be done */ + return; +} + +static enum xp_retval +xpc_make_first_contact_uv(struct xpc_partition *part) +{ + struct xpc_activate_mq_msg_uv msg; + + /* + * We send a sync msg to get the remote partition's remote_act_state + * updated to our current act_state which at this point should + * be XPC_P_AS_ACTIVATING. 
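+ *
+ * (Note: every activate-mq message carries the sender's act_state in its
+ * header and the receiving side latches it into remote_act_state, so the
+ * wait loop below simply polls until the peer's next message shows it
+ * activating or active.)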
+	 */
+	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
+				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
+
+	while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
+		 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
+
+		dev_dbg(xpc_part, "waiting to make first contact with "
+			"partition %d\n", XPC_PARTID(part));
+
+		/* wait a 1/4 of a second or so */
+		(void)msleep_interruptible(250);
+
+		if (part->act_state == XPC_P_AS_DEACTIVATING)
+			return part->reason;
+	}
+
+	return xpSuccess;
+}
+
+static u64
+xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
+{
+	unsigned long irq_flags;
+	union xpc_channel_ctl_flags chctl;
+
+	spin_lock_irqsave(&part->chctl_lock, irq_flags);
+	chctl = part->chctl;
+	if (chctl.all_flags != 0)
+		part->chctl.all_flags = 0;
+
+	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+	return chctl.all_flags;
+}
+
+static enum xp_retval
+xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
+{
+	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+	struct xpc_send_msg_slot_uv *msg_slot;
+	unsigned long irq_flags;
+	int nentries;
+	int entry;
+	size_t nbytes;
+
+	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
+		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
+		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
+		if (ch_uv->send_msg_slots == NULL)
+			continue;
+
+		for (entry = 0; entry < nentries; entry++) {
+			msg_slot = &ch_uv->send_msg_slots[entry];
+
+			msg_slot->msg_slot_number = entry;
+			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
+					      &msg_slot->next);
+		}
+
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		if (nentries < ch->local_nentries)
+			ch->local_nentries = nentries;
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		return xpSuccess;
+	}
+
+	return xpNoMemory;
+}
+
+static enum xp_retval
+xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
+{
+	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+	struct xpc_notify_mq_msg_uv *msg_slot;
+	unsigned long irq_flags;
+	int nentries;
+	int entry;
+	size_t nbytes;
+
+	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
+		nbytes = nentries * ch->entry_size;
+		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
+		if (ch_uv->recv_msg_slots == NULL)
+			continue;
+
+		for (entry = 0; entry < nentries; entry++) {
+			msg_slot = ch_uv->recv_msg_slots +
+				   entry * ch->entry_size;
+
+			msg_slot->hdr.msg_slot_number = entry;
+		}
+
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		if (nentries < ch->remote_nentries)
+			ch->remote_nentries = nentries;
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		return xpSuccess;
+	}
+
+	return xpNoMemory;
+}
+
+/*
+ * Allocate msg_slots associated with the channel.
+ */
+static enum xp_retval
+xpc_setup_msg_structures_uv(struct xpc_channel *ch)
+{
+	enum xp_retval ret;
+	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+
+	DBUG_ON(ch->flags & XPC_C_SETUP);
+
+	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
+						   gru_message_queue_desc),
+						   GFP_KERNEL);
+	if (ch_uv->cached_notify_gru_mq_desc == NULL)
+		return xpNoMemory;
+
+	ret = xpc_allocate_send_msg_slot_uv(ch);
+	if (ret == xpSuccess) {
+
+		ret = xpc_allocate_recv_msg_slot_uv(ch);
+		if (ret != xpSuccess) {
+			kfree(ch_uv->send_msg_slots);
+			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
+		}
+	}
+	return ret;
+}
+
+/*
+ * Free up msg_slots and clear other state that was set up for the specified
+ * channel.
+ */ +static void +xpc_teardown_msg_structures_uv(struct xpc_channel *ch) +{ + struct xpc_channel_uv *ch_uv = &ch->sn.uv; + + lockdep_assert_held(&ch->lock); + + kfree(ch_uv->cached_notify_gru_mq_desc); + ch_uv->cached_notify_gru_mq_desc = NULL; + + if (ch->flags & XPC_C_SETUP) { + xpc_init_fifo_uv(&ch_uv->msg_slot_free_list); + kfree(ch_uv->send_msg_slots); + xpc_init_fifo_uv(&ch_uv->recv_msg_list); + kfree(ch_uv->recv_msg_slots); + } +} + +static void +xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_activate_mq_msg_chctl_closerequest_uv msg; + + msg.ch_number = ch->number; + msg.reason = ch->reason; + xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV); +} + +static void +xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_activate_mq_msg_chctl_closereply_uv msg; + + msg.ch_number = ch->number; + xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV); +} + +static void +xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_activate_mq_msg_chctl_openrequest_uv msg; + + msg.ch_number = ch->number; + msg.entry_size = ch->entry_size; + msg.local_nentries = ch->local_nentries; + xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV); +} + +static void +xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_activate_mq_msg_chctl_openreply_uv msg; + + msg.ch_number = ch->number; + msg.local_nentries = ch->local_nentries; + msg.remote_nentries = ch->remote_nentries; + msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc); + xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV); +} + +static void +xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_activate_mq_msg_chctl_opencomplete_uv msg; + + msg.ch_number = ch->number; + xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV); +} + +static void +xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); +} + +static enum xp_retval +xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch, + unsigned long gru_mq_desc_gpa) +{ + struct xpc_channel_uv *ch_uv = &ch->sn.uv; + + DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL); + return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc, + gru_mq_desc_gpa); +} + +static void +xpc_indicate_partition_engaged_uv(struct xpc_partition *part) +{ + struct xpc_activate_mq_msg_uv msg; + + xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV); +} + +static void +xpc_indicate_partition_disengaged_uv(struct xpc_partition *part) +{ + struct xpc_activate_mq_msg_uv msg; + + xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV); +} + +static void +xpc_assume_partition_disengaged_uv(short partid) +{ + struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv; + unsigned long irq_flags; + + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags &= ~XPC_P_ENGAGED_UV; + 
spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); +} + +static int +xpc_partition_engaged_uv(short partid) +{ + return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0; +} + +static int +xpc_any_partition_engaged_uv(void) +{ + struct xpc_partition_uv *part_uv; + short partid; + + for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { + part_uv = &xpc_partitions[partid].sn.uv; + if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0) + return 1; + } + return 0; +} + +static enum xp_retval +xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags, + struct xpc_send_msg_slot_uv **address_of_msg_slot) +{ + enum xp_retval ret; + struct xpc_send_msg_slot_uv *msg_slot; + struct xpc_fifo_entry_uv *entry; + + while (1) { + entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list); + if (entry != NULL) + break; + + if (flags & XPC_NOWAIT) + return xpNoWait; + + ret = xpc_allocate_msg_wait(ch); + if (ret != xpInterrupted && ret != xpTimeout) + return ret; + } + + msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next); + *address_of_msg_slot = msg_slot; + return xpSuccess; +} + +static void +xpc_free_msg_slot_uv(struct xpc_channel *ch, + struct xpc_send_msg_slot_uv *msg_slot) +{ + xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next); + + /* wakeup anyone waiting for a free msg slot */ + if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) + wake_up(&ch->msg_allocate_wq); +} + +static void +xpc_notify_sender_uv(struct xpc_channel *ch, + struct xpc_send_msg_slot_uv *msg_slot, + enum xp_retval reason) +{ + xpc_notify_func func = msg_slot->func; + + if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) { + + atomic_dec(&ch->n_to_notify); + + dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p " + "msg_slot_number=%d partid=%d channel=%d\n", msg_slot, + msg_slot->msg_slot_number, ch->partid, ch->number); + + func(reason, ch->partid, ch->number, msg_slot->key); + + dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p " + "msg_slot_number=%d partid=%d channel=%d\n", msg_slot, + msg_slot->msg_slot_number, ch->partid, ch->number); + } +} + +static void +xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch, + struct xpc_notify_mq_msg_uv *msg) +{ + struct xpc_send_msg_slot_uv *msg_slot; + int entry = msg->hdr.msg_slot_number % ch->local_nentries; + + msg_slot = &ch->sn.uv.send_msg_slots[entry]; + + BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number); + msg_slot->msg_slot_number += ch->local_nentries; + + if (msg_slot->func != NULL) + xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered); + + xpc_free_msg_slot_uv(ch, msg_slot); +} + +static void +xpc_handle_notify_mq_msg_uv(struct xpc_partition *part, + struct xpc_notify_mq_msg_uv *msg) +{ + struct xpc_partition_uv *part_uv = &part->sn.uv; + struct xpc_channel *ch; + struct xpc_channel_uv *ch_uv; + struct xpc_notify_mq_msg_uv *msg_slot; + unsigned long irq_flags; + int ch_number = msg->hdr.ch_number; + + if (unlikely(ch_number >= part->nchannels)) { + dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid " + "channel number=0x%x in message from partid=%d\n", + ch_number, XPC_PARTID(part)); + + /* get hb checker to deactivate from the remote partition */ + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; + part_uv->reason = xpBadChannelNumber; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + wake_up_interruptible(&xpc_activate_IRQ_wq); + 
return; + } + + ch = &part->channels[ch_number]; + xpc_msgqueue_ref(ch); + + if (!(ch->flags & XPC_C_CONNECTED)) { + xpc_msgqueue_deref(ch); + return; + } + + /* see if we're really dealing with an ACK for a previously sent msg */ + if (msg->hdr.size == 0) { + xpc_handle_notify_mq_ack_uv(ch, msg); + xpc_msgqueue_deref(ch); + return; + } + + /* we're dealing with a normal message sent via the notify_mq */ + ch_uv = &ch->sn.uv; + + msg_slot = ch_uv->recv_msg_slots + + (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size; + + BUG_ON(msg_slot->hdr.size != 0); + + memcpy(msg_slot, msg, msg->hdr.size); + + xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next); + + if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) { + /* + * If there is an existing idle kthread get it to deliver + * the payload, otherwise we'll have to get the channel mgr + * for this partition to create a kthread to do the delivery. + */ + if (atomic_read(&ch->kthreads_idle) > 0) + wake_up_nr(&ch->idle_wq, 1); + else + xpc_send_chctl_local_msgrequest_uv(part, ch->number); + } + xpc_msgqueue_deref(ch); +} + +static irqreturn_t +xpc_handle_notify_IRQ_uv(int irq, void *dev_id) +{ + struct xpc_notify_mq_msg_uv *msg; + short partid; + struct xpc_partition *part; + + while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) != + NULL) { + + partid = msg->hdr.partid; + if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { + dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received " + "invalid partid=0x%x in message\n", partid); + } else { + part = &xpc_partitions[partid]; + + if (xpc_part_ref(part)) { + xpc_handle_notify_mq_msg_uv(part, msg); + xpc_part_deref(part); + } + } + + gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg); + } + + return IRQ_HANDLED; +} + +static int +xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch) +{ + return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list); +} + +static void +xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number) +{ + struct xpc_channel *ch = &part->channels[ch_number]; + int ndeliverable_payloads; + + xpc_msgqueue_ref(ch); + + ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch); + + if (ndeliverable_payloads > 0 && + (ch->flags & XPC_C_CONNECTED) && + (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) { + + xpc_activate_kthreads(ch, ndeliverable_payloads); + } + + xpc_msgqueue_deref(ch); +} + +static enum xp_retval +xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload, + u16 payload_size, u8 notify_type, xpc_notify_func func, + void *key) +{ + enum xp_retval ret = xpSuccess; + struct xpc_send_msg_slot_uv *msg_slot = NULL; + struct xpc_notify_mq_msg_uv *msg; + u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV]; + size_t msg_size; + + DBUG_ON(notify_type != XPC_N_CALL); + + msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size; + if (msg_size > ch->entry_size) + return xpPayloadTooBig; + + xpc_msgqueue_ref(ch); + + if (ch->flags & XPC_C_DISCONNECTING) { + ret = ch->reason; + goto out_1; + } + if (!(ch->flags & XPC_C_CONNECTED)) { + ret = xpNotConnected; + goto out_1; + } + + ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot); + if (ret != xpSuccess) + goto out_1; + + if (func != NULL) { + atomic_inc(&ch->n_to_notify); + + msg_slot->key = key; + smp_wmb(); /* a non-NULL func must hit memory after the key */ + msg_slot->func = func; + + if (ch->flags & XPC_C_DISCONNECTING) { + ret = ch->reason; + goto out_2; + } + } + + msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer; + msg->hdr.partid = xp_partition_id; + 
msg->hdr.ch_number = ch->number;
+	msg->hdr.size = msg_size;
+	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
+	memcpy(&msg->payload, payload, payload_size);
+
+	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
+			       msg_size);
+	if (ret == xpSuccess)
+		goto out_1;
+
+	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
+out_2:
+	if (func != NULL) {
+		/*
+		 * Try to NULL the msg_slot's func field. If we fail, then
+		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in
+		 * which case we need to pretend we succeeded in sending the
+		 * message, since the user will already get a callout for the
+		 * disconnect error from
+		 * xpc_notify_senders_of_disconnect_uv(), and also getting an
+		 * error returned here would confuse them. Additionally, since
+		 * in this case the channel is being disconnected we don't
+		 * need to put the msg_slot back on the free list.
+		 */
+		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
+			ret = xpSuccess;
+			goto out_1;
+		}
+
+		msg_slot->key = NULL;
+		atomic_dec(&ch->n_to_notify);
+	}
+	xpc_free_msg_slot_uv(ch, msg_slot);
+out_1:
+	xpc_msgqueue_deref(ch);
+	return ret;
+}
+
+/*
+ * Tell the callers of xpc_send_notify() that the status of their payloads
+ * is unknown because the channel is now disconnecting.
+ *
+ * We don't worry about putting these msg_slots on the free list since the
+ * msg_slots themselves are about to be kfree'd.
+ */
+static void
+xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
+{
+	struct xpc_send_msg_slot_uv *msg_slot;
+	int entry;
+
+	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
+
+	for (entry = 0; entry < ch->local_nentries; entry++) {
+
+		if (atomic_read(&ch->n_to_notify) == 0)
+			break;
+
+		msg_slot = &ch->sn.uv.send_msg_slots[entry];
+		if (msg_slot->func != NULL)
+			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
+	}
+}
+
+/*
+ * Get the next deliverable message's payload.
+ */ +static void * +xpc_get_deliverable_payload_uv(struct xpc_channel *ch) +{ + struct xpc_fifo_entry_uv *entry; + struct xpc_notify_mq_msg_uv *msg; + void *payload = NULL; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list); + if (entry != NULL) { + msg = container_of(entry, struct xpc_notify_mq_msg_uv, + hdr.u.next); + payload = &msg->payload; + } + } + return payload; +} + +static void +xpc_received_payload_uv(struct xpc_channel *ch, void *payload) +{ + struct xpc_notify_mq_msg_uv *msg; + enum xp_retval ret; + + msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload); + + /* return an ACK to the sender of this message */ + + msg->hdr.partid = xp_partition_id; + msg->hdr.size = 0; /* size of zero indicates this is an ACK */ + + ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg, + sizeof(struct xpc_notify_mq_msghdr_uv)); + if (ret != xpSuccess) + XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); +} + +static const struct xpc_arch_operations xpc_arch_ops_uv = { + .setup_partitions = xpc_setup_partitions_uv, + .teardown_partitions = xpc_teardown_partitions_uv, + .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv, + .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv, + .setup_rsvd_page = xpc_setup_rsvd_page_uv, + + .allow_hb = xpc_allow_hb_uv, + .disallow_hb = xpc_disallow_hb_uv, + .disallow_all_hbs = xpc_disallow_all_hbs_uv, + .increment_heartbeat = xpc_increment_heartbeat_uv, + .offline_heartbeat = xpc_offline_heartbeat_uv, + .online_heartbeat = xpc_online_heartbeat_uv, + .heartbeat_init = xpc_heartbeat_init_uv, + .heartbeat_exit = xpc_heartbeat_exit_uv, + .get_remote_heartbeat = xpc_get_remote_heartbeat_uv, + + .request_partition_activation = + xpc_request_partition_activation_uv, + .request_partition_reactivation = + xpc_request_partition_reactivation_uv, + .request_partition_deactivation = + xpc_request_partition_deactivation_uv, + .cancel_partition_deactivation_request = + xpc_cancel_partition_deactivation_request_uv, + + .setup_ch_structures = xpc_setup_ch_structures_uv, + .teardown_ch_structures = xpc_teardown_ch_structures_uv, + + .make_first_contact = xpc_make_first_contact_uv, + + .get_chctl_all_flags = xpc_get_chctl_all_flags_uv, + .send_chctl_closerequest = xpc_send_chctl_closerequest_uv, + .send_chctl_closereply = xpc_send_chctl_closereply_uv, + .send_chctl_openrequest = xpc_send_chctl_openrequest_uv, + .send_chctl_openreply = xpc_send_chctl_openreply_uv, + .send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv, + .process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv, + + .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv, + + .setup_msg_structures = xpc_setup_msg_structures_uv, + .teardown_msg_structures = xpc_teardown_msg_structures_uv, + + .indicate_partition_engaged = xpc_indicate_partition_engaged_uv, + .indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv, + .assume_partition_disengaged = xpc_assume_partition_disengaged_uv, + .partition_engaged = xpc_partition_engaged_uv, + .any_partition_engaged = xpc_any_partition_engaged_uv, + + .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv, + .send_payload = xpc_send_payload_uv, + .get_deliverable_payload = xpc_get_deliverable_payload_uv, + .received_payload = xpc_received_payload_uv, + .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv, +}; + +static int +xpc_init_mq_node(int nid) +{ + int cpu; + + get_online_cpus(); + + for_each_cpu(cpu, cpumask_of_node(nid)) { 
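+		/*
+		 * Try each CPU on this node in turn; the loop stops at the
+		 * first CPU for which a GRU message queue and its IRQ can be
+		 * set up.
+		 */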
+ xpc_activate_mq_uv = + xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid, + XPC_ACTIVATE_IRQ_NAME, + xpc_handle_activate_IRQ_uv); + if (!IS_ERR(xpc_activate_mq_uv)) + break; + } + if (IS_ERR(xpc_activate_mq_uv)) { + put_online_cpus(); + return PTR_ERR(xpc_activate_mq_uv); + } + + for_each_cpu(cpu, cpumask_of_node(nid)) { + xpc_notify_mq_uv = + xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid, + XPC_NOTIFY_IRQ_NAME, + xpc_handle_notify_IRQ_uv); + if (!IS_ERR(xpc_notify_mq_uv)) + break; + } + if (IS_ERR(xpc_notify_mq_uv)) { + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); + put_online_cpus(); + return PTR_ERR(xpc_notify_mq_uv); + } + + put_online_cpus(); + return 0; +} + +int +xpc_init_uv(void) +{ + int nid; + int ret = 0; + + xpc_arch_ops = xpc_arch_ops_uv; + + if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { + dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n", + XPC_MSG_HDR_MAX_SIZE); + return -E2BIG; + } + + if (xpc_mq_node < 0) + for_each_online_node(nid) { + ret = xpc_init_mq_node(nid); + + if (!ret) + break; + } + else + ret = xpc_init_mq_node(xpc_mq_node); + + if (ret < 0) + dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n", + -ret); + + return ret; +} + +void +xpc_exit_uv(void) +{ + xpc_destroy_gru_mq_uv(xpc_notify_mq_uv); + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); +} + +module_param(xpc_mq_node, int, 0); +MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues."); diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c new file mode 100644 index 000000000..23837d0d6 --- /dev/null +++ b/drivers/misc/sgi-xp/xpnet.c @@ -0,0 +1,597 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * (C) Copyright 2020 Hewlett Packard Enterprise Development LP + * Copyright (C) 1999-2009 Silicon Graphics, Inc. All rights reserved. + */ + +/* + * Cross Partition Network Interface (XPNET) support + * + * XPNET provides a virtual network layered on top of the Cross + * Partition communication layer. + * + * XPNET provides direct point-to-point and broadcast-like support + * for an ethernet-like device. The ethernet broadcast medium is + * replaced with a point-to-point message structure which passes + * pointers to a DMA-capable block that a remote partition should + * retrieve and pass to the upper level networking layer. + * + */ + +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include "xp.h" + +/* + * The message payload transferred by XPC. + * + * buf_pa is the physical address where the DMA should pull from. + * + * NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a + * cacheline boundary. To accomplish this, we record the number of + * bytes from the beginning of the first cacheline to the first useful + * byte of the skb (leadin_ignore) and the number of bytes from the + * last useful byte of the skb to the end of the last cacheline + * (tailout_ignore). 
+ *
+ * size is the number of bytes to transfer, which includes the skb->len
+ * (useful bytes of the sender's skb) plus the leadin and tailout
+ */
+struct xpnet_message {
+	u16 version;		/* Version for this message */
+	u16 embedded_bytes;	/* #of bytes embedded in XPC message */
+	u32 magic;		/* Special number indicating this is xpnet */
+	unsigned long buf_pa;	/* phys address of buffer to retrieve */
+	u32 size;		/* #of bytes in buffer */
+	u8 leadin_ignore;	/* #of bytes to ignore at the beginning */
+	u8 tailout_ignore;	/* #of bytes to ignore at the end */
+	unsigned char data;	/* body of small packets */
+};
+
+/*
+ * Determine the size of our message, the cacheline aligned size,
+ * and then the number of messages we will request from XPC.
+ *
+ * XPC expects each message to exist in an individual cacheline.
+ */
+#define XPNET_MSG_SIZE		XPC_MSG_PAYLOAD_MAX_SIZE
+#define XPNET_MSG_DATA_MAX	\
+	(XPNET_MSG_SIZE - offsetof(struct xpnet_message, data))
+#define XPNET_MSG_NENTRIES	(PAGE_SIZE / XPC_MSG_MAX_SIZE)
+
+#define XPNET_MAX_KTHREADS	(XPNET_MSG_NENTRIES + 1)
+#define XPNET_MAX_IDLE_KTHREADS	(XPNET_MSG_NENTRIES + 1)
+
+/*
+ * Version number of XPNET implementation. XPNET can always talk to versions
+ * with the same major #, and never to versions with a different major #.
+ */
+#define _XPNET_VERSION(_major, _minor)	(((_major) << 4) | (_minor))
+#define XPNET_VERSION_MAJOR(_v)		((_v) >> 4)
+#define XPNET_VERSION_MINOR(_v)		((_v) & 0xf)
+
+#define XPNET_VERSION _XPNET_VERSION(1, 0)	/* version 1.0 */
+#define XPNET_VERSION_EMBED _XPNET_VERSION(1, 1)	/* version 1.1 */
+#define XPNET_MAGIC	0x88786984 /* "XNET" */
+
+#define XPNET_VALID_MSG(_m) \
+   ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
+    && (_m->magic == XPNET_MAGIC))
+
+#define XPNET_DEVICE_NAME		"xp0"
+
+/*
+ * When messages are queued with xpc_send_notify, a kmalloc'd buffer
+ * of the following type is passed as a notification cookie. When the
+ * notification function is called, we use the cookie to decide
+ * whether all outstanding message sends have completed. The skb can
+ * then be released.
+ */
+struct xpnet_pending_msg {
+	struct sk_buff *skb;
+	atomic_t use_count;
+};
+
+static struct net_device *xpnet_device;
+
+/*
+ * When we are notified of other partitions activating, we add them to
+ * our bitmask of partitions to which we broadcast.
+ */
+static unsigned long *xpnet_broadcast_partitions;
+/* protect above */
+static DEFINE_SPINLOCK(xpnet_broadcast_lock);
+
+/*
+ * Since the Block Transfer Engine (BTE) is being used for the transfer
+ * and it relies upon cache-line size transfers, we need to reserve at
+ * least one cache-line for head and tail alignment. The BTE is
+ * limited to 8MB transfers.
+ *
+ * Testing has shown that changing MTU to greater than 64KB has no effect
+ * on TCP as the two sides negotiate a Max Segment Size that is limited
+ * to 64K. Other protocols may use packets greater than this, but for
+ * now, the default is 64KB.
+ */
+#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES)
+/* 68 comes from min TCP+IP+MAC header */
+#define XPNET_MIN_MTU 68
+/* 32KB has been determined to be the ideal */
+#define XPNET_DEF_MTU (0x8000UL)
+
+/*
+ * The partid is encapsulated in the MAC address beginning in the following
+ * octet and it consists of two octets.
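+ *
+ * For example (hypothetical partid), partition 0x0102 would advertise a
+ * MAC address of 02:00:01:02:00:00, where octet 0 is the locally
+ * administered 0x02 set in xpnet_init(), octets 2 and 3 hold the partid
+ * big-endian, and the remaining octets are left at zero.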
+ */
+#define XPNET_PARTID_OCTET	2
+
+/* Define the XPNET debug device structures to be used with dev_dbg() et al */
+
+static struct device_driver xpnet_dbg_name = {
+	.name = "xpnet"
+};
+
+static struct device xpnet_dbg_subname = {
+	.init_name = "",	/* set to "" */
+	.driver = &xpnet_dbg_name
+};
+
+static struct device *xpnet = &xpnet_dbg_subname;
+
+/*
+ * Packet was received by XPC and forwarded to us.
+ */
+static void
+xpnet_receive(short partid, int channel, struct xpnet_message *msg)
+{
+	struct sk_buff *skb;
+	void *dst;
+	enum xp_retval ret;
+
+	if (!XPNET_VALID_MSG(msg)) {
+		/*
+		 * Packet with a different XPC version. Ignore.
+		 */
+		xpc_received(partid, channel, (void *)msg);
+
+		xpnet_device->stats.rx_errors++;
+
+		return;
+	}
+	dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
+		msg->leadin_ignore, msg->tailout_ignore);
+
+	/* reserve an extra cache line */
+	skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
+	if (!skb) {
+		dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
+			msg->size + L1_CACHE_BYTES);
+
+		xpc_received(partid, channel, (void *)msg);
+
+		xpnet_device->stats.rx_errors++;
+
+		return;
+	}
+
+	/*
+	 * The allocated skb has some reserved space.
+	 * In order to use xp_remote_memcpy(), we need to get the
+	 * skb->data pointer moved forward.
+	 */
+	skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data &
+					    (L1_CACHE_BYTES - 1)) +
+			  msg->leadin_ignore));
+
+	/*
+	 * Update the tail pointer to indicate data actually
+	 * transferred.
+	 */
+	skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore));
+
+	/*
+	 * Move the data over from the other side.
+	 */
+	if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
+	    (msg->embedded_bytes != 0)) {
+		dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
+			"%lu)\n", skb->data, &msg->data,
+			(size_t)msg->embedded_bytes);
+
+		skb_copy_to_linear_data(skb, &msg->data,
+					(size_t)msg->embedded_bytes);
+	} else {
+		dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1));
+		dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
+			"xp_remote_memcpy(0x%p, 0x%p, %hu)\n", dst,
+			(void *)msg->buf_pa, msg->size);
+
+		ret = xp_remote_memcpy(xp_pa(dst), msg->buf_pa, msg->size);
+		if (ret != xpSuccess) {
+			/*
+			 * !!! Need better way of cleaning skb. Currently skb
+			 * !!! appears in_use and we can't just call
+			 * !!! dev_kfree_skb.
+			 */
+			dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) "
+				"returned error=0x%x\n", dst,
+				(void *)msg->buf_pa, msg->size, ret);
+
+			xpc_received(partid, channel, (void *)msg);
+
+			xpnet_device->stats.rx_errors++;
+
+			return;
+		}
+	}
+
+	dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
+		"skb->end=0x%p skb->len=%d\n", (void *)skb->head,
+		(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
+		skb->len);
+
+	skb->protocol = eth_type_trans(skb, xpnet_device);
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	dev_dbg(xpnet, "passing skb to network layer\n"
+		"\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p "
+		"skb->end=0x%p skb->len=%d\n",
+		(void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
+		skb_end_pointer(skb), skb->len);
+
+	xpnet_device->stats.rx_packets++;
+	xpnet_device->stats.rx_bytes += skb->len + ETH_HLEN;
+
+	netif_rx_ni(skb);
+	xpc_received(partid, channel, (void *)msg);
+}
+
+/*
+ * This is the handler which XPC calls during any sort of change in
+ * state or message reception on a connection.
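+ *
+ * xpMsgReceived hands the payload to xpnet_receive(); xpConnected adds the
+ * partition to the broadcast bitmask and raises the carrier; any other
+ * reason is treated as a disconnect from that partition.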
+ */ +static void +xpnet_connection_activity(enum xp_retval reason, short partid, int channel, + void *data, void *key) +{ + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); + DBUG_ON(channel != XPC_NET_CHANNEL); + + switch (reason) { + case xpMsgReceived: /* message received */ + DBUG_ON(data == NULL); + + xpnet_receive(partid, channel, (struct xpnet_message *)data); + break; + + case xpConnected: /* connection completed to a partition */ + spin_lock_bh(&xpnet_broadcast_lock); + __set_bit(partid, xpnet_broadcast_partitions); + spin_unlock_bh(&xpnet_broadcast_lock); + + netif_carrier_on(xpnet_device); + + dev_dbg(xpnet, "%s connected to partition %d\n", + xpnet_device->name, partid); + break; + + default: + spin_lock_bh(&xpnet_broadcast_lock); + __clear_bit(partid, xpnet_broadcast_partitions); + spin_unlock_bh(&xpnet_broadcast_lock); + + if (bitmap_empty((unsigned long *)xpnet_broadcast_partitions, + xp_max_npartitions)) { + netif_carrier_off(xpnet_device); + } + + dev_dbg(xpnet, "%s disconnected from partition %d\n", + xpnet_device->name, partid); + break; + } +} + +static int +xpnet_dev_open(struct net_device *dev) +{ + enum xp_retval ret; + + dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, " + "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity, + (unsigned long)XPNET_MSG_SIZE, + (unsigned long)XPNET_MSG_NENTRIES, + (unsigned long)XPNET_MAX_KTHREADS, + (unsigned long)XPNET_MAX_IDLE_KTHREADS); + + ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL, + XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, + XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS); + if (ret != xpSuccess) { + dev_err(xpnet, "ifconfig up of %s failed on XPC connect, " + "ret=%d\n", dev->name, ret); + + return -ENOMEM; + } + + dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name); + + return 0; +} + +static int +xpnet_dev_stop(struct net_device *dev) +{ + xpc_disconnect(XPC_NET_CHANNEL); + + dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name); + + return 0; +} + +/* + * Notification that the other end has received the message and + * DMA'd the skb information. At this point, they are done with + * our side. When all recipients are done processing, we + * release the skb and then release our pending message structure. 
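+ *
+ * (The transmit path seeds use_count at 1 and takes one further reference
+ * for every xpc_send_notify() it queues; whichever side drops the count
+ * to zero frees the skb and the tracking structure.)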
+ */
+static void
+xpnet_send_completed(enum xp_retval reason, short partid, int channel,
+		     void *__qm)
+{
+	struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm;
+
+	DBUG_ON(queued_msg == NULL);
+
+	dev_dbg(xpnet, "message to %d notified with reason %d\n",
+		partid, reason);
+
+	if (atomic_dec_return(&queued_msg->use_count) == 0) {
+		dev_dbg(xpnet, "all acks for skb->head=0x%p\n",
+			(void *)queued_msg->skb->head);
+
+		dev_kfree_skb_any(queued_msg->skb);
+		kfree(queued_msg);
+	}
+}
+
+static void
+xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
+	   u64 start_addr, u64 end_addr, u16 embedded_bytes, int dest_partid)
+{
+	u8 msg_buffer[XPNET_MSG_SIZE];
+	struct xpnet_message *msg = (struct xpnet_message *)&msg_buffer;
+	u16 msg_size = sizeof(struct xpnet_message);
+	enum xp_retval ret;
+
+	msg->embedded_bytes = embedded_bytes;
+	if (unlikely(embedded_bytes != 0)) {
+		msg->version = XPNET_VERSION_EMBED;
+		dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
+			&msg->data, skb->data, (size_t)embedded_bytes);
+		skb_copy_from_linear_data(skb, &msg->data,
+					  (size_t)embedded_bytes);
+		msg_size += embedded_bytes - 1;
+	} else {
+		msg->version = XPNET_VERSION;
+	}
+	msg->magic = XPNET_MAGIC;
+	msg->size = end_addr - start_addr;
+	msg->leadin_ignore = (u64)skb->data - start_addr;
+	msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
+	msg->buf_pa = xp_pa((void *)start_addr);
+
+	dev_dbg(xpnet, "sending XPC message to %d:%d\n"
+		"msg->buf_pa=0x%lx, msg->size=%u, "
+		"msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
+		dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
+		msg->leadin_ignore, msg->tailout_ignore);
+
+	atomic_inc(&queued_msg->use_count);
+
+	ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, XPC_NOWAIT, msg,
+			      msg_size, xpnet_send_completed, queued_msg);
+	if (unlikely(ret != xpSuccess))
+		atomic_dec(&queued_msg->use_count);
+}
+
+/*
+ * Network layer has formatted a packet (skb) and is ready to place it
+ * "on the wire". Prepare and send an xpnet_message to all partitions
+ * which have connected with us and are targets of this packet.
+ *
+ * MAC-NOTE: For the XPNET driver, the MAC address contains the
+ * destination partid. If the destination partid octets are 0xffff,
+ * this packet is to be broadcast to all connected partitions.
+ */
+static netdev_tx_t
+xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct xpnet_pending_msg *queued_msg;
+	u64 start_addr, end_addr;
+	short dest_partid;
+	u16 embedded_bytes = 0;
+
+	dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
+		"skb->end=0x%p skb->len=%d\n", (void *)skb->head,
+		(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
+		skb->len);
+
+	if (skb->data[0] == 0x33) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;	/* nothing needed to be done */
+	}
+
+	/*
+	 * The xpnet_pending_msg tracks how many outstanding
+	 * xpc_send_notifies are relying on this skb. When none
+	 * remain, release the skb.
+ */ + queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC); + if (queued_msg == NULL) { + dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping " + "packet\n", sizeof(struct xpnet_pending_msg)); + + dev->stats.tx_errors++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + /* get the beginning of the first cacheline and end of last */ + start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1)); + end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb)); + + /* calculate how many bytes to embed in the XPC message */ + if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) { + /* skb->data does fit so embed */ + embedded_bytes = skb->len; + } + + /* + * Since the send occurs asynchronously, we set the count to one + * and begin sending. Any sends that happen to complete before + * we are done sending will not free the skb. We will be left + * with that task during exit. This also handles the case of + * a packet destined for a partition which is no longer up. + */ + atomic_set(&queued_msg->use_count, 1); + queued_msg->skb = skb; + + if (skb->data[0] == 0xff) { + /* we are being asked to broadcast to all partitions */ + for_each_set_bit(dest_partid, xpnet_broadcast_partitions, + xp_max_npartitions) { + + xpnet_send(skb, queued_msg, start_addr, end_addr, + embedded_bytes, dest_partid); + } + } else { + dest_partid = (short)skb->data[XPNET_PARTID_OCTET + 1]; + dest_partid |= (short)skb->data[XPNET_PARTID_OCTET + 0] << 8; + + if (dest_partid >= 0 && + dest_partid < xp_max_npartitions && + test_bit(dest_partid, xpnet_broadcast_partitions) != 0) { + + xpnet_send(skb, queued_msg, start_addr, end_addr, + embedded_bytes, dest_partid); + } + } + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + if (atomic_dec_return(&queued_msg->use_count) == 0) { + dev_kfree_skb(skb); + kfree(queued_msg); + } + + return NETDEV_TX_OK; +} + +/* + * Deal with transmit timeouts coming from the network layer. + */ +static void +xpnet_dev_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + dev->stats.tx_errors++; +} + +static const struct net_device_ops xpnet_netdev_ops = { + .ndo_open = xpnet_dev_open, + .ndo_stop = xpnet_dev_stop, + .ndo_start_xmit = xpnet_dev_hard_start_xmit, + .ndo_tx_timeout = xpnet_dev_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int __init +xpnet_init(void) +{ + int result; + + if (!is_uv_system()) + return -ENODEV; + + dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME); + + xpnet_broadcast_partitions = kcalloc(BITS_TO_LONGS(xp_max_npartitions), + sizeof(long), + GFP_KERNEL); + if (xpnet_broadcast_partitions == NULL) + return -ENOMEM; + + /* + * use ether_setup() to init the majority of our device + * structure and then override the necessary pieces. + */ + xpnet_device = alloc_netdev(0, XPNET_DEVICE_NAME, NET_NAME_UNKNOWN, + ether_setup); + if (xpnet_device == NULL) { + kfree(xpnet_broadcast_partitions); + return -ENOMEM; + } + + netif_carrier_off(xpnet_device); + + xpnet_device->netdev_ops = &xpnet_netdev_ops; + xpnet_device->mtu = XPNET_DEF_MTU; + xpnet_device->min_mtu = XPNET_MIN_MTU; + xpnet_device->max_mtu = XPNET_MAX_MTU; + + /* + * Multicast assumes the LSB of the first octet is set for multicast + * MAC addresses. We chose the first octet of the MAC to be unlikely + * to collide with any vendor's officially issued MAC. 
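+	 *
+	 * Layout sketch: octet 0 is set to 0x02 below, and the two
+	 * octets at XPNET_PARTID_OCTET hold xp_partition_id, which
+	 * xpnet_dev_hard_start_xmit() reads back out of the destination
+	 * MAC to pick the target partition.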
+ */ + xpnet_device->dev_addr[0] = 0x02; /* locally administered, no OUI */ + + xpnet_device->dev_addr[XPNET_PARTID_OCTET + 1] = xp_partition_id; + xpnet_device->dev_addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8); + + /* + * ether_setup() sets this to a multicast device. We are + * really not supporting multicast at this time. + */ + xpnet_device->flags &= ~IFF_MULTICAST; + + /* + * No need to checksum as it is a DMA transfer. The BTE will + * report an error if the data is not retrievable and the + * packet will be dropped. + */ + xpnet_device->features = NETIF_F_HW_CSUM; + + result = register_netdev(xpnet_device); + if (result != 0) { + free_netdev(xpnet_device); + kfree(xpnet_broadcast_partitions); + } + + return result; +} + +module_init(xpnet_init); + +static void __exit +xpnet_exit(void) +{ + dev_info(xpnet, "unregistering network device %s\n", + xpnet_device[0].name); + + unregister_netdev(xpnet_device); + free_netdev(xpnet_device); + kfree(xpnet_broadcast_partitions); +} + +module_exit(xpnet_exit); + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c new file mode 100644 index 000000000..6cc31789b --- /dev/null +++ b/drivers/misc/sram-exec.c @@ -0,0 +1,132 @@ +/* + * SRAM protect-exec region helper functions + * + * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/ + * Dave Gerlach + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/device.h> +#include <linux/genalloc.h> +#include <linux/mm.h> +#include <linux/sram.h> + +#include <asm/fncpy.h> +#include <asm/set_memory.h> + +#include "sram.h" + +static DEFINE_MUTEX(exec_pool_list_mutex); +static LIST_HEAD(exec_pool_list); + +int sram_check_protect_exec(struct sram_dev *sram, struct sram_reserve *block, + struct sram_partition *part) +{ + unsigned long base = (unsigned long)part->base; + unsigned long end = base + block->size; + + if (!PAGE_ALIGNED(base) || !PAGE_ALIGNED(end)) { + dev_err(sram->dev, + "SRAM pool marked with 'protect-exec' is not page aligned and will not be created.\n"); + return -ENOMEM; + } + + return 0; +} + +int sram_add_protect_exec(struct sram_partition *part) +{ + mutex_lock(&exec_pool_list_mutex); + list_add_tail(&part->list, &exec_pool_list); + mutex_unlock(&exec_pool_list_mutex); + + return 0; +} + +/** + * sram_exec_copy - copy data to a protected executable region of sram + * + * @pool: struct gen_pool retrieved that is part of this sram + * @dst: Destination address for the copy, that must be inside pool + * @src: Source address for the data to copy + * @size: Size of copy to perform, which starting from dst, must reside in pool + * + * Return: Address for copied data that can safely be called through function + * pointer, or NULL if problem. 
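+ *
+ * Illustrative use only (the names and the size value here are
+ * assumed for the sketch, not defined by this file):
+ *
+ *	unsigned long va = gen_pool_alloc(pool, fn_size);
+ *	int (*fn)(void) = sram_exec_copy(pool, (void *)va, my_sram_fn,
+ *					 fn_size);
+ *	if (fn)
+ *		fn();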
+ * + * This helper function allows sram driver to act as central control location + * of 'protect-exec' pools which are normal sram pools but are always set + * read-only and executable except when copying data to them, at which point + * they are set to read-write non-executable, to make sure no memory is + * writeable and executable at the same time. This region must be page-aligned + * and is checked during probe, otherwise page attribute manipulation would + * not be possible. Care must be taken to only call the returned address as + * dst address is not guaranteed to be safely callable. + * + * NOTE: This function uses the fncpy macro to move code to the executable + * region. Some architectures have strict requirements for relocating + * executable code, so fncpy is a macro that must be defined by any arch + * making use of this functionality that guarantees a safe copy of exec + * data and returns a safe address that can be called as a C function + * pointer. + */ +void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src, + size_t size) +{ + struct sram_partition *part = NULL, *p; + unsigned long base; + int pages; + void *dst_cpy; + int ret; + + mutex_lock(&exec_pool_list_mutex); + list_for_each_entry(p, &exec_pool_list, list) { + if (p->pool == pool) + part = p; + } + mutex_unlock(&exec_pool_list_mutex); + + if (!part) + return NULL; + + if (!gen_pool_has_addr(pool, (unsigned long)dst, size)) + return NULL; + + base = (unsigned long)part->base; + pages = PAGE_ALIGN(size) / PAGE_SIZE; + + mutex_lock(&part->lock); + + ret = set_memory_nx((unsigned long)base, pages); + if (ret) + goto error_out; + ret = set_memory_rw((unsigned long)base, pages); + if (ret) + goto error_out; + + dst_cpy = fncpy(dst, src, size); + + ret = set_memory_ro((unsigned long)base, pages); + if (ret) + goto error_out; + ret = set_memory_x((unsigned long)base, pages); + if (ret) + goto error_out; + + mutex_unlock(&part->lock); + + return dst_cpy; + +error_out: + mutex_unlock(&part->lock); + return NULL; +} +EXPORT_SYMBOL_GPL(sram_exec_copy); diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c new file mode 100644 index 000000000..202bf951e --- /dev/null +++ b/drivers/misc/sram.c @@ -0,0 +1,429 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Generic on-chip SRAM allocation driver + * + * Copyright (C) 2012 Philipp Zabel, Pengutronix + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/list_sort.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/mfd/syscon.h> +#include <soc/at91/atmel-secumod.h> + +#include "sram.h" + +#define SRAM_GRANULARITY 32 + +static ssize_t sram_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t pos, size_t count) +{ + struct sram_partition *part; + + part = container_of(attr, struct sram_partition, battr); + + mutex_lock(&part->lock); + memcpy_fromio(buf, part->base + pos, count); + mutex_unlock(&part->lock); + + return count; +} + +static ssize_t sram_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t pos, size_t count) +{ + struct sram_partition *part; + + part = container_of(attr, struct sram_partition, battr); + + mutex_lock(&part->lock); + memcpy_toio(part->base + pos, buf, count); + mutex_unlock(&part->lock); + + return count; +} + +static int sram_add_pool(struct sram_dev *sram, struct 
sram_reserve *block, + phys_addr_t start, struct sram_partition *part) +{ + int ret; + + part->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY), + NUMA_NO_NODE, block->label); + if (IS_ERR(part->pool)) + return PTR_ERR(part->pool); + + ret = gen_pool_add_virt(part->pool, (unsigned long)part->base, start, + block->size, NUMA_NO_NODE); + if (ret < 0) { + dev_err(sram->dev, "failed to register subpool: %d\n", ret); + return ret; + } + + return 0; +} + +static int sram_add_export(struct sram_dev *sram, struct sram_reserve *block, + phys_addr_t start, struct sram_partition *part) +{ + sysfs_bin_attr_init(&part->battr); + part->battr.attr.name = devm_kasprintf(sram->dev, GFP_KERNEL, + "%llx.sram", + (unsigned long long)start); + if (!part->battr.attr.name) + return -ENOMEM; + + part->battr.attr.mode = S_IRUSR | S_IWUSR; + part->battr.read = sram_read; + part->battr.write = sram_write; + part->battr.size = block->size; + + return device_create_bin_file(sram->dev, &part->battr); +} + +static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block, + phys_addr_t start) +{ + int ret; + struct sram_partition *part = &sram->partition[sram->partitions]; + + mutex_init(&part->lock); + part->base = sram->virt_base + block->start; + + if (block->pool) { + ret = sram_add_pool(sram, block, start, part); + if (ret) + return ret; + } + if (block->export) { + ret = sram_add_export(sram, block, start, part); + if (ret) + return ret; + } + if (block->protect_exec) { + ret = sram_check_protect_exec(sram, block, part); + if (ret) + return ret; + + ret = sram_add_pool(sram, block, start, part); + if (ret) + return ret; + + sram_add_protect_exec(part); + } + + sram->partitions++; + + return 0; +} + +static void sram_free_partitions(struct sram_dev *sram) +{ + struct sram_partition *part; + + if (!sram->partitions) + return; + + part = &sram->partition[sram->partitions - 1]; + for (; sram->partitions; sram->partitions--, part--) { + if (part->battr.size) + device_remove_bin_file(sram->dev, &part->battr); + + if (part->pool && + gen_pool_avail(part->pool) < gen_pool_size(part->pool)) + dev_err(sram->dev, "removed pool while SRAM allocated\n"); + } +} + +static int sram_reserve_cmp(void *priv, const struct list_head *a, + const struct list_head *b) +{ + struct sram_reserve *ra = list_entry(a, struct sram_reserve, list); + struct sram_reserve *rb = list_entry(b, struct sram_reserve, list); + + return ra->start - rb->start; +} + +static int sram_reserve_regions(struct sram_dev *sram, struct resource *res) +{ + struct device_node *np = sram->dev->of_node, *child; + unsigned long size, cur_start, cur_size; + struct sram_reserve *rblocks, *block; + struct list_head reserve_list; + unsigned int nblocks, exports = 0; + const char *label; + int ret = 0; + + INIT_LIST_HEAD(&reserve_list); + + size = resource_size(res); + + /* + * We need an additional block to mark the end of the memory region + * after the reserved blocks from the dt are processed. + */ + nblocks = (np) ? 
of_get_available_child_count(np) + 1 : 1; + rblocks = kcalloc(nblocks, sizeof(*rblocks), GFP_KERNEL); + if (!rblocks) + return -ENOMEM; + + block = &rblocks[0]; + for_each_available_child_of_node(np, child) { + struct resource child_res; + + ret = of_address_to_resource(child, 0, &child_res); + if (ret < 0) { + dev_err(sram->dev, + "could not get address for node %pOF\n", + child); + goto err_chunks; + } + + if (child_res.start < res->start || child_res.end > res->end) { + dev_err(sram->dev, + "reserved block %pOF outside the sram area\n", + child); + ret = -EINVAL; + goto err_chunks; + } + + block->start = child_res.start - res->start; + block->size = resource_size(&child_res); + list_add_tail(&block->list, &reserve_list); + + if (of_find_property(child, "export", NULL)) + block->export = true; + + if (of_find_property(child, "pool", NULL)) + block->pool = true; + + if (of_find_property(child, "protect-exec", NULL)) + block->protect_exec = true; + + if ((block->export || block->pool || block->protect_exec) && + block->size) { + exports++; + + label = NULL; + ret = of_property_read_string(child, "label", &label); + if (ret && ret != -EINVAL) { + dev_err(sram->dev, + "%pOF has invalid label name\n", + child); + goto err_chunks; + } + if (!label) + label = child->name; + + block->label = devm_kstrdup(sram->dev, + label, GFP_KERNEL); + if (!block->label) { + ret = -ENOMEM; + goto err_chunks; + } + + dev_dbg(sram->dev, "found %sblock '%s' 0x%x-0x%x\n", + block->export ? "exported " : "", block->label, + block->start, block->start + block->size); + } else { + dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n", + block->start, block->start + block->size); + } + + block++; + } + child = NULL; + + /* the last chunk marks the end of the region */ + rblocks[nblocks - 1].start = size; + rblocks[nblocks - 1].size = 0; + list_add_tail(&rblocks[nblocks - 1].list, &reserve_list); + + list_sort(NULL, &reserve_list, sram_reserve_cmp); + + if (exports) { + sram->partition = devm_kcalloc(sram->dev, + exports, sizeof(*sram->partition), + GFP_KERNEL); + if (!sram->partition) { + ret = -ENOMEM; + goto err_chunks; + } + } + + cur_start = 0; + list_for_each_entry(block, &reserve_list, list) { + /* can only happen if sections overlap */ + if (block->start < cur_start) { + dev_err(sram->dev, + "block at 0x%x starts after current offset 0x%lx\n", + block->start, cur_start); + ret = -EINVAL; + sram_free_partitions(sram); + goto err_chunks; + } + + if ((block->export || block->pool || block->protect_exec) && + block->size) { + ret = sram_add_partition(sram, block, + res->start + block->start); + if (ret) { + sram_free_partitions(sram); + goto err_chunks; + } + } + + /* current start is in a reserved block, so continue after it */ + if (block->start == cur_start) { + cur_start = block->start + block->size; + continue; + } + + /* + * allocate the space between the current starting + * address and the following reserved block, or the + * end of the region. 
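+		 *
+		 * Worked example (illustrative numbers): for a 64 KiB
+		 * region with one reserved block at offsets
+		 * 0x8000-0x8fff, this iteration adds the chunk covering
+		 * 0x0000-0x7fff, and the zero-sized end marker placed at
+		 * 0x10000 above then yields the closing chunk covering
+		 * 0x9000-0xffff.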
+ */ + cur_size = block->start - cur_start; + + dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n", + cur_start, cur_start + cur_size); + + ret = gen_pool_add_virt(sram->pool, + (unsigned long)sram->virt_base + cur_start, + res->start + cur_start, cur_size, -1); + if (ret < 0) { + sram_free_partitions(sram); + goto err_chunks; + } + + /* next allocation after this reserved block */ + cur_start = block->start + block->size; + } + +err_chunks: + of_node_put(child); + kfree(rblocks); + + return ret; +} + +static int atmel_securam_wait(void) +{ + struct regmap *regmap; + u32 val; + + regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-secumod"); + if (IS_ERR(regmap)) + return -ENODEV; + + return regmap_read_poll_timeout(regmap, AT91_SECUMOD_RAMRDY, val, + val & AT91_SECUMOD_RAMRDY_READY, + 10000, 500000); +} + +static const struct of_device_id sram_dt_ids[] = { + { .compatible = "mmio-sram" }, + { .compatible = "atmel,sama5d2-securam", .data = atmel_securam_wait }, + {} +}; + +static int sram_probe(struct platform_device *pdev) +{ + struct sram_dev *sram; + int ret; + int (*init_func)(void); + + sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL); + if (!sram) + return -ENOMEM; + + sram->dev = &pdev->dev; + + if (of_property_read_bool(pdev->dev.of_node, "no-memory-wc")) + sram->virt_base = devm_platform_ioremap_resource(pdev, 0); + else + sram->virt_base = devm_platform_ioremap_resource_wc(pdev, 0); + if (IS_ERR(sram->virt_base)) { + dev_err(&pdev->dev, "could not map SRAM registers\n"); + return PTR_ERR(sram->virt_base); + } + + sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY), + NUMA_NO_NODE, NULL); + if (IS_ERR(sram->pool)) + return PTR_ERR(sram->pool); + + sram->clk = devm_clk_get(sram->dev, NULL); + if (IS_ERR(sram->clk)) + sram->clk = NULL; + else + clk_prepare_enable(sram->clk); + + ret = sram_reserve_regions(sram, + platform_get_resource(pdev, IORESOURCE_MEM, 0)); + if (ret) + goto err_disable_clk; + + platform_set_drvdata(pdev, sram); + + init_func = of_device_get_match_data(&pdev->dev); + if (init_func) { + ret = init_func(); + if (ret) + goto err_free_partitions; + } + + dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n", + gen_pool_size(sram->pool) / 1024, sram->virt_base); + + return 0; + +err_free_partitions: + sram_free_partitions(sram); +err_disable_clk: + if (sram->clk) + clk_disable_unprepare(sram->clk); + + return ret; +} + +static int sram_remove(struct platform_device *pdev) +{ + struct sram_dev *sram = platform_get_drvdata(pdev); + + sram_free_partitions(sram); + + if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool)) + dev_err(sram->dev, "removed while SRAM allocated\n"); + + if (sram->clk) + clk_disable_unprepare(sram->clk); + + return 0; +} + +static struct platform_driver sram_driver = { + .driver = { + .name = "sram", + .of_match_table = sram_dt_ids, + }, + .probe = sram_probe, + .remove = sram_remove, +}; + +static int __init sram_init(void) +{ + return platform_driver_register(&sram_driver); +} + +postcore_initcall(sram_init); diff --git a/drivers/misc/sram.h b/drivers/misc/sram.h new file mode 100644 index 000000000..9c1d21ff7 --- /dev/null +++ b/drivers/misc/sram.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Defines for the SRAM driver + */ +#ifndef __SRAM_H +#define __SRAM_H + +struct sram_partition { + void __iomem *base; + + struct gen_pool *pool; + struct bin_attribute battr; + struct mutex lock; + struct list_head list; +}; + +struct sram_dev { + struct device *dev; + void __iomem *virt_base; + + 
struct gen_pool *pool; + struct clk *clk; + + struct sram_partition *partition; + u32 partitions; +}; + +struct sram_reserve { + struct list_head list; + u32 start; + u32 size; + bool export; + bool pool; + bool protect_exec; + const char *label; +}; + +#ifdef CONFIG_SRAM_EXEC +int sram_check_protect_exec(struct sram_dev *sram, struct sram_reserve *block, + struct sram_partition *part); +int sram_add_protect_exec(struct sram_partition *part); +#else +static inline int sram_check_protect_exec(struct sram_dev *sram, + struct sram_reserve *block, + struct sram_partition *part) +{ + return -ENODEV; +} + +static inline int sram_add_protect_exec(struct sram_partition *part) +{ + return -ENODEV; +} +#endif /* CONFIG_SRAM_EXEC */ +#endif /* __SRAM_H */ diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig new file mode 100644 index 000000000..1503a6496 --- /dev/null +++ b/drivers/misc/ti-st/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# TI's shared transport line discipline and the protocol +# drivers (BT, FM and GPS) +# +menu "Texas Instruments shared transport line discipline" +config TI_ST + tristate "Shared transport core driver" + depends on NET && TTY + depends on GPIOLIB || COMPILE_TEST + select FW_LOADER + help + This enables the shared transport core driver for TI + BT / FM and GPS combo chips. This enables protocol drivers + to register themselves with core and send data, the responses + are returned to relevant protocol drivers based on their + packet types. + +endmenu diff --git a/drivers/misc/ti-st/Makefile b/drivers/misc/ti-st/Makefile new file mode 100644 index 000000000..933931009 --- /dev/null +++ b/drivers/misc/ti-st/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for TI's shared transport line discipline +# and its protocol drivers (BT, FM, GPS) +# +obj-$(CONFIG_TI_ST) += st_drv.o +st_drv-objs := st_core.o st_kim.o st_ll.o diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c new file mode 100644 index 000000000..ca115f344 --- /dev/null +++ b/drivers/misc/ti-st/st_core.c @@ -0,0 +1,929 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Shared Transport Line discipline driver Core + * This hooks up ST KIM driver and ST LL driver + * Copyright (C) 2009-2010 Texas Instruments + * Author: Pavan Savoy <pavan_savoy@ti.com> + */ + +#define pr_fmt(fmt) "(stc): " fmt +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/tty.h> + +#include <linux/seq_file.h> +#include <linux/skbuff.h> + +#include <linux/ti_wilink_st.h> +#include <linux/netdevice.h> + +extern void st_kim_recv(void *, const unsigned char *, long); +void st_int_recv(void *, const unsigned char *, long); +/* + * function pointer pointing to either, + * st_kim_recv during registration to receive fw download responses + * st_int_recv after registration to receive proto stack responses + */ +static void (*st_recv) (void *, const unsigned char *, long); + +/********************************************************************/ +static void add_channel_to_table(struct st_data_s *st_gdata, + struct st_proto_s *new_proto) +{ + pr_info("%s: id %d\n", __func__, new_proto->chnl_id); + /* list now has the channel id as index itself */ + st_gdata->list[new_proto->chnl_id] = new_proto; + st_gdata->is_registered[new_proto->chnl_id] = true; +} + +static void remove_channel_from_table(struct st_data_s *st_gdata, + struct st_proto_s *proto) +{ + pr_info("%s: id %d\n", __func__, proto->chnl_id); +/* st_gdata->list[proto->chnl_id] = 
NULL; */ + st_gdata->is_registered[proto->chnl_id] = false; +} + +/* + * called from KIM during firmware download. + * + * This is a wrapper function to tty->ops->write_room. + * It returns number of free space available in + * uart tx buffer. + */ +int st_get_uart_wr_room(struct st_data_s *st_gdata) +{ + struct tty_struct *tty; + if (unlikely(st_gdata == NULL || st_gdata->tty == NULL)) { + pr_err("tty unavailable to perform write"); + return -1; + } + tty = st_gdata->tty; + return tty->ops->write_room(tty); +} + +/* + * can be called in from + * -- KIM (during fw download) + * -- ST Core (during st_write) + * + * This is the internal write function - a wrapper + * to tty->ops->write + */ +int st_int_write(struct st_data_s *st_gdata, + const unsigned char *data, int count) +{ + struct tty_struct *tty; + if (unlikely(st_gdata == NULL || st_gdata->tty == NULL)) { + pr_err("tty unavailable to perform write"); + return -EINVAL; + } + tty = st_gdata->tty; +#ifdef VERBOSE + print_hex_dump(KERN_DEBUG, "<out<", DUMP_PREFIX_NONE, + 16, 1, data, count, 0); +#endif + return tty->ops->write(tty, data, count); + +} + +/* + * push the skb received to relevant + * protocol stacks + */ +static void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata) +{ + pr_debug(" %s(prot:%d) ", __func__, chnl_id); + + if (unlikely + (st_gdata == NULL || st_gdata->rx_skb == NULL + || st_gdata->is_registered[chnl_id] == false)) { + pr_err("chnl_id %d not registered, no data to send?", + chnl_id); + kfree_skb(st_gdata->rx_skb); + return; + } + /* + * this cannot fail + * this shouldn't take long + * - should be just skb_queue_tail for the + * protocol stack driver + */ + if (likely(st_gdata->list[chnl_id]->recv != NULL)) { + if (unlikely + (st_gdata->list[chnl_id]->recv + (st_gdata->list[chnl_id]->priv_data, st_gdata->rx_skb) + != 0)) { + pr_err(" proto stack %d's ->recv failed", chnl_id); + kfree_skb(st_gdata->rx_skb); + return; + } + } else { + pr_err(" proto stack %d's ->recv null", chnl_id); + kfree_skb(st_gdata->rx_skb); + } + return; +} + +/* + * st_reg_complete - to call registration complete callbacks + * of all protocol stack drivers + * This function is being called with spin lock held, protocol drivers are + * only expected to complete their waits and do nothing more than that. + */ +static void st_reg_complete(struct st_data_s *st_gdata, int err) +{ + unsigned char i = 0; + pr_info(" %s ", __func__); + for (i = 0; i < ST_MAX_CHANNELS; i++) { + if (likely(st_gdata != NULL && + st_gdata->is_registered[i] == true && + st_gdata->list[i]->reg_complete_cb != NULL)) { + st_gdata->list[i]->reg_complete_cb + (st_gdata->list[i]->priv_data, err); + pr_info("protocol %d's cb sent %d\n", i, err); + if (err) { /* cleanup registered protocol */ + st_gdata->is_registered[i] = false; + if (st_gdata->protos_registered) + st_gdata->protos_registered--; + } + } + } +} + +static inline int st_check_data_len(struct st_data_s *st_gdata, + unsigned char chnl_id, int len) +{ + int room = skb_tailroom(st_gdata->rx_skb); + + pr_debug("len %d room %d", len, room); + + if (!len) { + /* + * Received packet has only packet header and + * has zero length payload. So, ask ST CORE to + * forward the packet to protocol driver (BT/FM/GPS) + */ + st_send_frame(chnl_id, st_gdata); + + } else if (len > room) { + /* + * Received packet's payload length is larger. + * We can't accommodate it in created skb. 
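+		 * Drop it; the reset to ST_W4_PACKET_TYPE below keeps
+		 * the damage confined to this one frame.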
+ */ + pr_err("Data length is too large len %d room %d", len, + room); + kfree_skb(st_gdata->rx_skb); + } else { + /* + * Packet header has non-zero payload length and + * we have enough space in created skb. Lets read + * payload data */ + st_gdata->rx_state = ST_W4_DATA; + st_gdata->rx_count = len; + return len; + } + + /* Change ST state to continue to process next packet */ + st_gdata->rx_state = ST_W4_PACKET_TYPE; + st_gdata->rx_skb = NULL; + st_gdata->rx_count = 0; + st_gdata->rx_chnl = 0; + + return 0; +} + +/* + * st_wakeup_ack - internal function for action when wake-up ack + * received + */ +static inline void st_wakeup_ack(struct st_data_s *st_gdata, + unsigned char cmd) +{ + struct sk_buff *waiting_skb; + unsigned long flags = 0; + + spin_lock_irqsave(&st_gdata->lock, flags); + /* + * de-Q from waitQ and Q in txQ now that the + * chip is awake + */ + while ((waiting_skb = skb_dequeue(&st_gdata->tx_waitq))) + skb_queue_tail(&st_gdata->txq, waiting_skb); + + /* state forwarded to ST LL */ + st_ll_sleep_state(st_gdata, (unsigned long)cmd); + spin_unlock_irqrestore(&st_gdata->lock, flags); + + /* wake up to send the recently copied skbs from waitQ */ + st_tx_wakeup(st_gdata); +} + +/* + * st_int_recv - ST's internal receive function. + * Decodes received RAW data and forwards to corresponding + * client drivers (Bluetooth,FM,GPS..etc). + * This can receive various types of packets, + * HCI-Events, ACL, SCO, 4 types of HCI-LL PM packets + * CH-8 packets from FM, CH-9 packets from GPS cores. + */ +void st_int_recv(void *disc_data, + const unsigned char *data, long count) +{ + char *ptr; + struct st_proto_s *proto; + unsigned short payload_len = 0; + int len = 0; + unsigned char type = 0; + unsigned char *plen; + struct st_data_s *st_gdata = (struct st_data_s *)disc_data; + unsigned long flags; + + ptr = (char *)data; + /* tty_receive sent null ? */ + if (unlikely(ptr == NULL) || (st_gdata == NULL)) { + pr_err(" received null from TTY "); + return; + } + + pr_debug("count %ld rx_state %ld" + "rx_count %ld", count, st_gdata->rx_state, + st_gdata->rx_count); + + spin_lock_irqsave(&st_gdata->lock, flags); + /* Decode received bytes here */ + while (count) { + if (st_gdata->rx_count) { + len = min_t(unsigned int, st_gdata->rx_count, count); + skb_put_data(st_gdata->rx_skb, ptr, len); + st_gdata->rx_count -= len; + count -= len; + ptr += len; + + if (st_gdata->rx_count) + continue; + + /* Check ST RX state machine , where are we? */ + switch (st_gdata->rx_state) { + /* Waiting for complete packet ? 
+			 */
+			case ST_W4_DATA:
+				pr_debug("Complete pkt received");
+				/*
+				 * Ask ST CORE to forward
+				 * the packet to protocol driver
+				 */
+				st_send_frame(st_gdata->rx_chnl, st_gdata);
+
+				st_gdata->rx_state = ST_W4_PACKET_TYPE;
+				st_gdata->rx_skb = NULL;
+				continue;
+			/* parse the header to know details */
+			case ST_W4_HEADER:
+				proto = st_gdata->list[st_gdata->rx_chnl];
+				plen =
+				&st_gdata->rx_skb->data
+				[proto->offset_len_in_hdr];
+				pr_debug("plen pointing to %x\n", *plen);
+				if (proto->len_size == 1)	/* 1 byte len field */
+					payload_len = *(unsigned char *)plen;
+				else if (proto->len_size == 2)
+					payload_len =
+					__le16_to_cpu(*(unsigned short *)plen);
+				else
+					pr_info("%s: invalid length "
+						"for id %d\n",
+						__func__, proto->chnl_id);
+				st_check_data_len(st_gdata, proto->chnl_id,
+						  payload_len);
+				pr_debug("off %d, pay len %d\n",
+					 proto->offset_len_in_hdr, payload_len);
+				continue;
+			}	/* end of switch rx_state */
+		}
+
+		/* end of if rx_count */
+
+		/*
+		 * Check first byte of packet and identify module
+		 * owner (BT/FM/GPS)
+		 */
+		switch (*ptr) {
+		case LL_SLEEP_IND:
+		case LL_SLEEP_ACK:
+		case LL_WAKE_UP_IND:
+			pr_debug("PM packet");
+			/*
+			 * this takes appropriate action based on
+			 * sleep state received --
+			 */
+			st_ll_sleep_state(st_gdata, *ptr);
+			/*
+			 * if WAKEUP_IND collides copy from waitq to txq
+			 * and assume chip awake
+			 */
+			spin_unlock_irqrestore(&st_gdata->lock, flags);
+			if (st_ll_getstate(st_gdata) == ST_LL_AWAKE)
+				st_wakeup_ack(st_gdata, LL_WAKE_UP_ACK);
+			spin_lock_irqsave(&st_gdata->lock, flags);
+
+			ptr++;
+			count--;
+			continue;
+		case LL_WAKE_UP_ACK:
+			pr_debug("PM packet");
+
+			spin_unlock_irqrestore(&st_gdata->lock, flags);
+			/* wake up ack received */
+			st_wakeup_ack(st_gdata, *ptr);
+			spin_lock_irqsave(&st_gdata->lock, flags);
+
+			ptr++;
+			count--;
+			continue;
+		/* Unknown packet? */
+		default:
+			type = *ptr;
+
+			/*
+			 * Default case means non-HCILL packets,
+			 * possibilities are packets for:
+			 * (a) a valid protocol - supported protocols
+			 *     within ST_MAX_CHANNELS.
+			 * (b) a registered protocol - checked via the
+			 *     "st_gdata->list[type] == NULL" test.
+			 * This rules out any invalid protocol and
+			 * unregistered protocols with channel ID < 16.
+			 */
+
+			if ((type >= ST_MAX_CHANNELS) ||
+			    (st_gdata->list[type] == NULL)) {
+				pr_err("chip/interface misbehavior: "
+				       "dropping frame starting "
+				       "with 0x%02x\n", type);
+				goto done;
+			}
+
+			st_gdata->rx_skb = alloc_skb(
+					st_gdata->list[type]->max_frame_size,
+					GFP_ATOMIC);
+			if (st_gdata->rx_skb == NULL) {
+				pr_err("out of memory: dropping\n");
+				goto done;
+			}
+
+			skb_reserve(st_gdata->rx_skb,
+				    st_gdata->list[type]->reserve);
+			/* next 2 required for BT only */
+			st_gdata->rx_skb->cb[0] = type; /*pkt_type*/
+			st_gdata->rx_skb->cb[1] = 0;	/*incoming*/
+			st_gdata->rx_chnl = *ptr;
+			st_gdata->rx_state = ST_W4_HEADER;
+			st_gdata->rx_count = st_gdata->list[type]->hdr_len;
+			pr_debug("rx_count %ld\n", st_gdata->rx_count);
+		}
+		ptr++;
+		count--;
+	}
+done:
+	spin_unlock_irqrestore(&st_gdata->lock, flags);
+	pr_debug("done %s", __func__);
+	return;
+}
+
+/*
+ * st_int_dequeue - internal de-Q function.
+ * If the previous data set was not written
+ * completely, return that skb which has the pending data.
+ * In normal cases, return top of txq.
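+ * (st_tx_wakeup() parks a partially written skb in st_gdata->tx_skb
+ * after skb_pull(); this function hands that remainder back before
+ * consulting the queue.)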
+ */ +static struct sk_buff *st_int_dequeue(struct st_data_s *st_gdata) +{ + struct sk_buff *returning_skb; + + pr_debug("%s", __func__); + if (st_gdata->tx_skb != NULL) { + returning_skb = st_gdata->tx_skb; + st_gdata->tx_skb = NULL; + return returning_skb; + } + return skb_dequeue(&st_gdata->txq); +} + +/* + * st_int_enqueue - internal Q-ing function. + * Will either Q the skb to txq or the tx_waitq + * depending on the ST LL state. + * If the chip is asleep, then Q it onto waitq and + * wakeup the chip. + * txq and waitq needs protection since the other contexts + * may be sending data, waking up chip. + */ +static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb) +{ + unsigned long flags = 0; + + pr_debug("%s", __func__); + spin_lock_irqsave(&st_gdata->lock, flags); + + switch (st_ll_getstate(st_gdata)) { + case ST_LL_AWAKE: + pr_debug("ST LL is AWAKE, sending normally"); + skb_queue_tail(&st_gdata->txq, skb); + break; + case ST_LL_ASLEEP_TO_AWAKE: + skb_queue_tail(&st_gdata->tx_waitq, skb); + break; + case ST_LL_AWAKE_TO_ASLEEP: + pr_err("ST LL is illegal state(%ld)," + "purging received skb.", st_ll_getstate(st_gdata)); + dev_kfree_skb_irq(skb); + break; + case ST_LL_ASLEEP: + skb_queue_tail(&st_gdata->tx_waitq, skb); + st_ll_wakeup(st_gdata); + break; + default: + pr_err("ST LL is illegal state(%ld)," + "purging received skb.", st_ll_getstate(st_gdata)); + dev_kfree_skb_irq(skb); + break; + } + + spin_unlock_irqrestore(&st_gdata->lock, flags); + pr_debug("done %s", __func__); + return; +} + +/* + * internal wakeup function + * called from either + * - TTY layer when write's finished + * - st_write (in context of the protocol stack) + */ +static void work_fn_write_wakeup(struct work_struct *work) +{ + struct st_data_s *st_gdata = container_of(work, struct st_data_s, + work_write_wakeup); + + st_tx_wakeup((void *)st_gdata); +} +void st_tx_wakeup(struct st_data_s *st_data) +{ + struct sk_buff *skb; + unsigned long flags; /* for irq save flags */ + pr_debug("%s", __func__); + /* check for sending & set flag sending here */ + if (test_and_set_bit(ST_TX_SENDING, &st_data->tx_state)) { + pr_debug("ST already sending"); + /* keep sending */ + set_bit(ST_TX_WAKEUP, &st_data->tx_state); + return; + /* TX_WAKEUP will be checked in another + * context + */ + } + do { /* come back if st_tx_wakeup is set */ + /* woke-up to write */ + clear_bit(ST_TX_WAKEUP, &st_data->tx_state); + while ((skb = st_int_dequeue(st_data))) { + int len; + spin_lock_irqsave(&st_data->lock, flags); + /* enable wake-up from TTY */ + set_bit(TTY_DO_WRITE_WAKEUP, &st_data->tty->flags); + len = st_int_write(st_data, skb->data, skb->len); + skb_pull(skb, len); + /* if skb->len = len as expected, skb->len=0 */ + if (skb->len) { + /* would be the next skb to be sent */ + st_data->tx_skb = skb; + spin_unlock_irqrestore(&st_data->lock, flags); + break; + } + dev_kfree_skb_irq(skb); + spin_unlock_irqrestore(&st_data->lock, flags); + } + /* if wake-up is set in another context- restart sending */ + } while (test_bit(ST_TX_WAKEUP, &st_data->tx_state)); + + /* clear flag sending */ + clear_bit(ST_TX_SENDING, &st_data->tx_state); +} + +/********************************************************************/ +/* functions called from ST KIM +*/ +void kim_st_list_protocols(struct st_data_s *st_gdata, void *buf) +{ + seq_printf(buf, "[%d]\nBT=%c\nFM=%c\nGPS=%c\n", + st_gdata->protos_registered, + st_gdata->is_registered[0x04] == true ? 'R' : 'U', + st_gdata->is_registered[0x08] == true ? 
'R' : 'U', + st_gdata->is_registered[0x09] == true ? 'R' : 'U'); +} + +/********************************************************************/ +/* + * functions called from protocol stack drivers + * to be EXPORT-ed + */ +long st_register(struct st_proto_s *new_proto) +{ + struct st_data_s *st_gdata; + long err = 0; + unsigned long flags = 0; + + st_kim_ref(&st_gdata, 0); + if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL + || new_proto->reg_complete_cb == NULL) { + pr_err("gdata/new_proto/recv or reg_complete_cb not ready"); + return -EINVAL; + } + + if (new_proto->chnl_id >= ST_MAX_CHANNELS) { + pr_err("chnl_id %d not supported", new_proto->chnl_id); + return -EPROTONOSUPPORT; + } + + if (st_gdata->is_registered[new_proto->chnl_id] == true) { + pr_err("chnl_id %d already registered", new_proto->chnl_id); + return -EALREADY; + } + + /* can be from process context only */ + spin_lock_irqsave(&st_gdata->lock, flags); + + if (test_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state)) { + pr_info(" ST_REG_IN_PROGRESS:%d ", new_proto->chnl_id); + /* fw download in progress */ + + add_channel_to_table(st_gdata, new_proto); + st_gdata->protos_registered++; + new_proto->write = st_write; + + set_bit(ST_REG_PENDING, &st_gdata->st_state); + spin_unlock_irqrestore(&st_gdata->lock, flags); + return -EINPROGRESS; + } else if (st_gdata->protos_registered == ST_EMPTY) { + pr_info(" chnl_id list empty :%d ", new_proto->chnl_id); + set_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state); + st_recv = st_kim_recv; + + /* enable the ST LL - to set default chip state */ + st_ll_enable(st_gdata); + + /* release lock previously held - re-locked below */ + spin_unlock_irqrestore(&st_gdata->lock, flags); + + /* + * this may take a while to complete + * since it involves BT fw download + */ + err = st_kim_start(st_gdata->kim_data); + if (err != 0) { + clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state); + if ((st_gdata->protos_registered != ST_EMPTY) && + (test_bit(ST_REG_PENDING, &st_gdata->st_state))) { + pr_err(" KIM failure complete callback "); + spin_lock_irqsave(&st_gdata->lock, flags); + st_reg_complete(st_gdata, err); + spin_unlock_irqrestore(&st_gdata->lock, flags); + clear_bit(ST_REG_PENDING, &st_gdata->st_state); + } + return -EINVAL; + } + + spin_lock_irqsave(&st_gdata->lock, flags); + + clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state); + st_recv = st_int_recv; + + /* + * this is where all pending registration + * are signalled to be complete by calling callback functions + */ + if ((st_gdata->protos_registered != ST_EMPTY) && + (test_bit(ST_REG_PENDING, &st_gdata->st_state))) { + pr_debug(" call reg complete callback "); + st_reg_complete(st_gdata, 0); + } + clear_bit(ST_REG_PENDING, &st_gdata->st_state); + + /* + * check for already registered once more, + * since the above check is old + */ + if (st_gdata->is_registered[new_proto->chnl_id] == true) { + pr_err(" proto %d already registered ", + new_proto->chnl_id); + spin_unlock_irqrestore(&st_gdata->lock, flags); + return -EALREADY; + } + + add_channel_to_table(st_gdata, new_proto); + st_gdata->protos_registered++; + new_proto->write = st_write; + spin_unlock_irqrestore(&st_gdata->lock, flags); + return err; + } + /* if fw is already downloaded & new stack registers protocol */ + else { + add_channel_to_table(st_gdata, new_proto); + st_gdata->protos_registered++; + new_proto->write = st_write; + + /* lock already held before entering else */ + spin_unlock_irqrestore(&st_gdata->lock, flags); + return err; + } +} +EXPORT_SYMBOL_GPL(st_register); 
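+
+/*
+ * Illustrative registration sequence for a protocol stack driver.
+ * This is a sketch only; the channel id, header geometry and the
+ * callbacks named here are assumed for the example, not defined by
+ * this file:
+ *
+ *	static long my_recv(void *priv_data, struct sk_buff *skb);
+ *	static void my_reg_complete(void *priv_data, int err);
+ *
+ *	static struct st_proto_s my_proto = {
+ *		.chnl_id		= 0x08,
+ *		.max_frame_size		= 1024,
+ *		.hdr_len		= 3,
+ *		.offset_len_in_hdr	= 1,
+ *		.len_size		= 2,
+ *		.reserve		= 8,
+ *		.recv			= my_recv,
+ *		.reg_complete_cb	= my_reg_complete,
+ *	};
+ *
+ *	long err = st_register(&my_proto);
+ *
+ * -EINPROGRESS here means firmware download is still running; wait
+ * for my_reg_complete() before using my_proto.write().
+ */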
+ +/* + * to unregister a protocol - + * to be called from protocol stack driver + */ +long st_unregister(struct st_proto_s *proto) +{ + long err = 0; + unsigned long flags = 0; + struct st_data_s *st_gdata; + + pr_debug("%s: %d ", __func__, proto->chnl_id); + + st_kim_ref(&st_gdata, 0); + if (!st_gdata || proto->chnl_id >= ST_MAX_CHANNELS) { + pr_err(" chnl_id %d not supported", proto->chnl_id); + return -EPROTONOSUPPORT; + } + + spin_lock_irqsave(&st_gdata->lock, flags); + + if (st_gdata->is_registered[proto->chnl_id] == false) { + pr_err(" chnl_id %d not registered", proto->chnl_id); + spin_unlock_irqrestore(&st_gdata->lock, flags); + return -EPROTONOSUPPORT; + } + + if (st_gdata->protos_registered) + st_gdata->protos_registered--; + + remove_channel_from_table(st_gdata, proto); + spin_unlock_irqrestore(&st_gdata->lock, flags); + + if ((st_gdata->protos_registered == ST_EMPTY) && + (!test_bit(ST_REG_PENDING, &st_gdata->st_state))) { + pr_info(" all chnl_ids unregistered "); + + /* stop traffic on tty */ + if (st_gdata->tty) { + tty_ldisc_flush(st_gdata->tty); + stop_tty(st_gdata->tty); + } + + /* all chnl_ids now unregistered */ + st_kim_stop(st_gdata->kim_data); + /* disable ST LL */ + st_ll_disable(st_gdata); + } + return err; +} + +/* + * called in protocol stack drivers + * via the write function pointer + */ +long st_write(struct sk_buff *skb) +{ + struct st_data_s *st_gdata; + long len; + + st_kim_ref(&st_gdata, 0); + if (unlikely(skb == NULL || st_gdata == NULL + || st_gdata->tty == NULL)) { + pr_err("data/tty unavailable to perform write"); + return -EINVAL; + } + + pr_debug("%d to be written", skb->len); + len = skb->len; + + /* st_ll to decide where to enqueue the skb */ + st_int_enqueue(st_gdata, skb); + /* wake up */ + st_tx_wakeup(st_gdata); + + /* return number of bytes written */ + return len; +} + +/* for protocols making use of shared transport */ +EXPORT_SYMBOL_GPL(st_unregister); + +/********************************************************************/ +/* + * functions called from TTY layer + */ +static int st_tty_open(struct tty_struct *tty) +{ + struct st_data_s *st_gdata; + pr_info("%s ", __func__); + + st_kim_ref(&st_gdata, 0); + st_gdata->tty = tty; + tty->disc_data = st_gdata; + + /* don't do an wakeup for now */ + clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + + /* mem already allocated + */ + tty->receive_room = 65536; + /* Flush any pending characters in the driver and discipline. */ + tty_ldisc_flush(tty); + tty_driver_flush_buffer(tty); + /* + * signal to UIM via KIM that - + * installation of N_TI_WL ldisc is complete + */ + st_kim_complete(st_gdata->kim_data); + pr_debug("done %s", __func__); + + return 0; +} + +static void st_tty_close(struct tty_struct *tty) +{ + unsigned char i; + unsigned long flags; + struct st_data_s *st_gdata = tty->disc_data; + + pr_info("%s ", __func__); + + /* + * TODO: + * if a protocol has been registered & line discipline + * un-installed for some reason - what should be done ? + */ + spin_lock_irqsave(&st_gdata->lock, flags); + for (i = ST_BT; i < ST_MAX_CHANNELS; i++) { + if (st_gdata->is_registered[i] == true) + pr_err("%d not un-registered", i); + st_gdata->list[i] = NULL; + st_gdata->is_registered[i] = false; + } + st_gdata->protos_registered = 0; + spin_unlock_irqrestore(&st_gdata->lock, flags); + /* + * signal to UIM via KIM that - + * N_TI_WL ldisc is un-installed + */ + st_kim_complete(st_gdata->kim_data); + st_gdata->tty = NULL; + /* Flush any pending characters in the driver and discipline. 
+	 */
+	tty_ldisc_flush(tty);
+	tty_driver_flush_buffer(tty);
+
+	spin_lock_irqsave(&st_gdata->lock, flags);
+	/* empty out txq and tx_waitq */
+	skb_queue_purge(&st_gdata->txq);
+	skb_queue_purge(&st_gdata->tx_waitq);
+	/* reset the TTY Rx states of ST */
+	st_gdata->rx_count = 0;
+	st_gdata->rx_state = ST_W4_PACKET_TYPE;
+	kfree_skb(st_gdata->rx_skb);
+	st_gdata->rx_skb = NULL;
+	spin_unlock_irqrestore(&st_gdata->lock, flags);
+
+	pr_debug("%s: done ", __func__);
+}
+
+static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
+			   char *tty_flags, int count)
+{
+#ifdef VERBOSE
+	print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
+		       16, 1, data, count, 0);
+#endif
+
+	/*
+	 * if fw download is in progress then route incoming data
+	 * to KIM for validation
+	 */
+	st_recv(tty->disc_data, data, count);
+	pr_debug("done %s", __func__);
+}
+
+/*
+ * wake-up callback invoked from the TTY layer;
+ * the internal wakeup function is scheduled from here
+ */
+static void st_tty_wakeup(struct tty_struct *tty)
+{
+	struct st_data_s *st_gdata = tty->disc_data;
+	pr_debug("%s ", __func__);
+	/* don't do a wakeup for now */
+	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+
+	/*
+	 * schedule the internal wakeup instead of calling directly to
+	 * avoid lockup (port->lock needed in tty->ops->write is
+	 * already taken here)
+	 */
+	schedule_work(&st_gdata->work_write_wakeup);
+}
+
+static void st_tty_flush_buffer(struct tty_struct *tty)
+{
+	struct st_data_s *st_gdata = tty->disc_data;
+	pr_debug("%s ", __func__);
+
+	kfree_skb(st_gdata->tx_skb);
+	st_gdata->tx_skb = NULL;
+
+	tty_driver_flush_buffer(tty);
+	return;
+}
+
+static struct tty_ldisc_ops st_ldisc_ops = {
+	.magic = TTY_LDISC_MAGIC,
+	.name = "n_st",
+	.open = st_tty_open,
+	.close = st_tty_close,
+	.receive_buf = st_tty_receive,
+	.write_wakeup = st_tty_wakeup,
+	.flush_buffer = st_tty_flush_buffer,
+	.owner = THIS_MODULE
+};
+
+/********************************************************************/
+int st_core_init(struct st_data_s **core_data)
+{
+	struct st_data_s *st_gdata;
+	long err;
+
+	err = tty_register_ldisc(N_TI_WL, &st_ldisc_ops);
+	if (err) {
+		pr_err("error registering %d line discipline %ld",
+		       N_TI_WL, err);
+		return err;
+	}
+	pr_debug("registered n_shared line discipline");
+
+	st_gdata = kzalloc(sizeof(struct st_data_s), GFP_KERNEL);
+	if (!st_gdata) {
+		pr_err("memory allocation failed");
+		err = tty_unregister_ldisc(N_TI_WL);
+		if (err)
+			pr_err("unable to un-register ldisc %ld", err);
+		err = -ENOMEM;
+		return err;
+	}
+
+	/* Initialize ST TxQ and Tx waitQ queue head. All BT/FM/GPS module skb's
+	 * will be pushed in this queue for actual transmission.
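+	 * st_int_enqueue() later picks txq or tx_waitq per skb,
+	 * depending on the ST LL power state.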
+ */ + skb_queue_head_init(&st_gdata->txq); + skb_queue_head_init(&st_gdata->tx_waitq); + + /* Locking used in st_int_enqueue() to avoid multiple execution */ + spin_lock_init(&st_gdata->lock); + + err = st_ll_init(st_gdata); + if (err) { + pr_err("error during st_ll initialization(%ld)", err); + kfree(st_gdata); + err = tty_unregister_ldisc(N_TI_WL); + if (err) + pr_err("unable to un-register ldisc"); + return err; + } + + INIT_WORK(&st_gdata->work_write_wakeup, work_fn_write_wakeup); + + *core_data = st_gdata; + return 0; +} + +void st_core_exit(struct st_data_s *st_gdata) +{ + long err; + /* internal module cleanup */ + err = st_ll_deinit(st_gdata); + if (err) + pr_err("error during deinit of ST LL %ld", err); + + if (st_gdata != NULL) { + /* Free ST Tx Qs and skbs */ + skb_queue_purge(&st_gdata->txq); + skb_queue_purge(&st_gdata->tx_waitq); + kfree_skb(st_gdata->rx_skb); + kfree_skb(st_gdata->tx_skb); + /* TTY ldisc cleanup */ + err = tty_unregister_ldisc(N_TI_WL); + if (err) + pr_err("unable to un-register ldisc %ld", err); + /* free the global data pointer */ + kfree(st_gdata); + } +} diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c new file mode 100644 index 000000000..f2f6cab97 --- /dev/null +++ b/drivers/misc/ti-st/st_kim.c @@ -0,0 +1,846 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Shared Transport Line discipline driver Core + * Init Manager module responsible for GPIO control + * and firmware download + * Copyright (C) 2009-2010 Texas Instruments + * Author: Pavan Savoy <pavan_savoy@ti.com> + */ + +#define pr_fmt(fmt) "(stk) :" fmt +#include <linux/platform_device.h> +#include <linux/jiffies.h> +#include <linux/firmware.h> +#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/gpio.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/sched.h> +#include <linux/sysfs.h> +#include <linux/tty.h> + +#include <linux/skbuff.h> +#include <linux/ti_wilink_st.h> +#include <linux/module.h> + +#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */ +static struct platform_device *st_kim_devices[MAX_ST_DEVICES]; + +/**********************************************************************/ +/* internal functions */ + +/* + * st_get_plat_device - + * function which returns the reference to the platform device + * requested by id. As of now only 1 such device exists (id=0) + * the context requesting for reference can get the id to be + * requested by a. The protocol driver which is registering or + * b. the tty device which is opened. + */ +static struct platform_device *st_get_plat_device(int id) +{ + return st_kim_devices[id]; +} + +/* + * validate_firmware_response - + * function to return whether the firmware response was proper + * in case of error don't complete so that waiting for proper + * response times out + */ +static void validate_firmware_response(struct kim_data_s *kim_gdata) +{ + struct sk_buff *skb = kim_gdata->rx_skb; + if (!skb) + return; + + /* + * these magic numbers are the position in the response buffer which + * allows us to distinguish whether the response is for the read + * version info. 
command + */ + if (skb->data[2] == 0x01 && skb->data[3] == 0x01 && + skb->data[4] == 0x10 && skb->data[5] == 0x00) { + /* fw version response */ + memcpy(kim_gdata->resp_buffer, + kim_gdata->rx_skb->data, + kim_gdata->rx_skb->len); + kim_gdata->rx_state = ST_W4_PACKET_TYPE; + kim_gdata->rx_skb = NULL; + kim_gdata->rx_count = 0; + } else if (unlikely(skb->data[5] != 0)) { + pr_err("no proper response during fw download"); + pr_err("data6 %x", skb->data[5]); + kfree_skb(skb); + return; /* keep waiting for the proper response */ + } + /* becos of all the script being downloaded */ + complete_all(&kim_gdata->kim_rcvd); + kfree_skb(skb); +} + +/* + * check for data len received inside kim_int_recv + * most often hit the last case to update state to waiting for data + */ +static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len) +{ + register int room = skb_tailroom(kim_gdata->rx_skb); + + pr_debug("len %d room %d", len, room); + + if (!len) { + validate_firmware_response(kim_gdata); + } else if (len > room) { + /* + * Received packet's payload length is larger. + * We can't accommodate it in created skb. + */ + pr_err("Data length is too large len %d room %d", len, + room); + kfree_skb(kim_gdata->rx_skb); + } else { + /* + * Packet header has non-zero payload length and + * we have enough space in created skb. Lets read + * payload data */ + kim_gdata->rx_state = ST_W4_DATA; + kim_gdata->rx_count = len; + return len; + } + + /* + * Change ST LL state to continue to process next + * packet + */ + kim_gdata->rx_state = ST_W4_PACKET_TYPE; + kim_gdata->rx_skb = NULL; + kim_gdata->rx_count = 0; + + return 0; +} + +/* + * kim_int_recv - receive function called during firmware download + * firmware download responses on different UART drivers + * have been observed to come in bursts of different + * tty_receive and hence the logic + */ +static void kim_int_recv(struct kim_data_s *kim_gdata, + const unsigned char *data, long count) +{ + const unsigned char *ptr; + int len = 0; + unsigned char *plen; + + pr_debug("%s", __func__); + /* Decode received bytes here */ + ptr = data; + if (unlikely(ptr == NULL)) { + pr_err(" received null from TTY "); + return; + } + + while (count) { + if (kim_gdata->rx_count) { + len = min_t(unsigned int, kim_gdata->rx_count, count); + skb_put_data(kim_gdata->rx_skb, ptr, len); + kim_gdata->rx_count -= len; + count -= len; + ptr += len; + + if (kim_gdata->rx_count) + continue; + + /* Check ST RX state machine , where are we? */ + switch (kim_gdata->rx_state) { + /* Waiting for complete packet ? */ + case ST_W4_DATA: + pr_debug("Complete pkt received"); + validate_firmware_response(kim_gdata); + kim_gdata->rx_state = ST_W4_PACKET_TYPE; + kim_gdata->rx_skb = NULL; + continue; + /* Waiting for Bluetooth event header ? */ + case ST_W4_HEADER: + plen = + (unsigned char *)&kim_gdata->rx_skb->data[1]; + pr_debug("event hdr: plen 0x%02x\n", *plen); + kim_check_data_len(kim_gdata, *plen); + continue; + } /* end of switch */ + } /* end of if rx_state */ + switch (*ptr) { + /* Bluetooth event packet? 
+		case 0x04:
+			kim_gdata->rx_state = ST_W4_HEADER;
+			kim_gdata->rx_count = 2;
+			break;
+		default:
+			pr_info("unknown packet");
+			ptr++;
+			count--;
+			continue;
+		}
+		ptr++;
+		count--;
+		kim_gdata->rx_skb =
+		    alloc_skb(1024+8, GFP_ATOMIC);
+		if (!kim_gdata->rx_skb) {
+			pr_err("can't allocate mem for new packet");
+			kim_gdata->rx_state = ST_W4_PACKET_TYPE;
+			kim_gdata->rx_count = 0;
+			return;
+		}
+		skb_reserve(kim_gdata->rx_skb, 8);
+		kim_gdata->rx_skb->cb[0] = 4;
+		kim_gdata->rx_skb->cb[1] = 0;
+
+	}
+	return;
+}
+
+static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
+{
+	unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0;
+	static const char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 };
+	long timeout;
+
+	pr_debug("%s", __func__);
+
+	reinit_completion(&kim_gdata->kim_rcvd);
+	if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) {
+		pr_err("kim: couldn't write 4 bytes");
+		return -EIO;
+	}
+
+	timeout = wait_for_completion_interruptible_timeout(
+		&kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME));
+	if (timeout <= 0) {
+		pr_err(" waiting for ver info- timed out or received signal");
+		return timeout ? -ERESTARTSYS : -ETIMEDOUT;
+	}
+	reinit_completion(&kim_gdata->kim_rcvd);
+	/*
+	 * positions 12 & 13 in the response buffer provide the
+	 * chip, major & minor numbers
+	 */
+
+	version =
+		MAKEWORD(kim_gdata->resp_buffer[12],
+			 kim_gdata->resp_buffer[13]);
+	chip = (version & 0x7C00) >> 10;
+	min_ver = (version & 0x007F);
+	maj_ver = (version & 0x0380) >> 7;
+
+	if (version & 0x8000)
+		maj_ver |= 0x0008;
+
+	sprintf(bts_scr_name, "ti-connectivity/TIInit_%d.%d.%d.bts",
+		chip, maj_ver, min_ver);
+
+	/* to be accessed later via sysfs entry */
+	kim_gdata->version.full = version;
+	kim_gdata->version.chip = chip;
+	kim_gdata->version.maj_ver = maj_ver;
+	kim_gdata->version.min_ver = min_ver;
+
+	pr_info("%s", bts_scr_name);
+	return 0;
+}
+
+static void skip_change_remote_baud(unsigned char **ptr, long *len)
+{
+	unsigned char *nxt_action, *cur_action;
+	cur_action = *ptr;
+
+	nxt_action = cur_action + sizeof(struct bts_action) +
+		((struct bts_action *) cur_action)->size;
+
+	if (((struct bts_action *) nxt_action)->type != ACTION_WAIT_EVENT) {
+		pr_err("invalid action after change remote baud command");
+	} else {
+		*ptr = *ptr + sizeof(struct bts_action) +
+			((struct bts_action *)cur_action)->size;
+		*len = *len - (sizeof(struct bts_action) +
+			       ((struct bts_action *)cur_action)->size);
+		/* warn user on not commenting these in firmware */
+		pr_warn("skipping the wait event of change remote baud");
+	}
+}
+
+/*
+ * download_firmware -
+ * internal function which parses through the .bts firmware
+ * script file, interpreting only SEND and DELAY actions as of now
+ */
+static long download_firmware(struct kim_data_s *kim_gdata)
+{
+	long err = 0;
+	long len = 0;
+	unsigned char *ptr = NULL;
+	unsigned char *action_ptr = NULL;
+	unsigned char bts_scr_name[40] = { 0 };	/* 40 char long bts scr name? */
+	int wr_room_space;
+	int cmd_size;
+	unsigned long timeout;
+
+	err = read_local_version(kim_gdata, bts_scr_name);
+	if (err != 0) {
+		pr_err("kim: failed to read local ver");
+		return err;
+	}
+	err =
+	    request_firmware(&kim_gdata->fw_entry, bts_scr_name,
+			     &kim_gdata->kim_pdev->dev);
+	if (unlikely((err != 0) || (kim_gdata->fw_entry->data == NULL) ||
+		     (kim_gdata->fw_entry->size == 0))) {
+		pr_err(" request_firmware failed(errno %ld) for %s", err,
+		       bts_scr_name);
+		return -EINVAL;
+	}
+	ptr = (void *)kim_gdata->fw_entry->data;
+	len = kim_gdata->fw_entry->size;
+	/*
+	 * skip over the bts_header (magic number and
+	 * version)
+	 */
+	ptr += sizeof(struct bts_header);
+	len -= sizeof(struct bts_header);
+
+	while (len > 0 && ptr) {
+		pr_debug(" action size %d, type %d ",
+			 ((struct bts_action *)ptr)->size,
+			 ((struct bts_action *)ptr)->type);
+
+		switch (((struct bts_action *)ptr)->type) {
+		case ACTION_SEND_COMMAND:	/* action send */
+			pr_debug("S");
+			action_ptr = &(((struct bts_action *)ptr)->data[0]);
+			if (unlikely
+			    (((struct hci_command *)action_ptr)->opcode ==
+			     0xFF36)) {
+				/*
+				 * ignore remote change
+				 * baud rate HCI VS command
+				 */
+				pr_warn("change remote baud"
+					" rate command in firmware");
+				skip_change_remote_baud(&ptr, &len);
+				break;
+			}
+			/*
+			 * Make sure we have enough free space in uart
+			 * tx buffer to write current firmware command
+			 */
+			cmd_size = ((struct bts_action *)ptr)->size;
+			timeout = jiffies + msecs_to_jiffies(CMD_WR_TIME);
+			do {
+				wr_room_space =
+					st_get_uart_wr_room(kim_gdata->core_data);
+				if (wr_room_space < 0) {
+					pr_err("Unable to get free "
+					       "space info from uart tx buffer");
+					release_firmware(kim_gdata->fw_entry);
+					return wr_room_space;
+				}
+				mdelay(1); /* wait 1ms before checking room */
+			} while ((wr_room_space < cmd_size) &&
+				 time_before(jiffies, timeout));
+
+			/* Timeout happened ? */
+			if (time_after_eq(jiffies, timeout)) {
+				pr_err("Timeout while waiting for free "
+				       "space in uart tx buffer");
+				release_firmware(kim_gdata->fw_entry);
+				return -ETIMEDOUT;
+			}
+			/*
+			 * reinit completion before sending for the
+			 * relevant wait
+			 */
+			reinit_completion(&kim_gdata->kim_rcvd);
+
+			/*
+			 * Free space found in uart buffer, call st_int_write
+			 * to send current firmware command to the uart tx
+			 * buffer.
+			 */
+			err = st_int_write(kim_gdata->core_data,
+					   ((struct bts_action_send *)action_ptr)->data,
+					   ((struct bts_action *)ptr)->size);
+			if (unlikely(err < 0)) {
+				release_firmware(kim_gdata->fw_entry);
+				return err;
+			}
+			/*
+			 * Check number of bytes written to the uart tx buffer
+			 * and requested command write size
+			 */
+			if (err != cmd_size) {
+				pr_err("Number of bytes written to uart "
+				       "tx buffer does not match "
+				       "requested cmd write size");
+				release_firmware(kim_gdata->fw_entry);
+				return -EIO;
+			}
+			break;
+		case ACTION_WAIT_EVENT:	/* wait */
+			pr_debug("W");
+			err = wait_for_completion_interruptible_timeout(
+					&kim_gdata->kim_rcvd,
+					msecs_to_jiffies(CMD_RESP_TIME));
+			if (err <= 0) {
+				pr_err("response timeout/signaled during fw download ");
+				/* timed out */
+				release_firmware(kim_gdata->fw_entry);
+				return err ?
-ERESTARTSYS : -ETIMEDOUT; + } + reinit_completion(&kim_gdata->kim_rcvd); + break; + case ACTION_DELAY: /* sleep */ + pr_info("sleep command in scr"); + action_ptr = &(((struct bts_action *)ptr)->data[0]); + mdelay(((struct bts_action_delay *)action_ptr)->msec); + break; + } + len = + len - (sizeof(struct bts_action) + + ((struct bts_action *)ptr)->size); + ptr = + ptr + sizeof(struct bts_action) + + ((struct bts_action *)ptr)->size; + } + /* fw download complete */ + release_firmware(kim_gdata->fw_entry); + return 0; +} + +/**********************************************************************/ +/* functions called from ST core */ +/* called from ST Core, when REG_IN_PROGRESS (registration in progress) + * can be because of + * 1. response to read local version + * 2. during send/recv's of firmware download + */ +void st_kim_recv(void *disc_data, const unsigned char *data, long count) +{ + struct st_data_s *st_gdata = (struct st_data_s *)disc_data; + struct kim_data_s *kim_gdata = st_gdata->kim_data; + + /* + * proceed to gather all data and distinguish read fw version response + * from other fw responses when data gathering is complete + */ + kim_int_recv(kim_gdata, data, count); + return; +} + +/* + * to signal completion of line discipline installation + * called from ST Core, upon tty_open + */ +void st_kim_complete(void *kim_data) +{ + struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; + complete(&kim_gdata->ldisc_installed); +} + +/* + * st_kim_start - called from ST Core upon 1st registration + * This involves toggling the chip enable gpio, reading + * the firmware version from chip, forming the fw file name + * based on the chip version, requesting the fw, parsing it + * and perform download(send/recv). + */ +long st_kim_start(void *kim_data) +{ + long err = 0; + long retry = POR_RETRY_COUNT; + struct ti_st_plat_data *pdata; + struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; + + pr_info(" %s", __func__); + pdata = kim_gdata->kim_pdev->dev.platform_data; + + do { + /* platform specific enabling code here */ + if (pdata->chip_enable) + pdata->chip_enable(kim_gdata); + + /* Configure BT nShutdown to HIGH state */ + gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW); + mdelay(5); /* FIXME: a proper toggle */ + gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH); + mdelay(100); + /* re-initialize the completion */ + reinit_completion(&kim_gdata->ldisc_installed); + /* send notification to UIM */ + kim_gdata->ldisc_install = 1; + pr_info("ldisc_install = 1"); + sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, + NULL, "install"); + /* wait for ldisc to be installed */ + err = wait_for_completion_interruptible_timeout( + &kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME)); + if (!err) { + /* + * ldisc installation timeout, + * flush uart, power cycle BT_EN + */ + pr_err("ldisc installation timeout"); + err = st_kim_stop(kim_gdata); + continue; + } else { + /* ldisc installed now */ + pr_info("line discipline installed"); + err = download_firmware(kim_gdata); + if (err != 0) { + /* + * ldisc installed but fw download failed, + * flush uart & power cycle BT_EN + */ + pr_err("download firmware failed"); + err = st_kim_stop(kim_gdata); + continue; + } else { /* on success don't retry */ + break; + } + } + } while (retry--); + return err; +} + +/* + * st_kim_stop - stop communication with chip. 
+ * This can be called from ST Core/KIM on + * (a) the last un-register, when the chip need not be powered thereafter, or + * (b) failure to either install the ldisc or download firmware. + * The function is responsible for: (a) notifying UIM about un-installation, + * (b) flushing the UART if the ldisc was installed, + * (c) resetting BT_EN (pulling nshutdown low at the end), and + * (d) invoking the platform's chip-disabling routine. + */ +long st_kim_stop(void *kim_data) +{ + long err = 0; + struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; + struct ti_st_plat_data *pdata = + kim_gdata->kim_pdev->dev.platform_data; + struct tty_struct *tty = kim_gdata->core_data->tty; + + reinit_completion(&kim_gdata->ldisc_installed); + + if (tty) { /* can be called before ldisc is installed */ + /* Flush any pending characters in the driver and discipline. */ + tty_ldisc_flush(tty); + tty_driver_flush_buffer(tty); + } + + /* send uninstall notification to UIM */ + pr_info("ldisc_install = 0"); + kim_gdata->ldisc_install = 0; + sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, NULL, "install"); + + /* wait for ldisc to be un-installed */ + err = wait_for_completion_interruptible_timeout( + &kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME)); + if (!err) { /* timeout */ + pr_err("timed out waiting for ldisc to be un-installed"); + err = -ETIMEDOUT; + } + + /* By default configure BT nShutdown to LOW state */ + gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW); + mdelay(1); + gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH); + mdelay(1); + gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW); + + /* platform specific disable */ + if (pdata->chip_disable) + pdata->chip_disable(kim_gdata); + return err; +} + +/**********************************************************************/ +/* functions called from subsystems */ +/* called when debugfs entry is read from */ + +static int version_show(struct seq_file *s, void *unused) +{ + struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private; + seq_printf(s, "%04X %d.%d.%d\n", kim_gdata->version.full, + kim_gdata->version.chip, kim_gdata->version.maj_ver, + kim_gdata->version.min_ver); + return 0; +} + +static int list_show(struct seq_file *s, void *unused) +{ + struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private; + kim_st_list_protocols(kim_gdata->core_data, s); + return 0; +} + +static ssize_t show_install(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kim_data_s *kim_data = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", kim_data->ldisc_install); +} + +#ifdef DEBUG +static ssize_t store_dev_name(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct kim_data_s *kim_data = dev_get_drvdata(dev); + pr_debug("storing dev name >%s<", buf); + strncpy(kim_data->dev_name, buf, count); + pr_debug("stored dev name >%s<", kim_data->dev_name); + return count; +} + +static ssize_t store_baud_rate(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct kim_data_s *kim_data = dev_get_drvdata(dev); + pr_debug("storing baud rate >%s<", buf); + sscanf(buf, "%ld", &kim_data->baud_rate); + pr_debug("stored baud rate >%ld<", kim_data->baud_rate); + return count; +} +#endif /* DEBUG */ + +static ssize_t show_dev_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kim_data_s *kim_data = dev_get_drvdata(dev); + return sprintf(buf, "%s\n", kim_data->dev_name); +} + +static ssize_t show_baud_rate(struct device *dev, + struct 
device_attribute *attr, char *buf) +{ + struct kim_data_s *kim_data = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", kim_data->baud_rate); +} + +static ssize_t show_flow_cntrl(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kim_data_s *kim_data = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", kim_data->flow_cntrl); +} + +/* structures specific for sysfs entries */ +static struct kobj_attribute ldisc_install = +__ATTR(install, 0444, (void *)show_install, NULL); + +static struct kobj_attribute uart_dev_name = +#ifdef DEBUG /* TODO: move this to debug-fs if possible */ +__ATTR(dev_name, 0644, (void *)show_dev_name, (void *)store_dev_name); +#else +__ATTR(dev_name, 0444, (void *)show_dev_name, NULL); +#endif + +static struct kobj_attribute uart_baud_rate = +#ifdef DEBUG /* TODO: move to debugfs */ +__ATTR(baud_rate, 0644, (void *)show_baud_rate, (void *)store_baud_rate); +#else +__ATTR(baud_rate, 0444, (void *)show_baud_rate, NULL); +#endif + +static struct kobj_attribute uart_flow_cntrl = +__ATTR(flow_cntrl, 0444, (void *)show_flow_cntrl, NULL); + +static struct attribute *uim_attrs[] = { + &ldisc_install.attr, + &uart_dev_name.attr, + &uart_baud_rate.attr, + &uart_flow_cntrl.attr, + NULL, +}; + +static const struct attribute_group uim_attr_grp = { + .attrs = uim_attrs, +}; + +/* + * st_kim_ref - reference the core's data + * This references the per-ST platform device in the arch/xx/ + * board-xx.c file. + * This would enable multiple such platform devices to exist + * on a given platform + */ +void st_kim_ref(struct st_data_s **core_data, int id) +{ + struct platform_device *pdev; + struct kim_data_s *kim_gdata; + /* get kim_gdata reference from platform device */ + pdev = st_get_plat_device(id); + if (!pdev) + goto err; + kim_gdata = platform_get_drvdata(pdev); + if (!kim_gdata) + goto err; + + *core_data = kim_gdata->core_data; + return; +err: + *core_data = NULL; +} + +DEFINE_SHOW_ATTRIBUTE(version); +DEFINE_SHOW_ATTRIBUTE(list); + +/**********************************************************************/ +/* functions called from platform device driver subsystem + * need to have a relevant platform device entry in the platform's + * board-*.c file + */ + +static struct dentry *kim_debugfs_dir; +static int kim_probe(struct platform_device *pdev) +{ + struct kim_data_s *kim_gdata; + struct ti_st_plat_data *pdata = pdev->dev.platform_data; + int err; + + if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) { + /* multiple devices could exist */ + st_kim_devices[pdev->id] = pdev; + } else { + /* platform's sure about existence of 1 device */ + st_kim_devices[0] = pdev; + } + + kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_KERNEL); + if (!kim_gdata) { + pr_err("no mem to allocate"); + return -ENOMEM; + } + platform_set_drvdata(pdev, kim_gdata); + + err = st_core_init(&kim_gdata->core_data); + if (err != 0) { + pr_err(" ST core init failed"); + err = -EIO; + goto err_core_init; + } + /* refer to itself */ + kim_gdata->core_data->kim_data = kim_gdata; + + /* Claim the chip enable nShutdown gpio from the system */ + kim_gdata->nshutdown = pdata->nshutdown_gpio; + err = gpio_request(kim_gdata->nshutdown, "kim"); + if (unlikely(err)) { + pr_err(" gpio %d request failed ", kim_gdata->nshutdown); + goto err_sysfs_group; + } + + /* Configure nShutdown GPIO as output=0 */ + err = gpio_direction_output(kim_gdata->nshutdown, 0); + if (unlikely(err)) { + pr_err(" unable to configure gpio %d", kim_gdata->nshutdown); + goto err_sysfs_group; + } + /* get reference 
of pdev for request_firmware */ + kim_gdata->kim_pdev = pdev; + init_completion(&kim_gdata->kim_rcvd); + init_completion(&kim_gdata->ldisc_installed); + + err = sysfs_create_group(&pdev->dev.kobj, &uim_attr_grp); + if (err) { + pr_err("failed to create sysfs entries"); + goto err_sysfs_group; + } + + /* copying platform data */ + strncpy(kim_gdata->dev_name, pdata->dev_name, UART_DEV_NAME_LEN); + kim_gdata->flow_cntrl = pdata->flow_cntrl; + kim_gdata->baud_rate = pdata->baud_rate; + pr_info("sysfs entries created\n"); + + kim_debugfs_dir = debugfs_create_dir("ti-st", NULL); + + debugfs_create_file("version", S_IRUGO, kim_debugfs_dir, + kim_gdata, &version_fops); + debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir, + kim_gdata, &list_fops); + return 0; + +err_sysfs_group: + st_core_exit(kim_gdata->core_data); + +err_core_init: + kfree(kim_gdata); + + return err; +} + +static int kim_remove(struct platform_device *pdev) +{ + /* free the GPIOs requested */ + struct ti_st_plat_data *pdata = pdev->dev.platform_data; + struct kim_data_s *kim_gdata; + + kim_gdata = platform_get_drvdata(pdev); + + /* + * Free the Bluetooth/FM/GPIO + * nShutdown gpio from the system + */ + gpio_free(pdata->nshutdown_gpio); + pr_info("nshutdown GPIO Freed"); + + debugfs_remove_recursive(kim_debugfs_dir); + sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp); + pr_info("sysfs entries removed"); + + kim_gdata->kim_pdev = NULL; + st_core_exit(kim_gdata->core_data); + + kfree(kim_gdata); + kim_gdata = NULL; + return 0; +} + +static int kim_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct ti_st_plat_data *pdata = pdev->dev.platform_data; + + if (pdata->suspend) + return pdata->suspend(pdev, state); + + return 0; +} + +static int kim_resume(struct platform_device *pdev) +{ + struct ti_st_plat_data *pdata = pdev->dev.platform_data; + + if (pdata->resume) + return pdata->resume(pdev); + + return 0; +} + +/**********************************************************************/ +/* entry point for ST KIM module, called in from ST Core */ +static struct platform_driver kim_platform_driver = { + .probe = kim_probe, + .remove = kim_remove, + .suspend = kim_suspend, + .resume = kim_resume, + .driver = { + .name = "kim", + }, +}; + +module_platform_driver(kim_platform_driver); + +MODULE_AUTHOR("Pavan Savoy <pavan_savoy@ti.com>"); +MODULE_DESCRIPTION("Shared Transport Driver for TI BT/FM/GPS combo chips "); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c new file mode 100644 index 000000000..07406140d --- /dev/null +++ b/drivers/misc/ti-st/st_ll.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Shared Transport driver + * HCI-LL module responsible for TI proprietary HCI_LL protocol + * Copyright (C) 2009-2010 Texas Instruments + * Author: Pavan Savoy <pavan_savoy@ti.com> + */ + +#define pr_fmt(fmt) "(stll) :" fmt +#include <linux/skbuff.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/ti_wilink_st.h> + +/**********************************************************************/ +/* internal functions */ +static void send_ll_cmd(struct st_data_s *st_data, + unsigned char cmd) +{ + + pr_debug("%s: writing %x", __func__, cmd); + st_int_write(st_data, &cmd, 1); + return; +} + +static void ll_device_want_to_sleep(struct st_data_s *st_data) +{ + struct kim_data_s *kim_data; + struct ti_st_plat_data *pdata; + + pr_debug("%s", __func__); + /* sanity check */ + if (st_data->ll_state != ST_LL_AWAKE) + pr_err("ERR hcill: 
ST_LL_GO_TO_SLEEP_IND" + "in state %ld", st_data->ll_state); + + send_ll_cmd(st_data, LL_SLEEP_ACK); + /* update state */ + st_data->ll_state = ST_LL_ASLEEP; + + /* communicate to platform about chip asleep */ + kim_data = st_data->kim_data; + pdata = kim_data->kim_pdev->dev.platform_data; + if (pdata->chip_asleep) + pdata->chip_asleep(NULL); +} + +static void ll_device_want_to_wakeup(struct st_data_s *st_data) +{ + struct kim_data_s *kim_data; + struct ti_st_plat_data *pdata; + + /* diff actions in diff states */ + switch (st_data->ll_state) { + case ST_LL_ASLEEP: + send_ll_cmd(st_data, LL_WAKE_UP_ACK); /* send wake_ack */ + break; + case ST_LL_ASLEEP_TO_AWAKE: + /* duplicate wake_ind */ + pr_err("duplicate wake_ind while waiting for Wake ack"); + break; + case ST_LL_AWAKE: + /* duplicate wake_ind */ + pr_err("duplicate wake_ind already AWAKE"); + break; + case ST_LL_AWAKE_TO_ASLEEP: + /* duplicate wake_ind */ + pr_err("duplicate wake_ind"); + break; + } + /* update state */ + st_data->ll_state = ST_LL_AWAKE; + + /* communicate to platform about chip wakeup */ + kim_data = st_data->kim_data; + pdata = kim_data->kim_pdev->dev.platform_data; + if (pdata->chip_awake) + pdata->chip_awake(NULL); +} + +/**********************************************************************/ +/* functions invoked by ST Core */ + +/* called when ST Core wants to + * enable ST LL */ +void st_ll_enable(struct st_data_s *ll) +{ + ll->ll_state = ST_LL_AWAKE; +} + +/* called when ST Core /local module wants to + * disable ST LL */ +void st_ll_disable(struct st_data_s *ll) +{ + ll->ll_state = ST_LL_INVALID; +} + +/* called when ST Core wants to update the state */ +void st_ll_wakeup(struct st_data_s *ll) +{ + if (likely(ll->ll_state != ST_LL_AWAKE)) { + send_ll_cmd(ll, LL_WAKE_UP_IND); /* WAKE_IND */ + ll->ll_state = ST_LL_ASLEEP_TO_AWAKE; + } else { + /* don't send the duplicate wake_indication */ + pr_err(" Chip already AWAKE "); + } +} + +/* called when ST Core wants the state */ +unsigned long st_ll_getstate(struct st_data_s *ll) +{ + pr_debug(" returning state %ld", ll->ll_state); + return ll->ll_state; +} + +/* called from ST Core, when a PM related packet arrives */ +unsigned long st_ll_sleep_state(struct st_data_s *st_data, + unsigned char cmd) +{ + switch (cmd) { + case LL_SLEEP_IND: /* sleep ind */ + pr_debug("sleep indication recvd"); + ll_device_want_to_sleep(st_data); + break; + case LL_SLEEP_ACK: /* sleep ack */ + pr_err("sleep ack rcvd: host shouldn't"); + break; + case LL_WAKE_UP_IND: /* wake ind */ + pr_debug("wake indication recvd"); + ll_device_want_to_wakeup(st_data); + break; + case LL_WAKE_UP_ACK: /* wake ack */ + pr_debug("wake ack rcvd"); + st_data->ll_state = ST_LL_AWAKE; + break; + default: + pr_err(" unknown input/state "); + return -EINVAL; + } + return 0; +} + +/* Called from ST CORE to initialize ST LL */ +long st_ll_init(struct st_data_s *ll) +{ + /* set state to invalid */ + ll->ll_state = ST_LL_INVALID; + return 0; +} + +/* Called from ST CORE to de-initialize ST LL */ +long st_ll_deinit(struct st_data_s *ll) +{ + return 0; +} diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c new file mode 100644 index 000000000..2aebbfda1 --- /dev/null +++ b/drivers/misc/tifm_7xx1.c @@ -0,0 +1,427 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * tifm_7xx1.c - TI FlashMedia driver + * + * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com> + */ + +#include <linux/tifm.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> + +#define DRIVER_NAME "tifm_7xx1" +#define DRIVER_VERSION 
"0.8" + +#define TIFM_IRQ_ENABLE 0x80000000 +#define TIFM_IRQ_SOCKMASK(x) (x) +#define TIFM_IRQ_CARDMASK(x) ((x) << 8) +#define TIFM_IRQ_FIFOMASK(x) ((x) << 16) +#define TIFM_IRQ_SETALL 0xffffffff + +static void tifm_7xx1_dummy_eject(struct tifm_adapter *fm, + struct tifm_dev *sock) +{ +} + +static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock) +{ + unsigned long flags; + + spin_lock_irqsave(&fm->lock, flags); + fm->socket_change_set |= 1 << sock->socket_id; + tifm_queue_work(&fm->media_switcher); + spin_unlock_irqrestore(&fm->lock, flags); +} + +static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id) +{ + struct tifm_adapter *fm = dev_id; + struct tifm_dev *sock; + unsigned int irq_status, cnt; + + spin_lock(&fm->lock); + irq_status = readl(fm->addr + FM_INTERRUPT_STATUS); + if (irq_status == 0 || irq_status == (~0)) { + spin_unlock(&fm->lock); + return IRQ_NONE; + } + + if (irq_status & TIFM_IRQ_ENABLE) { + writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); + + for (cnt = 0; cnt < fm->num_sockets; cnt++) { + sock = fm->sockets[cnt]; + if (sock) { + if ((irq_status >> cnt) & TIFM_IRQ_FIFOMASK(1)) + sock->data_event(sock); + if ((irq_status >> cnt) & TIFM_IRQ_CARDMASK(1)) + sock->card_event(sock); + } + } + + fm->socket_change_set |= irq_status + & ((1 << fm->num_sockets) - 1); + } + writel(irq_status, fm->addr + FM_INTERRUPT_STATUS); + + if (fm->finish_me) + complete_all(fm->finish_me); + else if (!fm->socket_change_set) + writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE); + else + tifm_queue_work(&fm->media_switcher); + + spin_unlock(&fm->lock); + return IRQ_HANDLED; +} + +static unsigned char tifm_7xx1_toggle_sock_power(char __iomem *sock_addr) +{ + unsigned int s_state; + int cnt; + + writel(0x0e00, sock_addr + SOCK_CONTROL); + + for (cnt = 16; cnt <= 256; cnt <<= 1) { + if (!(TIFM_SOCK_STATE_POWERED + & readl(sock_addr + SOCK_PRESENT_STATE))) + break; + + msleep(cnt); + } + + s_state = readl(sock_addr + SOCK_PRESENT_STATE); + if (!(TIFM_SOCK_STATE_OCCUPIED & s_state)) + return 0; + + writel(readl(sock_addr + SOCK_CONTROL) | TIFM_CTRL_LED, + sock_addr + SOCK_CONTROL); + + /* xd needs some extra time before power on */ + if (((readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7) + == TIFM_TYPE_XD) + msleep(40); + + writel((s_state & TIFM_CTRL_POWER_MASK) | 0x0c00, + sock_addr + SOCK_CONTROL); + /* wait for power to stabilize */ + msleep(20); + for (cnt = 16; cnt <= 256; cnt <<= 1) { + if ((TIFM_SOCK_STATE_POWERED + & readl(sock_addr + SOCK_PRESENT_STATE))) + break; + + msleep(cnt); + } + + writel(readl(sock_addr + SOCK_CONTROL) & (~TIFM_CTRL_LED), + sock_addr + SOCK_CONTROL); + + return (readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7; +} + +inline static void tifm_7xx1_sock_power_off(char __iomem *sock_addr) +{ + writel((~TIFM_CTRL_POWER_MASK) & readl(sock_addr + SOCK_CONTROL), + sock_addr + SOCK_CONTROL); +} + +inline static char __iomem * +tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num) +{ + return base_addr + ((sock_num + 1) << 10); +} + +static void tifm_7xx1_switch_media(struct work_struct *work) +{ + struct tifm_adapter *fm = container_of(work, struct tifm_adapter, + media_switcher); + struct tifm_dev *sock; + char __iomem *sock_addr; + unsigned long flags; + unsigned char media_id; + unsigned int socket_change_set, cnt; + + spin_lock_irqsave(&fm->lock, flags); + socket_change_set = fm->socket_change_set; + fm->socket_change_set = 0; + + dev_dbg(fm->dev.parent, "checking media set %x\n", + socket_change_set); + + if 
(!socket_change_set) { + spin_unlock_irqrestore(&fm->lock, flags); + return; + } + + for (cnt = 0; cnt < fm->num_sockets; cnt++) { + if (!(socket_change_set & (1 << cnt))) + continue; + sock = fm->sockets[cnt]; + if (sock) { + printk(KERN_INFO + "%s : demand removing card from socket %u:%u\n", + dev_name(&fm->dev), fm->id, cnt); + fm->sockets[cnt] = NULL; + sock_addr = sock->addr; + spin_unlock_irqrestore(&fm->lock, flags); + device_unregister(&sock->dev); + spin_lock_irqsave(&fm->lock, flags); + tifm_7xx1_sock_power_off(sock_addr); + writel(0x0e00, sock_addr + SOCK_CONTROL); + } + + spin_unlock_irqrestore(&fm->lock, flags); + + media_id = tifm_7xx1_toggle_sock_power( + tifm_7xx1_sock_addr(fm->addr, cnt)); + + // tifm_alloc_device will check if media_id is valid + sock = tifm_alloc_device(fm, cnt, media_id); + if (sock) { + sock->addr = tifm_7xx1_sock_addr(fm->addr, cnt); + + if (!device_register(&sock->dev)) { + spin_lock_irqsave(&fm->lock, flags); + if (!fm->sockets[cnt]) { + fm->sockets[cnt] = sock; + sock = NULL; + } + spin_unlock_irqrestore(&fm->lock, flags); + } + if (sock) + put_device(&sock->dev); + } + spin_lock_irqsave(&fm->lock, flags); + } + + writel(TIFM_IRQ_FIFOMASK(socket_change_set) + | TIFM_IRQ_CARDMASK(socket_change_set), + fm->addr + FM_CLEAR_INTERRUPT_ENABLE); + + writel(TIFM_IRQ_FIFOMASK(socket_change_set) + | TIFM_IRQ_CARDMASK(socket_change_set), + fm->addr + FM_SET_INTERRUPT_ENABLE); + + writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE); + spin_unlock_irqrestore(&fm->lock, flags); +} + +static int __maybe_unused tifm_7xx1_suspend(struct device *dev_d) +{ + struct pci_dev *dev = to_pci_dev(dev_d); + struct tifm_adapter *fm = pci_get_drvdata(dev); + int cnt; + + dev_dbg(&dev->dev, "suspending host\n"); + + for (cnt = 0; cnt < fm->num_sockets; cnt++) { + if (fm->sockets[cnt]) + tifm_7xx1_sock_power_off(fm->sockets[cnt]->addr); + } + + device_wakeup_disable(dev_d); + return 0; +} + +static int __maybe_unused tifm_7xx1_resume(struct device *dev_d) +{ + struct pci_dev *dev = to_pci_dev(dev_d); + struct tifm_adapter *fm = pci_get_drvdata(dev); + int rc; + unsigned long timeout; + unsigned int good_sockets = 0, bad_sockets = 0; + unsigned long flags; + /* Maximum number of entries is 4 */ + unsigned char new_ids[4]; + DECLARE_COMPLETION_ONSTACK(finish_resume); + + if (WARN_ON(fm->num_sockets > ARRAY_SIZE(new_ids))) + return -ENXIO; + + pci_set_master(dev); + + dev_dbg(&dev->dev, "resuming host\n"); + + for (rc = 0; rc < fm->num_sockets; rc++) + new_ids[rc] = tifm_7xx1_toggle_sock_power( + tifm_7xx1_sock_addr(fm->addr, rc)); + spin_lock_irqsave(&fm->lock, flags); + for (rc = 0; rc < fm->num_sockets; rc++) { + if (fm->sockets[rc]) { + if (fm->sockets[rc]->type == new_ids[rc]) + good_sockets |= 1 << rc; + else + bad_sockets |= 1 << rc; + } + } + + writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1), + fm->addr + FM_SET_INTERRUPT_ENABLE); + dev_dbg(&dev->dev, "change sets on resume: good %x, bad %x\n", + good_sockets, bad_sockets); + + fm->socket_change_set = 0; + if (good_sockets) { + fm->finish_me = &finish_resume; + spin_unlock_irqrestore(&fm->lock, flags); + timeout = wait_for_completion_timeout(&finish_resume, HZ); + dev_dbg(&dev->dev, "wait returned %lu\n", timeout); + writel(TIFM_IRQ_FIFOMASK(good_sockets) + | TIFM_IRQ_CARDMASK(good_sockets), + fm->addr + FM_CLEAR_INTERRUPT_ENABLE); + writel(TIFM_IRQ_FIFOMASK(good_sockets) + | TIFM_IRQ_CARDMASK(good_sockets), + fm->addr + FM_SET_INTERRUPT_ENABLE); + spin_lock_irqsave(&fm->lock, flags); + 
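+		/* wait is over: detach the completion and clear the change bits for sockets whose media did not change */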
fm->finish_me = NULL; + fm->socket_change_set ^= good_sockets & fm->socket_change_set; + } + + fm->socket_change_set |= bad_sockets; + if (fm->socket_change_set) + tifm_queue_work(&fm->media_switcher); + + spin_unlock_irqrestore(&fm->lock, flags); + writel(TIFM_IRQ_ENABLE, + fm->addr + FM_SET_INTERRUPT_ENABLE); + + return 0; +} + +static int tifm_7xx1_dummy_has_ms_pif(struct tifm_adapter *fm, + struct tifm_dev *sock) +{ + return 0; +} + +static int tifm_7xx1_has_ms_pif(struct tifm_adapter *fm, struct tifm_dev *sock) +{ + if (((fm->num_sockets == 4) && (sock->socket_id == 2)) + || ((fm->num_sockets == 2) && (sock->socket_id == 0))) + return 1; + + return 0; +} + +static int tifm_7xx1_probe(struct pci_dev *dev, + const struct pci_device_id *dev_id) +{ + struct tifm_adapter *fm; + int pci_dev_busy = 0; + int rc; + + rc = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + rc = pci_enable_device(dev); + if (rc) + return rc; + + pci_set_master(dev); + + rc = pci_request_regions(dev, DRIVER_NAME); + if (rc) { + pci_dev_busy = 1; + goto err_out; + } + + pci_intx(dev, 1); + + fm = tifm_alloc_adapter(dev->device == PCI_DEVICE_ID_TI_XX21_XX11_FM + ? 4 : 2, &dev->dev); + if (!fm) { + rc = -ENOMEM; + goto err_out_int; + } + + INIT_WORK(&fm->media_switcher, tifm_7xx1_switch_media); + fm->eject = tifm_7xx1_eject; + fm->has_ms_pif = tifm_7xx1_has_ms_pif; + pci_set_drvdata(dev, fm); + + fm->addr = pci_ioremap_bar(dev, 0); + if (!fm->addr) { + rc = -ENODEV; + goto err_out_free; + } + + rc = request_irq(dev->irq, tifm_7xx1_isr, IRQF_SHARED, DRIVER_NAME, fm); + if (rc) + goto err_out_unmap; + + rc = tifm_add_adapter(fm); + if (rc) + goto err_out_irq; + + writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1), + fm->addr + FM_CLEAR_INTERRUPT_ENABLE); + writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1), + fm->addr + FM_SET_INTERRUPT_ENABLE); + return 0; + +err_out_irq: + free_irq(dev->irq, fm); +err_out_unmap: + iounmap(fm->addr); +err_out_free: + tifm_free_adapter(fm); +err_out_int: + pci_intx(dev, 0); + pci_release_regions(dev); +err_out: + if (!pci_dev_busy) + pci_disable_device(dev); + return rc; +} + +static void tifm_7xx1_remove(struct pci_dev *dev) +{ + struct tifm_adapter *fm = pci_get_drvdata(dev); + int cnt; + + fm->eject = tifm_7xx1_dummy_eject; + fm->has_ms_pif = tifm_7xx1_dummy_has_ms_pif; + writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); + free_irq(dev->irq, fm); + + tifm_remove_adapter(fm); + + for (cnt = 0; cnt < fm->num_sockets; cnt++) + tifm_7xx1_sock_power_off(tifm_7xx1_sock_addr(fm->addr, cnt)); + + iounmap(fm->addr); + pci_intx(dev, 0); + pci_release_regions(dev); + + pci_disable_device(dev); + tifm_free_adapter(fm); +} + +static const struct pci_device_id tifm_7xx1_pci_tbl[] = { + { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11_FM, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0 }, /* xx21 - the one I have */ + { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX12_FM, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX20_FM, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0 }, + { } +}; + +static SIMPLE_DEV_PM_OPS(tifm_7xx1_pm_ops, tifm_7xx1_suspend, tifm_7xx1_resume); + +static struct pci_driver tifm_7xx1_driver = { + .name = DRIVER_NAME, + .id_table = tifm_7xx1_pci_tbl, + .probe = tifm_7xx1_probe, + .remove = tifm_7xx1_remove, + .driver.pm = &tifm_7xx1_pm_ops, +}; + +module_pci_driver(tifm_7xx1_driver); +MODULE_AUTHOR("Alex Dubov"); +MODULE_DESCRIPTION("TI FlashMedia host driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, 
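+		/* export the id list so the module autoloads on matching PCI devices */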
tifm_7xx1_pci_tbl); +MODULE_VERSION(DRIVER_VERSION); diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c new file mode 100644 index 000000000..667e574a7 --- /dev/null +++ b/drivers/misc/tifm_core.c @@ -0,0 +1,367 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * tifm_core.c - TI FlashMedia driver + * + * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com> + */ + +#include <linux/tifm.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/idr.h> +#include <linux/module.h> + +#define DRIVER_NAME "tifm_core" +#define DRIVER_VERSION "0.8" + +static struct workqueue_struct *workqueue; +static DEFINE_IDR(tifm_adapter_idr); +static DEFINE_SPINLOCK(tifm_adapter_lock); + +static const char *tifm_media_type_name(unsigned char type, unsigned char nt) +{ + const char *card_type_name[3][3] = { + { "SmartMedia/xD", "MemoryStick", "MMC/SD" }, + { "XD", "MS", "SD"}, + { "xd", "ms", "sd"} + }; + + if (nt > 2 || type < 1 || type > 3) + return NULL; + return card_type_name[nt][type - 1]; +} + +static int tifm_dev_match(struct tifm_dev *sock, struct tifm_device_id *id) +{ + if (sock->type == id->type) + return 1; + return 0; +} + +static int tifm_bus_match(struct device *dev, struct device_driver *drv) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + struct tifm_driver *fm_drv = container_of(drv, struct tifm_driver, + driver); + struct tifm_device_id *ids = fm_drv->id_table; + + if (ids) { + while (ids->type) { + if (tifm_dev_match(sock, ids)) + return 1; + ++ids; + } + } + return 0; +} + +static int tifm_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + + if (add_uevent_var(env, "TIFM_CARD_TYPE=%s", tifm_media_type_name(sock->type, 1))) + return -ENOMEM; + + return 0; +} + +static int tifm_device_probe(struct device *dev) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, + driver); + int rc = -ENODEV; + + get_device(dev); + if (dev->driver && drv->probe) { + rc = drv->probe(sock); + if (!rc) + return 0; + } + put_device(dev); + return rc; +} + +static void tifm_dummy_event(struct tifm_dev *sock) +{ + return; +} + +static int tifm_device_remove(struct device *dev) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, + driver); + + if (dev->driver && drv->remove) { + sock->card_event = tifm_dummy_event; + sock->data_event = tifm_dummy_event; + drv->remove(sock); + sock->dev.driver = NULL; + } + + put_device(dev); + return 0; +} + +#ifdef CONFIG_PM + +static int tifm_device_suspend(struct device *dev, pm_message_t state) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, + driver); + + if (dev->driver && drv->suspend) + return drv->suspend(sock, state); + return 0; +} + +static int tifm_device_resume(struct device *dev) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver, + driver); + + if (dev->driver && drv->resume) + return drv->resume(sock); + return 0; +} + +#else + +#define tifm_device_suspend NULL +#define tifm_device_resume NULL + +#endif /* CONFIG_PM */ + +static ssize_t type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + 
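+	/* raw numeric media type; see tifm_media_type_name() for decoding */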
return sprintf(buf, "%x", sock->type); +} +static DEVICE_ATTR_RO(type); + +static struct attribute *tifm_dev_attrs[] = { + &dev_attr_type.attr, + NULL, +}; +ATTRIBUTE_GROUPS(tifm_dev); + +static struct bus_type tifm_bus_type = { + .name = "tifm", + .dev_groups = tifm_dev_groups, + .match = tifm_bus_match, + .uevent = tifm_uevent, + .probe = tifm_device_probe, + .remove = tifm_device_remove, + .suspend = tifm_device_suspend, + .resume = tifm_device_resume +}; + +static void tifm_free(struct device *dev) +{ + struct tifm_adapter *fm = container_of(dev, struct tifm_adapter, dev); + + kfree(fm); +} + +static struct class tifm_adapter_class = { + .name = "tifm_adapter", + .dev_release = tifm_free +}; + +struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets, + struct device *dev) +{ + struct tifm_adapter *fm; + + fm = kzalloc(sizeof(struct tifm_adapter) + + sizeof(struct tifm_dev*) * num_sockets, GFP_KERNEL); + if (fm) { + fm->dev.class = &tifm_adapter_class; + fm->dev.parent = dev; + device_initialize(&fm->dev); + spin_lock_init(&fm->lock); + fm->num_sockets = num_sockets; + } + return fm; +} +EXPORT_SYMBOL(tifm_alloc_adapter); + +int tifm_add_adapter(struct tifm_adapter *fm) +{ + int rc; + + idr_preload(GFP_KERNEL); + spin_lock(&tifm_adapter_lock); + rc = idr_alloc(&tifm_adapter_idr, fm, 0, 0, GFP_NOWAIT); + if (rc >= 0) + fm->id = rc; + spin_unlock(&tifm_adapter_lock); + idr_preload_end(); + if (rc < 0) + return rc; + + dev_set_name(&fm->dev, "tifm%u", fm->id); + rc = device_add(&fm->dev); + if (rc) { + spin_lock(&tifm_adapter_lock); + idr_remove(&tifm_adapter_idr, fm->id); + spin_unlock(&tifm_adapter_lock); + } + + return rc; +} +EXPORT_SYMBOL(tifm_add_adapter); + +void tifm_remove_adapter(struct tifm_adapter *fm) +{ + unsigned int cnt; + + flush_workqueue(workqueue); + for (cnt = 0; cnt < fm->num_sockets; ++cnt) { + if (fm->sockets[cnt]) + device_unregister(&fm->sockets[cnt]->dev); + } + + spin_lock(&tifm_adapter_lock); + idr_remove(&tifm_adapter_idr, fm->id); + spin_unlock(&tifm_adapter_lock); + device_del(&fm->dev); +} +EXPORT_SYMBOL(tifm_remove_adapter); + +void tifm_free_adapter(struct tifm_adapter *fm) +{ + put_device(&fm->dev); +} +EXPORT_SYMBOL(tifm_free_adapter); + +void tifm_free_device(struct device *dev) +{ + struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); + kfree(sock); +} +EXPORT_SYMBOL(tifm_free_device); + +struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id, + unsigned char type) +{ + struct tifm_dev *sock = NULL; + + if (!tifm_media_type_name(type, 0)) + return sock; + + sock = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL); + if (sock) { + spin_lock_init(&sock->lock); + sock->type = type; + sock->socket_id = id; + sock->card_event = tifm_dummy_event; + sock->data_event = tifm_dummy_event; + + sock->dev.parent = fm->dev.parent; + sock->dev.bus = &tifm_bus_type; + sock->dev.dma_mask = fm->dev.parent->dma_mask; + sock->dev.release = tifm_free_device; + + dev_set_name(&sock->dev, "tifm_%s%u:%u", + tifm_media_type_name(type, 2), fm->id, id); + printk(KERN_INFO DRIVER_NAME + ": %s card detected in socket %u:%u\n", + tifm_media_type_name(type, 0), fm->id, id); + } + return sock; +} +EXPORT_SYMBOL(tifm_alloc_device); + +void tifm_eject(struct tifm_dev *sock) +{ + struct tifm_adapter *fm = dev_get_drvdata(sock->dev.parent); + fm->eject(fm, sock); +} +EXPORT_SYMBOL(tifm_eject); + +int tifm_has_ms_pif(struct tifm_dev *sock) +{ + struct tifm_adapter *fm = dev_get_drvdata(sock->dev.parent); + return fm->has_ms_pif(fm, sock); +} 
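+/* exported so media drivers can test whether their socket is wired for the MemoryStick parallel interface */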
+EXPORT_SYMBOL(tifm_has_ms_pif); + +int tifm_map_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents, + int direction) +{ + return pci_map_sg(to_pci_dev(sock->dev.parent), sg, nents, direction); +} +EXPORT_SYMBOL(tifm_map_sg); + +void tifm_unmap_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents, + int direction) +{ + pci_unmap_sg(to_pci_dev(sock->dev.parent), sg, nents, direction); +} +EXPORT_SYMBOL(tifm_unmap_sg); + +void tifm_queue_work(struct work_struct *work) +{ + queue_work(workqueue, work); +} +EXPORT_SYMBOL(tifm_queue_work); + +int tifm_register_driver(struct tifm_driver *drv) +{ + drv->driver.bus = &tifm_bus_type; + + return driver_register(&drv->driver); +} +EXPORT_SYMBOL(tifm_register_driver); + +void tifm_unregister_driver(struct tifm_driver *drv) +{ + driver_unregister(&drv->driver); +} +EXPORT_SYMBOL(tifm_unregister_driver); + +static int __init tifm_init(void) +{ + int rc; + + workqueue = create_freezable_workqueue("tifm"); + if (!workqueue) + return -ENOMEM; + + rc = bus_register(&tifm_bus_type); + + if (rc) + goto err_out_wq; + + rc = class_register(&tifm_adapter_class); + if (!rc) + return 0; + + bus_unregister(&tifm_bus_type); + +err_out_wq: + destroy_workqueue(workqueue); + + return rc; +} + +static void __exit tifm_exit(void) +{ + class_unregister(&tifm_adapter_class); + bus_unregister(&tifm_bus_type); + destroy_workqueue(workqueue); +} + +subsys_initcall(tifm_init); +module_exit(tifm_exit); + +MODULE_AUTHOR("Alex Dubov"); +MODULE_DESCRIPTION("TI FlashMedia core driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRIVER_VERSION); diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c new file mode 100644 index 000000000..6d71865c8 --- /dev/null +++ b/drivers/misc/tsl2550.c @@ -0,0 +1,453 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * tsl2550.c - Linux kernel module for the ambient light sensor + * + * Copyright (C) 2007 Rodolfo Giometti <giometti@linux.it> + * Copyright (C) 2007 Eurotech S.p.A. <info@eurotech.it> + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/mutex.h> + +#define TSL2550_DRV_NAME "tsl2550" +#define DRIVER_VERSION "1.2" + +/* + * Defines + */ + +#define TSL2550_POWER_DOWN 0x00 +#define TSL2550_POWER_UP 0x03 +#define TSL2550_STANDARD_RANGE 0x18 +#define TSL2550_EXTENDED_RANGE 0x1d +#define TSL2550_READ_ADC0 0x43 +#define TSL2550_READ_ADC1 0x83 + +/* + * Structs + */ + +struct tsl2550_data { + struct i2c_client *client; + struct mutex update_lock; + + unsigned int power_state:1; + unsigned int operating_mode:1; +}; + +/* + * Global data + */ + +static const u8 TSL2550_MODE_RANGE[2] = { + TSL2550_STANDARD_RANGE, TSL2550_EXTENDED_RANGE, +}; + +/* + * Management functions + */ + +static int tsl2550_set_operating_mode(struct i2c_client *client, int mode) +{ + struct tsl2550_data *data = i2c_get_clientdata(client); + + int ret = i2c_smbus_write_byte(client, TSL2550_MODE_RANGE[mode]); + + data->operating_mode = mode; + + return ret; +} + +static int tsl2550_set_power_state(struct i2c_client *client, int state) +{ + struct tsl2550_data *data = i2c_get_clientdata(client); + int ret; + + if (state == 0) + ret = i2c_smbus_write_byte(client, TSL2550_POWER_DOWN); + else { + ret = i2c_smbus_write_byte(client, TSL2550_POWER_UP); + + /* On power up we should reset operating mode also... 
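+		 * (re-send the cached operating_mode so the chip and the
+		 * driver state stay in sync)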
*/ + tsl2550_set_operating_mode(client, data->operating_mode); + } + + data->power_state = state; + + return ret; +} + +static int tsl2550_get_adc_value(struct i2c_client *client, u8 cmd) +{ + int ret; + + ret = i2c_smbus_read_byte_data(client, cmd); + if (ret < 0) + return ret; + if (!(ret & 0x80)) + return -EAGAIN; + return ret & 0x7f; /* remove the "valid" bit */ +} + +/* + * LUX calculation + */ + +#define TSL2550_MAX_LUX 1846 + +static const u8 ratio_lut[] = { + 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 98, 98, 98, 98, 98, + 98, 98, 97, 97, 97, 97, 97, 96, + 96, 96, 96, 95, 95, 95, 94, 94, + 93, 93, 93, 92, 92, 91, 91, 90, + 89, 89, 88, 87, 87, 86, 85, 84, + 83, 82, 81, 80, 79, 78, 77, 75, + 74, 73, 71, 69, 68, 66, 64, 62, + 60, 58, 56, 54, 52, 49, 47, 44, + 42, 41, 40, 40, 39, 39, 38, 38, + 37, 37, 37, 36, 36, 36, 35, 35, + 35, 35, 34, 34, 34, 34, 33, 33, + 33, 33, 32, 32, 32, 32, 32, 31, + 31, 31, 31, 31, 30, 30, 30, 30, + 30, +}; + +static const u16 count_lut[] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 18, 20, 22, 24, 26, 28, 30, + 32, 34, 36, 38, 40, 42, 44, 46, + 49, 53, 57, 61, 65, 69, 73, 77, + 81, 85, 89, 93, 97, 101, 105, 109, + 115, 123, 131, 139, 147, 155, 163, 171, + 179, 187, 195, 203, 211, 219, 227, 235, + 247, 263, 279, 295, 311, 327, 343, 359, + 375, 391, 407, 423, 439, 455, 471, 487, + 511, 543, 575, 607, 639, 671, 703, 735, + 767, 799, 831, 863, 895, 927, 959, 991, + 1039, 1103, 1167, 1231, 1295, 1359, 1423, 1487, + 1551, 1615, 1679, 1743, 1807, 1871, 1935, 1999, + 2095, 2223, 2351, 2479, 2607, 2735, 2863, 2991, + 3119, 3247, 3375, 3503, 3631, 3759, 3887, 4015, +}; + +/* + * This function is described into Taos TSL2550 Designer's Notebook + * pages 2, 3. + */ +static int tsl2550_calculate_lux(u8 ch0, u8 ch1) +{ + unsigned int lux; + + /* Look up count from channel values */ + u16 c0 = count_lut[ch0]; + u16 c1 = count_lut[ch1]; + + /* Avoid division by 0 and count 1 cannot be greater than count 0 */ + if (c1 <= c0) + if (c0) { + /* + * Calculate ratio. + * Note: the "128" is a scaling factor + */ + u8 r = c1 * 128 / c0; + + /* Calculate LUX */ + lux = ((c0 - c1) * ratio_lut[r]) / 256; + } else + lux = 0; + else + return 0; + + /* LUX range check */ + return lux > TSL2550_MAX_LUX ? 
TSL2550_MAX_LUX : lux; +} + +/* + * SysFS support + */ + +static ssize_t tsl2550_show_power_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev)); + + return sprintf(buf, "%u\n", data->power_state); +} + +static ssize_t tsl2550_store_power_state(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct tsl2550_data *data = i2c_get_clientdata(client); + unsigned long val = simple_strtoul(buf, NULL, 10); + int ret; + + if (val > 1) + return -EINVAL; + + mutex_lock(&data->update_lock); + ret = tsl2550_set_power_state(client, val); + mutex_unlock(&data->update_lock); + + if (ret < 0) + return ret; + + return count; +} + +static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO, + tsl2550_show_power_state, tsl2550_store_power_state); + +static ssize_t tsl2550_show_operating_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev)); + + return sprintf(buf, "%u\n", data->operating_mode); +} + +static ssize_t tsl2550_store_operating_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct tsl2550_data *data = i2c_get_clientdata(client); + unsigned long val = simple_strtoul(buf, NULL, 10); + int ret; + + if (val > 1) + return -EINVAL; + + if (data->power_state == 0) + return -EBUSY; + + mutex_lock(&data->update_lock); + ret = tsl2550_set_operating_mode(client, val); + mutex_unlock(&data->update_lock); + + if (ret < 0) + return ret; + + return count; +} + +static DEVICE_ATTR(operating_mode, S_IWUSR | S_IRUGO, + tsl2550_show_operating_mode, tsl2550_store_operating_mode); + +static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf) +{ + struct tsl2550_data *data = i2c_get_clientdata(client); + u8 ch0, ch1; + int ret; + + ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC0); + if (ret < 0) + return ret; + ch0 = ret; + + ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC1); + if (ret < 0) + return ret; + ch1 = ret; + + /* Do the job */ + ret = tsl2550_calculate_lux(ch0, ch1); + if (ret < 0) + return ret; + if (data->operating_mode == 1) + ret *= 5; + + return sprintf(buf, "%d\n", ret); +} + +static ssize_t tsl2550_show_lux1_input(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct tsl2550_data *data = i2c_get_clientdata(client); + int ret; + + /* No LUX data if not operational */ + if (!data->power_state) + return -EBUSY; + + mutex_lock(&data->update_lock); + ret = __tsl2550_show_lux(client, buf); + mutex_unlock(&data->update_lock); + + return ret; +} + +static DEVICE_ATTR(lux1_input, S_IRUGO, + tsl2550_show_lux1_input, NULL); + +static struct attribute *tsl2550_attributes[] = { + &dev_attr_power_state.attr, + &dev_attr_operating_mode.attr, + &dev_attr_lux1_input.attr, + NULL +}; + +static const struct attribute_group tsl2550_attr_group = { + .attrs = tsl2550_attributes, +}; + +/* + * Initialization function + */ + +static int tsl2550_init_client(struct i2c_client *client) +{ + struct tsl2550_data *data = i2c_get_clientdata(client); + int err; + + /* + * Probe the chip. 
To do so we try to power up the device and then to + * read back the 0x03 code + */ + err = i2c_smbus_read_byte_data(client, TSL2550_POWER_UP); + if (err < 0) + return err; + if (err != TSL2550_POWER_UP) + return -ENODEV; + data->power_state = 1; + + /* Set the default operating mode */ + err = i2c_smbus_write_byte(client, + TSL2550_MODE_RANGE[data->operating_mode]); + if (err < 0) + return err; + + return 0; +} + +/* + * I2C init/probing/exit functions + */ + +static struct i2c_driver tsl2550_driver; +static int tsl2550_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = client->adapter; + struct tsl2550_data *data; + int *opmode, err = 0; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE + | I2C_FUNC_SMBUS_READ_BYTE_DATA)) { + err = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct tsl2550_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + data->client = client; + i2c_set_clientdata(client, data); + + /* Check platform data */ + opmode = client->dev.platform_data; + if (opmode) { + if (*opmode < 0 || *opmode > 1) { + dev_err(&client->dev, "invalid operating_mode (%d)\n", + *opmode); + err = -EINVAL; + goto exit_kfree; + } + data->operating_mode = *opmode; + } else + data->operating_mode = 0; /* default mode is standard */ + dev_info(&client->dev, "%s operating mode\n", + data->operating_mode ? "extended" : "standard"); + + mutex_init(&data->update_lock); + + /* Initialize the TSL2550 chip */ + err = tsl2550_init_client(client); + if (err) + goto exit_kfree; + + /* Register sysfs hooks */ + err = sysfs_create_group(&client->dev.kobj, &tsl2550_attr_group); + if (err) + goto exit_kfree; + + dev_info(&client->dev, "support ver. %s enabled\n", DRIVER_VERSION); + + return 0; + +exit_kfree: + kfree(data); +exit: + return err; +} + +static int tsl2550_remove(struct i2c_client *client) +{ + sysfs_remove_group(&client->dev.kobj, &tsl2550_attr_group); + + /* Power down the device */ + tsl2550_set_power_state(client, 0); + + kfree(i2c_get_clientdata(client)); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP + +static int tsl2550_suspend(struct device *dev) +{ + return tsl2550_set_power_state(to_i2c_client(dev), 0); +} + +static int tsl2550_resume(struct device *dev) +{ + return tsl2550_set_power_state(to_i2c_client(dev), 1); +} + +static SIMPLE_DEV_PM_OPS(tsl2550_pm_ops, tsl2550_suspend, tsl2550_resume); +#define TSL2550_PM_OPS (&tsl2550_pm_ops) + +#else + +#define TSL2550_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ + +static const struct i2c_device_id tsl2550_id[] = { + { "tsl2550", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, tsl2550_id); + +static const struct of_device_id tsl2550_of_match[] = { + { .compatible = "taos,tsl2550" }, + { } +}; +MODULE_DEVICE_TABLE(of, tsl2550_of_match); + +static struct i2c_driver tsl2550_driver = { + .driver = { + .name = TSL2550_DRV_NAME, + .of_match_table = tsl2550_of_match, + .pm = TSL2550_PM_OPS, + }, + .probe = tsl2550_probe, + .remove = tsl2550_remove, + .id_table = tsl2550_id, +}; + +module_i2c_driver(tsl2550_driver); + +MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); +MODULE_DESCRIPTION("TSL2550 ambient light sensor driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRIVER_VERSION); diff --git a/drivers/misc/uacce/Kconfig b/drivers/misc/uacce/Kconfig new file mode 100644 index 000000000..5e39b6083 --- /dev/null +++ b/drivers/misc/uacce/Kconfig @@ -0,0 +1,13 @@ +config UACCE + tristate "Accelerator Framework for User Land" + depends on IOMMU_API + help + UACCE provides 
interface for the user process to access the hardware + without interaction with the kernel space in data path. + + The user-space interface is described in + include/uapi/misc/uacce/uacce.h + + See Documentation/misc-devices/uacce.rst for more details. + + If you don't know what to do here, say N. diff --git a/drivers/misc/uacce/Makefile b/drivers/misc/uacce/Makefile new file mode 100644 index 000000000..5b4374e8b --- /dev/null +++ b/drivers/misc/uacce/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +obj-$(CONFIG_UACCE) += uacce.o diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c new file mode 100644 index 000000000..95e56eb2c --- /dev/null +++ b/drivers/misc/uacce/uacce.c @@ -0,0 +1,578 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/compat.h> +#include <linux/dma-mapping.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/uacce.h> + +static struct class *uacce_class; +static dev_t uacce_devt; +static DEFINE_XARRAY_ALLOC(uacce_xa); + +/* + * If the parent driver or the device disappears, the queue state is invalid and + * ops are not usable anymore. + */ +static bool uacce_queue_is_valid(struct uacce_queue *q) +{ + return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED; +} + +static int uacce_start_queue(struct uacce_queue *q) +{ + int ret; + + if (q->state != UACCE_Q_INIT) + return -EINVAL; + + if (q->uacce->ops->start_queue) { + ret = q->uacce->ops->start_queue(q); + if (ret < 0) + return ret; + } + + q->state = UACCE_Q_STARTED; + return 0; +} + +static int uacce_put_queue(struct uacce_queue *q) +{ + struct uacce_device *uacce = q->uacce; + + if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue) + uacce->ops->stop_queue(q); + + if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) && + uacce->ops->put_queue) + uacce->ops->put_queue(q); + + q->state = UACCE_Q_ZOMBIE; + + return 0; +} + +static long uacce_fops_unl_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + struct uacce_queue *q = filep->private_data; + struct uacce_device *uacce = q->uacce; + long ret = -ENXIO; + + /* + * uacce->ops->ioctl() may take the mmap_lock when copying arg to/from + * user. Avoid a circular lock dependency with uacce_fops_mmap(), which + * gets called with mmap_lock held, by taking uacce->mutex instead of + * q->mutex. Doing this in uacce_fops_mmap() is not possible because + * uacce_fops_open() calls iommu_sva_bind_device(), which takes + * mmap_lock, while holding uacce->mutex. 
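+	 * The resulting lock order is thus:
+	 * uacce->mutex -> mmap_lock -> q->mutex.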
+ */ + mutex_lock(&uacce->mutex); + if (!uacce_queue_is_valid(q)) + goto out_unlock; + + switch (cmd) { + case UACCE_CMD_START_Q: + ret = uacce_start_queue(q); + break; + case UACCE_CMD_PUT_Q: + ret = uacce_put_queue(q); + break; + default: + if (uacce->ops->ioctl) + ret = uacce->ops->ioctl(q, cmd, arg); + else + ret = -EINVAL; + } +out_unlock: + mutex_unlock(&uacce->mutex); + return ret; +} + +#ifdef CONFIG_COMPAT +static long uacce_fops_compat_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + arg = (unsigned long)compat_ptr(arg); + + return uacce_fops_unl_ioctl(filep, cmd, arg); +} +#endif + +static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q) +{ + u32 pasid; + struct iommu_sva *handle; + + if (!(uacce->flags & UACCE_DEV_SVA)) + return 0; + + handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + pasid = iommu_sva_get_pasid(handle); + if (pasid == IOMMU_PASID_INVALID) { + iommu_sva_unbind_device(handle); + return -ENODEV; + } + + q->handle = handle; + q->pasid = pasid; + return 0; +} + +static void uacce_unbind_queue(struct uacce_queue *q) +{ + if (!q->handle) + return; + iommu_sva_unbind_device(q->handle); + q->handle = NULL; +} + +static int uacce_fops_open(struct inode *inode, struct file *filep) +{ + struct uacce_device *uacce; + struct uacce_queue *q; + int ret = 0; + + uacce = xa_load(&uacce_xa, iminor(inode)); + if (!uacce) + return -ENODEV; + + q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL); + if (!q) + return -ENOMEM; + + mutex_lock(&uacce->mutex); + + if (!uacce->parent) { + ret = -EINVAL; + goto out_with_mem; + } + + ret = uacce_bind_queue(uacce, q); + if (ret) + goto out_with_mem; + + q->uacce = uacce; + + if (uacce->ops->get_queue) { + ret = uacce->ops->get_queue(uacce, q->pasid, q); + if (ret < 0) + goto out_with_bond; + } + + init_waitqueue_head(&q->wait); + filep->private_data = q; + uacce->inode = inode; + q->state = UACCE_Q_INIT; + mutex_init(&q->mutex); + list_add(&q->list, &uacce->queues); + mutex_unlock(&uacce->mutex); + + return 0; + +out_with_bond: + uacce_unbind_queue(q); +out_with_mem: + kfree(q); + mutex_unlock(&uacce->mutex); + return ret; +} + +static int uacce_fops_release(struct inode *inode, struct file *filep) +{ + struct uacce_queue *q = filep->private_data; + struct uacce_device *uacce = q->uacce; + + mutex_lock(&uacce->mutex); + uacce_put_queue(q); + uacce_unbind_queue(q); + list_del(&q->list); + mutex_unlock(&uacce->mutex); + kfree(q); + + return 0; +} + +static void uacce_vma_close(struct vm_area_struct *vma) +{ + struct uacce_queue *q = vma->vm_private_data; + struct uacce_qfile_region *qfr = NULL; + + if (vma->vm_pgoff < UACCE_MAX_REGION) + qfr = q->qfrs[vma->vm_pgoff]; + + kfree(qfr); +} + +static const struct vm_operations_struct uacce_vm_ops = { + .close = uacce_vma_close, +}; + +static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma) +{ + struct uacce_queue *q = filep->private_data; + struct uacce_device *uacce = q->uacce; + struct uacce_qfile_region *qfr; + enum uacce_qfrt type = UACCE_MAX_REGION; + int ret = 0; + + if (vma->vm_pgoff < UACCE_MAX_REGION) + type = vma->vm_pgoff; + else + return -EINVAL; + + qfr = kzalloc(sizeof(*qfr), GFP_KERNEL); + if (!qfr) + return -ENOMEM; + + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK; + vma->vm_ops = &uacce_vm_ops; + vma->vm_private_data = q; + qfr->type = type; + + mutex_lock(&q->mutex); + if (!uacce_queue_is_valid(q)) { + ret = -ENXIO; + goto 
out_with_lock; + } + + if (q->qfrs[type]) { + ret = -EEXIST; + goto out_with_lock; + } + + switch (type) { + case UACCE_QFRT_MMIO: + if (!uacce->ops->mmap) { + ret = -EINVAL; + goto out_with_lock; + } + + ret = uacce->ops->mmap(q, vma, qfr); + if (ret) + goto out_with_lock; + + break; + + case UACCE_QFRT_DUS: + if (!uacce->ops->mmap) { + ret = -EINVAL; + goto out_with_lock; + } + + ret = uacce->ops->mmap(q, vma, qfr); + if (ret) + goto out_with_lock; + break; + + default: + ret = -EINVAL; + goto out_with_lock; + } + + q->qfrs[type] = qfr; + mutex_unlock(&q->mutex); + + return ret; + +out_with_lock: + mutex_unlock(&q->mutex); + kfree(qfr); + return ret; +} + +static __poll_t uacce_fops_poll(struct file *file, poll_table *wait) +{ + struct uacce_queue *q = file->private_data; + struct uacce_device *uacce = q->uacce; + __poll_t ret = 0; + + mutex_lock(&q->mutex); + if (!uacce_queue_is_valid(q)) + goto out_unlock; + + poll_wait(file, &q->wait, wait); + + if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q)) + ret = EPOLLIN | EPOLLRDNORM; + +out_unlock: + mutex_unlock(&q->mutex); + return ret; +} + +static const struct file_operations uacce_fops = { + .owner = THIS_MODULE, + .open = uacce_fops_open, + .release = uacce_fops_release, + .unlocked_ioctl = uacce_fops_unl_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = uacce_fops_compat_ioctl, +#endif + .mmap = uacce_fops_mmap, + .poll = uacce_fops_poll, +}; + +#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev) + +static ssize_t api_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce_device *uacce = to_uacce_device(dev); + + return sprintf(buf, "%s\n", uacce->api_ver); +} + +static ssize_t flags_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce_device *uacce = to_uacce_device(dev); + + return sprintf(buf, "%u\n", uacce->flags); +} + +static ssize_t available_instances_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct uacce_device *uacce = to_uacce_device(dev); + + if (!uacce->ops->get_available_instances) + return -ENODEV; + + return sprintf(buf, "%d\n", + uacce->ops->get_available_instances(uacce)); +} + +static ssize_t algorithms_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce_device *uacce = to_uacce_device(dev); + + return sprintf(buf, "%s\n", uacce->algs); +} + +static ssize_t region_mmio_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce_device *uacce = to_uacce_device(dev); + + return sprintf(buf, "%lu\n", + uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT); +} + +static ssize_t region_dus_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce_device *uacce = to_uacce_device(dev); + + return sprintf(buf, "%lu\n", + uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT); +} + +static DEVICE_ATTR_RO(api); +static DEVICE_ATTR_RO(flags); +static DEVICE_ATTR_RO(available_instances); +static DEVICE_ATTR_RO(algorithms); +static DEVICE_ATTR_RO(region_mmio_size); +static DEVICE_ATTR_RO(region_dus_size); + +static struct attribute *uacce_dev_attrs[] = { + &dev_attr_api.attr, + &dev_attr_flags.attr, + &dev_attr_available_instances.attr, + &dev_attr_algorithms.attr, + &dev_attr_region_mmio_size.attr, + &dev_attr_region_dus_size.attr, + NULL, +}; + +static umode_t uacce_dev_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct uacce_device *uacce = 
+
+static umode_t uacce_dev_is_visible(struct kobject *kobj,
+				    struct attribute *attr, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct uacce_device *uacce = to_uacce_device(dev);
+
+	if (((attr == &dev_attr_region_mmio_size.attr) &&
+	    (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
+	    ((attr == &dev_attr_region_dus_size.attr) &&
+	    (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
+		return 0;
+
+	return attr->mode;
+}
+
+static struct attribute_group uacce_dev_group = {
+	.is_visible	= uacce_dev_is_visible,
+	.attrs		= uacce_dev_attrs,
+};
+
+__ATTRIBUTE_GROUPS(uacce_dev);
+
+static void uacce_release(struct device *dev)
+{
+	struct uacce_device *uacce = to_uacce_device(dev);
+
+	kfree(uacce);
+}
+
+/**
+ * uacce_alloc() - alloc an accelerator
+ * @parent: pointer of uacce parent device
+ * @interface: pointer of uacce_interface for register
+ *
+ * Return: a uacce pointer on success, or an ERR_PTR on failure. The caller
+ * must check the negotiated uacce->flags in the returned device.
+ */
+struct uacce_device *uacce_alloc(struct device *parent,
+				 struct uacce_interface *interface)
+{
+	unsigned int flags = interface->flags;
+	struct uacce_device *uacce;
+	int ret;
+
+	uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
+	if (!uacce)
+		return ERR_PTR(-ENOMEM);
+
+	if (flags & UACCE_DEV_SVA) {
+		ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
+		if (ret)
+			flags &= ~UACCE_DEV_SVA;
+	}
+
+	uacce->parent = parent;
+	uacce->flags = flags;
+	uacce->ops = interface->ops;
+
+	ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
+		       GFP_KERNEL);
+	if (ret < 0)
+		goto err_with_uacce;
+
+	INIT_LIST_HEAD(&uacce->queues);
+	mutex_init(&uacce->mutex);
+	device_initialize(&uacce->dev);
+	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
+	uacce->dev.class = uacce_class;
+	uacce->dev.groups = uacce_dev_groups;
+	uacce->dev.parent = uacce->parent;
+	uacce->dev.release = uacce_release;
+	dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);
+
+	return uacce;
+
+err_with_uacce:
+	if (flags & UACCE_DEV_SVA)
+		iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
+	kfree(uacce);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(uacce_alloc);
+
+/**
+ * uacce_register() - add the accelerator to cdev and export to user space
+ * @uacce: The initialized uacce device
+ *
+ * Return: 0 if registration succeeded, or an error code.
+ */
+int uacce_register(struct uacce_device *uacce)
+{
+	if (!uacce)
+		return -ENODEV;
+
+	uacce->cdev = cdev_alloc();
+	if (!uacce->cdev)
+		return -ENOMEM;
+
+	uacce->cdev->ops = &uacce_fops;
+	uacce->cdev->owner = THIS_MODULE;
+
+	return cdev_device_add(uacce->cdev, &uacce->dev);
+}
+EXPORT_SYMBOL_GPL(uacce_register);
+
+/**
+ * uacce_remove() - remove the accelerator
+ * @uacce: the accelerator to remove
+ */
+void uacce_remove(struct uacce_device *uacce)
+{
+	struct uacce_queue *q, *next_q;
+
+	if (!uacce)
+		return;
+	/*
+	 * Unmap all remaining mappings from user space, so that user space
+	 * cannot touch the mmapped area once the parent device is removed.
+	 */
+	if (uacce->inode)
+		unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
+
+	/*
+	 * uacce_fops_open() may be running concurrently, even after we remove
+	 * the cdev. Holding uacce->mutex ensures that open() does not obtain a
+	 * removed uacce device.
+	 */
+	mutex_lock(&uacce->mutex);
+	/* ensure no open queue remains */
+	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
+		/*
+		 * Taking q->mutex ensures that fops do not use the defunct
+		 * uacce->ops after the queue is disabled.
+ */ + mutex_lock(&q->mutex); + uacce_put_queue(q); + mutex_unlock(&q->mutex); + uacce_unbind_queue(q); + } + + /* disable sva now since no opened queues */ + if (uacce->flags & UACCE_DEV_SVA) + iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA); + + if (uacce->cdev) + cdev_device_del(uacce->cdev, &uacce->dev); + xa_erase(&uacce_xa, uacce->dev_id); + /* + * uacce exists as long as there are open fds, but ops will be freed + * now. Ensure that bugs cause NULL deref rather than use-after-free. + */ + uacce->ops = NULL; + uacce->parent = NULL; + mutex_unlock(&uacce->mutex); + put_device(&uacce->dev); +} +EXPORT_SYMBOL_GPL(uacce_remove); + +static int __init uacce_init(void) +{ + int ret; + + uacce_class = class_create(THIS_MODULE, UACCE_NAME); + if (IS_ERR(uacce_class)) + return PTR_ERR(uacce_class); + + ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME); + if (ret) + class_destroy(uacce_class); + + return ret; +} + +static __exit void uacce_exit(void) +{ + unregister_chrdev_region(uacce_devt, MINORMASK); + class_destroy(uacce_class); +} + +subsys_initcall(uacce_init); +module_exit(uacce_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Hisilicon Tech. Co., Ltd."); +MODULE_DESCRIPTION("Accelerator interface for Userland applications"); diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c new file mode 100644 index 000000000..b837e7eba --- /dev/null +++ b/drivers/misc/vmw_balloon.c @@ -0,0 +1,1997 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * VMware Balloon driver. + * + * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved. + * + * This is VMware physical memory management driver for Linux. The driver + * acts like a "balloon" that can be inflated to reclaim physical pages by + * reserving them in the guest and invalidating them in the monitor, + * freeing up the underlying machine pages so they can be allocated to + * other guests. The balloon can also be deflated to allow the guest to + * use more physical memory. Higher level policies can control the sizes + * of balloons in VMs in order to manage physical memory resources. + */ + +//#define DEBUG +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/types.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/workqueue.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/rwsem.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/mount.h> +#include <linux/pseudo_fs.h> +#include <linux/balloon_compaction.h> +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <asm/hypervisor.h> + +MODULE_AUTHOR("VMware, Inc."); +MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver"); +MODULE_ALIAS("dmi:*:svnVMware*:*"); +MODULE_ALIAS("vmware_vmmemctl"); +MODULE_LICENSE("GPL"); + +static bool __read_mostly vmwballoon_shrinker_enable; +module_param(vmwballoon_shrinker_enable, bool, 0444); +MODULE_PARM_DESC(vmwballoon_shrinker_enable, + "Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance."); + +/* Delay in seconds after shrink before inflation. */ +#define VMBALLOON_SHRINK_DELAY (5) + +/* Maximum number of refused pages we accumulate during inflation cycle */ +#define VMW_BALLOON_MAX_REFUSED 16 + +/* Magic number for the balloon mount-point */ +#define BALLOON_VMW_MAGIC 0x0ba11007 + +/* + * Hypervisor communication port definitions. 
+ */
+#define VMW_BALLOON_HV_PORT		0x5670
+#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
+#define VMW_BALLOON_GUEST_ID		1	/* Linux */
+
+enum vmwballoon_capabilities {
+	/*
+	 * Bit 0 is reserved and not associated to any capability.
+	 */
+	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
+	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
+	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
+	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
+	VMW_BALLOON_64_BIT_TARGET		= (1 << 5)
+};
+
+#define VMW_BALLOON_CAPABILITIES_COMMON	(VMW_BALLOON_BASIC_CMDS \
+					| VMW_BALLOON_BATCHED_CMDS \
+					| VMW_BALLOON_BATCHED_2M_CMDS \
+					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
+
+#define VMW_BALLOON_2M_ORDER		(PMD_SHIFT - PAGE_SHIFT)
+
+/*
+ * 64-bit targets are only supported on 64-bit kernels.
+ */
+#ifdef CONFIG_64BIT
+#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_CAPABILITIES_COMMON \
+					| VMW_BALLOON_64_BIT_TARGET)
+#else
+#define VMW_BALLOON_CAPABILITIES	VMW_BALLOON_CAPABILITIES_COMMON
+#endif
+
+enum vmballoon_page_size_type {
+	VMW_BALLOON_4K_PAGE,
+	VMW_BALLOON_2M_PAGE,
+	VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
+};
+
+#define VMW_BALLOON_NUM_PAGE_SIZES	(VMW_BALLOON_LAST_SIZE + 1)
+
+static const char * const vmballoon_page_size_names[] = {
+	[VMW_BALLOON_4K_PAGE]			= "4k",
+	[VMW_BALLOON_2M_PAGE]			= "2M"
+};
+
+enum vmballoon_op {
+	VMW_BALLOON_INFLATE,
+	VMW_BALLOON_DEFLATE
+};
+
+enum vmballoon_op_stat_type {
+	VMW_BALLOON_OP_STAT,
+	VMW_BALLOON_OP_FAIL_STAT
+};
+
+#define VMW_BALLOON_OP_STAT_TYPES	(VMW_BALLOON_OP_FAIL_STAT + 1)
+
+/**
+ * enum vmballoon_cmd_type - backdoor commands.
+ *
+ * Availability of the commands is as follows:
+ *
+ * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
+ * %VMW_BALLOON_CMD_GUEST_ID are always available.
+ *
+ * If the host reports that %VMW_BALLOON_BASIC_CMDS are supported then the
+ * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
+ *
+ * If the host reports that %VMW_BALLOON_BATCHED_CMDS are supported then the
+ * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
+ * are available.
+ *
+ * If the host reports that %VMW_BALLOON_BATCHED_2M_CMDS are supported then
+ * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
+ * are supported.
+ *
+ * If the host reports that %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported
+ * then the %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is supported.
+ *
+ * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
+ * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
+ * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
+ * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
+ *			    to be deflated from the balloon.
+ * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
+ *			      runs in the VM.
+ * @VMW_BALLOON_CMD_BATCHED_LOCK: Informs the hypervisor about a batch of
+ *				  ballooned pages (up to 512).
+ * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Informs the hypervisor about a batch of
+ *				    pages that are about to be deflated from
+ *				    the balloon (up to 512).
+ * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
+ *				     for 2MB pages.
+ * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
+ *					@VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
+ *					pages.
+ * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
+ *					that would be invoked when the balloon
+ *					size changes.
+ * @VMW_BALLOON_CMD_LAST: Value of the last command.
+ */ +enum vmballoon_cmd_type { + VMW_BALLOON_CMD_START, + VMW_BALLOON_CMD_GET_TARGET, + VMW_BALLOON_CMD_LOCK, + VMW_BALLOON_CMD_UNLOCK, + VMW_BALLOON_CMD_GUEST_ID, + /* No command 5 */ + VMW_BALLOON_CMD_BATCHED_LOCK = 6, + VMW_BALLOON_CMD_BATCHED_UNLOCK, + VMW_BALLOON_CMD_BATCHED_2M_LOCK, + VMW_BALLOON_CMD_BATCHED_2M_UNLOCK, + VMW_BALLOON_CMD_VMCI_DOORBELL_SET, + VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET, +}; + +#define VMW_BALLOON_CMD_NUM (VMW_BALLOON_CMD_LAST + 1) + +enum vmballoon_error_codes { + VMW_BALLOON_SUCCESS, + VMW_BALLOON_ERROR_CMD_INVALID, + VMW_BALLOON_ERROR_PPN_INVALID, + VMW_BALLOON_ERROR_PPN_LOCKED, + VMW_BALLOON_ERROR_PPN_UNLOCKED, + VMW_BALLOON_ERROR_PPN_PINNED, + VMW_BALLOON_ERROR_PPN_NOTNEEDED, + VMW_BALLOON_ERROR_RESET, + VMW_BALLOON_ERROR_BUSY +}; + +#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000) + +#define VMW_BALLOON_CMD_WITH_TARGET_MASK \ + ((1UL << VMW_BALLOON_CMD_GET_TARGET) | \ + (1UL << VMW_BALLOON_CMD_LOCK) | \ + (1UL << VMW_BALLOON_CMD_UNLOCK) | \ + (1UL << VMW_BALLOON_CMD_BATCHED_LOCK) | \ + (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK) | \ + (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK) | \ + (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK)) + +static const char * const vmballoon_cmd_names[] = { + [VMW_BALLOON_CMD_START] = "start", + [VMW_BALLOON_CMD_GET_TARGET] = "target", + [VMW_BALLOON_CMD_LOCK] = "lock", + [VMW_BALLOON_CMD_UNLOCK] = "unlock", + [VMW_BALLOON_CMD_GUEST_ID] = "guestType", + [VMW_BALLOON_CMD_BATCHED_LOCK] = "batchLock", + [VMW_BALLOON_CMD_BATCHED_UNLOCK] = "batchUnlock", + [VMW_BALLOON_CMD_BATCHED_2M_LOCK] = "2m-lock", + [VMW_BALLOON_CMD_BATCHED_2M_UNLOCK] = "2m-unlock", + [VMW_BALLOON_CMD_VMCI_DOORBELL_SET] = "doorbellSet" +}; + +enum vmballoon_stat_page { + VMW_BALLOON_PAGE_STAT_ALLOC, + VMW_BALLOON_PAGE_STAT_ALLOC_FAIL, + VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC, + VMW_BALLOON_PAGE_STAT_REFUSED_FREE, + VMW_BALLOON_PAGE_STAT_FREE, + VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE +}; + +#define VMW_BALLOON_PAGE_STAT_NUM (VMW_BALLOON_PAGE_STAT_LAST + 1) + +enum vmballoon_stat_general { + VMW_BALLOON_STAT_TIMER, + VMW_BALLOON_STAT_DOORBELL, + VMW_BALLOON_STAT_RESET, + VMW_BALLOON_STAT_SHRINK, + VMW_BALLOON_STAT_SHRINK_FREE, + VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE +}; + +#define VMW_BALLOON_STAT_NUM (VMW_BALLOON_STAT_LAST + 1) + +static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching); +static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled); + +struct vmballoon_ctl { + struct list_head pages; + struct list_head refused_pages; + struct list_head prealloc_pages; + unsigned int n_refused_pages; + unsigned int n_pages; + enum vmballoon_page_size_type page_size; + enum vmballoon_op op; +}; + +/** + * struct vmballoon_batch_entry - a batch entry for lock or unlock. + * + * @status: the status of the operation, which is written by the hypervisor. + * @reserved: reserved for future use. Must be set to zero. + * @pfn: the physical frame number of the page to be locked or unlocked. + */ +struct vmballoon_batch_entry { + u64 status : 5; + u64 reserved : PAGE_SHIFT - 5; + u64 pfn : 52; +} __packed; + +struct vmballoon { + /** + * @max_page_size: maximum supported page size for ballooning. + * + * Protected by @conf_sem + */ + enum vmballoon_page_size_type max_page_size; + + /** + * @size: balloon actual size in basic page size (frames). + * + * While we currently do not support size which is bigger than 32-bit, + * in preparation for future support, use 64-bits. 
+	 */
+	atomic64_t size;
+
+	/**
+	 * @target: balloon target size in basic page size (frames).
+	 *
+	 * We do not protect the target under the assumption that setting the
+	 * value is always done through a single write. If this assumption ever
+	 * breaks, we would have to use X_ONCE for accesses, and suffer the less
+	 * optimized code. Although we may read a stale target value if multiple
+	 * accesses happen at once, the performance impact should be minor.
+	 */
+	unsigned long target;
+
+	/**
+	 * @reset_required: reset flag
+	 *
+	 * Setting this flag may introduce races, but the code is expected to
+	 * handle them gracefully. In the worst case, another operation will
+	 * fail as reset did not take place. Clearing the flag is done while
+	 * holding @conf_sem for write.
+	 */
+	bool reset_required;
+
+	/**
+	 * @capabilities: hypervisor balloon capabilities.
+	 *
+	 * Protected by @conf_sem.
+	 */
+	unsigned long capabilities;
+
+	/**
+	 * @batch_page: pointer to communication batch page.
+	 *
+	 * When batching is used, batch_page points to a page, which holds up to
+	 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
+	 */
+	struct vmballoon_batch_entry *batch_page;
+
+	/**
+	 * @batch_max_pages: maximum pages that can be locked/unlocked.
+	 *
+	 * Indicates the number of pages that the hypervisor can lock or unlock
+	 * at once, according to whether batching is enabled. If batching is
+	 * disabled, only a single page can be locked/unlocked on each
+	 * operation.
+	 *
+	 * Protected by @conf_sem.
+	 */
+	unsigned int batch_max_pages;
+
+	/**
+	 * @page: page to be locked/unlocked by the hypervisor
+	 *
+	 * @page is only used when batching is disabled and a single page is
+	 * reclaimed on each iteration.
+	 *
+	 * Protected by @comm_lock.
+	 */
+	struct page *page;
+
+	/**
+	 * @shrink_timeout: timeout until the next inflation.
+	 *
+	 * After a shrink event, indicates the time in jiffies after which
+	 * inflation is allowed again. Can be written concurrently with reads,
+	 * so must use READ_ONCE/WRITE_ONCE when accessing.
+	 */
+	unsigned long shrink_timeout;
+
+	/* statistics */
+	struct vmballoon_stats *stats;
+
+#ifdef CONFIG_DEBUG_FS
+	/* debugfs file exporting statistics */
+	struct dentry *dbg_entry;
+#endif
+
+	/**
+	 * @b_dev_info: balloon device information descriptor.
+	 */
+	struct balloon_dev_info b_dev_info;
+
+	struct delayed_work dwork;
+
+	/**
+	 * @huge_pages: list of the inflated 2MB pages.
+	 *
+	 * Protected by @b_dev_info.pages_lock.
+	 */
+	struct list_head huge_pages;
+
+	/**
+	 * @vmci_doorbell: doorbell handle used for size-change notifications.
+	 *
+	 * Protected by @conf_sem.
+	 */
+	struct vmci_handle vmci_doorbell;
+
+	/**
+	 * @conf_sem: semaphore to protect the configuration and the statistics.
+	 */
+	struct rw_semaphore conf_sem;
+
+	/**
+	 * @comm_lock: lock to protect the communication with the host.
+	 *
+	 * Lock ordering: @conf_sem -> @comm_lock.
+	 */
+	spinlock_t comm_lock;
+
+	/**
+	 * @shrinker: shrinker interface that is used to avoid over-inflation.
+	 */
+	struct shrinker shrinker;
+
+	/**
+	 * @shrinker_registered: whether the shrinker was registered.
+	 *
+	 * The shrinker interface does not gracefully handle the removal of a
+	 * shrinker that was never registered. This indication allows us to
+	 * simplify the unregistration process.
+ */ + bool shrinker_registered; +}; + +static struct vmballoon balloon; + +struct vmballoon_stats { + /* timer / doorbell operations */ + atomic64_t general_stat[VMW_BALLOON_STAT_NUM]; + + /* allocation statistics for huge and small pages */ + atomic64_t + page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES]; + + /* Monitor operations: total operations, and failures */ + atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES]; +}; + +static inline bool is_vmballoon_stats_on(void) +{ + return IS_ENABLED(CONFIG_DEBUG_FS) && + static_branch_unlikely(&balloon_stat_enabled); +} + +static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op, + enum vmballoon_op_stat_type type) +{ + if (is_vmballoon_stats_on()) + atomic64_inc(&b->stats->ops[op][type]); +} + +static inline void vmballoon_stats_gen_inc(struct vmballoon *b, + enum vmballoon_stat_general stat) +{ + if (is_vmballoon_stats_on()) + atomic64_inc(&b->stats->general_stat[stat]); +} + +static inline void vmballoon_stats_gen_add(struct vmballoon *b, + enum vmballoon_stat_general stat, + unsigned int val) +{ + if (is_vmballoon_stats_on()) + atomic64_add(val, &b->stats->general_stat[stat]); +} + +static inline void vmballoon_stats_page_inc(struct vmballoon *b, + enum vmballoon_stat_page stat, + enum vmballoon_page_size_type size) +{ + if (is_vmballoon_stats_on()) + atomic64_inc(&b->stats->page_stat[stat][size]); +} + +static inline void vmballoon_stats_page_add(struct vmballoon *b, + enum vmballoon_stat_page stat, + enum vmballoon_page_size_type size, + unsigned int val) +{ + if (is_vmballoon_stats_on()) + atomic64_add(val, &b->stats->page_stat[stat][size]); +} + +static inline unsigned long +__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1, + unsigned long arg2, unsigned long *result) +{ + unsigned long status, dummy1, dummy2, dummy3, local_result; + + vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT); + + asm volatile ("inl %%dx" : + "=a"(status), + "=c"(dummy1), + "=d"(dummy2), + "=b"(local_result), + "=S"(dummy3) : + "0"(VMW_BALLOON_HV_MAGIC), + "1"(cmd), + "2"(VMW_BALLOON_HV_PORT), + "3"(arg1), + "4"(arg2) : + "memory"); + + /* update the result if needed */ + if (result) + *result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 : + local_result; + + /* update target when applicable */ + if (status == VMW_BALLOON_SUCCESS && + ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK)) + WRITE_ONCE(b->target, local_result); + + if (status != VMW_BALLOON_SUCCESS && + status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) { + vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT); + pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n", + __func__, vmballoon_cmd_names[cmd], arg1, arg2, + status); + } + + /* mark reset required accordingly */ + if (status == VMW_BALLOON_ERROR_RESET) + b->reset_required = true; + + return status; +} + +static __always_inline unsigned long +vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1, + unsigned long arg2) +{ + unsigned long dummy; + + return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy); +} + +/* + * Send "start" command to the host, communicating supported version + * of the protocol. 
+ */ +static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps) +{ + unsigned long status, capabilities; + + status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0, + &capabilities); + + switch (status) { + case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES: + b->capabilities = capabilities; + break; + case VMW_BALLOON_SUCCESS: + b->capabilities = VMW_BALLOON_BASIC_CMDS; + break; + default: + return -EIO; + } + + /* + * 2MB pages are only supported with batching. If batching is for some + * reason disabled, do not use 2MB pages, since otherwise the legacy + * mechanism is used with 2MB pages, causing a failure. + */ + b->max_page_size = VMW_BALLOON_4K_PAGE; + if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) && + (b->capabilities & VMW_BALLOON_BATCHED_CMDS)) + b->max_page_size = VMW_BALLOON_2M_PAGE; + + + return 0; +} + +/** + * vmballoon_send_guest_id - communicate guest type to the host. + * + * @b: pointer to the balloon. + * + * Communicate guest type to the host so that it can adjust ballooning + * algorithm to the one most appropriate for the guest. This command + * is normally issued after sending "start" command and is part of + * standard reset sequence. + * + * Return: zero on success or appropriate error code. + */ +static int vmballoon_send_guest_id(struct vmballoon *b) +{ + unsigned long status; + + status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID, + VMW_BALLOON_GUEST_ID, 0); + + return status == VMW_BALLOON_SUCCESS ? 0 : -EIO; +} + +/** + * vmballoon_page_order() - return the order of the page + * @page_size: the size of the page. + * + * Return: the allocation order. + */ +static inline +unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size) +{ + return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0; +} + +/** + * vmballoon_page_in_frames() - returns the number of frames in a page. + * @page_size: the size of the page. + * + * Return: the number of 4k frames. + */ +static inline unsigned int +vmballoon_page_in_frames(enum vmballoon_page_size_type page_size) +{ + return 1 << vmballoon_page_order(page_size); +} + +/** + * vmballoon_mark_page_offline() - mark a page as offline + * @page: pointer for the page. + * @page_size: the size of the page. + */ +static void +vmballoon_mark_page_offline(struct page *page, + enum vmballoon_page_size_type page_size) +{ + int i; + + for (i = 0; i < vmballoon_page_in_frames(page_size); i++) + __SetPageOffline(page + i); +} + +/** + * vmballoon_mark_page_online() - mark a page as online + * @page: pointer for the page. + * @page_size: the size of the page. + */ +static void +vmballoon_mark_page_online(struct page *page, + enum vmballoon_page_size_type page_size) +{ + int i; + + for (i = 0; i < vmballoon_page_in_frames(page_size); i++) + __ClearPageOffline(page + i); +} + +/** + * vmballoon_send_get_target() - Retrieve desired balloon size from the host. + * + * @b: pointer to the balloon. + * + * Return: zero on success, EINVAL if limit does not fit in 32-bit, as required + * by the host-guest protocol and EIO if an error occurred in communicating with + * the host. + */ +static int vmballoon_send_get_target(struct vmballoon *b) +{ + unsigned long status; + unsigned long limit; + + limit = totalram_pages(); + + /* Ensure limit fits in 32-bits if 64-bit targets are not supported */ + if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) && + limit != (u32)limit) + return -EINVAL; + + status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0); + + return status == VMW_BALLOON_SUCCESS ? 
0 : -EIO;
+}
+
+/**
+ * vmballoon_alloc_page_list - allocates a list of pages.
+ *
+ * @b: pointer to the balloon.
+ * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
+ * @req_n_pages: the number of requested pages.
+ *
+ * Tries to allocate @req_n_pages. Adds them to the list of balloon pages in
+ * @ctl.pages and updates @ctl.n_pages to reflect the number of pages.
+ *
+ * Return: zero on success or error code otherwise.
+ */
+static int vmballoon_alloc_page_list(struct vmballoon *b,
+				     struct vmballoon_ctl *ctl,
+				     unsigned int req_n_pages)
+{
+	struct page *page;
+	unsigned int i;
+
+	for (i = 0; i < req_n_pages; i++) {
+		/*
+		 * First check if we happen to have pages that were allocated
+		 * before. This happens when a 2MB page was rejected during
+		 * inflation by the hypervisor, and then split into 4KB pages.
+		 */
+		if (!list_empty(&ctl->prealloc_pages)) {
+			page = list_first_entry(&ctl->prealloc_pages,
+						struct page, lru);
+			list_del(&page->lru);
+		} else {
+			if (ctl->page_size == VMW_BALLOON_2M_PAGE)
+				page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
+					__GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
+			else
+				page = balloon_page_alloc();
+
+			vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
+						 ctl->page_size);
+		}
+
+		if (page) {
+			/* Success. Add the page to the list and continue. */
+			list_add(&page->lru, &ctl->pages);
+			continue;
+		}
+
+		/* Allocation failed. Update statistics and stop. */
+		vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
+					 ctl->page_size);
+		break;
+	}
+
+	ctl->n_pages = i;
+
+	return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
+}
+
+/**
+ * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
+ *
+ * @b: pointer for %struct vmballoon.
+ * @page: pointer for the page whose result should be handled.
+ * @page_size: size of the page.
+ * @status: status of the operation as provided by the hypervisor.
+ *
+ * Return: zero on success, or -EIO if the host refused the page.
+ */
+static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
+				       enum vmballoon_page_size_type page_size,
+				       unsigned long status)
+{
+	/* On success do nothing. The page is already on the balloon list. */
+	if (likely(status == VMW_BALLOON_SUCCESS))
+		return 0;
+
+	pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
+		 page_to_pfn(page), status,
+		 vmballoon_page_size_names[page_size]);
+
+	/* Error occurred */
+	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
+				 page_size);
+
+	return -EIO;
+}
+
+/**
+ * vmballoon_status_page - returns the status of (un)lock operation
+ *
+ * @b: pointer to the balloon.
+ * @idx: index for the page for which the operation is performed.
+ * @p: pointer to where the page struct is returned.
+ *
+ * Following a lock or unlock operation, returns the status of the operation
+ * for an individual page, and provides the page that the operation was
+ * performed on in the @p argument.
+ *
+ * Return: the status of a lock or unlock operation for an individual page.
+ */
+static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
+					   struct page **p)
+{
+	if (static_branch_likely(&vmw_balloon_batching)) {
+		/* batching mode */
+		*p = pfn_to_page(b->batch_page[idx].pfn);
+		return b->batch_page[idx].status;
+	}
+
+	/* non-batching mode */
+	*p = b->page;
+
+	/*
+	 * If a failure occurs, the indication will be provided in the status
+	 * of the entire operation, which is considered before the individual
+	 * page status. So for non-batching mode, the indication is always of
+	 * success.
+	 */
+	return VMW_BALLOON_SUCCESS;
+}
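Because the batch page is nothing but an array of these 64-bit entries, the guest/host handoff can be pictured with a tiny stand-alone program. This is an illustration only, not kernel code: it assumes 4 KiB pages (PAGE_SHIFT = 12) and re-declares the bit layout of struct vmballoon_batch_entry locally; the status value written back is an arbitrary example.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12			/* assumed 4 KiB pages */

struct batch_entry {				/* mirrors the layout above */
	uint64_t status   : 5;
	uint64_t reserved : SKETCH_PAGE_SHIFT - 5;
	uint64_t pfn      : 52;
};

int main(void)
{
	/* The guest fills in the PFN before issuing the batched command. */
	struct batch_entry e = { .pfn = 0x12345 };

	/* The hypervisor overwrites the low bits with a status code. */
	e.status = 4;	/* e.g. VMW_BALLOON_ERROR_PPN_UNLOCKED */

	printf("entry: %zu bytes, pfn=%#llx, status=%llu\n",
	       sizeof(e), (unsigned long long)e.pfn,
	       (unsigned long long)e.status);
	return 0;
}

Each entry occupies exactly eight bytes, which is what allows a single 4 KiB batch page to describe 512 pages at once.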
+
+/**
+ * vmballoon_lock_op - notifies the host about inflated/deflated pages.
+ * @b: pointer to the balloon.
+ * @num_pages: number of inflated/deflated pages.
+ * @page_size: size of the page.
+ * @op: the type of operation (lock or unlock).
+ *
+ * Notify the host about page(s) that were ballooned (or removed from the
+ * balloon) so that the host can use them without fear that the guest will
+ * need them (or, on unlock, can stop using them since the VM does). The host
+ * may reject some pages, so we need to check the return value and possibly
+ * submit a different page. The pages that are inflated/deflated are pointed
+ * to by @b->page.
+ *
+ * Return: result as provided by the hypervisor.
+ */
+static unsigned long vmballoon_lock_op(struct vmballoon *b,
+				       unsigned int num_pages,
+				       enum vmballoon_page_size_type page_size,
+				       enum vmballoon_op op)
+{
+	unsigned long cmd, pfn;
+
+	lockdep_assert_held(&b->comm_lock);
+
+	if (static_branch_likely(&vmw_balloon_batching)) {
+		if (op == VMW_BALLOON_INFLATE)
+			cmd = page_size == VMW_BALLOON_2M_PAGE ?
+				VMW_BALLOON_CMD_BATCHED_2M_LOCK :
+				VMW_BALLOON_CMD_BATCHED_LOCK;
+		else
+			cmd = page_size == VMW_BALLOON_2M_PAGE ?
+				VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
+				VMW_BALLOON_CMD_BATCHED_UNLOCK;
+
+		pfn = PHYS_PFN(virt_to_phys(b->batch_page));
+	} else {
+		cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
+						  VMW_BALLOON_CMD_UNLOCK;
+		pfn = page_to_pfn(b->page);
+
+		/* In non-batching mode, PFNs must fit in 32-bit */
+		if (unlikely(pfn != (u32)pfn))
+			return VMW_BALLOON_ERROR_PPN_INVALID;
+	}
+
+	return vmballoon_cmd(b, cmd, pfn, num_pages);
+}
+
+/**
+ * vmballoon_add_page - adds a page towards lock/unlock operation.
+ *
+ * @b: pointer to the balloon.
+ * @idx: index of the page to be ballooned in this batch.
+ * @p: pointer to the page that is about to be ballooned.
+ *
+ * Adds the page to be ballooned. Must be called while holding @comm_lock.
+ */
+static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
+			       struct page *p)
+{
+	lockdep_assert_held(&b->comm_lock);
+
+	if (static_branch_likely(&vmw_balloon_batching))
+		b->batch_page[idx] = (struct vmballoon_batch_entry)
+					{ .pfn = page_to_pfn(p) };
+	else
+		b->page = p;
+}
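The command selection in vmballoon_lock_op() above is a pure function of the operation, the page size, and whether batching is active. The following sketch restates that mapping outside the kernel, using only the command names; it is illustrative and deliberately ignores the 32-bit PFN restriction that applies in non-batching mode.

#include <stdbool.h>
#include <stdio.h>

/* (operation, page size, batching) -> backdoor command name */
static const char *pick_cmd(bool inflate, bool page_2m, bool batching)
{
	if (!batching)
		return inflate ? "VMW_BALLOON_CMD_LOCK"
			       : "VMW_BALLOON_CMD_UNLOCK";
	if (page_2m)
		return inflate ? "VMW_BALLOON_CMD_BATCHED_2M_LOCK"
			       : "VMW_BALLOON_CMD_BATCHED_2M_UNLOCK";
	return inflate ? "VMW_BALLOON_CMD_BATCHED_LOCK"
		       : "VMW_BALLOON_CMD_BATCHED_UNLOCK";
}

int main(void)
{
	printf("%s\n", pick_cmd(true, true, true));	/* 2M batched inflate */
	printf("%s\n", pick_cmd(false, false, false));	/* single-page deflate */
	return 0;
}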
+
+/**
+ * vmballoon_lock - lock or unlock a batch of pages.
+ *
+ * @b: pointer to the balloon.
+ * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
+ *
+ * Notifies the host about the ballooned pages (after inflation or deflation,
+ * according to @ctl). If the host rejects a page, it is put on the @ctl
+ * refused-pages list. These refused pages are then released when moving to
+ * the next page size.
+ *
+ * Note that we neither free any @page here nor put them back on the ballooned
+ * pages list. Instead we queue them for later processing. We do that for
+ * several reasons. First, we do not want to free the page under the lock.
+ * Second, it allows us to unify the handling of lock and unlock. In the
+ * inflate case, the caller will check if there are too many refused pages and
+ * release them. Although it is not identical to the past behavior, it should
+ * not affect performance.
+ */
+static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
+{
+	unsigned long batch_status;
+	struct page *page;
+	unsigned int i, num_pages;
+
+	num_pages = ctl->n_pages;
+	if (num_pages == 0)
+		return 0;
+
+	/* communication with the host is done under the communication lock */
+	spin_lock(&b->comm_lock);
+
+	i = 0;
+	list_for_each_entry(page, &ctl->pages, lru)
+		vmballoon_add_page(b, i++, page);
+
+	batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
+					 ctl->op);
+
+	/*
+	 * Iterate over the pages in the provided list. Since we are changing
+	 * @ctl->n_pages we are saving the original value in @num_pages and
+	 * use this value to bound the loop.
+	 */
+	for (i = 0; i < num_pages; i++) {
+		unsigned long status;
+
+		status = vmballoon_status_page(b, i, &page);
+
+		/*
+		 * Failure of the whole batch overrides the individual
+		 * operation results.
+		 */
+		if (batch_status != VMW_BALLOON_SUCCESS)
+			status = batch_status;
+
+		/* Continue if no error happened */
+		if (!vmballoon_handle_one_result(b, page, ctl->page_size,
+						 status))
+			continue;
+
+		/*
+		 * Error happened. Move the pages to the refused list and update
+		 * the pages number.
+		 */
+		list_move(&page->lru, &ctl->refused_pages);
+		ctl->n_pages--;
+		ctl->n_refused_pages++;
+	}
+
+	spin_unlock(&b->comm_lock);
+
+	return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
+}
+
+/**
+ * vmballoon_release_page_list() - Releases a page list
+ *
+ * @page_list: list of pages to release.
+ * @n_pages: pointer to the number of pages.
+ * @page_size: whether the pages in the list are 2MB (or else 4KB).
+ *
+ * Releases the list of pages and zeros the number of pages.
+ */
+static void vmballoon_release_page_list(struct list_head *page_list,
+				       int *n_pages,
+				       enum vmballoon_page_size_type page_size)
+{
+	struct page *page, *tmp;
+
+	list_for_each_entry_safe(page, tmp, page_list, lru) {
+		list_del(&page->lru);
+		__free_pages(page, vmballoon_page_order(page_size));
+	}
+
+	if (n_pages)
+		*n_pages = 0;
+}
+
+
+/*
+ * Release pages that were allocated while attempting to inflate the
+ * balloon but were refused by the host for one reason or another.
+ */
+static void vmballoon_release_refused_pages(struct vmballoon *b,
+					    struct vmballoon_ctl *ctl)
+{
+	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
+				 ctl->page_size);
+
+	vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
+				    ctl->page_size);
+}
+
+/**
+ * vmballoon_change - retrieve the required balloon change
+ *
+ * @b: pointer for the balloon.
+ *
+ * Return: the required change for the balloon size. A positive number
+ * indicates inflation, a negative number indicates a deflation.
+ */
+static int64_t vmballoon_change(struct vmballoon *b)
+{
+	int64_t size, target;
+
+	size = atomic64_read(&b->size);
+	target = READ_ONCE(b->target);
+
+	/*
+	 * We must cast first because of int sizes. Otherwise we might get
+	 * huge positives instead of negatives.
+	 */
+
+	if (b->reset_required)
+		return 0;
+
+	/* consider a 2MB slack on deflate, unless the balloon is emptied */
+	if (target < size && target != 0 &&
+	    size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
+		return 0;
+
+	/* If an out-of-memory recently occurred, inflation is disallowed. */
+	if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
+		return 0;
+
+	return target - size;
+}
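The slack rule in vmballoon_change() above is easier to see with concrete numbers. Below is a hedged stand-alone restatement of the same arithmetic; it assumes 4 KiB frames (so the 2 MB slack is 512 frames) and omits the reset and shrink-timeout checks, which need kernel state.

#include <stdint.h>
#include <stdio.h>

#define SLACK_FRAMES 512	/* 2 MB in 4 KiB frames, as in the driver */

/* Illustration of vmballoon_change(): >0 inflate, <0 deflate, 0 idle. */
static int64_t change(int64_t size, int64_t target)
{
	if (target < size && target != 0 && size - target < SLACK_FRAMES)
		return 0;	/* deflate request smaller than the slack */
	return target - size;
}

int main(void)
{
	printf("%lld\n", (long long)change(1000, 1256));	/* 256: inflate */
	printf("%lld\n", (long long)change(1256, 1000));	/* 0: within slack */
	printf("%lld\n", (long long)change(1256, 0));		/* -1256: empty it */
	return 0;
}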
+
+/**
+ * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
+ *
+ * @b: pointer to balloon.
+ * @pages: list of pages to enqueue.
+ * @n_pages: pointer to number of pages in list. The value is zeroed.
+ * @page_size: whether the pages are 2MB or 4KB pages.
+ *
+ * Enqueues the provided list of pages in the ballooned page list, clears the
+ * list and zeroes the number of pages that was provided.
+ */
+static void vmballoon_enqueue_page_list(struct vmballoon *b,
+					struct list_head *pages,
+					unsigned int *n_pages,
+					enum vmballoon_page_size_type page_size)
+{
+	unsigned long flags;
+	struct page *page;
+
+	if (page_size == VMW_BALLOON_4K_PAGE) {
+		balloon_page_list_enqueue(&b->b_dev_info, pages);
+	} else {
+		/*
+		 * Keep the huge pages in a local list which is not available
+		 * for the balloon compaction mechanism.
+		 */
+		spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
+
+		list_for_each_entry(page, pages, lru) {
+			vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
+		}
+
+		list_splice_init(pages, &b->huge_pages);
+		__count_vm_events(BALLOON_INFLATE, *n_pages *
+				  vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
+		spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
+	}
+
+	*n_pages = 0;
+}
+
+/**
+ * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
+ *
+ * @b: pointer to balloon.
+ * @pages: list to which the dequeued pages are added.
+ * @n_pages: pointer to number of pages in list. The value is zeroed.
+ * @page_size: whether the pages are 2MB or 4KB pages.
+ * @n_req_pages: the number of requested pages.
+ *
+ * Dequeues the number of requested pages from the balloon for deflation. The
+ * number of dequeued pages may be lower if not enough pages of the requested
+ * size are available.
+ */
+static void vmballoon_dequeue_page_list(struct vmballoon *b,
+					struct list_head *pages,
+					unsigned int *n_pages,
+					enum vmballoon_page_size_type page_size,
+					unsigned int n_req_pages)
+{
+	struct page *page, *tmp;
+	unsigned int i = 0;
+	unsigned long flags;
+
+	/* In the case of 4k pages, use the compaction infrastructure */
+	if (page_size == VMW_BALLOON_4K_PAGE) {
+		*n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
+						     n_req_pages);
+		return;
+	}
+
+	/* 2MB pages */
+	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
+	list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
+		vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
+
+		list_move(&page->lru, pages);
+		if (++i == n_req_pages)
+			break;
+	}
+
+	__count_vm_events(BALLOON_DEFLATE,
+			  i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
+	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
+	*n_pages = i;
+}
+
+/**
+ * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
+ *
+ * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
+ *
+ * If inflation of 2MB pages was denied by the hypervisor, it is likely to be
+ * due to one or a few 4KB pages. These 2MB pages may keep being allocated and
+ * then being refused. To prevent this case, this function splits the refused
+ * pages into 4KB pages and adds them into the @prealloc_pages list.
+ */
+static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
+{
+	struct page *page, *tmp;
+	unsigned int i, order;
+
+	order = vmballoon_page_order(ctl->page_size);
+
+	list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
+		list_del(&page->lru);
+		split_page(page, order);
+		for (i = 0; i < (1 << order); i++)
+			list_add(&page[i].lru, &ctl->prealloc_pages);
+	}
+	ctl->n_refused_pages = 0;
+}
+
+/**
+ * vmballoon_inflate() - Inflate the balloon towards its target size.
+ *
+ * @b: pointer to the balloon.
+ */ +static void vmballoon_inflate(struct vmballoon *b) +{ + int64_t to_inflate_frames; + struct vmballoon_ctl ctl = { + .pages = LIST_HEAD_INIT(ctl.pages), + .refused_pages = LIST_HEAD_INIT(ctl.refused_pages), + .prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages), + .page_size = b->max_page_size, + .op = VMW_BALLOON_INFLATE + }; + + while ((to_inflate_frames = vmballoon_change(b)) > 0) { + unsigned int to_inflate_pages, page_in_frames; + int alloc_error, lock_error = 0; + + VM_BUG_ON(!list_empty(&ctl.pages)); + VM_BUG_ON(ctl.n_pages != 0); + + page_in_frames = vmballoon_page_in_frames(ctl.page_size); + + to_inflate_pages = min_t(unsigned long, b->batch_max_pages, + DIV_ROUND_UP_ULL(to_inflate_frames, + page_in_frames)); + + /* Start by allocating */ + alloc_error = vmballoon_alloc_page_list(b, &ctl, + to_inflate_pages); + + /* Actually lock the pages by telling the hypervisor */ + lock_error = vmballoon_lock(b, &ctl); + + /* + * If an error indicates that something serious went wrong, + * stop the inflation. + */ + if (lock_error) + break; + + /* Update the balloon size */ + atomic64_add(ctl.n_pages * page_in_frames, &b->size); + + vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages, + ctl.page_size); + + /* + * If allocation failed or the number of refused pages exceeds + * the maximum allowed, move to the next page size. + */ + if (alloc_error || + ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) { + if (ctl.page_size == VMW_BALLOON_4K_PAGE) + break; + + /* + * Split the refused pages to 4k. This will also empty + * the refused pages list. + */ + vmballoon_split_refused_pages(&ctl); + ctl.page_size--; + } + + cond_resched(); + } + + /* + * Release pages that were allocated while attempting to inflate the + * balloon but were refused by the host for one reason or another, + * and update the statistics. + */ + if (ctl.n_refused_pages != 0) + vmballoon_release_refused_pages(b, &ctl); + + vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size); +} + +/** + * vmballoon_deflate() - Decrease the size of the balloon. + * + * @b: pointer to the balloon + * @n_frames: the number of frames to deflate. If zero, automatically + * calculated according to the target size. + * @coordinated: whether to coordinate with the host + * + * Decrease the size of the balloon allowing guest to use more memory. + * + * Return: The number of deflated frames (i.e., basic page size units) + */ +static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames, + bool coordinated) +{ + unsigned long deflated_frames = 0; + unsigned long tried_frames = 0; + struct vmballoon_ctl ctl = { + .pages = LIST_HEAD_INIT(ctl.pages), + .refused_pages = LIST_HEAD_INIT(ctl.refused_pages), + .page_size = VMW_BALLOON_4K_PAGE, + .op = VMW_BALLOON_DEFLATE + }; + + /* free pages to reach target */ + while (true) { + unsigned int to_deflate_pages, n_unlocked_frames; + unsigned int page_in_frames; + int64_t to_deflate_frames; + bool deflated_all; + + page_in_frames = vmballoon_page_in_frames(ctl.page_size); + + VM_BUG_ON(!list_empty(&ctl.pages)); + VM_BUG_ON(ctl.n_pages); + VM_BUG_ON(!list_empty(&ctl.refused_pages)); + VM_BUG_ON(ctl.n_refused_pages); + + /* + * If we were requested a specific number of frames, we try to + * deflate this number of frames. Otherwise, deflation is + * performed according to the target and balloon size. + */ + to_deflate_frames = n_frames ? 
n_frames - tried_frames :
+							-vmballoon_change(b);
+
+		/* break if no work to do */
+		if (to_deflate_frames <= 0)
+			break;
+
+		/*
+		 * Calculate the number of frames based on current page size,
+		 * but limit the deflated frames to a single chunk
+		 */
+		to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
+					 DIV_ROUND_UP_ULL(to_deflate_frames,
+							  page_in_frames));
+
+		/* First take the pages from the balloon pages. */
+		vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
+					    ctl.page_size, to_deflate_pages);
+
+		/*
+		 * Before pages are moved to the refused list, count their
+		 * frames as frames that we tried to deflate.
+		 */
+		tried_frames += ctl.n_pages * page_in_frames;
+
+		/*
+		 * Unlock the pages by communicating with the hypervisor if the
+		 * communication is coordinated (i.e., not pop). We ignore the
+		 * return code. Instead we check whether we managed to unlock
+		 * all the pages. If we failed, we will move to the next page
+		 * size and eventually try again later.
+		 */
+		if (coordinated)
+			vmballoon_lock(b, &ctl);
+
+		/*
+		 * Check if we deflated enough. We will move to the next page
+		 * size if we did not manage to do so. This calculation takes
+		 * place now, as once the pages are released, the number of
+		 * pages is zeroed.
+		 */
+		deflated_all = (ctl.n_pages == to_deflate_pages);
+
+		/* Update local and global counters */
+		n_unlocked_frames = ctl.n_pages * page_in_frames;
+		atomic64_sub(n_unlocked_frames, &b->size);
+		deflated_frames += n_unlocked_frames;
+
+		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
+					 ctl.page_size, ctl.n_pages);
+
+		/* free the ballooned pages */
+		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
+					    ctl.page_size);
+
+		/* Return the refused pages to the ballooned list. */
+		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
+					    &ctl.n_refused_pages,
+					    ctl.page_size);
+
+		/* If we failed to unlock all the pages, move to next size. */
+		if (!deflated_all) {
+			if (ctl.page_size == b->max_page_size)
+				break;
+			ctl.page_size++;
+		}
+
+		cond_resched();
+	}
+
+	return deflated_frames;
+}
+
+/**
+ * vmballoon_deinit_batching - disables batching mode.
+ *
+ * @b: pointer to &struct vmballoon.
+ *
+ * Disables batching by deallocating the page for communication with the
+ * hypervisor and disabling the static key to indicate that batching is off.
+ */
+static void vmballoon_deinit_batching(struct vmballoon *b)
+{
+	free_page((unsigned long)b->batch_page);
+	b->batch_page = NULL;
+	static_branch_disable(&vmw_balloon_batching);
+	b->batch_max_pages = 1;
+}
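vmballoon_init_batching() below derives the batch capacity from the page size. A quick stand-alone check of that arithmetic, assuming 4 KiB pages and the 8-byte batch entry defined earlier, reproduces the 512-page limit quoted in the command descriptions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const size_t page_size = 4096;			/* assumed 4 KiB page */
	const size_t entry_size = sizeof(uint64_t);	/* one batch entry */

	/* Mirrors b->batch_max_pages = PAGE_SIZE / sizeof(batch entry). */
	printf("entries per batch page: %zu\n", page_size / entry_size);
	return 0;
}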
+
+/**
+ * vmballoon_init_batching - enable batching mode.
+ *
+ * @b: pointer to &struct vmballoon.
+ *
+ * Enables batching by allocating a page for communication with the hypervisor
+ * and enabling the static_key to use batching.
+ *
+ * Return: zero on success or an appropriate error code.
+ */
+static int vmballoon_init_batching(struct vmballoon *b)
+{
+	struct page *page;
+
+	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!page)
+		return -ENOMEM;
+
+	b->batch_page = page_address(page);
+	b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
+
+	static_branch_enable(&vmw_balloon_batching);
+
+	return 0;
+}
+
+/*
+ * Receive notification and resize balloon
+ */
+static void vmballoon_doorbell(void *client_data)
+{
+	struct vmballoon *b = client_data;
+
+	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);
+
+	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
+}
+
+/*
+ * Clean up vmci doorbell
+ */
+static void vmballoon_vmci_cleanup(struct vmballoon *b)
+{
+	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
+		      VMCI_INVALID_ID, VMCI_INVALID_ID);
+
+	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
+		vmci_doorbell_destroy(b->vmci_doorbell);
+		b->vmci_doorbell = VMCI_INVALID_HANDLE;
+	}
+}
+
+/**
+ * vmballoon_vmci_init - Initialize vmci doorbell.
+ *
+ * @b: pointer to the balloon.
+ *
+ * Return: zero on success or when the wakeup command is not supported; an
+ * error code otherwise.
+ *
+ * Initialize vmci doorbell, to get notified as soon as balloon changes.
+ */
+static int vmballoon_vmci_init(struct vmballoon *b)
+{
+	unsigned long error;
+
+	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
+		return 0;
+
+	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
+				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
+				     vmballoon_doorbell, b);
+
+	if (error != VMCI_SUCCESS)
+		goto fail;
+
+	error =	__vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
+				b->vmci_doorbell.context,
+				b->vmci_doorbell.resource, NULL);
+
+	if (error != VMW_BALLOON_SUCCESS)
+		goto fail;
+
+	return 0;
+fail:
+	vmballoon_vmci_cleanup(b);
+	return -EIO;
+}
+
+/**
+ * vmballoon_pop - Quickly release all pages allocated for the balloon.
+ *
+ * @b: pointer to the balloon.
+ *
+ * This function is called when the host decides to "reset" the balloon for
+ * one reason or another. Unlike normal "deflate" we do not (shall not) notify
+ * the host of the pages being released.
+ */
+static void vmballoon_pop(struct vmballoon *b)
+{
+	unsigned long size;
+
+	while ((size = atomic64_read(&b->size)))
+		vmballoon_deflate(b, size, false);
+}
+
+/*
+ * Perform standard reset sequence by popping the balloon (in case it
+ * is not empty) and then restarting protocol. This operation normally
+ * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
+ */
+static void vmballoon_reset(struct vmballoon *b)
+{
+	int error;
+
+	down_write(&b->conf_sem);
+
+	vmballoon_vmci_cleanup(b);
+
+	/* free all pages, skipping monitor unlock */
+	vmballoon_pop(b);
+
+	if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
+		goto unlock;
+
+	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
+		if (vmballoon_init_batching(b)) {
+			/*
+			 * We failed to initialize batching, inform the monitor
+			 * about it by sending a null capability.
+			 *
+			 * The guest will retry in one second.
+ */ + vmballoon_send_start(b, 0); + goto unlock; + } + } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) { + vmballoon_deinit_batching(b); + } + + vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET); + b->reset_required = false; + + error = vmballoon_vmci_init(b); + if (error) + pr_err("failed to initialize vmci doorbell\n"); + + if (vmballoon_send_guest_id(b)) + pr_err("failed to send guest ID to the host\n"); + +unlock: + up_write(&b->conf_sem); +} + +/** + * vmballoon_work - periodic balloon worker for reset, inflation and deflation. + * + * @work: pointer to the &work_struct which is provided by the workqueue. + * + * Resets the protocol if needed, gets the new size and adjusts balloon as + * needed. Repeat in 1 sec. + */ +static void vmballoon_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct vmballoon *b = container_of(dwork, struct vmballoon, dwork); + int64_t change = 0; + + if (b->reset_required) + vmballoon_reset(b); + + down_read(&b->conf_sem); + + /* + * Update the stats while holding the semaphore to ensure that + * @stats_enabled is consistent with whether the stats are actually + * enabled + */ + vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER); + + if (!vmballoon_send_get_target(b)) + change = vmballoon_change(b); + + if (change != 0) { + pr_debug("%s - size: %llu, target %lu\n", __func__, + atomic64_read(&b->size), READ_ONCE(b->target)); + + if (change > 0) + vmballoon_inflate(b); + else /* (change < 0) */ + vmballoon_deflate(b, 0, true); + } + + up_read(&b->conf_sem); + + /* + * We are using a freezable workqueue so that balloon operations are + * stopped while the system transitions to/from sleep/hibernation. + */ + queue_delayed_work(system_freezable_wq, + dwork, round_jiffies_relative(HZ)); + +} + +/** + * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure. + * @shrinker: pointer to the balloon shrinker. + * @sc: page reclaim information. + * + * Returns: number of pages that were freed during deflation. + */ +static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker, + struct shrink_control *sc) +{ + struct vmballoon *b = &balloon; + unsigned long deflated_frames; + + pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size)); + + vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK); + + /* + * If the lock is also contended for read, we cannot easily reclaim and + * we bail out. + */ + if (!down_read_trylock(&b->conf_sem)) + return 0; + + deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true); + + vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE, + deflated_frames); + + /* + * Delay future inflation for some time to mitigate the situations in + * which balloon continuously grows and shrinks. Use WRITE_ONCE() since + * the access is asynchronous. + */ + WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY); + + up_read(&b->conf_sem); + + return deflated_frames; +} + +/** + * vmballoon_shrinker_count() - return the number of ballooned pages. + * @shrinker: pointer to the balloon shrinker. + * @sc: page reclaim information. + * + * Returns: number of 4k pages that are allocated for the balloon and can + * therefore be reclaimed under pressure. 
+ */ +static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker, + struct shrink_control *sc) +{ + struct vmballoon *b = &balloon; + + return atomic64_read(&b->size); +} + +static void vmballoon_unregister_shrinker(struct vmballoon *b) +{ + if (b->shrinker_registered) + unregister_shrinker(&b->shrinker); + b->shrinker_registered = false; +} + +static int vmballoon_register_shrinker(struct vmballoon *b) +{ + int r; + + /* Do nothing if the shrinker is not enabled */ + if (!vmwballoon_shrinker_enable) + return 0; + + b->shrinker.scan_objects = vmballoon_shrinker_scan; + b->shrinker.count_objects = vmballoon_shrinker_count; + b->shrinker.seeks = DEFAULT_SEEKS; + + r = register_shrinker(&b->shrinker); + + if (r == 0) + b->shrinker_registered = true; + + return r; +} + +/* + * DEBUGFS Interface + */ +#ifdef CONFIG_DEBUG_FS + +static const char * const vmballoon_stat_page_names[] = { + [VMW_BALLOON_PAGE_STAT_ALLOC] = "alloc", + [VMW_BALLOON_PAGE_STAT_ALLOC_FAIL] = "allocFail", + [VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC] = "errAlloc", + [VMW_BALLOON_PAGE_STAT_REFUSED_FREE] = "errFree", + [VMW_BALLOON_PAGE_STAT_FREE] = "free" +}; + +static const char * const vmballoon_stat_names[] = { + [VMW_BALLOON_STAT_TIMER] = "timer", + [VMW_BALLOON_STAT_DOORBELL] = "doorbell", + [VMW_BALLOON_STAT_RESET] = "reset", + [VMW_BALLOON_STAT_SHRINK] = "shrink", + [VMW_BALLOON_STAT_SHRINK_FREE] = "shrinkFree" +}; + +static int vmballoon_enable_stats(struct vmballoon *b) +{ + int r = 0; + + down_write(&b->conf_sem); + + /* did we somehow race with another reader which enabled stats? */ + if (b->stats) + goto out; + + b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL); + + if (!b->stats) { + /* allocation failed */ + r = -ENOMEM; + goto out; + } + static_key_enable(&balloon_stat_enabled.key); +out: + up_write(&b->conf_sem); + return r; +} + +/** + * vmballoon_debug_show - shows statistics of balloon operations. + * @f: pointer to the &struct seq_file. + * @offset: ignored. + * + * Provides the statistics that can be accessed in vmmemctl in the debugfs. + * To avoid the overhead - mainly that of memory - of collecting the statistics, + * we only collect statistics after the first time the counters are read. + * + * Return: zero on success or an error code. + */ +static int vmballoon_debug_show(struct seq_file *f, void *offset) +{ + struct vmballoon *b = f->private; + int i, j; + + /* enables stats if they are disabled */ + if (!b->stats) { + int r = vmballoon_enable_stats(b); + + if (r) + return r; + } + + /* format capabilities info */ + seq_printf(f, "%-22s: %#16x\n", "balloon capabilities", + VMW_BALLOON_CAPABILITIES); + seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities); + seq_printf(f, "%-22s: %16s\n", "is resetting", + b->reset_required ? 
"y" : "n"); + + /* format size info */ + seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target)); + seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size)); + + for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) { + if (vmballoon_cmd_names[i] == NULL) + continue; + + seq_printf(f, "%-22s: %16llu (%llu failed)\n", + vmballoon_cmd_names[i], + atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]), + atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT])); + } + + for (i = 0; i < VMW_BALLOON_STAT_NUM; i++) + seq_printf(f, "%-22s: %16llu\n", + vmballoon_stat_names[i], + atomic64_read(&b->stats->general_stat[i])); + + for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) { + for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++) + seq_printf(f, "%-18s(%s): %16llu\n", + vmballoon_stat_page_names[i], + vmballoon_page_size_names[j], + atomic64_read(&b->stats->page_stat[i][j])); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(vmballoon_debug); + +static void __init vmballoon_debugfs_init(struct vmballoon *b) +{ + b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b, + &vmballoon_debug_fops); +} + +static void __exit vmballoon_debugfs_exit(struct vmballoon *b) +{ + static_key_disable(&balloon_stat_enabled.key); + debugfs_remove(b->dbg_entry); + kfree(b->stats); + b->stats = NULL; +} + +#else + +static inline void vmballoon_debugfs_init(struct vmballoon *b) +{ +} + +static inline void vmballoon_debugfs_exit(struct vmballoon *b) +{ +} + +#endif /* CONFIG_DEBUG_FS */ + + +#ifdef CONFIG_BALLOON_COMPACTION + +static int vmballoon_init_fs_context(struct fs_context *fc) +{ + return init_pseudo(fc, BALLOON_VMW_MAGIC) ? 0 : -ENOMEM; +} + +static struct file_system_type vmballoon_fs = { + .name = "balloon-vmware", + .init_fs_context = vmballoon_init_fs_context, + .kill_sb = kill_anon_super, +}; + +static struct vfsmount *vmballoon_mnt; + +/** + * vmballoon_migratepage() - migrates a balloon page. + * @b_dev_info: balloon device information descriptor. + * @newpage: the page to which @page should be migrated. + * @page: a ballooned page that should be migrated. + * @mode: migration mode, ignored. + * + * This function is really open-coded, but that is according to the interface + * that balloon_compaction provides. + * + * Return: zero on success, -EAGAIN when migration cannot be performed + * momentarily, and -EBUSY if migration failed and should be retried + * with that specific page. + */ +static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info, + struct page *newpage, struct page *page, + enum migrate_mode mode) +{ + unsigned long status, flags; + struct vmballoon *b; + int ret; + + b = container_of(b_dev_info, struct vmballoon, b_dev_info); + + /* + * If the semaphore is taken, there is ongoing configuration change + * (i.e., balloon reset), so try again. + */ + if (!down_read_trylock(&b->conf_sem)) + return -EAGAIN; + + spin_lock(&b->comm_lock); + /* + * We must start by deflating and not inflating, as otherwise the + * hypervisor may tell us that it has enough memory and the new page is + * not needed. Since the old page is isolated, we cannot use the list + * interface to unlock it, as the LRU field is used for isolation. + * Instead, we use the native interface directly. + */ + vmballoon_add_page(b, 0, page); + status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE, + VMW_BALLOON_DEFLATE); + + if (status == VMW_BALLOON_SUCCESS) + status = vmballoon_status_page(b, 0, &page); + + /* + * If a failure happened, let the migration mechanism know that it + * should not retry. 
+ */ + if (status != VMW_BALLOON_SUCCESS) { + spin_unlock(&b->comm_lock); + ret = -EBUSY; + goto out_unlock; + } + + /* + * The page is isolated, so it is safe to delete it without holding + * @pages_lock . We keep holding @comm_lock since we will need it in a + * second. + */ + balloon_page_delete(page); + + put_page(page); + + /* Inflate */ + vmballoon_add_page(b, 0, newpage); + status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE, + VMW_BALLOON_INFLATE); + + if (status == VMW_BALLOON_SUCCESS) + status = vmballoon_status_page(b, 0, &newpage); + + spin_unlock(&b->comm_lock); + + if (status != VMW_BALLOON_SUCCESS) { + /* + * A failure happened. While we can deflate the page we just + * inflated, this deflation can also encounter an error. Instead + * we will decrease the size of the balloon to reflect the + * change and report failure. + */ + atomic64_dec(&b->size); + ret = -EBUSY; + } else { + /* + * Success. Take a reference for the page, and we will add it to + * the list after acquiring the lock. + */ + get_page(newpage); + ret = MIGRATEPAGE_SUCCESS; + } + + /* Update the balloon list under the @pages_lock */ + spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); + + /* + * On inflation success, we already took a reference for the @newpage. + * If we succeed just insert it to the list and update the statistics + * under the lock. + */ + if (ret == MIGRATEPAGE_SUCCESS) { + balloon_page_insert(&b->b_dev_info, newpage); + __count_vm_event(BALLOON_MIGRATE); + } + + /* + * We deflated successfully, so regardless to the inflation success, we + * need to reduce the number of isolated_pages. + */ + b->b_dev_info.isolated_pages--; + spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags); + +out_unlock: + up_read(&b->conf_sem); + return ret; +} + +/** + * vmballoon_compaction_deinit() - removes compaction related data. + * + * @b: pointer to the balloon. + */ +static void vmballoon_compaction_deinit(struct vmballoon *b) +{ + if (!IS_ERR(b->b_dev_info.inode)) + iput(b->b_dev_info.inode); + + b->b_dev_info.inode = NULL; + kern_unmount(vmballoon_mnt); + vmballoon_mnt = NULL; +} + +/** + * vmballoon_compaction_init() - initialized compaction for the balloon. + * + * @b: pointer to the balloon. + * + * If during the initialization a failure occurred, this function does not + * perform cleanup. The caller must call vmballoon_compaction_deinit() in this + * case. + * + * Return: zero on success or error code on failure. + */ +static __init int vmballoon_compaction_init(struct vmballoon *b) +{ + vmballoon_mnt = kern_mount(&vmballoon_fs); + if (IS_ERR(vmballoon_mnt)) + return PTR_ERR(vmballoon_mnt); + + b->b_dev_info.migratepage = vmballoon_migratepage; + b->b_dev_info.inode = alloc_anon_inode(vmballoon_mnt->mnt_sb); + + if (IS_ERR(b->b_dev_info.inode)) + return PTR_ERR(b->b_dev_info.inode); + + b->b_dev_info.inode->i_mapping->a_ops = &balloon_aops; + return 0; +} + +#else /* CONFIG_BALLOON_COMPACTION */ + +static void vmballoon_compaction_deinit(struct vmballoon *b) +{ +} + +static int vmballoon_compaction_init(struct vmballoon *b) +{ + return 0; +} + +#endif /* CONFIG_BALLOON_COMPACTION */ + +static int __init vmballoon_init(void) +{ + int error; + + /* + * Check if we are running on VMware's hypervisor and bail out + * if we are not. 
+ */ + if (x86_hyper_type != X86_HYPER_VMWARE) + return -ENODEV; + + INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work); + + error = vmballoon_register_shrinker(&balloon); + if (error) + goto fail; + + /* + * Initialization of compaction must be done after the call to + * balloon_devinfo_init() . + */ + balloon_devinfo_init(&balloon.b_dev_info); + error = vmballoon_compaction_init(&balloon); + if (error) + goto fail; + + INIT_LIST_HEAD(&balloon.huge_pages); + spin_lock_init(&balloon.comm_lock); + init_rwsem(&balloon.conf_sem); + balloon.vmci_doorbell = VMCI_INVALID_HANDLE; + balloon.batch_page = NULL; + balloon.page = NULL; + balloon.reset_required = true; + + queue_delayed_work(system_freezable_wq, &balloon.dwork, 0); + + vmballoon_debugfs_init(&balloon); + + return 0; +fail: + vmballoon_unregister_shrinker(&balloon); + vmballoon_compaction_deinit(&balloon); + return error; +} + +/* + * Using late_initcall() instead of module_init() allows the balloon to use the + * VMCI doorbell even when the balloon is built into the kernel. Otherwise the + * VMCI is probed only after the balloon is initialized. If the balloon is used + * as a module, late_initcall() is equivalent to module_init(). + */ +late_initcall(vmballoon_init); + +static void __exit vmballoon_exit(void) +{ + vmballoon_unregister_shrinker(&balloon); + vmballoon_vmci_cleanup(&balloon); + cancel_delayed_work_sync(&balloon.dwork); + + vmballoon_debugfs_exit(&balloon); + + /* + * Deallocate all reserved memory, and reset connection with monitor. + * Reset connection before deallocating memory to avoid potential for + * additional spurious resets from guest touching deallocated pages. + */ + vmballoon_send_start(&balloon, 0); + vmballoon_pop(&balloon); + + /* Only once we popped the balloon, compaction can be deinit */ + vmballoon_compaction_deinit(&balloon); +} +module_exit(vmballoon_exit); diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig new file mode 100644 index 000000000..605794aad --- /dev/null +++ b/drivers/misc/vmw_vmci/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# VMware VMCI device +# + +config VMWARE_VMCI + tristate "VMware VMCI Driver" + depends on X86 && PCI + help + This is VMware's Virtual Machine Communication Interface. It enables + high-speed communication between host and guest in a virtual + environment via the VMCI virtual device. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called vmw_vmci. diff --git a/drivers/misc/vmw_vmci/Makefile b/drivers/misc/vmw_vmci/Makefile new file mode 100644 index 000000000..475fa31a9 --- /dev/null +++ b/drivers/misc/vmw_vmci/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci.o +vmw_vmci-y += vmci_context.o vmci_datagram.o vmci_doorbell.o \ + vmci_driver.o vmci_event.o vmci_guest.o vmci_handle_array.o \ + vmci_host.o vmci_queue_pair.o vmci_resource.o vmci_route.o diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c new file mode 100644 index 000000000..26ff49fdf --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -0,0 +1,1217 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. 
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/slab.h>
+
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+/* Use a wide upper bound for the maximum contexts. */
+#define VMCI_MAX_CONTEXTS 2000
+
+/*
+ * List of current VMCI contexts. Contexts can be added by
+ * vmci_ctx_create() and removed via vmci_ctx_destroy().
+ * These, along with context lookup, are protected by the
+ * list structure's lock.
+ */
+static struct {
+	struct list_head head;
+	spinlock_t lock; /* Spinlock for context list operations */
+} ctx_list = {
+	.head = LIST_HEAD_INIT(ctx_list.head),
+	.lock = __SPIN_LOCK_UNLOCKED(ctx_list.lock),
+};
+
+/* Used by contexts that did not set up notify flag pointers */
+static bool ctx_dummy_notify;
+
+static void ctx_signal_notify(struct vmci_ctx *context)
+{
+	*context->notify = true;
+}
+
+static void ctx_clear_notify(struct vmci_ctx *context)
+{
+	*context->notify = false;
+}
+
+/*
+ * If nothing requires the attention of the guest, clears both
+ * notify flag and call.
+ */
+static void ctx_clear_notify_call(struct vmci_ctx *context)
+{
+	if (context->pending_datagrams == 0 &&
+	    vmci_handle_arr_get_size(context->pending_doorbell_array) == 0)
+		ctx_clear_notify(context);
+}
+
+/*
+ * Sets the context's notify flag iff datagrams are pending for this
+ * context. Called from vmci_setup_notify().
+ */
+void vmci_ctx_check_signal_notify(struct vmci_ctx *context)
+{
+	spin_lock(&context->lock);
+	if (context->pending_datagrams)
+		ctx_signal_notify(context);
+	spin_unlock(&context->lock);
+}
+
+/*
+ * Allocates and initializes a VMCI context.
+ */
+struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
+				 uintptr_t event_hnd,
+				 int user_version,
+				 const struct cred *cred)
+{
+	struct vmci_ctx *context;
+	int error;
+
+	if (cid == VMCI_INVALID_ID) {
+		pr_devel("Invalid context ID for VMCI context\n");
+		error = -EINVAL;
+		goto err_out;
+	}
+
+	if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) {
+		pr_devel("Invalid flag (flags=0x%x) for VMCI context\n",
+			 priv_flags);
+		error = -EINVAL;
+		goto err_out;
+	}
+
+	if (user_version == 0) {
+		pr_devel("Invalid user_version %d\n", user_version);
+		error = -EINVAL;
+		goto err_out;
+	}
+
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (!context) {
+		pr_warn("Failed to allocate memory for VMCI context\n");
+		error = -EINVAL;
+		goto err_out;
+	}
+
+	kref_init(&context->kref);
+	spin_lock_init(&context->lock);
+	INIT_LIST_HEAD(&context->list_item);
+	INIT_LIST_HEAD(&context->datagram_queue);
+	INIT_LIST_HEAD(&context->notifier_list);
+
+	/* Initialize host-specific VMCI context.
*/ + init_waitqueue_head(&context->host_context.wait_queue); + + context->queue_pair_array = + vmci_handle_arr_create(0, VMCI_MAX_GUEST_QP_COUNT); + if (!context->queue_pair_array) { + error = -ENOMEM; + goto err_free_ctx; + } + + context->doorbell_array = + vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT); + if (!context->doorbell_array) { + error = -ENOMEM; + goto err_free_qp_array; + } + + context->pending_doorbell_array = + vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT); + if (!context->pending_doorbell_array) { + error = -ENOMEM; + goto err_free_db_array; + } + + context->user_version = user_version; + + context->priv_flags = priv_flags; + + if (cred) + context->cred = get_cred(cred); + + context->notify = &ctx_dummy_notify; + context->notify_page = NULL; + + /* + * If we collide with an existing context we generate a new + * and use it instead. The VMX will determine if regeneration + * is okay. Since there isn't 4B - 16 VMs running on a given + * host, the below loop will terminate. + */ + spin_lock(&ctx_list.lock); + + while (vmci_ctx_exists(cid)) { + /* We reserve the lowest 16 ids for fixed contexts. */ + cid = max(cid, VMCI_RESERVED_CID_LIMIT - 1) + 1; + if (cid == VMCI_INVALID_ID) + cid = VMCI_RESERVED_CID_LIMIT; + } + context->cid = cid; + + list_add_tail_rcu(&context->list_item, &ctx_list.head); + spin_unlock(&ctx_list.lock); + + return context; + + err_free_db_array: + vmci_handle_arr_destroy(context->doorbell_array); + err_free_qp_array: + vmci_handle_arr_destroy(context->queue_pair_array); + err_free_ctx: + kfree(context); + err_out: + return ERR_PTR(error); +} + +/* + * Destroy VMCI context. + */ +void vmci_ctx_destroy(struct vmci_ctx *context) +{ + spin_lock(&ctx_list.lock); + list_del_rcu(&context->list_item); + spin_unlock(&ctx_list.lock); + synchronize_rcu(); + + vmci_ctx_put(context); +} + +/* + * Fire notification for all contexts interested in given cid. + */ +static int ctx_fire_notification(u32 context_id, u32 priv_flags) +{ + u32 i, array_size; + struct vmci_ctx *sub_ctx; + struct vmci_handle_arr *subscriber_array; + struct vmci_handle context_handle = + vmci_make_handle(context_id, VMCI_EVENT_HANDLER); + + /* + * We create an array to hold the subscribers we find when + * scanning through all contexts. + */ + subscriber_array = vmci_handle_arr_create(0, VMCI_MAX_CONTEXTS); + if (subscriber_array == NULL) + return VMCI_ERROR_NO_MEM; + + /* + * Scan all contexts to find who is interested in being + * notified about given contextID. + */ + rcu_read_lock(); + list_for_each_entry_rcu(sub_ctx, &ctx_list.head, list_item) { + struct vmci_handle_list *node; + + /* + * We only deliver notifications of the removal of + * contexts, if the two contexts are allowed to + * interact. + */ + if (vmci_deny_interaction(priv_flags, sub_ctx->priv_flags)) + continue; + + list_for_each_entry_rcu(node, &sub_ctx->notifier_list, node) { + if (!vmci_handle_is_equal(node->handle, context_handle)) + continue; + + vmci_handle_arr_append_entry(&subscriber_array, + vmci_make_handle(sub_ctx->cid, + VMCI_EVENT_HANDLER)); + } + } + rcu_read_unlock(); + + /* Fire event to all subscribers. 
*/ + array_size = vmci_handle_arr_get_size(subscriber_array); + for (i = 0; i < array_size; i++) { + int result; + struct vmci_event_ctx ev; + + ev.msg.hdr.dst = vmci_handle_arr_get_entry(subscriber_array, i); + ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_CONTEXT_RESOURCE_ID); + ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); + ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED; + ev.payload.context_id = context_id; + + result = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID, + &ev.msg.hdr, false); + if (result < VMCI_SUCCESS) { + pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n", + ev.msg.event_data.event, + ev.msg.hdr.dst.context); + /* We continue to enqueue on next subscriber. */ + } + } + vmci_handle_arr_destroy(subscriber_array); + + return VMCI_SUCCESS; +} + +/* + * Returns the current number of pending datagrams. The call may + * also serve as a synchronization point for the datagram queue, + * as no enqueue operations can occur concurrently. + */ +int vmci_ctx_pending_datagrams(u32 cid, u32 *pending) +{ + struct vmci_ctx *context; + + context = vmci_ctx_get(cid); + if (context == NULL) + return VMCI_ERROR_INVALID_ARGS; + + spin_lock(&context->lock); + if (pending) + *pending = context->pending_datagrams; + spin_unlock(&context->lock); + vmci_ctx_put(context); + + return VMCI_SUCCESS; +} + +/* + * Queues a VMCI datagram for the appropriate target VM context. + */ +int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg) +{ + struct vmci_datagram_queue_entry *dq_entry; + struct vmci_ctx *context; + struct vmci_handle dg_src; + size_t vmci_dg_size; + + vmci_dg_size = VMCI_DG_SIZE(dg); + if (vmci_dg_size > VMCI_MAX_DG_SIZE) { + pr_devel("Datagram too large (bytes=%zu)\n", vmci_dg_size); + return VMCI_ERROR_INVALID_ARGS; + } + + /* Get the target VM's VMCI context. */ + context = vmci_ctx_get(cid); + if (!context) { + pr_devel("Invalid context (ID=0x%x)\n", cid); + return VMCI_ERROR_INVALID_ARGS; + } + + /* Allocate guest call entry and add it to the target VM's queue. */ + dq_entry = kmalloc(sizeof(*dq_entry), GFP_KERNEL); + if (dq_entry == NULL) { + pr_warn("Failed to allocate memory for datagram\n"); + vmci_ctx_put(context); + return VMCI_ERROR_NO_MEM; + } + dq_entry->dg = dg; + dq_entry->dg_size = vmci_dg_size; + dg_src = dg->src; + INIT_LIST_HEAD(&dq_entry->list_item); + + spin_lock(&context->lock); + + /* + * We put a higher limit on datagrams from the hypervisor. If + * the pending datagram is not from hypervisor, then we check + * if enqueueing it would exceed the + * VMCI_MAX_DATAGRAM_QUEUE_SIZE limit on the destination. If + * the pending datagram is from hypervisor, we allow it to be + * queued at the destination side provided we don't reach the + * VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE limit. 
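+	 *
+	 * Schematically, the check below amounts to (where "from_hyp"
+	 * abbreviates "the source is the hypervisor's context resource
+	 * handle"):
+	 *
+	 *	new_size = datagram_queue_size + vmci_dg_size;
+	 *	if (new_size >= VMCI_MAX_DATAGRAM_QUEUE_SIZE &&
+	 *	    (!from_hyp ||
+	 *	     new_size >= VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE))
+	 *		return VMCI_ERROR_NO_RESOURCES;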
+ */ + if (context->datagram_queue_size + vmci_dg_size >= + VMCI_MAX_DATAGRAM_QUEUE_SIZE && + (!vmci_handle_is_equal(dg_src, + vmci_make_handle + (VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_CONTEXT_RESOURCE_ID)) || + context->datagram_queue_size + vmci_dg_size >= + VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE)) { + spin_unlock(&context->lock); + vmci_ctx_put(context); + kfree(dq_entry); + pr_devel("Context (ID=0x%x) receive queue is full\n", cid); + return VMCI_ERROR_NO_RESOURCES; + } + + list_add(&dq_entry->list_item, &context->datagram_queue); + context->pending_datagrams++; + context->datagram_queue_size += vmci_dg_size; + ctx_signal_notify(context); + wake_up(&context->host_context.wait_queue); + spin_unlock(&context->lock); + vmci_ctx_put(context); + + return vmci_dg_size; +} + +/* + * Verifies whether a context with the specified context ID exists. + * FIXME: utility is dubious as no decisions can be reliably made + * using this data as context can appear and disappear at any time. + */ +bool vmci_ctx_exists(u32 cid) +{ + struct vmci_ctx *context; + bool exists = false; + + rcu_read_lock(); + + list_for_each_entry_rcu(context, &ctx_list.head, list_item) { + if (context->cid == cid) { + exists = true; + break; + } + } + + rcu_read_unlock(); + return exists; +} + +/* + * Retrieves VMCI context corresponding to the given cid. + */ +struct vmci_ctx *vmci_ctx_get(u32 cid) +{ + struct vmci_ctx *c, *context = NULL; + + if (cid == VMCI_INVALID_ID) + return NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(c, &ctx_list.head, list_item) { + if (c->cid == cid) { + /* + * The context owner drops its own reference to the + * context only after removing it from the list and + * waiting for RCU grace period to expire. This + * means that we are not about to increase the + * reference count of something that is in the + * process of being destroyed. + */ + context = c; + kref_get(&context->kref); + break; + } + } + rcu_read_unlock(); + + return context; +} + +/* + * Deallocates all parts of a context data structure. This + * function doesn't lock the context, because it assumes that + * the caller was holding the last reference to context. + */ +static void ctx_free_ctx(struct kref *kref) +{ + struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref); + struct vmci_datagram_queue_entry *dq_entry, *dq_entry_tmp; + struct vmci_handle temp_handle; + struct vmci_handle_list *notifier, *tmp; + + /* + * Fire event to all contexts interested in knowing this + * context is dying. + */ + ctx_fire_notification(context->cid, context->priv_flags); + + /* + * Cleanup all queue pair resources attached to context. If + * the VM dies without cleaning up, this code will make sure + * that no resources are leaked. + */ + temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0); + while (!vmci_handle_is_equal(temp_handle, VMCI_INVALID_HANDLE)) { + if (vmci_qp_broker_detach(temp_handle, + context) < VMCI_SUCCESS) { + /* + * When vmci_qp_broker_detach() succeeds it + * removes the handle from the array. If + * detach fails, we must remove the handle + * ourselves. + */ + vmci_handle_arr_remove_entry(context->queue_pair_array, + temp_handle); + } + temp_handle = + vmci_handle_arr_get_entry(context->queue_pair_array, 0); + } + + /* + * It is fine to destroy this without locking the callQueue, as + * this is the only thread having a reference to the context. 
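+	 *
+	 * (This is the teardown half of the lookup scheme used by
+	 * vmci_ctx_get(): lookups take their reference under
+	 * rcu_read_lock(), and vmci_ctx_destroy() unlinks the context and
+	 * waits for a grace period first, so by the time the final
+	 * kref_put() lands here no other thread can still see the context.)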
+ */ + list_for_each_entry_safe(dq_entry, dq_entry_tmp, + &context->datagram_queue, list_item) { + WARN_ON(dq_entry->dg_size != VMCI_DG_SIZE(dq_entry->dg)); + list_del(&dq_entry->list_item); + kfree(dq_entry->dg); + kfree(dq_entry); + } + + list_for_each_entry_safe(notifier, tmp, + &context->notifier_list, node) { + list_del(¬ifier->node); + kfree(notifier); + } + + vmci_handle_arr_destroy(context->queue_pair_array); + vmci_handle_arr_destroy(context->doorbell_array); + vmci_handle_arr_destroy(context->pending_doorbell_array); + vmci_ctx_unset_notify(context); + if (context->cred) + put_cred(context->cred); + kfree(context); +} + +/* + * Drops reference to VMCI context. If this is the last reference to + * the context it will be deallocated. A context is created with + * a reference count of one, and on destroy, it is removed from + * the context list before its reference count is decremented. Thus, + * if we reach zero, we are sure that nobody else are about to increment + * it (they need the entry in the context list for that), and so there + * is no need for locking. + */ +void vmci_ctx_put(struct vmci_ctx *context) +{ + kref_put(&context->kref, ctx_free_ctx); +} + +/* + * Dequeues the next datagram and returns it to caller. + * The caller passes in a pointer to the max size datagram + * it can handle and the datagram is only unqueued if the + * size is less than max_size. If larger max_size is set to + * the size of the datagram to give the caller a chance to + * set up a larger buffer for the guestcall. + */ +int vmci_ctx_dequeue_datagram(struct vmci_ctx *context, + size_t *max_size, + struct vmci_datagram **dg) +{ + struct vmci_datagram_queue_entry *dq_entry; + struct list_head *list_item; + int rv; + + /* Dequeue the next datagram entry. */ + spin_lock(&context->lock); + if (context->pending_datagrams == 0) { + ctx_clear_notify_call(context); + spin_unlock(&context->lock); + pr_devel("No datagrams pending\n"); + return VMCI_ERROR_NO_MORE_DATAGRAMS; + } + + list_item = context->datagram_queue.next; + + dq_entry = + list_entry(list_item, struct vmci_datagram_queue_entry, list_item); + + /* Check size of caller's buffer. */ + if (*max_size < dq_entry->dg_size) { + *max_size = dq_entry->dg_size; + spin_unlock(&context->lock); + pr_devel("Caller's buffer should be at least (size=%u bytes)\n", + (u32) *max_size); + return VMCI_ERROR_NO_MEM; + } + + list_del(list_item); + context->pending_datagrams--; + context->datagram_queue_size -= dq_entry->dg_size; + if (context->pending_datagrams == 0) { + ctx_clear_notify_call(context); + rv = VMCI_SUCCESS; + } else { + /* + * Return the size of the next datagram. + */ + struct vmci_datagram_queue_entry *next_entry; + + list_item = context->datagram_queue.next; + next_entry = + list_entry(list_item, struct vmci_datagram_queue_entry, + list_item); + + /* + * The following size_t -> int truncation is fine as + * the maximum size of a (routable) datagram is 68KB. + */ + rv = (int)next_entry->dg_size; + } + spin_unlock(&context->lock); + + /* Caller must free datagram. */ + *dg = dq_entry->dg; + dq_entry->dg = NULL; + kfree(dq_entry); + + return rv; +} + +/* + * Reverts actions set up by vmci_setup_notify(). Unmaps and unlocks the + * page mapped/locked by vmci_setup_notify(). 
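+ *
+ * (Sketch of the pairing, assuming the setup side in the host personality
+ * pins and kmap()s the user page holding the flag: setup leaves
+ * context->notify pointing into notify_page; this helper redirects notify
+ * to &ctx_dummy_notify before dropping the kunmap()/put_page() references,
+ * so concurrent ctx_signal_notify() calls always have a valid flag to
+ * write to.)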
+ */ +void vmci_ctx_unset_notify(struct vmci_ctx *context) +{ + struct page *notify_page; + + spin_lock(&context->lock); + + notify_page = context->notify_page; + context->notify = &ctx_dummy_notify; + context->notify_page = NULL; + + spin_unlock(&context->lock); + + if (notify_page) { + kunmap(notify_page); + put_page(notify_page); + } +} + +/* + * Add remote_cid to list of contexts current contexts wants + * notifications from/about. + */ +int vmci_ctx_add_notification(u32 context_id, u32 remote_cid) +{ + struct vmci_ctx *context; + struct vmci_handle_list *notifier, *n; + int result; + bool exists = false; + + context = vmci_ctx_get(context_id); + if (!context) + return VMCI_ERROR_NOT_FOUND; + + if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(remote_cid)) { + pr_devel("Context removed notifications for other VMs not supported (src=0x%x, remote=0x%x)\n", + context_id, remote_cid); + result = VMCI_ERROR_DST_UNREACHABLE; + goto out; + } + + if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) { + result = VMCI_ERROR_NO_ACCESS; + goto out; + } + + notifier = kmalloc(sizeof(struct vmci_handle_list), GFP_KERNEL); + if (!notifier) { + result = VMCI_ERROR_NO_MEM; + goto out; + } + + INIT_LIST_HEAD(¬ifier->node); + notifier->handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER); + + spin_lock(&context->lock); + + if (context->n_notifiers < VMCI_MAX_CONTEXTS) { + list_for_each_entry(n, &context->notifier_list, node) { + if (vmci_handle_is_equal(n->handle, notifier->handle)) { + exists = true; + break; + } + } + + if (exists) { + kfree(notifier); + result = VMCI_ERROR_ALREADY_EXISTS; + } else { + list_add_tail_rcu(¬ifier->node, + &context->notifier_list); + context->n_notifiers++; + result = VMCI_SUCCESS; + } + } else { + kfree(notifier); + result = VMCI_ERROR_NO_MEM; + } + + spin_unlock(&context->lock); + + out: + vmci_ctx_put(context); + return result; +} + +/* + * Remove remote_cid from current context's list of contexts it is + * interested in getting notifications from/about. + */ +int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid) +{ + struct vmci_ctx *context; + struct vmci_handle_list *notifier, *tmp; + struct vmci_handle handle; + bool found = false; + + context = vmci_ctx_get(context_id); + if (!context) + return VMCI_ERROR_NOT_FOUND; + + handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER); + + spin_lock(&context->lock); + list_for_each_entry_safe(notifier, tmp, + &context->notifier_list, node) { + if (vmci_handle_is_equal(notifier->handle, handle)) { + list_del_rcu(¬ifier->node); + context->n_notifiers--; + found = true; + break; + } + } + spin_unlock(&context->lock); + + if (found) { + synchronize_rcu(); + kfree(notifier); + } + + vmci_ctx_put(context); + + return found ? 
VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND; +} + +static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context, + u32 *buf_size, void **pbuf) +{ + u32 *notifiers; + size_t data_size; + struct vmci_handle_list *entry; + int i = 0; + + if (context->n_notifiers == 0) { + *buf_size = 0; + *pbuf = NULL; + return VMCI_SUCCESS; + } + + data_size = context->n_notifiers * sizeof(*notifiers); + if (*buf_size < data_size) { + *buf_size = data_size; + return VMCI_ERROR_MORE_DATA; + } + + notifiers = kmalloc(data_size, GFP_ATOMIC); /* FIXME: want GFP_KERNEL */ + if (!notifiers) + return VMCI_ERROR_NO_MEM; + + list_for_each_entry(entry, &context->notifier_list, node) + notifiers[i++] = entry->handle.context; + + *buf_size = data_size; + *pbuf = notifiers; + return VMCI_SUCCESS; +} + +static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context, + u32 *buf_size, void **pbuf) +{ + struct dbell_cpt_state *dbells; + u32 i, n_doorbells; + + n_doorbells = vmci_handle_arr_get_size(context->doorbell_array); + if (n_doorbells > 0) { + size_t data_size = n_doorbells * sizeof(*dbells); + if (*buf_size < data_size) { + *buf_size = data_size; + return VMCI_ERROR_MORE_DATA; + } + + dbells = kzalloc(data_size, GFP_ATOMIC); + if (!dbells) + return VMCI_ERROR_NO_MEM; + + for (i = 0; i < n_doorbells; i++) + dbells[i].handle = vmci_handle_arr_get_entry( + context->doorbell_array, i); + + *buf_size = data_size; + *pbuf = dbells; + } else { + *buf_size = 0; + *pbuf = NULL; + } + + return VMCI_SUCCESS; +} + +/* + * Get current context's checkpoint state of given type. + */ +int vmci_ctx_get_chkpt_state(u32 context_id, + u32 cpt_type, + u32 *buf_size, + void **pbuf) +{ + struct vmci_ctx *context; + int result; + + context = vmci_ctx_get(context_id); + if (!context) + return VMCI_ERROR_NOT_FOUND; + + spin_lock(&context->lock); + + switch (cpt_type) { + case VMCI_NOTIFICATION_CPT_STATE: + result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf); + break; + + case VMCI_WELLKNOWN_CPT_STATE: + /* + * For compatibility with VMX'en with VM to VM communication, we + * always return zero wellknown handles. + */ + + *buf_size = 0; + *pbuf = NULL; + result = VMCI_SUCCESS; + break; + + case VMCI_DOORBELL_CPT_STATE: + result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf); + break; + + default: + pr_devel("Invalid cpt state (type=%d)\n", cpt_type); + result = VMCI_ERROR_INVALID_ARGS; + break; + } + + spin_unlock(&context->lock); + vmci_ctx_put(context); + + return result; +} + +/* + * Set current context's checkpoint state of given type. + */ +int vmci_ctx_set_chkpt_state(u32 context_id, + u32 cpt_type, + u32 buf_size, + void *cpt_buf) +{ + u32 i; + u32 current_id; + int result = VMCI_SUCCESS; + u32 num_ids = buf_size / sizeof(u32); + + if (cpt_type == VMCI_WELLKNOWN_CPT_STATE && num_ids > 0) { + /* + * We would end up here if VMX with VM to VM communication + * attempts to restore a checkpoint with wellknown handles. 
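+	 * Such state is obsolete and is rejected outright. Valid
+	 * notification state is restored below by replaying
+	 * vmci_ctx_add_notification() for each context ID saved in the
+	 * checkpoint buffer.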
+ */ + pr_warn("Attempt to restore checkpoint with obsolete wellknown handles\n"); + return VMCI_ERROR_OBSOLETE; + } + + if (cpt_type != VMCI_NOTIFICATION_CPT_STATE) { + pr_devel("Invalid cpt state (type=%d)\n", cpt_type); + return VMCI_ERROR_INVALID_ARGS; + } + + for (i = 0; i < num_ids && result == VMCI_SUCCESS; i++) { + current_id = ((u32 *)cpt_buf)[i]; + result = vmci_ctx_add_notification(context_id, current_id); + if (result != VMCI_SUCCESS) + break; + } + if (result != VMCI_SUCCESS) + pr_devel("Failed to set cpt state (type=%d) (error=%d)\n", + cpt_type, result); + + return result; +} + +/* + * Retrieves the specified context's pending notifications in the + * form of a handle array. The handle arrays returned are the + * actual data - not a copy and should not be modified by the + * caller. They must be released using + * vmci_ctx_rcv_notifications_release. + */ +int vmci_ctx_rcv_notifications_get(u32 context_id, + struct vmci_handle_arr **db_handle_array, + struct vmci_handle_arr **qp_handle_array) +{ + struct vmci_ctx *context; + int result = VMCI_SUCCESS; + + context = vmci_ctx_get(context_id); + if (context == NULL) + return VMCI_ERROR_NOT_FOUND; + + spin_lock(&context->lock); + + *db_handle_array = context->pending_doorbell_array; + context->pending_doorbell_array = + vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT); + if (!context->pending_doorbell_array) { + context->pending_doorbell_array = *db_handle_array; + *db_handle_array = NULL; + result = VMCI_ERROR_NO_MEM; + } + *qp_handle_array = NULL; + + spin_unlock(&context->lock); + vmci_ctx_put(context); + + return result; +} + +/* + * Releases handle arrays with pending notifications previously + * retrieved using vmci_ctx_rcv_notifications_get. If the + * notifications were not successfully handed over to the guest, + * success must be false. + */ +void vmci_ctx_rcv_notifications_release(u32 context_id, + struct vmci_handle_arr *db_handle_array, + struct vmci_handle_arr *qp_handle_array, + bool success) +{ + struct vmci_ctx *context = vmci_ctx_get(context_id); + + spin_lock(&context->lock); + if (!success) { + struct vmci_handle handle; + + /* + * New notifications may have been added while we were not + * holding the context lock, so we transfer any new pending + * doorbell notifications to the old array, and reinstate the + * old array. + */ + + handle = vmci_handle_arr_remove_tail( + context->pending_doorbell_array); + while (!vmci_handle_is_invalid(handle)) { + if (!vmci_handle_arr_has_entry(db_handle_array, + handle)) { + vmci_handle_arr_append_entry( + &db_handle_array, handle); + } + handle = vmci_handle_arr_remove_tail( + context->pending_doorbell_array); + } + vmci_handle_arr_destroy(context->pending_doorbell_array); + context->pending_doorbell_array = db_handle_array; + db_handle_array = NULL; + } else { + ctx_clear_notify_call(context); + } + spin_unlock(&context->lock); + vmci_ctx_put(context); + + if (db_handle_array) + vmci_handle_arr_destroy(db_handle_array); + + if (qp_handle_array) + vmci_handle_arr_destroy(qp_handle_array); +} + +/* + * Registers that a new doorbell handle has been allocated by the + * context. Only doorbell handles registered can be notified. 
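+ *
+ * A typical host-side sequence (an illustrative sketch; error handling
+ * omitted) pairs this with the notify and destroy helpers below:
+ *
+ *	vmci_ctx_dbell_create(cid, handle);
+ *	vmci_ctx_notify_dbell(src_cid, handle, src_priv_flags);
+ *	vmci_ctx_dbell_destroy(cid, handle);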
+ */ +int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle) +{ + struct vmci_ctx *context; + int result; + + if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + context = vmci_ctx_get(context_id); + if (context == NULL) + return VMCI_ERROR_NOT_FOUND; + + spin_lock(&context->lock); + if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) + result = vmci_handle_arr_append_entry(&context->doorbell_array, + handle); + else + result = VMCI_ERROR_DUPLICATE_ENTRY; + + spin_unlock(&context->lock); + vmci_ctx_put(context); + + return result; +} + +/* + * Unregisters a doorbell handle that was previously registered + * with vmci_ctx_dbell_create. + */ +int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle) +{ + struct vmci_ctx *context; + struct vmci_handle removed_handle; + + if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + context = vmci_ctx_get(context_id); + if (context == NULL) + return VMCI_ERROR_NOT_FOUND; + + spin_lock(&context->lock); + removed_handle = + vmci_handle_arr_remove_entry(context->doorbell_array, handle); + vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle); + spin_unlock(&context->lock); + + vmci_ctx_put(context); + + return vmci_handle_is_invalid(removed_handle) ? + VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS; +} + +/* + * Unregisters all doorbell handles that were previously + * registered with vmci_ctx_dbell_create. + */ +int vmci_ctx_dbell_destroy_all(u32 context_id) +{ + struct vmci_ctx *context; + struct vmci_handle handle; + + if (context_id == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + context = vmci_ctx_get(context_id); + if (context == NULL) + return VMCI_ERROR_NOT_FOUND; + + spin_lock(&context->lock); + do { + struct vmci_handle_arr *arr = context->doorbell_array; + handle = vmci_handle_arr_remove_tail(arr); + } while (!vmci_handle_is_invalid(handle)); + do { + struct vmci_handle_arr *arr = context->pending_doorbell_array; + handle = vmci_handle_arr_remove_tail(arr); + } while (!vmci_handle_is_invalid(handle)); + spin_unlock(&context->lock); + + vmci_ctx_put(context); + + return VMCI_SUCCESS; +} + +/* + * Registers a notification of a doorbell handle initiated by the + * specified source context. The notification of doorbells are + * subject to the same isolation rules as datagram delivery. To + * allow host side senders of notifications a finer granularity + * of sender rights than those assigned to the sending context + * itself, the host context is required to specify a different + * set of privilege flags that will override the privileges of + * the source context. + */ +int vmci_ctx_notify_dbell(u32 src_cid, + struct vmci_handle handle, + u32 src_priv_flags) +{ + struct vmci_ctx *dst_context; + int result; + + if (vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + /* Get the target VM's VMCI context. 
*/ + dst_context = vmci_ctx_get(handle.context); + if (!dst_context) { + pr_devel("Invalid context (ID=0x%x)\n", handle.context); + return VMCI_ERROR_NOT_FOUND; + } + + if (src_cid != handle.context) { + u32 dst_priv_flags; + + if (VMCI_CONTEXT_IS_VM(src_cid) && + VMCI_CONTEXT_IS_VM(handle.context)) { + pr_devel("Doorbell notification from VM to VM not supported (src=0x%x, dst=0x%x)\n", + src_cid, handle.context); + result = VMCI_ERROR_DST_UNREACHABLE; + goto out; + } + + result = vmci_dbell_get_priv_flags(handle, &dst_priv_flags); + if (result < VMCI_SUCCESS) { + pr_warn("Failed to get privilege flags for destination (handle=0x%x:0x%x)\n", + handle.context, handle.resource); + goto out; + } + + if (src_cid != VMCI_HOST_CONTEXT_ID || + src_priv_flags == VMCI_NO_PRIVILEGE_FLAGS) { + src_priv_flags = vmci_context_get_priv_flags(src_cid); + } + + if (vmci_deny_interaction(src_priv_flags, dst_priv_flags)) { + result = VMCI_ERROR_NO_ACCESS; + goto out; + } + } + + if (handle.context == VMCI_HOST_CONTEXT_ID) { + result = vmci_dbell_host_context_notify(src_cid, handle); + } else { + spin_lock(&dst_context->lock); + + if (!vmci_handle_arr_has_entry(dst_context->doorbell_array, + handle)) { + result = VMCI_ERROR_NOT_FOUND; + } else { + if (!vmci_handle_arr_has_entry( + dst_context->pending_doorbell_array, + handle)) { + result = vmci_handle_arr_append_entry( + &dst_context->pending_doorbell_array, + handle); + if (result == VMCI_SUCCESS) { + ctx_signal_notify(dst_context); + wake_up(&dst_context->host_context.wait_queue); + } + } else { + result = VMCI_SUCCESS; + } + } + spin_unlock(&dst_context->lock); + } + + out: + vmci_ctx_put(dst_context); + + return result; +} + +bool vmci_ctx_supports_host_qp(struct vmci_ctx *context) +{ + return context && context->user_version >= VMCI_VERSION_HOSTQP; +} + +/* + * Registers that a new queue pair handle has been allocated by + * the context. + */ +int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle) +{ + int result; + + if (context == NULL || vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) + result = vmci_handle_arr_append_entry( + &context->queue_pair_array, handle); + else + result = VMCI_ERROR_DUPLICATE_ENTRY; + + return result; +} + +/* + * Unregisters a queue pair handle that was previously registered + * with vmci_ctx_qp_create. + */ +int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle) +{ + struct vmci_handle hndl; + + if (context == NULL || vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle); + + return vmci_handle_is_invalid(hndl) ? + VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS; +} + +/* + * Determines whether a given queue pair handle is registered + * with the given context. + */ +bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle) +{ + if (context == NULL || vmci_handle_is_invalid(handle)) + return false; + + return vmci_handle_arr_has_entry(context->queue_pair_array, handle); +} + +/* + * vmci_context_get_priv_flags() - Retrieve privilege flags. + * @context_id: The context ID of the VMCI context. + * + * Retrieves privilege flags of the given VMCI context ID. 
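+ *
+ * Return: the context's privilege flags when host-side code is active
+ * (or VMCI_LEAST_PRIVILEGE_FLAGS if no such context exists); otherwise
+ * VMCI_NO_PRIVILEGE_FLAGS, since a pure guest holds no host privileges.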
+ */ +u32 vmci_context_get_priv_flags(u32 context_id) +{ + if (vmci_host_code_active()) { + u32 flags; + struct vmci_ctx *context; + + context = vmci_ctx_get(context_id); + if (!context) + return VMCI_LEAST_PRIVILEGE_FLAGS; + + flags = context->priv_flags; + vmci_ctx_put(context); + return flags; + } + return VMCI_NO_PRIVILEGE_FLAGS; +} +EXPORT_SYMBOL_GPL(vmci_context_get_priv_flags); + +/* + * vmci_is_context_owner() - Determimnes if user is the context owner + * @context_id: The context ID of the VMCI context. + * @uid: The host user id (real kernel value). + * + * Determines whether a given UID is the owner of given VMCI context. + */ +bool vmci_is_context_owner(u32 context_id, kuid_t uid) +{ + bool is_owner = false; + + if (vmci_host_code_active()) { + struct vmci_ctx *context = vmci_ctx_get(context_id); + if (context) { + if (context->cred) + is_owner = uid_eq(context->cred->uid, uid); + vmci_ctx_put(context); + } + } + + return is_owner; +} +EXPORT_SYMBOL_GPL(vmci_is_context_owner); diff --git a/drivers/misc/vmw_vmci/vmci_context.h b/drivers/misc/vmw_vmci/vmci_context.h new file mode 100644 index 000000000..4db8701c9 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_context.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * VMware VMCI driver (vmciContext.h) + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + */ + +#ifndef _VMCI_CONTEXT_H_ +#define _VMCI_CONTEXT_H_ + +#include <linux/vmw_vmci_defs.h> +#include <linux/atomic.h> +#include <linux/kref.h> +#include <linux/types.h> +#include <linux/wait.h> + +#include "vmci_handle_array.h" +#include "vmci_datagram.h" + +/* Used to determine what checkpoint state to get and set. */ +enum { + VMCI_NOTIFICATION_CPT_STATE = 1, + VMCI_WELLKNOWN_CPT_STATE = 2, + VMCI_DG_OUT_STATE = 3, + VMCI_DG_IN_STATE = 4, + VMCI_DG_IN_SIZE_STATE = 5, + VMCI_DOORBELL_CPT_STATE = 6, +}; + +/* Host specific struct used for signalling */ +struct vmci_host { + wait_queue_head_t wait_queue; +}; + +struct vmci_handle_list { + struct list_head node; + struct vmci_handle handle; +}; + +struct vmci_ctx { + struct list_head list_item; /* For global VMCI list. */ + u32 cid; + struct kref kref; + struct list_head datagram_queue; /* Head of per VM queue. */ + u32 pending_datagrams; + size_t datagram_queue_size; /* Size of datagram queue in bytes. */ + + /* + * Version of the code that created + * this context; e.g., VMX. + */ + int user_version; + spinlock_t lock; /* Locks callQueue and handle_arrays. */ + + /* + * queue_pairs attached to. The array of + * handles for queue pairs is accessed + * from the code for QP API, and there + * it is protected by the QP lock. It + * is also accessed from the context + * clean up path, which does not + * require a lock. VMCILock is not + * used to protect the QP array field. + */ + struct vmci_handle_arr *queue_pair_array; + + /* Doorbells created by context. */ + struct vmci_handle_arr *doorbell_array; + + /* Doorbells pending for context. */ + struct vmci_handle_arr *pending_doorbell_array; + + /* Contexts current context is subscribing to. */ + struct list_head notifier_list; + unsigned int n_notifiers; + + struct vmci_host host_context; + u32 priv_flags; + + const struct cred *cred; + bool *notify; /* Notify flag pointer - hosted only. */ + struct page *notify_page; /* Page backing the notify UVA. */ +}; + +/* VMCINotifyAddRemoveInfo: Used to add/remove remote context notifications. 
*/
+struct vmci_ctx_info {
+	u32 remote_cid;
+	int result;
+};
+
+/* VMCICptBufInfo: Used to set/get current context's checkpoint state. */
+struct vmci_ctx_chkpt_buf_info {
+	u64 cpt_buf;
+	u32 cpt_type;
+	u32 buf_size;
+	s32 result;
+	u32 _pad;
+};
+
+/*
+ * VMCINotificationReceiveInfo: Used to receive pending notifications
+ * for doorbells and queue pairs.
+ */
+struct vmci_ctx_notify_recv_info {
+	u64 db_handle_buf_uva;
+	u64 db_handle_buf_size;
+	u64 qp_handle_buf_uva;
+	u64 qp_handle_buf_size;
+	s32 result;
+	u32 _pad;
+};
+
+/*
+ * Utility function that checks whether two entities are allowed
+ * to interact. If one of them is restricted, the other one must
+ * be trusted.
+ */
+static inline bool vmci_deny_interaction(u32 part_one, u32 part_two)
+{
+	return ((part_one & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+		!(part_two & VMCI_PRIVILEGE_FLAG_TRUSTED)) ||
+	       ((part_two & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+		!(part_one & VMCI_PRIVILEGE_FLAG_TRUSTED));
+}
+
+struct vmci_ctx *vmci_ctx_create(u32 cid, u32 flags,
+				 uintptr_t event_hnd, int version,
+				 const struct cred *cred);
+void vmci_ctx_destroy(struct vmci_ctx *context);
+
+bool vmci_ctx_supports_host_qp(struct vmci_ctx *context);
+int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg);
+int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
+			      size_t *max_size, struct vmci_datagram **dg);
+int vmci_ctx_pending_datagrams(u32 cid, u32 *pending);
+struct vmci_ctx *vmci_ctx_get(u32 cid);
+void vmci_ctx_put(struct vmci_ctx *context);
+bool vmci_ctx_exists(u32 cid);
+
+int vmci_ctx_add_notification(u32 context_id, u32 remote_cid);
+int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid);
+int vmci_ctx_get_chkpt_state(u32 context_id, u32 cpt_type,
+			     u32 *num_cids, void **cpt_buf_ptr);
+int vmci_ctx_set_chkpt_state(u32 context_id, u32 cpt_type,
+			     u32 num_cids, void *cpt_buf);
+
+int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle);
+int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle);
+bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle);
+
+void vmci_ctx_check_signal_notify(struct vmci_ctx *context);
+void vmci_ctx_unset_notify(struct vmci_ctx *context);
+
+int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle);
+int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle);
+int vmci_ctx_dbell_destroy_all(u32 context_id);
+int vmci_ctx_notify_dbell(u32 cid, struct vmci_handle handle,
+			  u32 src_priv_flags);
+
+int vmci_ctx_rcv_notifications_get(u32 context_id, struct vmci_handle_arr
+				   **db_handle_array, struct vmci_handle_arr
+				   **qp_handle_array);
+void vmci_ctx_rcv_notifications_release(u32 context_id, struct vmci_handle_arr
+					*db_handle_array, struct vmci_handle_arr
+					*qp_handle_array, bool success);
+
+static inline u32 vmci_ctx_get_id(struct vmci_ctx *context)
+{
+	if (!context)
+		return VMCI_INVALID_ID;
+	return context->cid;
+}
+
+#endif /* _VMCI_CONTEXT_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
new file mode 100644
index 000000000..f50d22882
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_datagram.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/bug.h> + +#include "vmci_datagram.h" +#include "vmci_resource.h" +#include "vmci_context.h" +#include "vmci_driver.h" +#include "vmci_event.h" +#include "vmci_route.h" + +/* + * struct datagram_entry describes the datagram entity. It is used for datagram + * entities created only on the host. + */ +struct datagram_entry { + struct vmci_resource resource; + u32 flags; + bool run_delayed; + vmci_datagram_recv_cb recv_cb; + void *client_data; + u32 priv_flags; +}; + +struct delayed_datagram_info { + struct datagram_entry *entry; + struct work_struct work; + bool in_dg_host_queue; + /* msg and msg_payload must be together. */ + struct vmci_datagram msg; + u8 msg_payload[]; +}; + +/* Number of in-flight host->host datagrams */ +static atomic_t delayed_dg_host_queue_size = ATOMIC_INIT(0); + +/* + * Create a datagram entry given a handle pointer. + */ +static int dg_create_handle(u32 resource_id, + u32 flags, + u32 priv_flags, + vmci_datagram_recv_cb recv_cb, + void *client_data, struct vmci_handle *out_handle) +{ + int result; + u32 context_id; + struct vmci_handle handle; + struct datagram_entry *entry; + + if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0) + return VMCI_ERROR_INVALID_ARGS; + + if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) { + context_id = VMCI_INVALID_ID; + } else { + context_id = vmci_get_context_id(); + if (context_id == VMCI_INVALID_ID) + return VMCI_ERROR_NO_RESOURCES; + } + + handle = vmci_make_handle(context_id, resource_id); + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + pr_warn("Failed allocating memory for datagram entry\n"); + return VMCI_ERROR_NO_MEM; + } + + entry->run_delayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? true : false; + entry->flags = flags; + entry->recv_cb = recv_cb; + entry->client_data = client_data; + entry->priv_flags = priv_flags; + + /* Make datagram resource live. */ + result = vmci_resource_add(&entry->resource, + VMCI_RESOURCE_TYPE_DATAGRAM, + handle); + if (result != VMCI_SUCCESS) { + pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n", + handle.context, handle.resource, result); + kfree(entry); + return result; + } + + *out_handle = vmci_resource_handle(&entry->resource); + return VMCI_SUCCESS; +} + +/* + * Internal utility function with the same purpose as + * vmci_datagram_get_priv_flags that also takes a context_id. + */ +static int vmci_datagram_get_priv_flags(u32 context_id, + struct vmci_handle handle, + u32 *priv_flags) +{ + if (context_id == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + if (context_id == VMCI_HOST_CONTEXT_ID) { + struct datagram_entry *src_entry; + struct vmci_resource *resource; + + resource = vmci_resource_by_handle(handle, + VMCI_RESOURCE_TYPE_DATAGRAM); + if (!resource) + return VMCI_ERROR_INVALID_ARGS; + + src_entry = container_of(resource, struct datagram_entry, + resource); + *priv_flags = src_entry->priv_flags; + vmci_resource_put(resource); + } else if (context_id == VMCI_HYPERVISOR_CONTEXT_ID) + *priv_flags = VMCI_MAX_PRIVILEGE_FLAGS; + else + *priv_flags = vmci_context_get_priv_flags(context_id); + + return VMCI_SUCCESS; +} + +/* + * Calls the specified callback in a delayed context. 
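+ *
+ * Both dispatch paths below queue such work along these lines (sketch):
+ *
+ *	dg_info->entry = dst_entry;
+ *	memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
+ *	INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+ *	schedule_work(&dg_info->work);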
+ */ +static void dg_delayed_dispatch(struct work_struct *work) +{ + struct delayed_datagram_info *dg_info = + container_of(work, struct delayed_datagram_info, work); + + dg_info->entry->recv_cb(dg_info->entry->client_data, &dg_info->msg); + + vmci_resource_put(&dg_info->entry->resource); + + if (dg_info->in_dg_host_queue) + atomic_dec(&delayed_dg_host_queue_size); + + kfree(dg_info); +} + +/* + * Dispatch datagram as a host, to the host, or other vm context. This + * function cannot dispatch to hypervisor context handlers. This should + * have been handled before we get here by vmci_datagram_dispatch. + * Returns number of bytes sent on success, error code otherwise. + */ +static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg) +{ + int retval; + size_t dg_size; + u32 src_priv_flags; + + dg_size = VMCI_DG_SIZE(dg); + + /* Host cannot send to the hypervisor. */ + if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID) + return VMCI_ERROR_DST_UNREACHABLE; + + /* Check that source handle matches sending context. */ + if (dg->src.context != context_id) { + pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n", + context_id, dg->src.context, dg->src.resource); + return VMCI_ERROR_NO_ACCESS; + } + + /* Get hold of privileges of sending endpoint. */ + retval = vmci_datagram_get_priv_flags(context_id, dg->src, + &src_priv_flags); + if (retval != VMCI_SUCCESS) { + pr_warn("Couldn't get privileges (handle=0x%x:0x%x)\n", + dg->src.context, dg->src.resource); + return retval; + } + + /* Determine if we should route to host or guest destination. */ + if (dg->dst.context == VMCI_HOST_CONTEXT_ID) { + /* Route to host datagram entry. */ + struct datagram_entry *dst_entry; + struct vmci_resource *resource; + + if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID && + dg->dst.resource == VMCI_EVENT_HANDLER) { + return vmci_event_dispatch(dg); + } + + resource = vmci_resource_by_handle(dg->dst, + VMCI_RESOURCE_TYPE_DATAGRAM); + if (!resource) { + pr_devel("Sending to invalid destination (handle=0x%x:0x%x)\n", + dg->dst.context, dg->dst.resource); + return VMCI_ERROR_INVALID_RESOURCE; + } + dst_entry = container_of(resource, struct datagram_entry, + resource); + if (vmci_deny_interaction(src_priv_flags, + dst_entry->priv_flags)) { + vmci_resource_put(resource); + return VMCI_ERROR_NO_ACCESS; + } + + /* + * If a VMCI datagram destined for the host is also sent by the + * host, we always run it delayed. This ensures that no locks + * are held when the datagram callback runs. + */ + if (dst_entry->run_delayed || + dg->src.context == VMCI_HOST_CONTEXT_ID) { + struct delayed_datagram_info *dg_info; + + if (atomic_add_return(1, &delayed_dg_host_queue_size) + == VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) { + atomic_dec(&delayed_dg_host_queue_size); + vmci_resource_put(resource); + return VMCI_ERROR_NO_MEM; + } + + dg_info = kmalloc(sizeof(*dg_info) + + (size_t) dg->payload_size, GFP_ATOMIC); + if (!dg_info) { + atomic_dec(&delayed_dg_host_queue_size); + vmci_resource_put(resource); + return VMCI_ERROR_NO_MEM; + } + + dg_info->in_dg_host_queue = true; + dg_info->entry = dst_entry; + memcpy(&dg_info->msg, dg, dg_size); + + INIT_WORK(&dg_info->work, dg_delayed_dispatch); + schedule_work(&dg_info->work); + retval = VMCI_SUCCESS; + + } else { + retval = dst_entry->recv_cb(dst_entry->client_data, dg); + vmci_resource_put(resource); + if (retval < VMCI_SUCCESS) + return retval; + } + } else { + /* Route to destination VM context. 
*/ + struct vmci_datagram *new_dg; + + if (context_id != dg->dst.context) { + if (vmci_deny_interaction(src_priv_flags, + vmci_context_get_priv_flags + (dg->dst.context))) { + return VMCI_ERROR_NO_ACCESS; + } else if (VMCI_CONTEXT_IS_VM(context_id)) { + /* + * If the sending context is a VM, it + * cannot reach another VM. + */ + + pr_devel("Datagram communication between VMs not supported (src=0x%x, dst=0x%x)\n", + context_id, dg->dst.context); + return VMCI_ERROR_DST_UNREACHABLE; + } + } + + /* We make a copy to enqueue. */ + new_dg = kmemdup(dg, dg_size, GFP_KERNEL); + if (new_dg == NULL) + return VMCI_ERROR_NO_MEM; + + retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg); + if (retval < VMCI_SUCCESS) { + kfree(new_dg); + return retval; + } + } + + /* + * We currently truncate the size to signed 32 bits. This doesn't + * matter for this handler as it only support 4Kb messages. + */ + return (int)dg_size; +} + +/* + * Dispatch datagram as a guest, down through the VMX and potentially to + * the host. + * Returns number of bytes sent on success, error code otherwise. + */ +static int dg_dispatch_as_guest(struct vmci_datagram *dg) +{ + int retval; + struct vmci_resource *resource; + + resource = vmci_resource_by_handle(dg->src, + VMCI_RESOURCE_TYPE_DATAGRAM); + if (!resource) + return VMCI_ERROR_NO_HANDLE; + + retval = vmci_send_datagram(dg); + vmci_resource_put(resource); + return retval; +} + +/* + * Dispatch datagram. This will determine the routing for the datagram + * and dispatch it accordingly. + * Returns number of bytes sent on success, error code otherwise. + */ +int vmci_datagram_dispatch(u32 context_id, + struct vmci_datagram *dg, bool from_guest) +{ + int retval; + enum vmci_route route; + + BUILD_BUG_ON(sizeof(struct vmci_datagram) != 24); + + if (dg->payload_size > VMCI_MAX_DG_SIZE || + VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) { + pr_devel("Payload (size=%llu bytes) too big to send\n", + (unsigned long long)dg->payload_size); + return VMCI_ERROR_INVALID_ARGS; + } + + retval = vmci_route(&dg->src, &dg->dst, from_guest, &route); + if (retval < VMCI_SUCCESS) { + pr_devel("Failed to route datagram (src=0x%x, dst=0x%x, err=%d)\n", + dg->src.context, dg->dst.context, retval); + return retval; + } + + if (VMCI_ROUTE_AS_HOST == route) { + if (VMCI_INVALID_ID == context_id) + context_id = VMCI_HOST_CONTEXT_ID; + return dg_dispatch_as_host(context_id, dg); + } + + if (VMCI_ROUTE_AS_GUEST == route) + return dg_dispatch_as_guest(dg); + + pr_warn("Unknown route (%d) for datagram\n", route); + return VMCI_ERROR_DST_UNREACHABLE; +} + +/* + * Invoke the handler for the given datagram. This is intended to be + * called only when acting as a guest and receiving a datagram from the + * virtual device. 
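+ *
+ * Return: VMCI_SUCCESS once the datagram has been handed to its handler,
+ * either directly or via the system workqueue for run_delayed entries;
+ * VMCI_ERROR_NO_HANDLE or VMCI_ERROR_NO_MEM on failure.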
+ */
+int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
+{
+	struct vmci_resource *resource;
+	struct datagram_entry *dst_entry;
+
+	resource = vmci_resource_by_handle(dg->dst,
+					   VMCI_RESOURCE_TYPE_DATAGRAM);
+	if (!resource) {
+		pr_devel("destination (handle=0x%x:0x%x) doesn't exist\n",
+			 dg->dst.context, dg->dst.resource);
+		return VMCI_ERROR_NO_HANDLE;
+	}
+
+	dst_entry = container_of(resource, struct datagram_entry, resource);
+	if (dst_entry->run_delayed) {
+		struct delayed_datagram_info *dg_info;
+
+		dg_info = kmalloc(sizeof(*dg_info) + (size_t)dg->payload_size,
+				  GFP_ATOMIC);
+		if (!dg_info) {
+			vmci_resource_put(resource);
+			return VMCI_ERROR_NO_MEM;
+		}
+
+		dg_info->in_dg_host_queue = false;
+		dg_info->entry = dst_entry;
+		memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
+
+		INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+		schedule_work(&dg_info->work);
+	} else {
+		dst_entry->recv_cb(dst_entry->client_data, dg);
+		vmci_resource_put(resource);
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * vmci_datagram_create_handle_priv() - Create host context datagram endpoint
+ * @resource_id:	The resource ID.
+ * @flags:	Datagram Flags.
+ * @priv_flags:	Privilege Flags.
+ * @recv_cb:	Callback when receiving datagrams.
+ * @client_data:	Opaque pointer handed to @recv_cb with each datagram.
+ * @out_handle:	vmci_handle that is populated as a result of this function.
+ *
+ * Creates a host context datagram endpoint and returns a handle to it.
+ */
+int vmci_datagram_create_handle_priv(u32 resource_id,
+				     u32 flags,
+				     u32 priv_flags,
+				     vmci_datagram_recv_cb recv_cb,
+				     void *client_data,
+				     struct vmci_handle *out_handle)
+{
+	if (out_handle == NULL)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (recv_cb == NULL) {
+		pr_devel("Client callback needed when creating datagram\n");
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	return dg_create_handle(resource_id, flags, priv_flags, recv_cb,
+				client_data, out_handle);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_create_handle_priv);
+
+/*
+ * vmci_datagram_create_handle() - Create host context datagram endpoint
+ * @resource_id:	Resource ID.
+ * @flags:	Datagram Flags.
+ * @recv_cb:	Callback when receiving datagrams.
+ * @client_data:	Opaque pointer handed to @recv_cb with each datagram.
+ * @out_handle:	vmci_handle that is populated as a result of this function.
+ *
+ * Creates a host context datagram endpoint and returns a handle to
+ * it. Same as vmci_datagram_create_handle_priv without the privilege
+ * flags argument.
+ */
+int vmci_datagram_create_handle(u32 resource_id,
+				u32 flags,
+				vmci_datagram_recv_cb recv_cb,
+				void *client_data,
+				struct vmci_handle *out_handle)
+{
+	return vmci_datagram_create_handle_priv(
+		resource_id, flags,
+		VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
+		recv_cb, client_data,
+		out_handle);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_create_handle);
+
+/*
+ * vmci_datagram_destroy_handle() - Destroys datagram handle
+ * @handle:	vmci_handle to be destroyed and reaped.
+ *
+ * Use this function to destroy any datagram handles created by
+ * vmci_datagram_create_handle{,Priv} functions.
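+ *
+ * Typical endpoint lifecycle (an illustrative sketch only; my_recv_cb
+ * stands in for a caller-supplied vmci_datagram_recv_cb and error
+ * handling is omitted):
+ *
+ *	struct vmci_handle h;
+ *
+ *	vmci_datagram_create_handle(resource_id, 0, my_recv_cb, NULL, &h);
+ *	...
+ *	vmci_datagram_destroy_handle(h);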
+ */ +int vmci_datagram_destroy_handle(struct vmci_handle handle) +{ + struct datagram_entry *entry; + struct vmci_resource *resource; + + resource = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DATAGRAM); + if (!resource) { + pr_devel("Failed to destroy datagram (handle=0x%x:0x%x)\n", + handle.context, handle.resource); + return VMCI_ERROR_NOT_FOUND; + } + + entry = container_of(resource, struct datagram_entry, resource); + + vmci_resource_put(&entry->resource); + vmci_resource_remove(&entry->resource); + kfree(entry); + + return VMCI_SUCCESS; +} +EXPORT_SYMBOL_GPL(vmci_datagram_destroy_handle); + +/* + * vmci_datagram_send() - Send a datagram + * @msg: The datagram to send. + * + * Sends the provided datagram on its merry way. + */ +int vmci_datagram_send(struct vmci_datagram *msg) +{ + if (msg == NULL) + return VMCI_ERROR_INVALID_ARGS; + + return vmci_datagram_dispatch(VMCI_INVALID_ID, msg, false); +} +EXPORT_SYMBOL_GPL(vmci_datagram_send); diff --git a/drivers/misc/vmw_vmci/vmci_datagram.h b/drivers/misc/vmw_vmci/vmci_datagram.h new file mode 100644 index 000000000..b5b5b9286 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_datagram.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + */ + +#ifndef _VMCI_DATAGRAM_H_ +#define _VMCI_DATAGRAM_H_ + +#include <linux/types.h> +#include <linux/list.h> + +#include "vmci_context.h" + +#define VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE 256 + +/* + * The struct vmci_datagram_queue_entry is a queue header for the in-kernel VMCI + * datagram queues. It is allocated in non-paged memory, as the + * content is accessed while holding a spinlock. The pending datagram + * itself may be allocated from paged memory. We shadow the size of + * the datagram in the non-paged queue entry as this size is used + * while holding the same spinlock as above. + */ +struct vmci_datagram_queue_entry { + struct list_head list_item; /* For queuing. */ + size_t dg_size; /* Size of datagram. */ + struct vmci_datagram *dg; /* Pending datagram. */ +}; + +/* VMCIDatagramSendRecvInfo */ +struct vmci_datagram_snd_rcv_info { + u64 addr; + u32 len; + s32 result; +}; + +/* Datagram API for non-public use. */ +int vmci_datagram_dispatch(u32 context_id, struct vmci_datagram *dg, + bool from_guest); +int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg); + +#endif /* _VMCI_DATAGRAM_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c new file mode 100644 index 000000000..fa8a7fce4 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c @@ -0,0 +1,604 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/completion.h> +#include <linux/hash.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> + +#include "vmci_datagram.h" +#include "vmci_doorbell.h" +#include "vmci_resource.h" +#include "vmci_driver.h" +#include "vmci_route.h" + + +#define VMCI_DOORBELL_INDEX_BITS 6 +#define VMCI_DOORBELL_INDEX_TABLE_SIZE (1 << VMCI_DOORBELL_INDEX_BITS) +#define VMCI_DOORBELL_HASH(_idx) hash_32(_idx, VMCI_DOORBELL_INDEX_BITS) + +/* + * DoorbellEntry describes the a doorbell notification handle allocated by the + * host. 
+ */ +struct dbell_entry { + struct vmci_resource resource; + struct hlist_node node; + struct work_struct work; + vmci_callback notify_cb; + void *client_data; + u32 idx; + u32 priv_flags; + bool run_delayed; + atomic_t active; /* Only used by guest personality */ +}; + +/* The VMCI index table keeps track of currently registered doorbells. */ +struct dbell_index_table { + spinlock_t lock; /* Index table lock */ + struct hlist_head entries[VMCI_DOORBELL_INDEX_TABLE_SIZE]; +}; + +static struct dbell_index_table vmci_doorbell_it = { + .lock = __SPIN_LOCK_UNLOCKED(vmci_doorbell_it.lock), +}; + +/* + * The max_notify_idx is one larger than the currently known bitmap index in + * use, and is used to determine how much of the bitmap needs to be scanned. + */ +static u32 max_notify_idx; + +/* + * The notify_idx_count is used for determining whether there are free entries + * within the bitmap (if notify_idx_count + 1 < max_notify_idx). + */ +static u32 notify_idx_count; + +/* + * The last_notify_idx_reserved is used to track the last index handed out - in + * the case where multiple handles share a notification index, we hand out + * indexes round robin based on last_notify_idx_reserved. + */ +static u32 last_notify_idx_reserved; + +/* This is a one entry cache used to by the index allocation. */ +static u32 last_notify_idx_released = PAGE_SIZE; + + +/* + * Utility function that retrieves the privilege flags associated + * with a given doorbell handle. For guest endpoints, the + * privileges are determined by the context ID, but for host + * endpoints privileges are associated with the complete + * handle. Hypervisor endpoints are not yet supported. + */ +int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags) +{ + if (priv_flags == NULL || handle.context == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + if (handle.context == VMCI_HOST_CONTEXT_ID) { + struct dbell_entry *entry; + struct vmci_resource *resource; + + resource = vmci_resource_by_handle(handle, + VMCI_RESOURCE_TYPE_DOORBELL); + if (!resource) + return VMCI_ERROR_NOT_FOUND; + + entry = container_of(resource, struct dbell_entry, resource); + *priv_flags = entry->priv_flags; + vmci_resource_put(resource); + } else if (handle.context == VMCI_HYPERVISOR_CONTEXT_ID) { + /* + * Hypervisor endpoints for notifications are not + * supported (yet). + */ + return VMCI_ERROR_INVALID_ARGS; + } else { + *priv_flags = vmci_context_get_priv_flags(handle.context); + } + + return VMCI_SUCCESS; +} + +/* + * Find doorbell entry by bitmap index. + */ +static struct dbell_entry *dbell_index_table_find(u32 idx) +{ + u32 bucket = VMCI_DOORBELL_HASH(idx); + struct dbell_entry *dbell; + + hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], + node) { + if (idx == dbell->idx) + return dbell; + } + + return NULL; +} + +/* + * Add the given entry to the index table. This willi take a reference to the + * entry's resource so that the entry is not deleted before it is removed from + * the * table. + */ +static void dbell_index_table_add(struct dbell_entry *entry) +{ + u32 bucket; + u32 new_notify_idx; + + vmci_resource_get(&entry->resource); + + spin_lock_bh(&vmci_doorbell_it.lock); + + /* + * Below we try to allocate an index in the notification + * bitmap with "not too much" sharing between resources. If we + * use less that the full bitmap, we either add to the end if + * there are no unused flags within the currently used area, + * or we search for unused ones. 
If we use the full bitmap, we + * allocate the index round robin. + */ + if (max_notify_idx < PAGE_SIZE || notify_idx_count < PAGE_SIZE) { + if (last_notify_idx_released < max_notify_idx && + !dbell_index_table_find(last_notify_idx_released)) { + new_notify_idx = last_notify_idx_released; + last_notify_idx_released = PAGE_SIZE; + } else { + bool reused = false; + new_notify_idx = last_notify_idx_reserved; + if (notify_idx_count + 1 < max_notify_idx) { + do { + if (!dbell_index_table_find + (new_notify_idx)) { + reused = true; + break; + } + new_notify_idx = (new_notify_idx + 1) % + max_notify_idx; + } while (new_notify_idx != + last_notify_idx_released); + } + if (!reused) { + new_notify_idx = max_notify_idx; + max_notify_idx++; + } + } + } else { + new_notify_idx = (last_notify_idx_reserved + 1) % PAGE_SIZE; + } + + last_notify_idx_reserved = new_notify_idx; + notify_idx_count++; + + entry->idx = new_notify_idx; + bucket = VMCI_DOORBELL_HASH(entry->idx); + hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]); + + spin_unlock_bh(&vmci_doorbell_it.lock); +} + +/* + * Remove the given entry from the index table. This will release() the + * entry's resource. + */ +static void dbell_index_table_remove(struct dbell_entry *entry) +{ + spin_lock_bh(&vmci_doorbell_it.lock); + + hlist_del_init(&entry->node); + + notify_idx_count--; + if (entry->idx == max_notify_idx - 1) { + /* + * If we delete an entry with the maximum known + * notification index, we take the opportunity to + * prune the current max. As there might be other + * unused indices immediately below, we lower the + * maximum until we hit an index in use. + */ + while (max_notify_idx > 0 && + !dbell_index_table_find(max_notify_idx - 1)) + max_notify_idx--; + } + + last_notify_idx_released = entry->idx; + + spin_unlock_bh(&vmci_doorbell_it.lock); + + vmci_resource_put(&entry->resource); +} + +/* + * Creates a link between the given doorbell handle and the given + * index in the bitmap in the device backend. A notification state + * is created in hypervisor. + */ +static int dbell_link(struct vmci_handle handle, u32 notify_idx) +{ + struct vmci_doorbell_link_msg link_msg; + + link_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_DOORBELL_LINK); + link_msg.hdr.src = VMCI_ANON_SRC_HANDLE; + link_msg.hdr.payload_size = sizeof(link_msg) - VMCI_DG_HEADERSIZE; + link_msg.handle = handle; + link_msg.notify_idx = notify_idx; + + return vmci_send_datagram(&link_msg.hdr); +} + +/* + * Unlinks the given doorbell handle from an index in the bitmap in + * the device backend. The notification state is destroyed in hypervisor. + */ +static int dbell_unlink(struct vmci_handle handle) +{ + struct vmci_doorbell_unlink_msg unlink_msg; + + unlink_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_DOORBELL_UNLINK); + unlink_msg.hdr.src = VMCI_ANON_SRC_HANDLE; + unlink_msg.hdr.payload_size = sizeof(unlink_msg) - VMCI_DG_HEADERSIZE; + unlink_msg.handle = handle; + + return vmci_send_datagram(&unlink_msg.hdr); +} + +/* + * Notify another guest or the host. We send a datagram down to the + * host via the hypervisor with the notification info. 
+ */ +static int dbell_notify_as_guest(struct vmci_handle handle, u32 priv_flags) +{ + struct vmci_doorbell_notify_msg notify_msg; + + notify_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_DOORBELL_NOTIFY); + notify_msg.hdr.src = VMCI_ANON_SRC_HANDLE; + notify_msg.hdr.payload_size = sizeof(notify_msg) - VMCI_DG_HEADERSIZE; + notify_msg.handle = handle; + + return vmci_send_datagram(¬ify_msg.hdr); +} + +/* + * Calls the specified callback in a delayed context. + */ +static void dbell_delayed_dispatch(struct work_struct *work) +{ + struct dbell_entry *entry = container_of(work, + struct dbell_entry, work); + + entry->notify_cb(entry->client_data); + vmci_resource_put(&entry->resource); +} + +/* + * Dispatches a doorbell notification to the host context. + */ +int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle) +{ + struct dbell_entry *entry; + struct vmci_resource *resource; + + if (vmci_handle_is_invalid(handle)) { + pr_devel("Notifying an invalid doorbell (handle=0x%x:0x%x)\n", + handle.context, handle.resource); + return VMCI_ERROR_INVALID_ARGS; + } + + resource = vmci_resource_by_handle(handle, + VMCI_RESOURCE_TYPE_DOORBELL); + if (!resource) { + pr_devel("Notifying an unknown doorbell (handle=0x%x:0x%x)\n", + handle.context, handle.resource); + return VMCI_ERROR_NOT_FOUND; + } + + entry = container_of(resource, struct dbell_entry, resource); + if (entry->run_delayed) { + if (!schedule_work(&entry->work)) + vmci_resource_put(resource); + } else { + entry->notify_cb(entry->client_data); + vmci_resource_put(resource); + } + + return VMCI_SUCCESS; +} + +/* + * Register the notification bitmap with the host. + */ +bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn) +{ + int result; + struct vmci_notify_bm_set_msg bitmap_set_msg = { }; + + bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_SET_NOTIFY_BITMAP); + bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE; + bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) - + VMCI_DG_HEADERSIZE; + if (vmci_use_ppn64()) + bitmap_set_msg.bitmap_ppn64 = bitmap_ppn; + else + bitmap_set_msg.bitmap_ppn32 = (u32) bitmap_ppn; + + result = vmci_send_datagram(&bitmap_set_msg.hdr); + if (result != VMCI_SUCCESS) { + pr_devel("Failed to register (PPN=%llu) as notification bitmap (error=%d)\n", + bitmap_ppn, result); + return false; + } + return true; +} + +/* + * Executes or schedules the handlers for a given notify index. + */ +static void dbell_fire_entries(u32 notify_idx) +{ + u32 bucket = VMCI_DOORBELL_HASH(notify_idx); + struct dbell_entry *dbell; + + spin_lock_bh(&vmci_doorbell_it.lock); + + hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) { + if (dbell->idx == notify_idx && + atomic_read(&dbell->active) == 1) { + if (dbell->run_delayed) { + vmci_resource_get(&dbell->resource); + if (!schedule_work(&dbell->work)) + vmci_resource_put(&dbell->resource); + } else { + dbell->notify_cb(dbell->client_data); + } + } + } + + spin_unlock_bh(&vmci_doorbell_it.lock); +} + +/* + * Scans the notification bitmap, collects pending notifications, + * resets the bitmap and invokes appropriate callbacks. + */ +void vmci_dbell_scan_notification_entries(u8 *bitmap) +{ + u32 idx; + + for (idx = 0; idx < max_notify_idx; idx++) { + if (bitmap[idx] & 0x1) { + bitmap[idx] &= ~1; + dbell_fire_entries(idx); + } + } +} + +/* + * vmci_doorbell_create() - Creates a doorbell + * @handle: A handle used to track the resource. Can be invalid. 
+ * @flags: Flag that determines context of callback. + * @priv_flags: Privileges flags. + * @notify_cb: The callback to be ivoked when the doorbell fires. + * @client_data: A parameter to be passed to the callback. + * + * Creates a doorbell with the given callback. If the handle is + * VMCI_INVALID_HANDLE, a free handle will be assigned, if + * possible. The callback can be run immediately (potentially with + * locks held - the default) or delayed (in a kernel thread) by + * specifying the flag VMCI_FLAG_DELAYED_CB. If delayed execution + * is selected, a given callback may not be run if the kernel is + * unable to allocate memory for the delayed execution (highly + * unlikely). + */ +int vmci_doorbell_create(struct vmci_handle *handle, + u32 flags, + u32 priv_flags, + vmci_callback notify_cb, void *client_data) +{ + struct dbell_entry *entry; + struct vmci_handle new_handle; + int result; + + if (!handle || !notify_cb || flags & ~VMCI_FLAG_DELAYED_CB || + priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) + return VMCI_ERROR_INVALID_ARGS; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (entry == NULL) { + pr_warn("Failed allocating memory for datagram entry\n"); + return VMCI_ERROR_NO_MEM; + } + + if (vmci_handle_is_invalid(*handle)) { + u32 context_id = vmci_get_context_id(); + + if (context_id == VMCI_INVALID_ID) { + pr_warn("Failed to get context ID\n"); + result = VMCI_ERROR_NO_RESOURCES; + goto free_mem; + } + + /* Let resource code allocate a free ID for us */ + new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID); + } else { + bool valid_context = false; + + /* + * Validate the handle. We must do both of the checks below + * because we can be acting as both a host and a guest at the + * same time. We always allow the host context ID, since the + * host functionality is in practice always there with the + * unified driver. + */ + if (handle->context == VMCI_HOST_CONTEXT_ID || + (vmci_guest_code_active() && + vmci_get_context_id() == handle->context)) { + valid_context = true; + } + + if (!valid_context || handle->resource == VMCI_INVALID_ID) { + pr_devel("Invalid argument (handle=0x%x:0x%x)\n", + handle->context, handle->resource); + result = VMCI_ERROR_INVALID_ARGS; + goto free_mem; + } + + new_handle = *handle; + } + + entry->idx = 0; + INIT_HLIST_NODE(&entry->node); + entry->priv_flags = priv_flags; + INIT_WORK(&entry->work, dbell_delayed_dispatch); + entry->run_delayed = flags & VMCI_FLAG_DELAYED_CB; + entry->notify_cb = notify_cb; + entry->client_data = client_data; + atomic_set(&entry->active, 0); + + result = vmci_resource_add(&entry->resource, + VMCI_RESOURCE_TYPE_DOORBELL, + new_handle); + if (result != VMCI_SUCCESS) { + pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n", + new_handle.context, new_handle.resource, result); + goto free_mem; + } + + new_handle = vmci_resource_handle(&entry->resource); + if (vmci_guest_code_active()) { + dbell_index_table_add(entry); + result = dbell_link(new_handle, entry->idx); + if (VMCI_SUCCESS != result) + goto destroy_resource; + + atomic_set(&entry->active, 1); + } + + *handle = new_handle; + + return result; + + destroy_resource: + dbell_index_table_remove(entry); + vmci_resource_remove(&entry->resource); + free_mem: + kfree(entry); + return result; +} +EXPORT_SYMBOL_GPL(vmci_doorbell_create); + +/* + * vmci_doorbell_destroy() - Destroy a doorbell. + * @handle: The handle tracking the resource. + * + * Destroys a doorbell previously created with vmcii_doorbell_create. 
This + * operation may block waiting for a callback to finish. + */ +int vmci_doorbell_destroy(struct vmci_handle handle) +{ + struct dbell_entry *entry; + struct vmci_resource *resource; + + if (vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + resource = vmci_resource_by_handle(handle, + VMCI_RESOURCE_TYPE_DOORBELL); + if (!resource) { + pr_devel("Failed to destroy doorbell (handle=0x%x:0x%x)\n", + handle.context, handle.resource); + return VMCI_ERROR_NOT_FOUND; + } + + entry = container_of(resource, struct dbell_entry, resource); + + if (!hlist_unhashed(&entry->node)) { + int result; + + dbell_index_table_remove(entry); + + result = dbell_unlink(handle); + if (VMCI_SUCCESS != result) { + + /* + * The only reason this should fail would be + * an inconsistency between guest and + * hypervisor state, where the guest believes + * it has an active registration whereas the + * hypervisor doesn't. One case where this may + * happen is if a doorbell is unregistered + * following a hibernation at a time where the + * doorbell state hasn't been restored on the + * hypervisor side yet. Since the handle has + * now been removed in the guest, we just + * print a warning and return success. + */ + pr_devel("Unlink of doorbell (handle=0x%x:0x%x) unknown by hypervisor (error=%d)\n", + handle.context, handle.resource, result); + } + } + + /* + * Now remove the resource from the table. It might still be in use + * after this, in a callback or still on the delayed work queue. + */ + vmci_resource_put(&entry->resource); + vmci_resource_remove(&entry->resource); + + kfree(entry); + + return VMCI_SUCCESS; +} +EXPORT_SYMBOL_GPL(vmci_doorbell_destroy); + +/* + * vmci_doorbell_notify() - Ring the doorbell (and hide in the bushes). + * @dst: The handlle identifying the doorbell resource + * @priv_flags: Priviledge flags. + * + * Generates a notification on the doorbell identified by the + * handle. For host side generation of notifications, the caller + * can specify what the privilege of the calling side is. + */ +int vmci_doorbell_notify(struct vmci_handle dst, u32 priv_flags) +{ + int retval; + enum vmci_route route; + struct vmci_handle src; + + if (vmci_handle_is_invalid(dst) || + (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)) + return VMCI_ERROR_INVALID_ARGS; + + src = VMCI_INVALID_HANDLE; + retval = vmci_route(&src, &dst, false, &route); + if (retval < VMCI_SUCCESS) + return retval; + + if (VMCI_ROUTE_AS_HOST == route) + return vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID, + dst, priv_flags); + + if (VMCI_ROUTE_AS_GUEST == route) + return dbell_notify_as_guest(dst, priv_flags); + + pr_warn("Unknown route (%d) for doorbell\n", route); + return VMCI_ERROR_DST_UNREACHABLE; +} +EXPORT_SYMBOL_GPL(vmci_doorbell_notify); diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.h b/drivers/misc/vmw_vmci/vmci_doorbell.h new file mode 100644 index 000000000..1dfb388d0 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_doorbell.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + */ + +#ifndef VMCI_DOORBELL_H +#define VMCI_DOORBELL_H + +#include <linux/vmw_vmci_defs.h> +#include <linux/types.h> + +#include "vmci_driver.h" + +/* + * VMCINotifyResourceInfo: Used to create and destroy doorbells, and + * generate a notification for a doorbell or queue pair. 
+ */ +struct vmci_dbell_notify_resource_info { + struct vmci_handle handle; + u16 resource; + u16 action; + s32 result; +}; + +/* + * Structure used for checkpointing the doorbell mappings. It is + * written to the checkpoint as is, so changing this structure will + * break checkpoint compatibility. + */ +struct dbell_cpt_state { + struct vmci_handle handle; + u64 bitmap_idx; +}; + +int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle); +int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags); + +bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn); +void vmci_dbell_scan_notification_entries(u8 *bitmap); + +#endif /* VMCI_DOORBELL_H */ diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c new file mode 100644 index 000000000..cbb706dab --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_driver.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/atomic.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> + +#include "vmci_driver.h" +#include "vmci_event.h" + +static bool vmci_disable_host; +module_param_named(disable_host, vmci_disable_host, bool, 0); +MODULE_PARM_DESC(disable_host, + "Disable driver host personality (default=enabled)"); + +static bool vmci_disable_guest; +module_param_named(disable_guest, vmci_disable_guest, bool, 0); +MODULE_PARM_DESC(disable_guest, + "Disable driver guest personality (default=enabled)"); + +static bool vmci_guest_personality_initialized; +static bool vmci_host_personality_initialized; + +static DEFINE_MUTEX(vmci_vsock_mutex); /* protects vmci_vsock_transport_cb */ +static vmci_vsock_cb vmci_vsock_transport_cb; +static bool vmci_vsock_cb_host_called; + +/* + * vmci_get_context_id() - Gets the current context ID. + * + * Returns the current context ID. Note that since this is accessed only + * from code running in the host, this always returns the host context ID. + */ +u32 vmci_get_context_id(void) +{ + if (vmci_guest_code_active()) + return vmci_get_vm_context_id(); + else if (vmci_host_code_active()) + return VMCI_HOST_CONTEXT_ID; + + return VMCI_INVALID_ID; +} +EXPORT_SYMBOL_GPL(vmci_get_context_id); + +/* + * vmci_register_vsock_callback() - Register the VSOCK vmci_transport callback. + * + * The callback will be called when the first host or guest becomes active, + * or if they are already active when this function is called. + * To unregister the callback, call this function with NULL parameter. + * + * Returns 0 on success. -EBUSY if a callback is already registered. 
+ */ +int vmci_register_vsock_callback(vmci_vsock_cb callback) +{ + int err = 0; + + mutex_lock(&vmci_vsock_mutex); + + if (vmci_vsock_transport_cb && callback) { + err = -EBUSY; + goto out; + } + + vmci_vsock_transport_cb = callback; + + if (!vmci_vsock_transport_cb) { + vmci_vsock_cb_host_called = false; + goto out; + } + + if (vmci_guest_code_active()) + vmci_vsock_transport_cb(false); + + if (vmci_host_users() > 0) { + vmci_vsock_cb_host_called = true; + vmci_vsock_transport_cb(true); + } + +out: + mutex_unlock(&vmci_vsock_mutex); + return err; +} +EXPORT_SYMBOL_GPL(vmci_register_vsock_callback); + +void vmci_call_vsock_callback(bool is_host) +{ + mutex_lock(&vmci_vsock_mutex); + + if (!vmci_vsock_transport_cb) + goto out; + + /* In the host, this function could be called multiple times, + * but we want to register it only once. + */ + if (is_host) { + if (vmci_vsock_cb_host_called) + goto out; + + vmci_vsock_cb_host_called = true; + } + + vmci_vsock_transport_cb(is_host); +out: + mutex_unlock(&vmci_vsock_mutex); +} + +static int __init vmci_drv_init(void) +{ + int vmci_err; + int error; + + vmci_err = vmci_event_init(); + if (vmci_err < VMCI_SUCCESS) { + pr_err("Failed to initialize VMCIEvent (result=%d)\n", + vmci_err); + return -EINVAL; + } + + if (!vmci_disable_guest) { + error = vmci_guest_init(); + if (error) { + pr_warn("Failed to initialize guest personality (err=%d)\n", + error); + } else { + vmci_guest_personality_initialized = true; + pr_info("Guest personality initialized and is %s\n", + vmci_guest_code_active() ? + "active" : "inactive"); + } + } + + if (!vmci_disable_host) { + error = vmci_host_init(); + if (error) { + pr_warn("Unable to initialize host personality (err=%d)\n", + error); + } else { + vmci_host_personality_initialized = true; + pr_info("Initialized host personality\n"); + } + } + + if (!vmci_guest_personality_initialized && + !vmci_host_personality_initialized) { + vmci_event_exit(); + return -ENODEV; + } + + return 0; +} +module_init(vmci_drv_init); + +static void __exit vmci_drv_exit(void) +{ + if (vmci_guest_personality_initialized) + vmci_guest_exit(); + + if (vmci_host_personality_initialized) + vmci_host_exit(); + + vmci_event_exit(); +} +module_exit(vmci_drv_exit); + +MODULE_AUTHOR("VMware, Inc."); +MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface."); +MODULE_VERSION("1.1.6.0-k"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h new file mode 100644 index 000000000..990682480 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_driver.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + */ + +#ifndef _VMCI_DRIVER_H_ +#define _VMCI_DRIVER_H_ + +#include <linux/vmw_vmci_defs.h> +#include <linux/wait.h> + +#include "vmci_queue_pair.h" +#include "vmci_context.h" + +enum vmci_obj_type { + VMCIOBJ_VMX_VM = 10, + VMCIOBJ_CONTEXT, + VMCIOBJ_SOCKET, + VMCIOBJ_NOT_SET, +}; + +/* For storing VMCI structures in file handles. */ +struct vmci_obj { + void *ptr; + enum vmci_obj_type type; +}; + +/* + * Needed by other components of this module. It's okay to have one global + * instance of this because there can only ever be one VMCI device. Our + * virtual hardware enforces this. 
+ */ +extern struct pci_dev *vmci_pdev; + +u32 vmci_get_context_id(void); +int vmci_send_datagram(struct vmci_datagram *dg); +void vmci_call_vsock_callback(bool is_host); + +int vmci_host_init(void); +void vmci_host_exit(void); +bool vmci_host_code_active(void); +int vmci_host_users(void); + +int vmci_guest_init(void); +void vmci_guest_exit(void); +bool vmci_guest_code_active(void); +u32 vmci_get_vm_context_id(void); + +bool vmci_use_ppn64(void); + +#endif /* _VMCI_DRIVER_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c new file mode 100644 index 000000000..e3436abf3 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_event.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/rculist.h> + +#include "vmci_driver.h" +#include "vmci_event.h" + +#define EVENT_MAGIC 0xEABE0000 +#define VMCI_EVENT_MAX_ATTEMPTS 10 + +struct vmci_subscription { + u32 id; + u32 event; + vmci_event_cb callback; + void *callback_data; + struct list_head node; /* on one of subscriber lists */ +}; + +static struct list_head subscriber_array[VMCI_EVENT_MAX]; +static DEFINE_MUTEX(subscriber_mutex); + +int __init vmci_event_init(void) +{ + int i; + + for (i = 0; i < VMCI_EVENT_MAX; i++) + INIT_LIST_HEAD(&subscriber_array[i]); + + return VMCI_SUCCESS; +} + +void vmci_event_exit(void) +{ + int e; + + /* We free all memory at exit. */ + for (e = 0; e < VMCI_EVENT_MAX; e++) { + struct vmci_subscription *cur, *p2; + list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) { + + /* + * We should never get here because all events + * should have been unregistered before we try + * to unload the driver module. + */ + pr_warn("Unexpected free events occurring\n"); + list_del(&cur->node); + kfree(cur); + } + } +} + +/* + * Find entry. Assumes subscriber_mutex is held. + */ +static struct vmci_subscription *event_find(u32 sub_id) +{ + int e; + + for (e = 0; e < VMCI_EVENT_MAX; e++) { + struct vmci_subscription *cur; + list_for_each_entry(cur, &subscriber_array[e], node) { + if (cur->id == sub_id) + return cur; + } + } + return NULL; +} + +/* + * Actually delivers the events to the subscribers. + * The callback function for each subscriber is invoked. + */ +static void event_deliver(struct vmci_event_msg *event_msg) +{ + struct vmci_subscription *cur; + struct list_head *subscriber_list; + + rcu_read_lock(); + subscriber_list = &subscriber_array[event_msg->event_data.event]; + list_for_each_entry_rcu(cur, subscriber_list, node) { + cur->callback(cur->id, &event_msg->event_data, + cur->callback_data); + } + rcu_read_unlock(); +} + +/* + * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all + * subscribers for given event. + */ +int vmci_event_dispatch(struct vmci_datagram *msg) +{ + struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg; + + if (msg->payload_size < sizeof(u32) || + msg->payload_size > sizeof(struct vmci_event_data_max)) + return VMCI_ERROR_INVALID_ARGS; + + if (!VMCI_EVENT_VALID(event_msg->event_data.event)) + return VMCI_ERROR_EVENT_UNKNOWN; + + event_deliver(event_msg); + return VMCI_SUCCESS; +} + +/* + * vmci_event_subscribe() - Subscribe to a given event. + * @event: The event to subscribe to. + * @callback: The callback to invoke upon the event. 
+ * @callback_data: Data to pass to the callback. + * @subscription_id: ID used to track subscription. Used with + * vmci_event_unsubscribe() + * + * Subscribes to the provided event. The callback specified will be + * fired from RCU critical section and therefore must not sleep. + */ +int vmci_event_subscribe(u32 event, + vmci_event_cb callback, + void *callback_data, + u32 *new_subscription_id) +{ + struct vmci_subscription *sub; + int attempts; + int retval; + bool have_new_id = false; + + if (!new_subscription_id) { + pr_devel("%s: Invalid subscription (NULL)\n", __func__); + return VMCI_ERROR_INVALID_ARGS; + } + + if (!VMCI_EVENT_VALID(event) || !callback) { + pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n", + __func__, event, callback, callback_data); + return VMCI_ERROR_INVALID_ARGS; + } + + sub = kzalloc(sizeof(*sub), GFP_KERNEL); + if (!sub) + return VMCI_ERROR_NO_MEM; + + sub->id = VMCI_EVENT_MAX; + sub->event = event; + sub->callback = callback; + sub->callback_data = callback_data; + INIT_LIST_HEAD(&sub->node); + + mutex_lock(&subscriber_mutex); + + /* Creation of a new event is always allowed. */ + for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) { + static u32 subscription_id; + /* + * We try to get an id a couple of time before + * claiming we are out of resources. + */ + + /* Test for duplicate id. */ + if (!event_find(++subscription_id)) { + sub->id = subscription_id; + have_new_id = true; + break; + } + } + + if (have_new_id) { + list_add_rcu(&sub->node, &subscriber_array[event]); + retval = VMCI_SUCCESS; + } else { + retval = VMCI_ERROR_NO_RESOURCES; + } + + mutex_unlock(&subscriber_mutex); + + *new_subscription_id = sub->id; + return retval; +} +EXPORT_SYMBOL_GPL(vmci_event_subscribe); + +/* + * vmci_event_unsubscribe() - unsubscribe from an event. + * @sub_id: A subscription ID as provided by vmci_event_subscribe() + * + * Unsubscribe from given event. Removes it from list and frees it. + * Will return callback_data if requested by caller. + */ +int vmci_event_unsubscribe(u32 sub_id) +{ + struct vmci_subscription *s; + + mutex_lock(&subscriber_mutex); + s = event_find(sub_id); + if (s) + list_del_rcu(&s->node); + mutex_unlock(&subscriber_mutex); + + if (!s) + return VMCI_ERROR_NOT_FOUND; + + synchronize_rcu(); + kfree(s); + + return VMCI_SUCCESS; +} +EXPORT_SYMBOL_GPL(vmci_event_unsubscribe); diff --git a/drivers/misc/vmw_vmci/vmci_event.h b/drivers/misc/vmw_vmci/vmci_event.h new file mode 100644 index 000000000..89cd0117d --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_event.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + */ + +#ifndef __VMCI_EVENT_H__ +#define __VMCI_EVENT_H__ + +#include <linux/vmw_vmci_api.h> + +int vmci_event_init(void); +void vmci_event_exit(void); +int vmci_event_dispatch(struct vmci_datagram *msg); + +#endif /*__VMCI_EVENT_H__ */ diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c new file mode 100644 index 000000000..1018dc772 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -0,0 +1,755 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. 
+ */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/moduleparam.h> +#include <linux/interrupt.h> +#include <linux/highmem.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/smp.h> +#include <linux/io.h> +#include <linux/vmalloc.h> + +#include "vmci_datagram.h" +#include "vmci_doorbell.h" +#include "vmci_context.h" +#include "vmci_driver.h" +#include "vmci_event.h" + +#define PCI_DEVICE_ID_VMWARE_VMCI 0x0740 + +#define VMCI_UTIL_NUM_RESOURCES 1 + +static bool vmci_disable_msi; +module_param_named(disable_msi, vmci_disable_msi, bool, 0); +MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); + +static bool vmci_disable_msix; +module_param_named(disable_msix, vmci_disable_msix, bool, 0); +MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)"); + +static u32 ctx_update_sub_id = VMCI_INVALID_ID; +static u32 vm_context_id = VMCI_INVALID_ID; + +struct vmci_guest_device { + struct device *dev; /* PCI device we are attached to */ + void __iomem *iobase; + + bool exclusive_vectors; + + struct tasklet_struct datagram_tasklet; + struct tasklet_struct bm_tasklet; + + void *data_buffer; + void *notification_bitmap; + dma_addr_t notification_base; +}; + +static bool use_ppn64; + +bool vmci_use_ppn64(void) +{ + return use_ppn64; +} + +/* vmci_dev singleton device and supporting data*/ +struct pci_dev *vmci_pdev; +static struct vmci_guest_device *vmci_dev_g; +static DEFINE_SPINLOCK(vmci_dev_spinlock); + +static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0); + +bool vmci_guest_code_active(void) +{ + return atomic_read(&vmci_num_guest_devices) != 0; +} + +u32 vmci_get_vm_context_id(void) +{ + if (vm_context_id == VMCI_INVALID_ID) { + struct vmci_datagram get_cid_msg; + get_cid_msg.dst = + vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_GET_CONTEXT_ID); + get_cid_msg.src = VMCI_ANON_SRC_HANDLE; + get_cid_msg.payload_size = 0; + vm_context_id = vmci_send_datagram(&get_cid_msg); + } + return vm_context_id; +} + +/* + * VM to hypervisor call mechanism. We use the standard VMware naming + * convention since shared code is calling this function as well. + */ +int vmci_send_datagram(struct vmci_datagram *dg) +{ + unsigned long flags; + int result; + + /* Check args. */ + if (dg == NULL) + return VMCI_ERROR_INVALID_ARGS; + + /* + * Need to acquire spinlock on the device because the datagram + * data may be spread over multiple pages and the monitor may + * interleave device user rpc calls from multiple + * VCPUs. Acquiring the spinlock precludes that + * possibility. Disabling interrupts to avoid incoming + * datagrams during a "rep out" and possibly landing up in + * this function. + */ + spin_lock_irqsave(&vmci_dev_spinlock, flags); + + if (vmci_dev_g) { + iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR, + dg, VMCI_DG_SIZE(dg)); + result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR); + } else { + result = VMCI_ERROR_UNAVAILABLE; + } + + spin_unlock_irqrestore(&vmci_dev_spinlock, flags); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_send_datagram); + +/* + * Gets called with the new context id if updated or resumed. + * Context id. 
+ */ +static void vmci_guest_cid_update(u32 sub_id, + const struct vmci_event_data *event_data, + void *client_data) +{ + const struct vmci_event_payld_ctx *ev_payload = + vmci_event_data_const_payload(event_data); + + if (sub_id != ctx_update_sub_id) { + pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id); + return; + } + + if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) { + pr_devel("Invalid event data\n"); + return; + } + + pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n", + vm_context_id, ev_payload->context_id, event_data->event); + + vm_context_id = ev_payload->context_id; +} + +/* + * Verify that the host supports the hypercalls we need. If it does not, + * try to find fallback hypercalls and use those instead. Returns + * true if required hypercalls (or fallback hypercalls) are + * supported by the host, false otherwise. + */ +static int vmci_check_host_caps(struct pci_dev *pdev) +{ + bool result; + struct vmci_resource_query_msg *msg; + u32 msg_size = sizeof(struct vmci_resource_query_hdr) + + VMCI_UTIL_NUM_RESOURCES * sizeof(u32); + struct vmci_datagram *check_msg; + + check_msg = kzalloc(msg_size, GFP_KERNEL); + if (!check_msg) { + dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__); + return -ENOMEM; + } + + check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_RESOURCES_QUERY); + check_msg->src = VMCI_ANON_SRC_HANDLE; + check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE; + msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg); + + msg->num_resources = VMCI_UTIL_NUM_RESOURCES; + msg->resources[0] = VMCI_GET_CONTEXT_ID; + + /* Checks that hyper calls are supported */ + result = vmci_send_datagram(check_msg) == 0x01; + kfree(check_msg); + + dev_dbg(&pdev->dev, "%s: Host capability check: %s\n", + __func__, result ? "PASSED" : "FAILED"); + + /* We need the vector. There are no fallbacks. */ + return result ? 0 : -ENXIO; +} + +/* + * Reads datagrams from the data in port and dispatches them. We + * always start reading datagrams into only the first page of the + * datagram buffer. If the datagrams don't fit into one page, we + * use the maximum datagram buffer size for the remainder of the + * invocation. This is a simple heuristic for not penalizing + * small datagrams. + * + * This function assumes that it has exclusive access to the data + * in port for the duration of the call. + */ +static void vmci_dispatch_dgs(unsigned long data) +{ + struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data; + u8 *dg_in_buffer = vmci_dev->data_buffer; + struct vmci_datagram *dg; + size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE; + size_t current_dg_in_buffer_size = PAGE_SIZE; + size_t remaining_bytes; + + BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE); + + ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR, + vmci_dev->data_buffer, current_dg_in_buffer_size); + dg = (struct vmci_datagram *)dg_in_buffer; + remaining_bytes = current_dg_in_buffer_size; + + while (dg->dst.resource != VMCI_INVALID_ID || + remaining_bytes > PAGE_SIZE) { + unsigned dg_in_size; + + /* + * When the input buffer spans multiple pages, a datagram can + * start on any page boundary in the buffer. 
+ */ + if (dg->dst.resource == VMCI_INVALID_ID) { + dg = (struct vmci_datagram *)roundup( + (uintptr_t)dg + 1, PAGE_SIZE); + remaining_bytes = + (size_t)(dg_in_buffer + + current_dg_in_buffer_size - + (u8 *)dg); + continue; + } + + dg_in_size = VMCI_DG_SIZE_ALIGNED(dg); + + if (dg_in_size <= dg_in_buffer_size) { + int result; + + /* + * If the remaining bytes in the datagram + * buffer doesn't contain the complete + * datagram, we first make sure we have enough + * room for it and then we read the reminder + * of the datagram and possibly any following + * datagrams. + */ + if (dg_in_size > remaining_bytes) { + if (remaining_bytes != + current_dg_in_buffer_size) { + + /* + * We move the partial + * datagram to the front and + * read the reminder of the + * datagram and possibly + * following calls into the + * following bytes. + */ + memmove(dg_in_buffer, dg_in_buffer + + current_dg_in_buffer_size - + remaining_bytes, + remaining_bytes); + dg = (struct vmci_datagram *) + dg_in_buffer; + } + + if (current_dg_in_buffer_size != + dg_in_buffer_size) + current_dg_in_buffer_size = + dg_in_buffer_size; + + ioread8_rep(vmci_dev->iobase + + VMCI_DATA_IN_ADDR, + vmci_dev->data_buffer + + remaining_bytes, + current_dg_in_buffer_size - + remaining_bytes); + } + + /* + * We special case event datagrams from the + * hypervisor. + */ + if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID && + dg->dst.resource == VMCI_EVENT_HANDLER) { + result = vmci_event_dispatch(dg); + } else { + result = vmci_datagram_invoke_guest_handler(dg); + } + if (result < VMCI_SUCCESS) + dev_dbg(vmci_dev->dev, + "Datagram with resource (ID=0x%x) failed (err=%d)\n", + dg->dst.resource, result); + + /* On to the next datagram. */ + dg = (struct vmci_datagram *)((u8 *)dg + + dg_in_size); + } else { + size_t bytes_to_skip; + + /* + * Datagram doesn't fit in datagram buffer of maximal + * size. We drop it. + */ + dev_dbg(vmci_dev->dev, + "Failed to receive datagram (size=%u bytes)\n", + dg_in_size); + + bytes_to_skip = dg_in_size - remaining_bytes; + if (current_dg_in_buffer_size != dg_in_buffer_size) + current_dg_in_buffer_size = dg_in_buffer_size; + + for (;;) { + ioread8_rep(vmci_dev->iobase + + VMCI_DATA_IN_ADDR, + vmci_dev->data_buffer, + current_dg_in_buffer_size); + if (bytes_to_skip <= current_dg_in_buffer_size) + break; + + bytes_to_skip -= current_dg_in_buffer_size; + } + dg = (struct vmci_datagram *)(dg_in_buffer + + bytes_to_skip); + } + + remaining_bytes = + (size_t) (dg_in_buffer + current_dg_in_buffer_size - + (u8 *)dg); + + if (remaining_bytes < VMCI_DG_HEADERSIZE) { + /* Get the next batch of datagrams. */ + + ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR, + vmci_dev->data_buffer, + current_dg_in_buffer_size); + dg = (struct vmci_datagram *)dg_in_buffer; + remaining_bytes = current_dg_in_buffer_size; + } + } +} + +/* + * Scans the notification bitmap for raised flags, clears them + * and handles the notifications. + */ +static void vmci_process_bitmap(unsigned long data) +{ + struct vmci_guest_device *dev = (struct vmci_guest_device *)data; + + if (!dev->notification_bitmap) { + dev_dbg(dev->dev, "No bitmap present in %s\n", __func__); + return; + } + + vmci_dbell_scan_notification_entries(dev->notification_bitmap); +} + +/* + * Interrupt handler for legacy or MSI interrupt, or for first MSI-X + * interrupt (vector VMCI_INTR_DATAGRAM). 
+ */ +static irqreturn_t vmci_interrupt(int irq, void *_dev) +{ + struct vmci_guest_device *dev = _dev; + + /* + * If we are using MSI-X with exclusive vectors then we simply schedule + * the datagram tasklet, since we know the interrupt was meant for us. + * Otherwise we must read the ICR to determine what to do. + */ + + if (dev->exclusive_vectors) { + tasklet_schedule(&dev->datagram_tasklet); + } else { + unsigned int icr; + + /* Acknowledge interrupt and determine what needs doing. */ + icr = ioread32(dev->iobase + VMCI_ICR_ADDR); + if (icr == 0 || icr == ~0) + return IRQ_NONE; + + if (icr & VMCI_ICR_DATAGRAM) { + tasklet_schedule(&dev->datagram_tasklet); + icr &= ~VMCI_ICR_DATAGRAM; + } + + if (icr & VMCI_ICR_NOTIFICATION) { + tasklet_schedule(&dev->bm_tasklet); + icr &= ~VMCI_ICR_NOTIFICATION; + } + + if (icr != 0) + dev_warn(dev->dev, + "Ignoring unknown interrupt cause (%d)\n", + icr); + } + + return IRQ_HANDLED; +} + +/* + * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION, + * which is for the notification bitmap. Will only get called if we are + * using MSI-X with exclusive vectors. + */ +static irqreturn_t vmci_interrupt_bm(int irq, void *_dev) +{ + struct vmci_guest_device *dev = _dev; + + /* For MSI-X we can just assume it was meant for us. */ + tasklet_schedule(&dev->bm_tasklet); + + return IRQ_HANDLED; +} + +/* + * Most of the initialization at module load time is done here. + */ +static int vmci_guest_probe_device(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct vmci_guest_device *vmci_dev; + void __iomem *iobase; + unsigned int capabilities; + unsigned int caps_in_use; + unsigned long cmd; + int vmci_err; + int error; + + dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n"); + + error = pcim_enable_device(pdev); + if (error) { + dev_err(&pdev->dev, + "Failed to enable VMCI device: %d\n", error); + return error; + } + + error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME); + if (error) { + dev_err(&pdev->dev, "Failed to reserve/map IO regions\n"); + return error; + } + + iobase = pcim_iomap_table(pdev)[0]; + + dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n", + (unsigned long)iobase, pdev->irq); + + vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL); + if (!vmci_dev) { + dev_err(&pdev->dev, + "Can't allocate memory for VMCI device\n"); + return -ENOMEM; + } + + vmci_dev->dev = &pdev->dev; + vmci_dev->exclusive_vectors = false; + vmci_dev->iobase = iobase; + + tasklet_init(&vmci_dev->datagram_tasklet, + vmci_dispatch_dgs, (unsigned long)vmci_dev); + tasklet_init(&vmci_dev->bm_tasklet, + vmci_process_bitmap, (unsigned long)vmci_dev); + + vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE); + if (!vmci_dev->data_buffer) { + dev_err(&pdev->dev, + "Can't allocate memory for datagram buffer\n"); + return -ENOMEM; + } + + pci_set_master(pdev); /* To enable queue_pair functionality. */ + + /* + * Verify that the VMCI Device supports the capabilities that + * we need. If the device is missing capabilities that we would + * like to use, check for fallback capabilities and use those + * instead (so we can run a new VM on old hosts). Fail the load if + * a required capability is missing and there is no fallback. + * + * Right now, we need datagrams. There are no fallbacks. 
+ */ + capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR); + if (!(capabilities & VMCI_CAPS_DATAGRAM)) { + dev_err(&pdev->dev, "Device does not support datagrams\n"); + error = -ENXIO; + goto err_free_data_buffer; + } + caps_in_use = VMCI_CAPS_DATAGRAM; + + /* + * Use 64-bit PPNs if the device supports. + * + * There is no check for the return value of dma_set_mask_and_coherent + * since this driver can handle the default mask values if + * dma_set_mask_and_coherent fails. + */ + if (capabilities & VMCI_CAPS_PPN64) { + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + use_ppn64 = true; + caps_in_use |= VMCI_CAPS_PPN64; + } else { + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); + use_ppn64 = false; + } + + /* + * If the hardware supports notifications, we will use that as + * well. + */ + if (capabilities & VMCI_CAPS_NOTIFICATIONS) { + vmci_dev->notification_bitmap = dma_alloc_coherent( + &pdev->dev, PAGE_SIZE, &vmci_dev->notification_base, + GFP_KERNEL); + if (!vmci_dev->notification_bitmap) { + dev_warn(&pdev->dev, + "Unable to allocate notification bitmap\n"); + } else { + memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE); + caps_in_use |= VMCI_CAPS_NOTIFICATIONS; + } + } + + dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use); + + /* Let the host know which capabilities we intend to use. */ + iowrite32(caps_in_use, vmci_dev->iobase + VMCI_CAPS_ADDR); + + /* Set up global device so that we can start sending datagrams */ + spin_lock_irq(&vmci_dev_spinlock); + vmci_dev_g = vmci_dev; + vmci_pdev = pdev; + spin_unlock_irq(&vmci_dev_spinlock); + + /* + * Register notification bitmap with device if that capability is + * used. + */ + if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) { + unsigned long bitmap_ppn = + vmci_dev->notification_base >> PAGE_SHIFT; + if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) { + dev_warn(&pdev->dev, + "VMCI device unable to register notification bitmap with PPN 0x%lx\n", + bitmap_ppn); + error = -ENXIO; + goto err_remove_vmci_dev_g; + } + } + + /* Check host capabilities. */ + error = vmci_check_host_caps(pdev); + if (error) + goto err_remove_bitmap; + + /* Enable device. */ + + /* + * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can + * update the internal context id when needed. + */ + vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE, + vmci_guest_cid_update, NULL, + &ctx_update_sub_id); + if (vmci_err < VMCI_SUCCESS) + dev_warn(&pdev->dev, + "Failed to subscribe to event (type=%d): %d\n", + VMCI_EVENT_CTX_ID_UPDATE, vmci_err); + + /* + * Enable interrupts. Try MSI-X first, then MSI, and then fallback on + * legacy interrupts. + */ + error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS, + PCI_IRQ_MSIX); + if (error < 0) { + error = pci_alloc_irq_vectors(pdev, 1, 1, + PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY); + if (error < 0) + goto err_remove_bitmap; + } else { + vmci_dev->exclusive_vectors = true; + } + + /* + * Request IRQ for legacy or MSI interrupts, or for first + * MSI-X vector. + */ + error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt, + IRQF_SHARED, KBUILD_MODNAME, vmci_dev); + if (error) { + dev_err(&pdev->dev, "Irq %u in use: %d\n", + pci_irq_vector(pdev, 0), error); + goto err_disable_msi; + } + + /* + * For MSI-X with exclusive vectors we need to request an + * interrupt for each vector so that we get a separate + * interrupt handler routine. This allows us to distinguish + * between the vectors. 
+ */ + if (vmci_dev->exclusive_vectors) { + error = request_irq(pci_irq_vector(pdev, 1), + vmci_interrupt_bm, 0, KBUILD_MODNAME, + vmci_dev); + if (error) { + dev_err(&pdev->dev, + "Failed to allocate irq %u: %d\n", + pci_irq_vector(pdev, 1), error); + goto err_free_irq; + } + } + + dev_dbg(&pdev->dev, "Registered device\n"); + + atomic_inc(&vmci_num_guest_devices); + + /* Enable specific interrupt bits. */ + cmd = VMCI_IMR_DATAGRAM; + if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) + cmd |= VMCI_IMR_NOTIFICATION; + iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR); + + /* Enable interrupts. */ + iowrite32(VMCI_CONTROL_INT_ENABLE, + vmci_dev->iobase + VMCI_CONTROL_ADDR); + + pci_set_drvdata(pdev, vmci_dev); + + vmci_call_vsock_callback(false); + return 0; + +err_free_irq: + free_irq(pci_irq_vector(pdev, 0), vmci_dev); + tasklet_kill(&vmci_dev->datagram_tasklet); + tasklet_kill(&vmci_dev->bm_tasklet); + +err_disable_msi: + pci_free_irq_vectors(pdev); + + vmci_err = vmci_event_unsubscribe(ctx_update_sub_id); + if (vmci_err < VMCI_SUCCESS) + dev_warn(&pdev->dev, + "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n", + VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err); + +err_remove_bitmap: + if (vmci_dev->notification_bitmap) { + iowrite32(VMCI_CONTROL_RESET, + vmci_dev->iobase + VMCI_CONTROL_ADDR); + dma_free_coherent(&pdev->dev, PAGE_SIZE, + vmci_dev->notification_bitmap, + vmci_dev->notification_base); + } + +err_remove_vmci_dev_g: + spin_lock_irq(&vmci_dev_spinlock); + vmci_pdev = NULL; + vmci_dev_g = NULL; + spin_unlock_irq(&vmci_dev_spinlock); + +err_free_data_buffer: + vfree(vmci_dev->data_buffer); + + /* The rest are managed resources and will be freed by PCI core */ + return error; +} + +static void vmci_guest_remove_device(struct pci_dev *pdev) +{ + struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev); + int vmci_err; + + dev_dbg(&pdev->dev, "Removing device\n"); + + atomic_dec(&vmci_num_guest_devices); + + vmci_qp_guest_endpoints_exit(); + + vmci_err = vmci_event_unsubscribe(ctx_update_sub_id); + if (vmci_err < VMCI_SUCCESS) + dev_warn(&pdev->dev, + "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n", + VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err); + + spin_lock_irq(&vmci_dev_spinlock); + vmci_dev_g = NULL; + vmci_pdev = NULL; + spin_unlock_irq(&vmci_dev_spinlock); + + dev_dbg(&pdev->dev, "Resetting vmci device\n"); + iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR); + + /* + * Free IRQ and then disable MSI/MSI-X as appropriate. For + * MSI-X, we might have multiple vectors, each with their own + * IRQ, which we must free too. + */ + if (vmci_dev->exclusive_vectors) + free_irq(pci_irq_vector(pdev, 1), vmci_dev); + free_irq(pci_irq_vector(pdev, 0), vmci_dev); + pci_free_irq_vectors(pdev); + + tasklet_kill(&vmci_dev->datagram_tasklet); + tasklet_kill(&vmci_dev->bm_tasklet); + + if (vmci_dev->notification_bitmap) { + /* + * The device reset above cleared the bitmap state of the + * device, so we can safely free it here. 
+ */ + + dma_free_coherent(&pdev->dev, PAGE_SIZE, + vmci_dev->notification_bitmap, + vmci_dev->notification_base); + } + + vfree(vmci_dev->data_buffer); + + /* The rest are managed resources and will be freed by PCI core */ +} + +static const struct pci_device_id vmci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), }, + { 0 }, +}; +MODULE_DEVICE_TABLE(pci, vmci_ids); + +static struct pci_driver vmci_guest_driver = { + .name = KBUILD_MODNAME, + .id_table = vmci_ids, + .probe = vmci_guest_probe_device, + .remove = vmci_guest_remove_device, +}; + +int __init vmci_guest_init(void) +{ + return pci_register_driver(&vmci_guest_driver); +} + +void __exit vmci_guest_exit(void) +{ + pci_unregister_driver(&vmci_guest_driver); +} diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c new file mode 100644 index 000000000..de7fee7ea --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_handle_array.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + */ + +#include <linux/slab.h> +#include "vmci_handle_array.h" + +static size_t handle_arr_calc_size(u32 capacity) +{ + return VMCI_HANDLE_ARRAY_HEADER_SIZE + + capacity * sizeof(struct vmci_handle); +} + +struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity) +{ + struct vmci_handle_arr *array; + + if (max_capacity == 0 || capacity > max_capacity) + return NULL; + + if (capacity == 0) + capacity = min((u32)VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY, + max_capacity); + + array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC); + if (!array) + return NULL; + + array->capacity = capacity; + array->max_capacity = max_capacity; + array->size = 0; + + return array; +} + +void vmci_handle_arr_destroy(struct vmci_handle_arr *array) +{ + kfree(array); +} + +int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr, + struct vmci_handle handle) +{ + struct vmci_handle_arr *array = *array_ptr; + + if (unlikely(array->size >= array->capacity)) { + /* reallocate. */ + struct vmci_handle_arr *new_array; + u32 capacity_bump = min(array->max_capacity - array->capacity, + array->capacity); + size_t new_size = handle_arr_calc_size(array->capacity + + capacity_bump); + + if (array->size >= array->max_capacity) + return VMCI_ERROR_NO_MEM; + + new_array = krealloc(array, new_size, GFP_ATOMIC); + if (!new_array) + return VMCI_ERROR_NO_MEM; + + new_array->capacity += capacity_bump; + *array_ptr = array = new_array; + } + + array->entries[array->size] = handle; + array->size++; + + return VMCI_SUCCESS; +} + +/* + * Handle that was removed, VMCI_INVALID_HANDLE if entry not found. + */ +struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array, + struct vmci_handle entry_handle) +{ + struct vmci_handle handle = VMCI_INVALID_HANDLE; + u32 i; + + for (i = 0; i < array->size; i++) { + if (vmci_handle_is_equal(array->entries[i], entry_handle)) { + handle = array->entries[i]; + array->size--; + array->entries[i] = array->entries[array->size]; + array->entries[array->size] = VMCI_INVALID_HANDLE; + break; + } + } + + return handle; +} + +/* + * Handle that was removed, VMCI_INVALID_HANDLE if array was empty. 
+ */ +struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array) +{ + struct vmci_handle handle = VMCI_INVALID_HANDLE; + + if (array->size) { + array->size--; + handle = array->entries[array->size]; + array->entries[array->size] = VMCI_INVALID_HANDLE; + } + + return handle; +} + +/* + * Handle at given index, VMCI_INVALID_HANDLE if invalid index. + */ +struct vmci_handle +vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index) +{ + if (unlikely(index >= array->size)) + return VMCI_INVALID_HANDLE; + + return array->entries[index]; +} + +bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array, + struct vmci_handle entry_handle) +{ + u32 i; + + for (i = 0; i < array->size; i++) + if (vmci_handle_is_equal(array->entries[i], entry_handle)) + return true; + + return false; +} + +/* + * NULL if the array is empty. Otherwise, a pointer to the array + * of VMCI handles in the handle array. + */ +struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array) +{ + if (array->size) + return array->entries; + + return NULL; +} diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h new file mode 100644 index 000000000..96193f85b --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_handle_array.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + */ + +#ifndef _VMCI_HANDLE_ARRAY_H_ +#define _VMCI_HANDLE_ARRAY_H_ + +#include <linux/vmw_vmci_defs.h> +#include <linux/limits.h> +#include <linux/types.h> + +struct vmci_handle_arr { + u32 capacity; + u32 max_capacity; + u32 size; + u32 pad; + struct vmci_handle entries[]; +}; + +#define VMCI_HANDLE_ARRAY_HEADER_SIZE \ + offsetof(struct vmci_handle_arr, entries) +/* Select a default capacity that results in a 64 byte sized array */ +#define VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY 6 +/* Make sure that the max array size can be expressed by a u32 */ +#define VMCI_HANDLE_ARRAY_MAX_CAPACITY \ + ((U32_MAX - VMCI_HANDLE_ARRAY_HEADER_SIZE - 1) / \ + sizeof(struct vmci_handle)) + +struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity); +void vmci_handle_arr_destroy(struct vmci_handle_arr *array); +int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr, + struct vmci_handle handle); +struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array, + struct vmci_handle + entry_handle); +struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array); +struct vmci_handle +vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index); +bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array, + struct vmci_handle entry_handle); +struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array); + +static inline u32 vmci_handle_arr_get_size( + const struct vmci_handle_arr *array) +{ + return array->size; +} + + +#endif /* _VMCI_HANDLE_ARRAY_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c new file mode 100644 index 000000000..4a903770b --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_host.c @@ -0,0 +1,1025 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. 
+ */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/miscdevice.h> +#include <linux/interrupt.h> +#include <linux/highmem.h> +#include <linux/atomic.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/sched.h> +#include <linux/cred.h> +#include <linux/slab.h> +#include <linux/file.h> +#include <linux/init.h> +#include <linux/poll.h> +#include <linux/pci.h> +#include <linux/smp.h> +#include <linux/fs.h> +#include <linux/io.h> + +#include "vmci_handle_array.h" +#include "vmci_queue_pair.h" +#include "vmci_datagram.h" +#include "vmci_doorbell.h" +#include "vmci_resource.h" +#include "vmci_context.h" +#include "vmci_driver.h" +#include "vmci_event.h" + +#define VMCI_UTIL_NUM_RESOURCES 1 + +enum { + VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0, + VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1, +}; + +enum { + VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0, + VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1, + VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2, +}; + +/* + * VMCI driver initialization. This block can also be used to + * pass initial group membership etc. + */ +struct vmci_init_blk { + u32 cid; + u32 flags; +}; + +/* VMCIqueue_pairAllocInfo_VMToVM */ +struct vmci_qp_alloc_info_vmvm { + struct vmci_handle handle; + u32 peer; + u32 flags; + u64 produce_size; + u64 consume_size; + u64 produce_page_file; /* User VA. */ + u64 consume_page_file; /* User VA. */ + u64 produce_page_file_size; /* Size of the file name array. */ + u64 consume_page_file_size; /* Size of the file name array. */ + s32 result; + u32 _pad; +}; + +/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */ +struct vmci_set_notify_info { + u64 notify_uva; + s32 result; + u32 _pad; +}; + +/* + * Per-instance host state + */ +struct vmci_host_dev { + struct vmci_ctx *context; + int user_version; + enum vmci_obj_type ct_type; + struct mutex lock; /* Mutex lock for vmci context access */ +}; + +static struct vmci_ctx *host_context; +static bool vmci_host_device_initialized; +static atomic_t vmci_host_active_users = ATOMIC_INIT(0); + +/* + * Determines whether the VMCI host personality is + * available. Since the core functionality of the host driver is + * always present, all guests could possibly use the host + * personality. However, to minimize the deviation from the + * pre-unified driver state of affairs, we only consider the host + * device active if there is no active guest device or if there + * are VMX'en with active VMCI contexts using the host device. + */ +bool vmci_host_code_active(void) +{ + return vmci_host_device_initialized && + (!vmci_guest_code_active() || + atomic_read(&vmci_host_active_users) > 0); +} + +int vmci_host_users(void) +{ + return atomic_read(&vmci_host_active_users); +} + +/* + * Called on open of /dev/vmci. + */ +static int vmci_host_open(struct inode *inode, struct file *filp) +{ + struct vmci_host_dev *vmci_host_dev; + + vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL); + if (vmci_host_dev == NULL) + return -ENOMEM; + + vmci_host_dev->ct_type = VMCIOBJ_NOT_SET; + mutex_init(&vmci_host_dev->lock); + filp->private_data = vmci_host_dev; + + return 0; +} + +/* + * Called on close of /dev/vmci, most often when the process + * exits. 
+ */
+static int vmci_host_close(struct inode *inode, struct file *filp)
+{
+	struct vmci_host_dev *vmci_host_dev = filp->private_data;
+
+	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+		vmci_ctx_destroy(vmci_host_dev->context);
+		vmci_host_dev->context = NULL;
+
+		/*
+		 * The number of active contexts is used to track whether any
+		 * VMX'en are using the host personality. It is incremented when
+		 * a context is created through the IOCTL_VMCI_INIT_CONTEXT
+		 * ioctl.
+		 */
+		atomic_dec(&vmci_host_active_users);
+	}
+	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
+
+	kfree(vmci_host_dev);
+	filp->private_data = NULL;
+	return 0;
+}
+
+/*
+ * This is used to wake up the VMX when a VMCI call arrives, or
+ * to wake up select() or poll() at the next clock tick.
+ */
+static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
+{
+	struct vmci_host_dev *vmci_host_dev = filp->private_data;
+	struct vmci_ctx *context;
+	__poll_t mask = 0;
+
+	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+		/*
+		 * Read context only if ct_type == VMCIOBJ_CONTEXT to make
+		 * sure that context is initialized
+		 */
+		context = vmci_host_dev->context;
+
+		/* Check for VMCI calls to this VM context. */
+		if (wait)
+			poll_wait(filp, &context->host_context.wait_queue,
+				  wait);
+
+		spin_lock(&context->lock);
+		if (context->pending_datagrams > 0 ||
+		    vmci_handle_arr_get_size(
+				context->pending_doorbell_array) > 0) {
+			mask = EPOLLIN;
+		}
+		spin_unlock(&context->lock);
+	}
+	return mask;
+}
+
+/*
+ * Copies the handles of a handle array into a user buffer, and
+ * returns the new length in *user_buf_size. If the copy to the
+ * user buffer fails, the function still returns VMCI_SUCCESS,
+ * but retval != 0.
+ */
+static int drv_cp_harray_to_user(void __user *user_buf_uva,
+				 u64 *user_buf_size,
+				 struct vmci_handle_arr *handle_array,
+				 int *retval)
+{
+	u32 array_size = 0;
+	struct vmci_handle *handles;
+
+	if (handle_array)
+		array_size = vmci_handle_arr_get_size(handle_array);
+
+	if (array_size * sizeof(*handles) > *user_buf_size)
+		return VMCI_ERROR_MORE_DATA;
+
+	*user_buf_size = array_size * sizeof(*handles);
+	if (*user_buf_size)
+		*retval = copy_to_user(user_buf_uva,
+				       vmci_handle_arr_get_handles
+				       (handle_array), *user_buf_size);
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Sets up a given context for notify to work. Maps the notify
+ * boolean in user VA into kernel space.
+ */
+static int vmci_host_setup_notify(struct vmci_ctx *context,
+				  unsigned long uva)
+{
+	int retval;
+
+	if (context->notify_page) {
+		pr_devel("%s: Notify mechanism is already set up\n", __func__);
+		return VMCI_ERROR_DUPLICATE_ENTRY;
+	}
+
+	/*
+	 * We are using 'bool' internally, but let's make sure we're explicit
+	 * about the size.
+	 */
+	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
+
+	/*
+	 * Lock physical page backing a given user VA.
+	 */
+	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
+	if (retval != 1) {
+		context->notify_page = NULL;
+		return VMCI_ERROR_GENERIC;
+	}
+
+	/*
+	 * Map the locked page and set up notify pointer.
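+	 *
+	 * kmap() returns a page-aligned kernel address for the pinned
+	 * page, so adding the low bits of the user VA
+	 * (uva & (PAGE_SIZE - 1)) points the kernel alias at the exact
+	 * byte that backs the user's notify flag.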
+	 */
+	context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
+	vmci_ctx_check_signal_notify(context);
+
+	return VMCI_SUCCESS;
+}
+
+static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
+				 unsigned int cmd, void __user *uptr)
+{
+	if (cmd == IOCTL_VMCI_VERSION2) {
+		int __user *vptr = uptr;
+		if (get_user(vmci_host_dev->user_version, vptr))
+			return -EFAULT;
+	}
+
+	/*
+	 * The basic logic here is:
+	 *
+	 * If the user sends in a version of 0 tell it our version.
+	 * If the user didn't send in a version, tell it our version.
+	 * If the user sent in an old version, tell it -its- version.
+	 * If the user sent in a newer version, tell it our version.
+	 *
+	 * The rationale behind telling the caller its version is that
+	 * Workstation 6.5 required that the VMX and the VMCI kernel module
+	 * be version sync'd.  All new VMX users will be programmed to
+	 * handle the VMCI kernel module version.
+	 */
+
+	if (vmci_host_dev->user_version > 0 &&
+	    vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
+		return vmci_host_dev->user_version;
+	}
+
+	return VMCI_VERSION;
+}
+
+#define vmci_ioctl_err(fmt, ...)	\
+	pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)
+
+static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
+				     const char *ioctl_name,
+				     void __user *uptr)
+{
+	struct vmci_init_blk init_block;
+	const struct cred *cred;
+	int retval;
+
+	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
+		vmci_ioctl_err("error reading init block\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&vmci_host_dev->lock);
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
+		vmci_ioctl_err("received VMCI init on initialized handle\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
+		vmci_ioctl_err("unsupported VMCI restriction flag\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	cred = get_current_cred();
+	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
+						 init_block.flags, 0,
+						 vmci_host_dev->user_version,
+						 cred);
+	put_cred(cred);
+	if (IS_ERR(vmci_host_dev->context)) {
+		retval = PTR_ERR(vmci_host_dev->context);
+		vmci_ioctl_err("error initializing context\n");
+		goto out;
+	}
+
+	/*
+	 * Copy cid to userlevel; we do this to allow the VMX
+	 * to enforce its policy on cid generation.
+ */ + init_block.cid = vmci_ctx_get_id(vmci_host_dev->context); + if (copy_to_user(uptr, &init_block, sizeof(init_block))) { + vmci_ctx_destroy(vmci_host_dev->context); + vmci_host_dev->context = NULL; + vmci_ioctl_err("error writing init block\n"); + retval = -EFAULT; + goto out; + } + + vmci_host_dev->ct_type = VMCIOBJ_CONTEXT; + atomic_inc(&vmci_host_active_users); + + vmci_call_vsock_callback(true); + + retval = 0; + +out: + mutex_unlock(&vmci_host_dev->lock); + return retval; +} + +static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_datagram_snd_rcv_info send_info; + struct vmci_datagram *dg = NULL; + u32 cid; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&send_info, uptr, sizeof(send_info))) + return -EFAULT; + + if (send_info.len > VMCI_MAX_DG_SIZE) { + vmci_ioctl_err("datagram is too big (size=%d)\n", + send_info.len); + return -EINVAL; + } + + if (send_info.len < sizeof(*dg)) { + vmci_ioctl_err("datagram is too small (size=%d)\n", + send_info.len); + return -EINVAL; + } + + dg = memdup_user((void __user *)(uintptr_t)send_info.addr, + send_info.len); + if (IS_ERR(dg)) { + vmci_ioctl_err( + "cannot allocate memory to dispatch datagram\n"); + return PTR_ERR(dg); + } + + if (VMCI_DG_SIZE(dg) != send_info.len) { + vmci_ioctl_err("datagram size mismatch\n"); + kfree(dg); + return -EINVAL; + } + + pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n", + dg->dst.context, dg->dst.resource, + dg->src.context, dg->src.resource, + (unsigned long long)dg->payload_size); + + /* Get source context id. */ + cid = vmci_ctx_get_id(vmci_host_dev->context); + send_info.result = vmci_datagram_dispatch(cid, dg, true); + kfree(dg); + + return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0; +} + +static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_datagram_snd_rcv_info recv_info; + struct vmci_datagram *dg = NULL; + int retval; + size_t size; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&recv_info, uptr, sizeof(recv_info))) + return -EFAULT; + + size = recv_info.len; + recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context, + &size, &dg); + + if (recv_info.result >= VMCI_SUCCESS) { + void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr; + retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg)); + kfree(dg); + if (retval != 0) + return -EFAULT; + } + + return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? 
-EFAULT : 0; +} + +static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_handle handle; + int vmci_status; + int __user *retptr; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) { + struct vmci_qp_alloc_info_vmvm alloc_info; + struct vmci_qp_alloc_info_vmvm __user *info = uptr; + + if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info))) + return -EFAULT; + + handle = alloc_info.handle; + retptr = &info->result; + + vmci_status = vmci_qp_broker_alloc(alloc_info.handle, + alloc_info.peer, + alloc_info.flags, + VMCI_NO_PRIVILEGE_FLAGS, + alloc_info.produce_size, + alloc_info.consume_size, + NULL, + vmci_host_dev->context); + + if (vmci_status == VMCI_SUCCESS) + vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE; + } else { + struct vmci_qp_alloc_info alloc_info; + struct vmci_qp_alloc_info __user *info = uptr; + struct vmci_qp_page_store page_store; + + if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info))) + return -EFAULT; + + handle = alloc_info.handle; + retptr = &info->result; + + page_store.pages = alloc_info.ppn_va; + page_store.len = alloc_info.num_ppns; + + vmci_status = vmci_qp_broker_alloc(alloc_info.handle, + alloc_info.peer, + alloc_info.flags, + VMCI_NO_PRIVILEGE_FLAGS, + alloc_info.produce_size, + alloc_info.consume_size, + &page_store, + vmci_host_dev->context); + } + + if (put_user(vmci_status, retptr)) { + if (vmci_status >= VMCI_SUCCESS) { + vmci_status = vmci_qp_broker_detach(handle, + vmci_host_dev->context); + } + return -EFAULT; + } + + return 0; +} + +static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_qp_set_va_info set_va_info; + struct vmci_qp_set_va_info __user *info = uptr; + s32 result; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) { + vmci_ioctl_err("is not allowed\n"); + return -EINVAL; + } + + if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info))) + return -EFAULT; + + if (set_va_info.va) { + /* + * VMX is passing down a new VA for the queue + * pair mapping. + */ + result = vmci_qp_broker_map(set_va_info.handle, + vmci_host_dev->context, + set_va_info.va); + } else { + /* + * The queue pair is about to be unmapped by + * the VMX. + */ + result = vmci_qp_broker_unmap(set_va_info.handle, + vmci_host_dev->context, 0); + } + + return put_user(result, &info->result) ? -EFAULT : 0; +} + +static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_qp_page_file_info page_file_info; + struct vmci_qp_page_file_info __user *info = uptr; + s32 result; + + if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP || + vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) { + vmci_ioctl_err("not supported on this VMX (version=%d)\n", + vmci_host_dev->user_version); + return -EINVAL; + } + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&page_file_info, uptr, sizeof(*info))) + return -EFAULT; + + /* + * Communicate success pre-emptively to the caller. 
Note that the
+	 * basic premise is that it is incumbent upon the caller not to look at
+	 * the info.result field until after the ioctl() returns.  And then,
+	 * only if the ioctl() result indicates no error.  We send up the
+	 * SUCCESS status before calling SetPageStore() because failing
+	 * to copy up the result code means unwinding the SetPageStore().
+	 *
+	 * It turns out the logic to unwind a SetPageStore() opens a can of
+	 * worms.  For example, if a host had created the queue_pair and a
+	 * guest attaches and SetPageStore() is successful but writing success
+	 * fails, then ... the host has to be stopped from writing (anymore)
+	 * data into the queue_pair.  That means an additional test in the
+	 * VMCI_Enqueue() code path.  Ugh.
+	 */
+
+	if (put_user(VMCI_SUCCESS, &info->result)) {
+		/*
+		 * In this case, we can't write a result field of the
+		 * caller's info block.  So, we don't even try to
+		 * SetPageStore().
+		 */
+		return -EFAULT;
+	}
+
+	result = vmci_qp_broker_set_page_store(page_file_info.handle,
+					       page_file_info.produce_va,
+					       page_file_info.consume_va,
+					       vmci_host_dev->context);
+	if (result < VMCI_SUCCESS) {
+		if (put_user(result, &info->result)) {
+			/*
+			 * Note that in this case the SetPageStore()
+			 * call failed but we were unable to
+			 * communicate that to the caller (because the
+			 * copy_to_user() call failed).  So, if we
+			 * simply return an error (in this case
+			 * -EFAULT) then the caller will know that the
+			 * SetPageStore failed even though we couldn't
+			 * put the result code in the result field and
+			 * indicate exactly why it failed.
+			 *
+			 * That says nothing about the issue where we
+			 * were once able to write to the caller's info
+			 * memory and now can't.  Something more
+			 * serious is probably going on than the fact
+			 * that SetPageStore() didn't work.
+			 */
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
+				  const char *ioctl_name,
+				  void __user *uptr)
+{
+	struct vmci_qp_dtch_info detach_info;
+	struct vmci_qp_dtch_info __user *info = uptr;
+	s32 result;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
+		return -EFAULT;
+
+	result = vmci_qp_broker_detach(detach_info.handle,
+				       vmci_host_dev->context);
+	if (result == VMCI_SUCCESS &&
+	    vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+		result = VMCI_SUCCESS_LAST_DETACH;
+	}
+
+	return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
+				       const char *ioctl_name,
+				       void __user *uptr)
+{
+	struct vmci_ctx_info ar_info;
+	struct vmci_ctx_info __user *info = uptr;
+	s32 result;
+	u32 cid;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
+		return -EFAULT;
+
+	cid = vmci_ctx_get_id(vmci_host_dev->context);
+	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);
+
+	return put_user(result, &info->result) ?
-EFAULT : 0; +} + +static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_ctx_info ar_info; + struct vmci_ctx_info __user *info = uptr; + u32 cid; + int result; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&ar_info, uptr, sizeof(ar_info))) + return -EFAULT; + + cid = vmci_ctx_get_id(vmci_host_dev->context); + result = vmci_ctx_remove_notification(cid, + ar_info.remote_cid); + + return put_user(result, &info->result) ? -EFAULT : 0; +} + +static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_ctx_chkpt_buf_info get_info; + u32 cid; + void *cpt_buf; + int retval; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&get_info, uptr, sizeof(get_info))) + return -EFAULT; + + cid = vmci_ctx_get_id(vmci_host_dev->context); + get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type, + &get_info.buf_size, &cpt_buf); + if (get_info.result == VMCI_SUCCESS && get_info.buf_size) { + void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf; + retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size); + kfree(cpt_buf); + + if (retval) + return -EFAULT; + } + + return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0; +} + +static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_ctx_chkpt_buf_info set_info; + u32 cid; + void *cpt_buf; + int retval; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&set_info, uptr, sizeof(set_info))) + return -EFAULT; + + cpt_buf = memdup_user((void __user *)(uintptr_t)set_info.cpt_buf, + set_info.buf_size); + if (IS_ERR(cpt_buf)) + return PTR_ERR(cpt_buf); + + cid = vmci_ctx_get_id(vmci_host_dev->context); + set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type, + set_info.buf_size, cpt_buf); + + retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0; + + kfree(cpt_buf); + return retval; +} + +static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + u32 __user *u32ptr = uptr; + + return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0; +} + +static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_set_notify_info notify_info; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(¬ify_info, uptr, sizeof(notify_info))) + return -EFAULT; + + if (notify_info.notify_uva) { + notify_info.result = + vmci_host_setup_notify(vmci_host_dev->context, + notify_info.notify_uva); + } else { + vmci_ctx_unset_notify(vmci_host_dev->context); + notify_info.result = VMCI_SUCCESS; + } + + return copy_to_user(uptr, ¬ify_info, sizeof(notify_info)) ? 
+ -EFAULT : 0; +} + +static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_dbell_notify_resource_info info; + u32 cid; + + if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) { + vmci_ioctl_err("invalid for current VMX versions\n"); + return -EINVAL; + } + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&info, uptr, sizeof(info))) + return -EFAULT; + + cid = vmci_ctx_get_id(vmci_host_dev->context); + + switch (info.action) { + case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY: + if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) { + u32 flags = VMCI_NO_PRIVILEGE_FLAGS; + info.result = vmci_ctx_notify_dbell(cid, info.handle, + flags); + } else { + info.result = VMCI_ERROR_UNAVAILABLE; + } + break; + + case VMCI_NOTIFY_RESOURCE_ACTION_CREATE: + info.result = vmci_ctx_dbell_create(cid, info.handle); + break; + + case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY: + info.result = vmci_ctx_dbell_destroy(cid, info.handle); + break; + + default: + vmci_ioctl_err("got unknown action (action=%d)\n", + info.action); + info.result = VMCI_ERROR_INVALID_ARGS; + } + + return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0; +} + +static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_ctx_notify_recv_info info; + struct vmci_handle_arr *db_handle_array; + struct vmci_handle_arr *qp_handle_array; + void __user *ubuf; + u32 cid; + int retval = 0; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) { + vmci_ioctl_err("not supported for the current vmx version\n"); + return -EINVAL; + } + + if (copy_from_user(&info, uptr, sizeof(info))) + return -EFAULT; + + if ((info.db_handle_buf_size && !info.db_handle_buf_uva) || + (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) { + return -EINVAL; + } + + cid = vmci_ctx_get_id(vmci_host_dev->context); + + info.result = vmci_ctx_rcv_notifications_get(cid, + &db_handle_array, &qp_handle_array); + if (info.result != VMCI_SUCCESS) + return copy_to_user(uptr, &info, sizeof(info)) ? 
-EFAULT : 0; + + ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva; + info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size, + db_handle_array, &retval); + if (info.result == VMCI_SUCCESS && !retval) { + ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva; + info.result = drv_cp_harray_to_user(ubuf, + &info.qp_handle_buf_size, + qp_handle_array, &retval); + } + + if (!retval && copy_to_user(uptr, &info, sizeof(info))) + retval = -EFAULT; + + vmci_ctx_rcv_notifications_release(cid, + db_handle_array, qp_handle_array, + info.result == VMCI_SUCCESS && !retval); + + return retval; +} + +static long vmci_host_unlocked_ioctl(struct file *filp, + unsigned int iocmd, unsigned long ioarg) +{ +#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do { \ + char *name = __stringify(IOCTL_VMCI_ ## ioctl_name); \ + return vmci_host_do_ ## ioctl_fn( \ + vmci_host_dev, name, uptr); \ + } while (0) + + struct vmci_host_dev *vmci_host_dev = filp->private_data; + void __user *uptr = (void __user *)ioarg; + + switch (iocmd) { + case IOCTL_VMCI_INIT_CONTEXT: + VMCI_DO_IOCTL(INIT_CONTEXT, init_context); + case IOCTL_VMCI_DATAGRAM_SEND: + VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram); + case IOCTL_VMCI_DATAGRAM_RECEIVE: + VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram); + case IOCTL_VMCI_QUEUEPAIR_ALLOC: + VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair); + case IOCTL_VMCI_QUEUEPAIR_SETVA: + VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva); + case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE: + VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf); + case IOCTL_VMCI_QUEUEPAIR_DETACH: + VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach); + case IOCTL_VMCI_CTX_ADD_NOTIFICATION: + VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify); + case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION: + VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify); + case IOCTL_VMCI_CTX_GET_CPT_STATE: + VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state); + case IOCTL_VMCI_CTX_SET_CPT_STATE: + VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state); + case IOCTL_VMCI_GET_CONTEXT_ID: + VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id); + case IOCTL_VMCI_SET_NOTIFY: + VMCI_DO_IOCTL(SET_NOTIFY, set_notify); + case IOCTL_VMCI_NOTIFY_RESOURCE: + VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource); + case IOCTL_VMCI_NOTIFICATIONS_RECEIVE: + VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications); + + case IOCTL_VMCI_VERSION: + case IOCTL_VMCI_VERSION2: + return vmci_host_get_version(vmci_host_dev, iocmd, uptr); + + default: + pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd); + return -EINVAL; + } + +#undef VMCI_DO_IOCTL +} + +static const struct file_operations vmuser_fops = { + .owner = THIS_MODULE, + .open = vmci_host_open, + .release = vmci_host_close, + .poll = vmci_host_poll, + .unlocked_ioctl = vmci_host_unlocked_ioctl, + .compat_ioctl = compat_ptr_ioctl, +}; + +static struct miscdevice vmci_host_miscdev = { + .name = "vmci", + .minor = MISC_DYNAMIC_MINOR, + .fops = &vmuser_fops, +}; + +int __init vmci_host_init(void) +{ + int error; + + host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID, + VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS, + -1, VMCI_VERSION, NULL); + if (IS_ERR(host_context)) { + error = PTR_ERR(host_context); + pr_warn("Failed to initialize VMCIContext (error%d)\n", + error); + return error; + } + + error = misc_register(&vmci_host_miscdev); + if (error) { + pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n", + vmci_host_miscdev.name, + MISC_MAJOR, vmci_host_miscdev.minor, + error); + pr_warn("Unable to initialize host 
personality\n");
+		vmci_ctx_destroy(host_context);
+		return error;
+	}
+
+	pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
+		vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);
+
+	vmci_host_device_initialized = true;
+	return 0;
+}
+
+void __exit vmci_host_exit(void)
+{
+	vmci_host_device_initialized = false;
+
+	misc_deregister(&vmci_host_miscdev);
+	vmci_ctx_destroy(host_context);
+	vmci_qp_broker_exit();
+
+	pr_debug("VMCI host driver module unloaded\n");
+}
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
new file mode 100644
index 000000000..d4d388f02
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -0,0 +1,3263 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/wait.h>
+#include <linux/vmalloc.h>
+#include <linux/skbuff.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+#include "vmci_route.h"
+
+/*
+ * In the following, we will distinguish between two kinds of VMX processes -
+ * the ones with versions lower than VMCI_VERSION_NOVMVM, which use specialized
+ * VMCI page files in the VMX and support VM to VM communication, and the
+ * newer ones that use the guest memory directly. We will in the following
+ * refer to the older VMX versions as old-style VMX'en, and the newer ones as
+ * new-style VMX'en.
+ *
+ * The state transition diagram is as follows (the VMCIQPB_ prefix has been
+ * removed for readability) - see below for more details on the transitions:
+ *
+ *            --------------  NEW  -------------
+ *            |                                |
+ *           \_/                              \_/
+ *     CREATED_NO_MEM <-----------------> CREATED_MEM
+ *            |    |                           |
+ *            |    o-----------------------o   |
+ *            |                            |   |
+ *           \_/                          \_/ \_/
+ *     ATTACHED_NO_MEM <----------------> ATTACHED_MEM
+ *            |                            |   |
+ *            |     o----------------------o   |
+ *            |     |                          |
+ *           \_/   \_/                        \_/
+ *     SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
+ *            |                                |
+ *            |                                |
+ *             -------------> gone <-------------
+ *
+ * In more detail: when a VMCI queue pair is first created, it will be in the
+ * VMCIQPB_NEW state. It will then move into one of the following states:
+ *
+ * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
+ *
+ *     - the create was performed by a host endpoint, in which case there is
+ *       no backing memory yet.
+ *
+ *     - the create was initiated by an old-style VMX, that uses
+ *       vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
+ *       a later point in time. This state can be distinguished from the one
+ *       above by the context ID of the creator. A host side is not allowed to
+ *       attach until the page store has been set.
+ *
+ * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
+ *     is created by a VMX using the queue pair device backend that
+ *     sets the UVAs of the queue pair immediately and stores the
+ *     information for later attachers. At this point, it is ready for
+ *     the host side to attach to it.
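+ *     (For example, the common new-style lifecycle runs straight down
+ *     the right-hand column of the diagram above:
+ *     NEW -> CREATED_MEM -> ATTACHED_MEM -> SHUTDOWN_MEM -> gone.)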
+ * + * Once the queue pair is in one of the created states (with the exception of + * the case mentioned for older VMX'en above), it is possible to attach to the + * queue pair. Again we have two new states possible: + * + * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following + * paths: + * + * - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue + * pair, and attaches to a queue pair previously created by the host side. + * + * - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair + * already created by a guest. + * + * - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls + * vmci_qp_broker_set_page_store (see below). + * + * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the + * VMCIQPB_CREATED_NO_MEM due to a host side create, an old-style VMX will + * bring the queue pair into this state. Once vmci_qp_broker_set_page_store + * is called to register the user memory, the VMCIQPB_ATTACH_MEM state + * will be entered. + * + * From the attached queue pair, the queue pair can enter the shutdown states + * when either side of the queue pair detaches. If the guest side detaches + * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where + * the content of the queue pair will no longer be available. If the host + * side detaches first, the queue pair will either enter the + * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or + * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped + * (e.g., the host detaches while a guest is stunned). + * + * New-style VMX'en will also unmap guest memory, if the guest is + * quiesced, e.g., during a snapshot operation. In that case, the guest + * memory will no longer be available, and the queue pair will transition from + * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more, + * in which case the queue pair will transition from the *_NO_MEM state at that + * point back to the *_MEM state. Note that the *_NO_MEM state may have changed, + * since the peer may have either attached or detached in the meantime. The + * values are laid out such that ++ on a state will move from a *_NO_MEM to a + * *_MEM state, and vice versa. + */ + +/* The Kernel specific component of the struct vmci_queue structure. */ +struct vmci_queue_kern_if { + struct mutex __mutex; /* Protects the queue. */ + struct mutex *mutex; /* Shared by producer and consumer queues. */ + size_t num_pages; /* Number of pages incl. header. */ + bool host; /* Host or guest? */ + union { + struct { + dma_addr_t *pas; + void **vas; + } g; /* Used by the guest. */ + struct { + struct page **page; + struct page **header_page; + } h; /* Used by the host. */ + } u; +}; + +/* + * This structure is opaque to the clients. 
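+ * It captures one endpoint's view of a queue pair: the handle, the
+ * produce/consume queues and their sizes fixed at allocation time, and
+ * the blocked/generation/event fields used to put callers to sleep
+ * until the peer makes the pair usable again.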
+ */ +struct vmci_qp { + struct vmci_handle handle; + struct vmci_queue *produce_q; + struct vmci_queue *consume_q; + u64 produce_q_size; + u64 consume_q_size; + u32 peer; + u32 flags; + u32 priv_flags; + bool guest_endpoint; + unsigned int blocked; + unsigned int generation; + wait_queue_head_t event; +}; + +enum qp_broker_state { + VMCIQPB_NEW, + VMCIQPB_CREATED_NO_MEM, + VMCIQPB_CREATED_MEM, + VMCIQPB_ATTACHED_NO_MEM, + VMCIQPB_ATTACHED_MEM, + VMCIQPB_SHUTDOWN_NO_MEM, + VMCIQPB_SHUTDOWN_MEM, + VMCIQPB_GONE +}; + +#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \ + _qpb->state == VMCIQPB_ATTACHED_MEM || \ + _qpb->state == VMCIQPB_SHUTDOWN_MEM) + +/* + * In the queue pair broker, we always use the guest point of view for + * the produce and consume queue values and references, e.g., the + * produce queue size stored is the guests produce queue size. The + * host endpoint will need to swap these around. The only exception is + * the local queue pairs on the host, in which case the host endpoint + * that creates the queue pair will have the right orientation, and + * the attaching host endpoint will need to swap. + */ +struct qp_entry { + struct list_head list_item; + struct vmci_handle handle; + u32 peer; + u32 flags; + u64 produce_size; + u64 consume_size; + u32 ref_count; +}; + +struct qp_broker_entry { + struct vmci_resource resource; + struct qp_entry qp; + u32 create_id; + u32 attach_id; + enum qp_broker_state state; + bool require_trusted_attach; + bool created_by_trusted; + bool vmci_page_files; /* Created by VMX using VMCI page files */ + struct vmci_queue *produce_q; + struct vmci_queue *consume_q; + struct vmci_queue_header saved_produce_q; + struct vmci_queue_header saved_consume_q; + vmci_event_release_cb wakeup_cb; + void *client_data; + void *local_mem; /* Kernel memory for local queue pair */ +}; + +struct qp_guest_endpoint { + struct vmci_resource resource; + struct qp_entry qp; + u64 num_ppns; + void *produce_q; + void *consume_q; + struct ppn_set ppn_set; +}; + +struct qp_list { + struct list_head head; + struct mutex mutex; /* Protect queue list. */ +}; + +static struct qp_list qp_broker_list = { + .head = LIST_HEAD_INIT(qp_broker_list.head), + .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex), +}; + +static struct qp_list qp_guest_endpoints = { + .head = LIST_HEAD_INIT(qp_guest_endpoints.head), + .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex), +}; + +#define INVALID_VMCI_GUEST_MEM_ID 0 +#define QPE_NUM_PAGES(_QPE) ((u32) \ + (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \ + DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2)) + + +/* + * Frees kernel VA space for a given queue and its queue header, and + * frees physical data pages. + */ +static void qp_free_queue(void *q, u64 size) +{ + struct vmci_queue *queue = q; + + if (queue) { + u64 i; + + /* Given size does not include header, so add in a page here. */ + for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) { + dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE, + queue->kernel_if->u.g.vas[i], + queue->kernel_if->u.g.pas[i]); + } + + vfree(queue); + } +} + +/* + * Allocates kernel queue pages of specified size with IOMMU mappings, + * plus space for the queue structure/kernel interface and the queue + * header. 
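+ *
+ * The vmalloc()'d block is laid out exactly as the pointer arithmetic
+ * below sets up:
+ *
+ *   [struct vmci_queue][struct vmci_queue_kern_if][pas[]][vas[]]
+ *
+ * while the queue header and data pages themselves come from
+ * dma_alloc_coherent(), one page at a time.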
+ */ +static void *qp_alloc_queue(u64 size, u32 flags) +{ + u64 i; + struct vmci_queue *queue; + size_t pas_size; + size_t vas_size; + size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if); + u64 num_pages; + + if (size > SIZE_MAX - PAGE_SIZE) + return NULL; + num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; + if (num_pages > + (SIZE_MAX - queue_size) / + (sizeof(*queue->kernel_if->u.g.pas) + + sizeof(*queue->kernel_if->u.g.vas))) + return NULL; + + pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas); + vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas); + queue_size += pas_size + vas_size; + + queue = vmalloc(queue_size); + if (!queue) + return NULL; + + queue->q_header = NULL; + queue->saved_header = NULL; + queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); + queue->kernel_if->mutex = NULL; + queue->kernel_if->num_pages = num_pages; + queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1); + queue->kernel_if->u.g.vas = + (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size); + queue->kernel_if->host = false; + + for (i = 0; i < num_pages; i++) { + queue->kernel_if->u.g.vas[i] = + dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE, + &queue->kernel_if->u.g.pas[i], + GFP_KERNEL); + if (!queue->kernel_if->u.g.vas[i]) { + /* Size excl. the header. */ + qp_free_queue(queue, i * PAGE_SIZE); + return NULL; + } + } + + /* Queue header is the first page. */ + queue->q_header = queue->kernel_if->u.g.vas[0]; + + return queue; +} + +/* + * Copies from a given buffer or iovector to a VMCI Queue. Uses + * kmap()/kunmap() to dynamically map/unmap required portions of the queue + * by traversing the offset -> page translation structure for the queue. + * Assumes that offset + size does not wrap around in the queue. + */ +static int qp_memcpy_to_queue_iter(struct vmci_queue *queue, + u64 queue_offset, + struct iov_iter *from, + size_t size) +{ + struct vmci_queue_kern_if *kernel_if = queue->kernel_if; + size_t bytes_copied = 0; + + while (bytes_copied < size) { + const u64 page_index = + (queue_offset + bytes_copied) / PAGE_SIZE; + const size_t page_offset = + (queue_offset + bytes_copied) & (PAGE_SIZE - 1); + void *va; + size_t to_copy; + + if (kernel_if->host) + va = kmap(kernel_if->u.h.page[page_index]); + else + va = kernel_if->u.g.vas[page_index + 1]; + /* Skip header. */ + + if (size - bytes_copied > PAGE_SIZE - page_offset) + /* Enough payload to fill up from this page. */ + to_copy = PAGE_SIZE - page_offset; + else + to_copy = size - bytes_copied; + + if (!copy_from_iter_full((u8 *)va + page_offset, to_copy, + from)) { + if (kernel_if->host) + kunmap(kernel_if->u.h.page[page_index]); + return VMCI_ERROR_INVALID_ARGS; + } + bytes_copied += to_copy; + if (kernel_if->host) + kunmap(kernel_if->u.h.page[page_index]); + } + + return VMCI_SUCCESS; +} + +/* + * Copies to a given buffer or iovector from a VMCI Queue. Uses + * kmap()/kunmap() to dynamically map/unmap required portions of the queue + * by traversing the offset -> page translation structure for the queue. + * Assumes that offset + size does not wrap around in the queue. 
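+ *
+ * For example, with a 4 KiB PAGE_SIZE, copying 100 bytes starting at
+ * queue_offset 4090 touches two data pages: 6 bytes at page_offset 4090
+ * of page_index 0, then 94 bytes at page_offset 0 of page_index 1 (the
+ * guest-side lookup reads vas[page_index + 1] to skip the header page).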
+ */
+static int qp_memcpy_from_queue_iter(struct iov_iter *to,
+				     const struct vmci_queue *queue,
+				     u64 queue_offset, size_t size)
+{
+	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
+	size_t bytes_copied = 0;
+
+	while (bytes_copied < size) {
+		const u64 page_index =
+			(queue_offset + bytes_copied) / PAGE_SIZE;
+		const size_t page_offset =
+		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
+		void *va;
+		size_t to_copy;
+		int err;
+
+		if (kernel_if->host)
+			va = kmap(kernel_if->u.h.page[page_index]);
+		else
+			va = kernel_if->u.g.vas[page_index + 1];
+			/* Skip header. */
+
+		if (size - bytes_copied > PAGE_SIZE - page_offset)
+			/* Enough payload to fill up this page. */
+			to_copy = PAGE_SIZE - page_offset;
+		else
+			to_copy = size - bytes_copied;
+
+		err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
+		if (err != to_copy) {
+			if (kernel_if->host)
+				kunmap(kernel_if->u.h.page[page_index]);
+			return VMCI_ERROR_INVALID_ARGS;
+		}
+		bytes_copied += to_copy;
+		if (kernel_if->host)
+			kunmap(kernel_if->u.h.page[page_index]);
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Allocates two lists of PPNs --- one for the pages in the produce queue,
+ * and the other for the pages in the consume queue. Initializes the lists
+ * of PPNs with the page frame numbers of the KVA for the two queues (and
+ * the queue headers).
+ */
+static int qp_alloc_ppn_set(void *prod_q,
+			    u64 num_produce_pages,
+			    void *cons_q,
+			    u64 num_consume_pages, struct ppn_set *ppn_set)
+{
+	u64 *produce_ppns;
+	u64 *consume_ppns;
+	struct vmci_queue *produce_q = prod_q;
+	struct vmci_queue *consume_q = cons_q;
+	u64 i;
+
+	if (!produce_q || !num_produce_pages || !consume_q ||
+	    !num_consume_pages || !ppn_set)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (ppn_set->initialized)
+		return VMCI_ERROR_ALREADY_EXISTS;
+
+	produce_ppns =
+	    kmalloc_array(num_produce_pages, sizeof(*produce_ppns),
+			  GFP_KERNEL);
+	if (!produce_ppns)
+		return VMCI_ERROR_NO_MEM;
+
+	consume_ppns =
+	    kmalloc_array(num_consume_pages, sizeof(*consume_ppns),
+			  GFP_KERNEL);
+	if (!consume_ppns) {
+		kfree(produce_ppns);
+		return VMCI_ERROR_NO_MEM;
+	}
+
+	for (i = 0; i < num_produce_pages; i++)
+		produce_ppns[i] =
+			produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
+
+	for (i = 0; i < num_consume_pages; i++)
+		consume_ppns[i] =
+			consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
+
+	ppn_set->num_produce_pages = num_produce_pages;
+	ppn_set->num_consume_pages = num_consume_pages;
+	ppn_set->produce_ppns = produce_ppns;
+	ppn_set->consume_ppns = consume_ppns;
+	ppn_set->initialized = true;
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Frees the two lists of PPNs for a queue pair.
+ */
+static void qp_free_ppn_set(struct ppn_set *ppn_set)
+{
+	if (ppn_set->initialized) {
+		/* Do not call these functions on NULL inputs. */
+		kfree(ppn_set->produce_ppns);
+		kfree(ppn_set->consume_ppns);
+	}
+	memset(ppn_set, 0, sizeof(*ppn_set));
+}
+
+/*
+ * Populates the list of PPNs in the hypercall structure with the PPNs
+ * of the produce queue and the consume queue.
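+ *
+ * The layout is: all produce-queue PPNs first, the consume-queue PPNs
+ * immediately after, each entry a u64 when vmci_use_ppn64() reports
+ * 64-bit PPN support and a truncated u32 otherwise.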
+ */ +static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set) +{ + if (vmci_use_ppn64()) { + memcpy(call_buf, ppn_set->produce_ppns, + ppn_set->num_produce_pages * + sizeof(*ppn_set->produce_ppns)); + memcpy(call_buf + + ppn_set->num_produce_pages * + sizeof(*ppn_set->produce_ppns), + ppn_set->consume_ppns, + ppn_set->num_consume_pages * + sizeof(*ppn_set->consume_ppns)); + } else { + int i; + u32 *ppns = (u32 *) call_buf; + + for (i = 0; i < ppn_set->num_produce_pages; i++) + ppns[i] = (u32) ppn_set->produce_ppns[i]; + + ppns = &ppns[ppn_set->num_produce_pages]; + + for (i = 0; i < ppn_set->num_consume_pages; i++) + ppns[i] = (u32) ppn_set->consume_ppns[i]; + } + + return VMCI_SUCCESS; +} + +/* + * Allocates kernel VA space of specified size plus space for the queue + * and kernel interface. This is different from the guest queue allocator, + * because we do not allocate our own queue header/data pages here but + * share those of the guest. + */ +static struct vmci_queue *qp_host_alloc_queue(u64 size) +{ + struct vmci_queue *queue; + size_t queue_page_size; + u64 num_pages; + const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); + + if (size > SIZE_MAX - PAGE_SIZE) + return NULL; + num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; + if (num_pages > (SIZE_MAX - queue_size) / + sizeof(*queue->kernel_if->u.h.page)) + return NULL; + + queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page); + + if (queue_size + queue_page_size > KMALLOC_MAX_SIZE) + return NULL; + + queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL); + if (queue) { + queue->q_header = NULL; + queue->saved_header = NULL; + queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); + queue->kernel_if->host = true; + queue->kernel_if->mutex = NULL; + queue->kernel_if->num_pages = num_pages; + queue->kernel_if->u.h.header_page = + (struct page **)((u8 *)queue + queue_size); + queue->kernel_if->u.h.page = + &queue->kernel_if->u.h.header_page[1]; + } + + return queue; +} + +/* + * Frees kernel memory for a given queue (header plus translation + * structure). + */ +static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size) +{ + kfree(queue); +} + +/* + * Initialize the mutex for the pair of queues. This mutex is used to + * protect the q_header and the buffer from changing out from under any + * users of either queue. Of course, it's only any good if the mutexes + * are actually acquired. Queue structure must lie on non-paged memory + * or we cannot guarantee access to the mutex. + */ +static void qp_init_queue_mutex(struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + /* + * Only the host queue has shared state - the guest queues do not + * need to synchronize access using a queue mutex. + */ + + if (produce_q->kernel_if->host) { + produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; + consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; + mutex_init(produce_q->kernel_if->mutex); + } +} + +/* + * Cleans up the mutex for the pair of queues. + */ +static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + if (produce_q->kernel_if->host) { + produce_q->kernel_if->mutex = NULL; + consume_q->kernel_if->mutex = NULL; + } +} + +/* + * Acquire the mutex for the queue. Note that the produce_q and + * the consume_q share a mutex. So, only one of the two need to + * be passed in to this routine. Either will work just fine. 
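+ *
+ * Typical host-side usage (sketch):
+ *
+ *   qp_acquire_queue_mutex(produce_q);
+ *   ... read or update the queue headers, copy payload ...
+ *   qp_release_queue_mutex(produce_q);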
+ */ +static void qp_acquire_queue_mutex(struct vmci_queue *queue) +{ + if (queue->kernel_if->host) + mutex_lock(queue->kernel_if->mutex); +} + +/* + * Release the mutex for the queue. Note that the produce_q and + * the consume_q share a mutex. So, only one of the two need to + * be passed in to this routine. Either will work just fine. + */ +static void qp_release_queue_mutex(struct vmci_queue *queue) +{ + if (queue->kernel_if->host) + mutex_unlock(queue->kernel_if->mutex); +} + +/* + * Helper function to release pages in the PageStoreAttachInfo + * previously obtained using get_user_pages. + */ +static void qp_release_pages(struct page **pages, + u64 num_pages, bool dirty) +{ + int i; + + for (i = 0; i < num_pages; i++) { + if (dirty) + set_page_dirty_lock(pages[i]); + + put_page(pages[i]); + pages[i] = NULL; + } +} + +/* + * Lock the user pages referenced by the {produce,consume}Buffer + * struct into memory and populate the {produce,consume}Pages + * arrays in the attach structure with them. + */ +static int qp_host_get_user_memory(u64 produce_uva, + u64 consume_uva, + struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + int retval; + int err = VMCI_SUCCESS; + + retval = get_user_pages_fast((uintptr_t) produce_uva, + produce_q->kernel_if->num_pages, + FOLL_WRITE, + produce_q->kernel_if->u.h.header_page); + if (retval < (int)produce_q->kernel_if->num_pages) { + pr_debug("get_user_pages_fast(produce) failed (retval=%d)", + retval); + if (retval > 0) + qp_release_pages(produce_q->kernel_if->u.h.header_page, + retval, false); + err = VMCI_ERROR_NO_MEM; + goto out; + } + + retval = get_user_pages_fast((uintptr_t) consume_uva, + consume_q->kernel_if->num_pages, + FOLL_WRITE, + consume_q->kernel_if->u.h.header_page); + if (retval < (int)consume_q->kernel_if->num_pages) { + pr_debug("get_user_pages_fast(consume) failed (retval=%d)", + retval); + if (retval > 0) + qp_release_pages(consume_q->kernel_if->u.h.header_page, + retval, false); + qp_release_pages(produce_q->kernel_if->u.h.header_page, + produce_q->kernel_if->num_pages, false); + err = VMCI_ERROR_NO_MEM; + } + + out: + return err; +} + +/* + * Registers the specification of the user pages used for backing a queue + * pair. Enough information to map in pages is stored in the OS specific + * part of the struct vmci_queue structure. + */ +static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store, + struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + u64 produce_uva; + u64 consume_uva; + + /* + * The new style and the old style mapping only differs in + * that we either get a single or two UVAs, so we split the + * single UVA range at the appropriate spot. + */ + produce_uva = page_store->pages; + consume_uva = page_store->pages + + produce_q->kernel_if->num_pages * PAGE_SIZE; + return qp_host_get_user_memory(produce_uva, consume_uva, produce_q, + consume_q); +} + +/* + * Releases and removes the references to user pages stored in the attach + * struct. Pages are released from the page cache and may become + * swappable again. 
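+ *
+ * The pages are released dirty (qp_release_pages() is called with
+ * dirty == true), since the host side may have written to them through
+ * its kernel mappings.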
+ */ +static void qp_host_unregister_user_memory(struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + qp_release_pages(produce_q->kernel_if->u.h.header_page, + produce_q->kernel_if->num_pages, true); + memset(produce_q->kernel_if->u.h.header_page, 0, + sizeof(*produce_q->kernel_if->u.h.header_page) * + produce_q->kernel_if->num_pages); + qp_release_pages(consume_q->kernel_if->u.h.header_page, + consume_q->kernel_if->num_pages, true); + memset(consume_q->kernel_if->u.h.header_page, 0, + sizeof(*consume_q->kernel_if->u.h.header_page) * + consume_q->kernel_if->num_pages); +} + +/* + * Once qp_host_register_user_memory has been performed on a + * queue, the queue pair headers can be mapped into the + * kernel. Once mapped, they must be unmapped with + * qp_host_unmap_queues prior to calling + * qp_host_unregister_user_memory. + * Pages are pinned. + */ +static int qp_host_map_queues(struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + int result; + + if (!produce_q->q_header || !consume_q->q_header) { + struct page *headers[2]; + + if (produce_q->q_header != consume_q->q_header) + return VMCI_ERROR_QUEUEPAIR_MISMATCH; + + if (produce_q->kernel_if->u.h.header_page == NULL || + *produce_q->kernel_if->u.h.header_page == NULL) + return VMCI_ERROR_UNAVAILABLE; + + headers[0] = *produce_q->kernel_if->u.h.header_page; + headers[1] = *consume_q->kernel_if->u.h.header_page; + + produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL); + if (produce_q->q_header != NULL) { + consume_q->q_header = + (struct vmci_queue_header *)((u8 *) + produce_q->q_header + + PAGE_SIZE); + result = VMCI_SUCCESS; + } else { + pr_warn("vmap failed\n"); + result = VMCI_ERROR_NO_MEM; + } + } else { + result = VMCI_SUCCESS; + } + + return result; +} + +/* + * Unmaps previously mapped queue pair headers from the kernel. + * Pages are unpinned. + */ +static int qp_host_unmap_queues(u32 gid, + struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + if (produce_q->q_header) { + if (produce_q->q_header < consume_q->q_header) + vunmap(produce_q->q_header); + else + vunmap(consume_q->q_header); + + produce_q->q_header = NULL; + consume_q->q_header = NULL; + } + + return VMCI_SUCCESS; +} + +/* + * Finds the entry in the list corresponding to a given handle. Assumes + * that the list is locked. + */ +static struct qp_entry *qp_list_find(struct qp_list *qp_list, + struct vmci_handle handle) +{ + struct qp_entry *entry; + + if (vmci_handle_is_invalid(handle)) + return NULL; + + list_for_each_entry(entry, &qp_list->head, list_item) { + if (vmci_handle_is_equal(entry->handle, handle)) + return entry; + } + + return NULL; +} + +/* + * Finds the entry in the list corresponding to a given handle. + */ +static struct qp_guest_endpoint * +qp_guest_handle_to_entry(struct vmci_handle handle) +{ + struct qp_guest_endpoint *entry; + struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle); + + entry = qp ? container_of( + qp, struct qp_guest_endpoint, qp) : NULL; + return entry; +} + +/* + * Finds the entry in the list corresponding to a given handle. + */ +static struct qp_broker_entry * +qp_broker_handle_to_entry(struct vmci_handle handle) +{ + struct qp_broker_entry *entry; + struct qp_entry *qp = qp_list_find(&qp_broker_list, handle); + + entry = qp ? container_of( + qp, struct qp_broker_entry, qp) : NULL; + return entry; +} + +/* + * Dispatches a queue pair event message directly into the local event + * queue. 
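+ *
+ * The event datagram is addressed to this context's own
+ * VMCI_EVENT_HANDLER resource, and its payload_size, as always for
+ * VMCI datagrams, excludes the datagram header itself.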
+ */ +static int qp_notify_peer_local(bool attach, struct vmci_handle handle) +{ + u32 context_id = vmci_get_context_id(); + struct vmci_event_qp ev; + + memset(&ev, 0, sizeof(ev)); + ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER); + ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_CONTEXT_RESOURCE_ID); + ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); + ev.msg.event_data.event = + attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH; + ev.payload.peer_id = context_id; + ev.payload.handle = handle; + + return vmci_event_dispatch(&ev.msg.hdr); +} + +/* + * Allocates and initializes a qp_guest_endpoint structure. + * Allocates a queue_pair rid (and handle) iff the given entry has + * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX + * are reserved handles. Assumes that the QP list mutex is held + * by the caller. + */ +static struct qp_guest_endpoint * +qp_guest_endpoint_create(struct vmci_handle handle, + u32 peer, + u32 flags, + u64 produce_size, + u64 consume_size, + void *produce_q, + void *consume_q) +{ + int result; + struct qp_guest_endpoint *entry; + /* One page each for the queue headers. */ + const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) + + DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2; + + if (vmci_handle_is_invalid(handle)) { + u32 context_id = vmci_get_context_id(); + + handle = vmci_make_handle(context_id, VMCI_INVALID_ID); + } + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (entry) { + entry->qp.peer = peer; + entry->qp.flags = flags; + entry->qp.produce_size = produce_size; + entry->qp.consume_size = consume_size; + entry->qp.ref_count = 0; + entry->num_ppns = num_ppns; + entry->produce_q = produce_q; + entry->consume_q = consume_q; + INIT_LIST_HEAD(&entry->qp.list_item); + + /* Add resource obj */ + result = vmci_resource_add(&entry->resource, + VMCI_RESOURCE_TYPE_QPAIR_GUEST, + handle); + entry->qp.handle = vmci_resource_handle(&entry->resource); + if ((result != VMCI_SUCCESS) || + qp_list_find(&qp_guest_endpoints, entry->qp.handle)) { + pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d", + handle.context, handle.resource, result); + kfree(entry); + entry = NULL; + } + } + return entry; +} + +/* + * Frees a qp_guest_endpoint structure. + */ +static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry) +{ + qp_free_ppn_set(&entry->ppn_set); + qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); + qp_free_queue(entry->produce_q, entry->qp.produce_size); + qp_free_queue(entry->consume_q, entry->qp.consume_size); + /* Unlink from resource hash table and free callback */ + vmci_resource_remove(&entry->resource); + + kfree(entry); +} + +/* + * Helper to make a queue_pairAlloc hypercall when the driver is + * supporting a guest device. + */ +static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry) +{ + struct vmci_qp_alloc_msg *alloc_msg; + size_t msg_size; + size_t ppn_size; + int result; + + if (!entry || entry->num_ppns <= 2) + return VMCI_ERROR_INVALID_ARGS; + + ppn_size = vmci_use_ppn64() ? 
sizeof(u64) : sizeof(u32); + msg_size = sizeof(*alloc_msg) + + (size_t) entry->num_ppns * ppn_size; + alloc_msg = kmalloc(msg_size, GFP_KERNEL); + if (!alloc_msg) + return VMCI_ERROR_NO_MEM; + + alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_QUEUEPAIR_ALLOC); + alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE; + alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE; + alloc_msg->handle = entry->qp.handle; + alloc_msg->peer = entry->qp.peer; + alloc_msg->flags = entry->qp.flags; + alloc_msg->produce_size = entry->qp.produce_size; + alloc_msg->consume_size = entry->qp.consume_size; + alloc_msg->num_ppns = entry->num_ppns; + + result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg), + &entry->ppn_set); + if (result == VMCI_SUCCESS) + result = vmci_send_datagram(&alloc_msg->hdr); + + kfree(alloc_msg); + + return result; +} + +/* + * Helper to make a queue_pairDetach hypercall when the driver is + * supporting a guest device. + */ +static int qp_detatch_hypercall(struct vmci_handle handle) +{ + struct vmci_qp_detach_msg detach_msg; + + detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_QUEUEPAIR_DETACH); + detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE; + detach_msg.hdr.payload_size = sizeof(handle); + detach_msg.handle = handle; + + return vmci_send_datagram(&detach_msg.hdr); +} + +/* + * Adds the given entry to the list. Assumes that the list is locked. + */ +static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry) +{ + if (entry) + list_add(&entry->list_item, &qp_list->head); +} + +/* + * Removes the given entry from the list. Assumes that the list is locked. + */ +static void qp_list_remove_entry(struct qp_list *qp_list, + struct qp_entry *entry) +{ + if (entry) + list_del(&entry->list_item); +} + +/* + * Helper for VMCI queue_pair detach interface. Frees the physical + * pages for the queue pair. + */ +static int qp_detatch_guest_work(struct vmci_handle handle) +{ + int result; + struct qp_guest_endpoint *entry; + u32 ref_count = ~0; /* To avoid compiler warning below */ + + mutex_lock(&qp_guest_endpoints.mutex); + + entry = qp_guest_handle_to_entry(handle); + if (!entry) { + mutex_unlock(&qp_guest_endpoints.mutex); + return VMCI_ERROR_NOT_FOUND; + } + + if (entry->qp.flags & VMCI_QPFLAG_LOCAL) { + result = VMCI_SUCCESS; + + if (entry->qp.ref_count > 1) { + result = qp_notify_peer_local(false, handle); + /* + * We can fail to notify a local queuepair + * because we can't allocate. We still want + * to release the entry if that happens, so + * don't bail out yet. + */ + } + } else { + result = qp_detatch_hypercall(handle); + if (result < VMCI_SUCCESS) { + /* + * We failed to notify a non-local queuepair. + * That other queuepair might still be + * accessing the shared memory, so don't + * release the entry yet. It will get cleaned + * up by VMCIqueue_pair_Exit() if necessary + * (assuming we are going away, otherwise why + * did this fail?). + */ + + mutex_unlock(&qp_guest_endpoints.mutex); + return result; + } + } + + /* + * If we get here then we either failed to notify a local queuepair, or + * we succeeded in all cases. Release the entry if required. + */ + + entry->qp.ref_count--; + if (entry->qp.ref_count == 0) + qp_list_remove_entry(&qp_guest_endpoints, &entry->qp); + + /* If we didn't remove the entry, this could change once we unlock. 
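+	 * Snapshot the count while the mutex is still held; the destroy
+	 * below runs after unlocking and must rely on that snapshot.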
+	 */
+	if (entry)
+		ref_count = entry->qp.ref_count;
+
+	mutex_unlock(&qp_guest_endpoints.mutex);
+
+	if (ref_count == 0)
+		qp_guest_endpoint_destroy(entry);
+
+	return result;
+}
+
+/*
+ * This function handles the actual allocation of a VMCI queue
+ * pair guest endpoint. Allocates physical pages for the queue
+ * pair. It makes OS dependent calls through generic wrappers.
+ */
+static int qp_alloc_guest_work(struct vmci_handle *handle,
+			       struct vmci_queue **produce_q,
+			       u64 produce_size,
+			       struct vmci_queue **consume_q,
+			       u64 consume_size,
+			       u32 peer,
+			       u32 flags,
+			       u32 priv_flags)
+{
+	const u64 num_produce_pages =
+	    DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
+	const u64 num_consume_pages =
+	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
+	void *my_produce_q = NULL;
+	void *my_consume_q = NULL;
+	int result;
+	struct qp_guest_endpoint *queue_pair_entry = NULL;
+
+	if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
+		return VMCI_ERROR_NO_ACCESS;
+
+	mutex_lock(&qp_guest_endpoints.mutex);
+
+	queue_pair_entry = qp_guest_handle_to_entry(*handle);
+	if (queue_pair_entry) {
+		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+			/* Local attach case. */
+			if (queue_pair_entry->qp.ref_count > 1) {
+				pr_devel("Error attempting to attach more than once\n");
+				result = VMCI_ERROR_UNAVAILABLE;
+				goto error_keep_entry;
+			}
+
+			if (queue_pair_entry->qp.produce_size != consume_size ||
+			    queue_pair_entry->qp.consume_size !=
+			    produce_size ||
+			    queue_pair_entry->qp.flags !=
+			    (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
+				pr_devel("Error mismatched queue pair in local attach\n");
+				result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
+				goto error_keep_entry;
+			}
+
+			/*
+			 * Do a local attach. We swap the consume and
+			 * produce queues for the attacher and deliver
+			 * an attach event.
+			 */
+			result = qp_notify_peer_local(true, *handle);
+			if (result < VMCI_SUCCESS)
+				goto error_keep_entry;
+
+			my_produce_q = queue_pair_entry->consume_q;
+			my_consume_q = queue_pair_entry->produce_q;
+			goto out;
+		}
+
+		result = VMCI_ERROR_ALREADY_EXISTS;
+		goto error_keep_entry;
+	}
+
+	my_produce_q = qp_alloc_queue(produce_size, flags);
+	if (!my_produce_q) {
+		pr_warn("Error allocating pages for produce queue\n");
+		result = VMCI_ERROR_NO_MEM;
+		goto error;
+	}
+
+	my_consume_q = qp_alloc_queue(consume_size, flags);
+	if (!my_consume_q) {
+		pr_warn("Error allocating pages for consume queue\n");
+		result = VMCI_ERROR_NO_MEM;
+		goto error;
+	}
+
+	queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
+						    produce_size, consume_size,
+						    my_produce_q, my_consume_q);
+	if (!queue_pair_entry) {
+		pr_warn("Error allocating memory in %s\n", __func__);
+		result = VMCI_ERROR_NO_MEM;
+		goto error;
+	}
+
+	result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
+				  num_consume_pages,
+				  &queue_pair_entry->ppn_set);
+	if (result < VMCI_SUCCESS) {
+		pr_warn("qp_alloc_ppn_set failed\n");
+		goto error;
+	}
+
+	/*
+	 * It's only necessary to notify the host if this queue pair will be
+	 * attached to from another context.
+	 */
+	if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+		/* Local create case. */
+		u32 context_id = vmci_get_context_id();
+
+		/*
+		 * Enforce similar checks on local queue pairs as we
+		 * do for regular ones.  The handle's context must
+		 * match the creator or attacher context id (here they
+		 * are both the current context id) and the
+		 * attach-only flag cannot exist during create.  We
+		 * also ensure specified peer is this context or an
+		 * invalid one.
+		 */
+		if (queue_pair_entry->qp.handle.context != context_id ||
+		    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
+		     queue_pair_entry->qp.peer != context_id)) {
+			result = VMCI_ERROR_NO_ACCESS;
+			goto error;
+		}
+
+		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
+			result = VMCI_ERROR_NOT_FOUND;
+			goto error;
+		}
+	} else {
+		result = qp_alloc_hypercall(queue_pair_entry);
+		if (result < VMCI_SUCCESS) {
+			pr_warn("qp_alloc_hypercall result = %d\n", result);
+			goto error;
+		}
+	}
+
+	qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
+			    (struct vmci_queue *)my_consume_q);
+
+	qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);
+
+ out:
+	queue_pair_entry->qp.ref_count++;
+	*handle = queue_pair_entry->qp.handle;
+	*produce_q = (struct vmci_queue *)my_produce_q;
+	*consume_q = (struct vmci_queue *)my_consume_q;
+
+	/*
+	 * We should initialize the queue pair header pages on a local
+	 * queue pair create. For non-local queue pairs, the
+	 * hypervisor initializes the header pages in the create step.
+	 */
+	if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
+	    queue_pair_entry->qp.ref_count == 1) {
+		vmci_q_header_init((*produce_q)->q_header, *handle);
+		vmci_q_header_init((*consume_q)->q_header, *handle);
+	}
+
+	mutex_unlock(&qp_guest_endpoints.mutex);
+
+	return VMCI_SUCCESS;
+
+ error:
+	mutex_unlock(&qp_guest_endpoints.mutex);
+	if (queue_pair_entry) {
+		/* The queues will be freed inside the destroy routine. */
+		qp_guest_endpoint_destroy(queue_pair_entry);
+	} else {
+		qp_free_queue(my_produce_q, produce_size);
+		qp_free_queue(my_consume_q, consume_size);
+	}
+	return result;
+
+ error_keep_entry:
+	/* This path should only be used when an existing entry was found. */
+	mutex_unlock(&qp_guest_endpoints.mutex);
+	return result;
+}
+
+/*
+ * The first endpoint issuing a queue pair allocation will create the state
+ * of the queue pair in the queue pair broker.
+ *
+ * If the creator is a guest, it will associate a VMX virtual address range
+ * with the queue pair as specified by the page_store. For compatibility
+ * with older VMX'en that used a separate step to set the VMX virtual
+ * address range, the virtual address range can be registered later using
+ * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should
+ * be used.
+ *
+ * If the creator is the host, a page_store of NULL should be used as well,
+ * since the host is not able to supply a page store for the queue pair.
+ *
+ * For older VMX and host callers, the queue pair will be created in the
+ * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
+ * created in the VMCIQPB_CREATED_MEM state.
+ */
+static int qp_broker_create(struct vmci_handle handle,
+			    u32 peer,
+			    u32 flags,
+			    u32 priv_flags,
+			    u64 produce_size,
+			    u64 consume_size,
+			    struct vmci_qp_page_store *page_store,
+			    struct vmci_ctx *context,
+			    vmci_event_release_cb wakeup_cb,
+			    void *client_data, struct qp_broker_entry **ent)
+{
+	struct qp_broker_entry *entry = NULL;
+	const u32 context_id = vmci_ctx_get_id(context);
+	bool is_local = flags & VMCI_QPFLAG_LOCAL;
+	int result;
+	u64 guest_produce_size;
+	u64 guest_consume_size;
+
+	/* Do not create if the caller asked not to. */
+	if (flags & VMCI_QPFLAG_ATTACH_ONLY)
+		return VMCI_ERROR_NOT_FOUND;
+
+	/*
+	 * Creator's context ID should match handle's context ID or the creator
+	 * must allow the context in handle's context ID as the "peer".
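+	 * For example, context 5 may create handle (5, X) outright, while
+	 * creating handle (7, X) from context 5 requires peer == 7.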
+ */ + if (handle.context != context_id && handle.context != peer) + return VMCI_ERROR_NO_ACCESS; + + if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer)) + return VMCI_ERROR_DST_UNREACHABLE; + + /* + * Creator's context ID for local queue pairs should match the + * peer, if a peer is specified. + */ + if (is_local && peer != VMCI_INVALID_ID && context_id != peer) + return VMCI_ERROR_NO_ACCESS; + + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return VMCI_ERROR_NO_MEM; + + if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) { + /* + * The queue pair broker entry stores values from the guest + * point of view, so a creating host side endpoint should swap + * produce and consume values -- unless it is a local queue + * pair, in which case no swapping is necessary, since the local + * attacher will swap queues. + */ + + guest_produce_size = consume_size; + guest_consume_size = produce_size; + } else { + guest_produce_size = produce_size; + guest_consume_size = consume_size; + } + + entry->qp.handle = handle; + entry->qp.peer = peer; + entry->qp.flags = flags; + entry->qp.produce_size = guest_produce_size; + entry->qp.consume_size = guest_consume_size; + entry->qp.ref_count = 1; + entry->create_id = context_id; + entry->attach_id = VMCI_INVALID_ID; + entry->state = VMCIQPB_NEW; + entry->require_trusted_attach = + !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED); + entry->created_by_trusted = + !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED); + entry->vmci_page_files = false; + entry->wakeup_cb = wakeup_cb; + entry->client_data = client_data; + entry->produce_q = qp_host_alloc_queue(guest_produce_size); + if (entry->produce_q == NULL) { + result = VMCI_ERROR_NO_MEM; + goto error; + } + entry->consume_q = qp_host_alloc_queue(guest_consume_size); + if (entry->consume_q == NULL) { + result = VMCI_ERROR_NO_MEM; + goto error; + } + + qp_init_queue_mutex(entry->produce_q, entry->consume_q); + + INIT_LIST_HEAD(&entry->qp.list_item); + + if (is_local) { + u8 *tmp; + + entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp), + PAGE_SIZE, GFP_KERNEL); + if (entry->local_mem == NULL) { + result = VMCI_ERROR_NO_MEM; + goto error; + } + entry->state = VMCIQPB_CREATED_MEM; + entry->produce_q->q_header = entry->local_mem; + tmp = (u8 *)entry->local_mem + PAGE_SIZE * + (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1); + entry->consume_q->q_header = (struct vmci_queue_header *)tmp; + } else if (page_store) { + /* + * The VMX already initialized the queue pair headers, so no + * need for the kernel side to do that. + */ + result = qp_host_register_user_memory(page_store, + entry->produce_q, + entry->consume_q); + if (result < VMCI_SUCCESS) + goto error; + + entry->state = VMCIQPB_CREATED_MEM; + } else { + /* + * A create without a page_store may be either a host + * side create (in which case we are waiting for the + * guest side to supply the memory) or an old style + * queue pair create (in which case we will expect a + * set page store call as the next step). 
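+		 *
+		 * An old style VMX would thus issue, in order (an
+		 * illustrative sketch, not literal driver code):
+		 *
+		 *	vmci_qp_broker_alloc(handle, ..., NULL, context);
+		 *	vmci_qp_broker_set_page_store(handle, produce_uva,
+		 *				      consume_uva, context);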
+		 */
+		entry->state = VMCIQPB_CREATED_NO_MEM;
+	}
+
+	qp_list_add_entry(&qp_broker_list, &entry->qp);
+	if (ent != NULL)
+		*ent = entry;
+
+	/* Add to resource obj */
+	result = vmci_resource_add(&entry->resource,
+				   VMCI_RESOURCE_TYPE_QPAIR_HOST,
+				   handle);
+	if (result != VMCI_SUCCESS) {
+		pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
+			handle.context, handle.resource, result);
+		goto error;
+	}
+
+	entry->qp.handle = vmci_resource_handle(&entry->resource);
+	if (is_local) {
+		vmci_q_header_init(entry->produce_q->q_header,
+				   entry->qp.handle);
+		vmci_q_header_init(entry->consume_q->q_header,
+				   entry->qp.handle);
+	}
+
+	vmci_ctx_qp_create(context, entry->qp.handle);
+
+	return VMCI_SUCCESS;
+
+ error:
+	if (entry != NULL) {
+		qp_host_free_queue(entry->produce_q, guest_produce_size);
+		qp_host_free_queue(entry->consume_q, guest_consume_size);
+		kfree(entry);
+	}
+
+	return result;
+}
+
+/*
+ * Enqueues an event datagram to notify the peer VM attached to
+ * the given queue pair handle about attach/detach event by the
+ * given VM. Returns payload size of datagram enqueued on
+ * success, error code otherwise.
+ */
+static int qp_notify_peer(bool attach,
+			  struct vmci_handle handle,
+			  u32 my_id,
+			  u32 peer_id)
+{
+	int rv;
+	struct vmci_event_qp ev;
+
+	if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
+	    peer_id == VMCI_INVALID_ID)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	/*
+	 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
+	 * number of pending events from the hypervisor to a given VM
+	 * otherwise a rogue VM could do an arbitrary number of attach
+	 * and detach operations causing memory pressure in the host
+	 * kernel.
+	 */
+
+	memset(&ev, 0, sizeof(ev));
+	ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
+	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+					  VMCI_CONTEXT_RESOURCE_ID);
+	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+	ev.msg.event_data.event = attach ?
+	    VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
+	ev.payload.handle = handle;
+	ev.payload.peer_id = my_id;
+
+	rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
+				    &ev.msg.hdr, false);
+	if (rv < VMCI_SUCCESS)
+		pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
+			attach ? "ATTACH" : "DETACH", peer_id);
+
+	return rv;
+}
+
+/*
+ * The second endpoint issuing a queue pair allocation will attach to
+ * the queue pair registered with the queue pair broker.
+ *
+ * If the attacher is a guest, it will associate a VMX virtual address
+ * range with the queue pair as specified by the page_store. At this
+ * point, the already attached host endpoint may start using the queue
+ * pair, and an attach event is sent to it. For compatibility with
+ * older VMX'en that used a separate step to set the VMX virtual
+ * address range, the virtual address range can be registered later
+ * using vmci_qp_broker_set_page_store. In that case, a page_store of
+ * NULL should be used, and the attach event will be generated once
+ * the actual page store has been set.
+ *
+ * If the attacher is the host, a page_store of NULL should be used as
+ * well, since the page store information is already set by the guest.
+ *
+ * For new VMX and host callers, the queue pair will be moved to the
+ * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
+ * moved to the VMCIQPB_ATTACHED_NO_MEM state.
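+ *
+ * A compact summary of the state transitions described above
+ * (illustrative):
+ *
+ *	create:          VMCIQPB_NEW -> VMCIQPB_CREATED_{MEM,NO_MEM}
+ *	attach:          VMCIQPB_CREATED_* -> VMCIQPB_ATTACHED_{MEM,NO_MEM}
+ *	set_page_store:  VMCIQPB_*_NO_MEM -> VMCIQPB_*_MEM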
+ */ +static int qp_broker_attach(struct qp_broker_entry *entry, + u32 peer, + u32 flags, + u32 priv_flags, + u64 produce_size, + u64 consume_size, + struct vmci_qp_page_store *page_store, + struct vmci_ctx *context, + vmci_event_release_cb wakeup_cb, + void *client_data, + struct qp_broker_entry **ent) +{ + const u32 context_id = vmci_ctx_get_id(context); + bool is_local = flags & VMCI_QPFLAG_LOCAL; + int result; + + if (entry->state != VMCIQPB_CREATED_NO_MEM && + entry->state != VMCIQPB_CREATED_MEM) + return VMCI_ERROR_UNAVAILABLE; + + if (is_local) { + if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) || + context_id != entry->create_id) { + return VMCI_ERROR_INVALID_ARGS; + } + } else if (context_id == entry->create_id || + context_id == entry->attach_id) { + return VMCI_ERROR_ALREADY_EXISTS; + } + + if (VMCI_CONTEXT_IS_VM(context_id) && + VMCI_CONTEXT_IS_VM(entry->create_id)) + return VMCI_ERROR_DST_UNREACHABLE; + + /* + * If we are attaching from a restricted context then the queuepair + * must have been created by a trusted endpoint. + */ + if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) && + !entry->created_by_trusted) + return VMCI_ERROR_NO_ACCESS; + + /* + * If we are attaching to a queuepair that was created by a restricted + * context then we must be trusted. + */ + if (entry->require_trusted_attach && + (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED))) + return VMCI_ERROR_NO_ACCESS; + + /* + * If the creator specifies VMCI_INVALID_ID in "peer" field, access + * control check is not performed. + */ + if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id) + return VMCI_ERROR_NO_ACCESS; + + if (entry->create_id == VMCI_HOST_CONTEXT_ID) { + /* + * Do not attach if the caller doesn't support Host Queue Pairs + * and a host created this queue pair. + */ + + if (!vmci_ctx_supports_host_qp(context)) + return VMCI_ERROR_INVALID_RESOURCE; + + } else if (context_id == VMCI_HOST_CONTEXT_ID) { + struct vmci_ctx *create_context; + bool supports_host_qp; + + /* + * Do not attach a host to a user created queue pair if that + * user doesn't support host queue pair end points. + */ + + create_context = vmci_ctx_get(entry->create_id); + supports_host_qp = vmci_ctx_supports_host_qp(create_context); + vmci_ctx_put(create_context); + + if (!supports_host_qp) + return VMCI_ERROR_INVALID_RESOURCE; + } + + if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER)) + return VMCI_ERROR_QUEUEPAIR_MISMATCH; + + if (context_id != VMCI_HOST_CONTEXT_ID) { + /* + * The queue pair broker entry stores values from the guest + * point of view, so an attaching guest should match the values + * stored in the entry. + */ + + if (entry->qp.produce_size != produce_size || + entry->qp.consume_size != consume_size) { + return VMCI_ERROR_QUEUEPAIR_MISMATCH; + } + } else if (entry->qp.produce_size != consume_size || + entry->qp.consume_size != produce_size) { + return VMCI_ERROR_QUEUEPAIR_MISMATCH; + } + + if (context_id != VMCI_HOST_CONTEXT_ID) { + /* + * If a guest attached to a queue pair, it will supply + * the backing memory. If this is a pre NOVMVM vmx, + * the backing memory will be supplied by calling + * vmci_qp_broker_set_page_store() following the + * return of the vmci_qp_broker_alloc() call. If it is + * a vmx of version NOVMVM or later, the page store + * must be supplied as part of the + * vmci_qp_broker_alloc call. Under all circumstances + * must the initially created queue pair not have any + * memory associated with it already. 
+ */ + + if (entry->state != VMCIQPB_CREATED_NO_MEM) + return VMCI_ERROR_INVALID_ARGS; + + if (page_store != NULL) { + /* + * Patch up host state to point to guest + * supplied memory. The VMX already + * initialized the queue pair headers, so no + * need for the kernel side to do that. + */ + + result = qp_host_register_user_memory(page_store, + entry->produce_q, + entry->consume_q); + if (result < VMCI_SUCCESS) + return result; + + entry->state = VMCIQPB_ATTACHED_MEM; + } else { + entry->state = VMCIQPB_ATTACHED_NO_MEM; + } + } else if (entry->state == VMCIQPB_CREATED_NO_MEM) { + /* + * The host side is attempting to attach to a queue + * pair that doesn't have any memory associated with + * it. This must be a pre NOVMVM vmx that hasn't set + * the page store information yet, or a quiesced VM. + */ + + return VMCI_ERROR_UNAVAILABLE; + } else { + /* The host side has successfully attached to a queue pair. */ + entry->state = VMCIQPB_ATTACHED_MEM; + } + + if (entry->state == VMCIQPB_ATTACHED_MEM) { + result = + qp_notify_peer(true, entry->qp.handle, context_id, + entry->create_id); + if (result < VMCI_SUCCESS) + pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", + entry->create_id, entry->qp.handle.context, + entry->qp.handle.resource); + } + + entry->attach_id = context_id; + entry->qp.ref_count++; + if (wakeup_cb) { + entry->wakeup_cb = wakeup_cb; + entry->client_data = client_data; + } + + /* + * When attaching to local queue pairs, the context already has + * an entry tracking the queue pair, so don't add another one. + */ + if (!is_local) + vmci_ctx_qp_create(context, entry->qp.handle); + + if (ent != NULL) + *ent = entry; + + return VMCI_SUCCESS; +} + +/* + * queue_pair_Alloc for use when setting up queue pair endpoints + * on the host. + */ +static int qp_broker_alloc(struct vmci_handle handle, + u32 peer, + u32 flags, + u32 priv_flags, + u64 produce_size, + u64 consume_size, + struct vmci_qp_page_store *page_store, + struct vmci_ctx *context, + vmci_event_release_cb wakeup_cb, + void *client_data, + struct qp_broker_entry **ent, + bool *swap) +{ + const u32 context_id = vmci_ctx_get_id(context); + bool create; + struct qp_broker_entry *entry = NULL; + bool is_local = flags & VMCI_QPFLAG_LOCAL; + int result; + + if (vmci_handle_is_invalid(handle) || + (flags & ~VMCI_QP_ALL_FLAGS) || is_local || + !(produce_size || consume_size) || + !context || context_id == VMCI_INVALID_ID || + handle.context == VMCI_INVALID_ID) { + return VMCI_ERROR_INVALID_ARGS; + } + + if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store)) + return VMCI_ERROR_INVALID_ARGS; + + /* + * In the initial argument check, we ensure that non-vmkernel hosts + * are not allowed to create local queue pairs. 
+ */ + + mutex_lock(&qp_broker_list.mutex); + + if (!is_local && vmci_ctx_qp_exists(context, handle)) { + pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n", + context_id, handle.context, handle.resource); + mutex_unlock(&qp_broker_list.mutex); + return VMCI_ERROR_ALREADY_EXISTS; + } + + if (handle.resource != VMCI_INVALID_ID) + entry = qp_broker_handle_to_entry(handle); + + if (!entry) { + create = true; + result = + qp_broker_create(handle, peer, flags, priv_flags, + produce_size, consume_size, page_store, + context, wakeup_cb, client_data, ent); + } else { + create = false; + result = + qp_broker_attach(entry, peer, flags, priv_flags, + produce_size, consume_size, page_store, + context, wakeup_cb, client_data, ent); + } + + mutex_unlock(&qp_broker_list.mutex); + + if (swap) + *swap = (context_id == VMCI_HOST_CONTEXT_ID) && + !(create && is_local); + + return result; +} + +/* + * This function implements the kernel API for allocating a queue + * pair. + */ +static int qp_alloc_host_work(struct vmci_handle *handle, + struct vmci_queue **produce_q, + u64 produce_size, + struct vmci_queue **consume_q, + u64 consume_size, + u32 peer, + u32 flags, + u32 priv_flags, + vmci_event_release_cb wakeup_cb, + void *client_data) +{ + struct vmci_handle new_handle; + struct vmci_ctx *context; + struct qp_broker_entry *entry; + int result; + bool swap; + + if (vmci_handle_is_invalid(*handle)) { + new_handle = vmci_make_handle( + VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID); + } else + new_handle = *handle; + + context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID); + entry = NULL; + result = + qp_broker_alloc(new_handle, peer, flags, priv_flags, + produce_size, consume_size, NULL, context, + wakeup_cb, client_data, &entry, &swap); + if (result == VMCI_SUCCESS) { + if (swap) { + /* + * If this is a local queue pair, the attacher + * will swap around produce and consume + * queues. + */ + + *produce_q = entry->consume_q; + *consume_q = entry->produce_q; + } else { + *produce_q = entry->produce_q; + *consume_q = entry->consume_q; + } + + *handle = vmci_resource_handle(&entry->resource); + } else { + *handle = VMCI_INVALID_HANDLE; + pr_devel("queue pair broker failed to alloc (result=%d)\n", + result); + } + vmci_ctx_put(context); + return result; +} + +/* + * Allocates a VMCI queue_pair. Only checks validity of input + * arguments. The real work is done in the host or guest + * specific function. + */ +int vmci_qp_alloc(struct vmci_handle *handle, + struct vmci_queue **produce_q, + u64 produce_size, + struct vmci_queue **consume_q, + u64 consume_size, + u32 peer, + u32 flags, + u32 priv_flags, + bool guest_endpoint, + vmci_event_release_cb wakeup_cb, + void *client_data) +{ + if (!handle || !produce_q || !consume_q || + (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS)) + return VMCI_ERROR_INVALID_ARGS; + + if (guest_endpoint) { + return qp_alloc_guest_work(handle, produce_q, + produce_size, consume_q, + consume_size, peer, + flags, priv_flags); + } else { + return qp_alloc_host_work(handle, produce_q, + produce_size, consume_q, + consume_size, peer, flags, + priv_flags, wakeup_cb, client_data); + } +} + +/* + * This function implements the host kernel API for detaching from + * a queue pair. 
+ */
+static int qp_detatch_host_work(struct vmci_handle handle)
+{
+	int result;
+	struct vmci_ctx *context;
+
+	context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
+
+	result = vmci_qp_broker_detach(handle, context);
+
+	vmci_ctx_put(context);
+	return result;
+}
+
+/*
+ * Detaches from a VMCI queue_pair. Only checks validity of input argument.
+ * Real work is done in the host or guest specific function.
+ */
+static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
+{
+	if (vmci_handle_is_invalid(handle))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (guest_endpoint)
+		return qp_detatch_guest_work(handle);
+	else
+		return qp_detatch_host_work(handle);
+}
+
+/*
+ * Returns the entry from the head of the list. Assumes that the list is
+ * locked.
+ */
+static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
+{
+	if (!list_empty(&qp_list->head)) {
+		struct qp_entry *entry =
+		    list_first_entry(&qp_list->head, struct qp_entry,
+				     list_item);
+		return entry;
+	}
+
+	return NULL;
+}
+
+void vmci_qp_broker_exit(void)
+{
+	struct qp_entry *entry;
+	struct qp_broker_entry *be;
+
+	mutex_lock(&qp_broker_list.mutex);
+
+	while ((entry = qp_list_get_head(&qp_broker_list))) {
+		be = (struct qp_broker_entry *)entry;
+
+		qp_list_remove_entry(&qp_broker_list, entry);
+		kfree(be);
+	}
+
+	mutex_unlock(&qp_broker_list.mutex);
+}
+
+/*
+ * Requests that a queue pair be allocated with the VMCI queue
+ * pair broker. Allocates a queue pair entry if one does not
+ * exist. Attaches to one if it exists, and retrieves the page
+ * files backing that queue_pair. Assumes that the queue pair
+ * broker lock is held.
+ */
+int vmci_qp_broker_alloc(struct vmci_handle handle,
+			 u32 peer,
+			 u32 flags,
+			 u32 priv_flags,
+			 u64 produce_size,
+			 u64 consume_size,
+			 struct vmci_qp_page_store *page_store,
+			 struct vmci_ctx *context)
+{
+	return qp_broker_alloc(handle, peer, flags, priv_flags,
+			       produce_size, consume_size,
+			       page_store, context, NULL, NULL, NULL, NULL);
+}
+
+/*
+ * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
+ * step to add the UVAs of the VMX mapping of the queue pair. This function
+ * provides backwards compatibility with such VMX'en, and takes care of
+ * registering the page store for a queue pair previously allocated by the
+ * VMX during create or attach. This function will move the queue pair state
+ * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
+ * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
+ * attached state with memory, the queue pair is ready to be used by the
+ * host peer, and an attached event will be generated.
+ *
+ * Assumes that the queue pair broker lock is held.
+ *
+ * This function is only used by the hosted platform, since there is no
+ * issue with backwards compatibility for vmkernel.
+ */
+int vmci_qp_broker_set_page_store(struct vmci_handle handle,
+				  u64 produce_uva,
+				  u64 consume_uva,
+				  struct vmci_ctx *context)
+{
+	struct qp_broker_entry *entry;
+	int result;
+	const u32 context_id = vmci_ctx_get_id(context);
+
+	if (vmci_handle_is_invalid(handle) || !context ||
+	    context_id == VMCI_INVALID_ID)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	/*
+	 * We only support guest to host queue pairs, so the VMX must
+	 * supply UVAs for the mapped page files.
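+	 * (These UVAs are the VMX's mmap()'d views of the queue pair
+	 * backing files; compare the produce_va/consume_va fields of
+	 * struct vmci_qp_page_file_info.)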
+ */ + + if (produce_uva == 0 || consume_uva == 0) + return VMCI_ERROR_INVALID_ARGS; + + mutex_lock(&qp_broker_list.mutex); + + if (!vmci_ctx_qp_exists(context, handle)) { + pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + entry = qp_broker_handle_to_entry(handle); + if (!entry) { + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + /* + * If I'm the owner then I can set the page store. + * + * Or, if a host created the queue_pair and I'm the attached peer + * then I can set the page store. + */ + if (entry->create_id != context_id && + (entry->create_id != VMCI_HOST_CONTEXT_ID || + entry->attach_id != context_id)) { + result = VMCI_ERROR_QUEUEPAIR_NOTOWNER; + goto out; + } + + if (entry->state != VMCIQPB_CREATED_NO_MEM && + entry->state != VMCIQPB_ATTACHED_NO_MEM) { + result = VMCI_ERROR_UNAVAILABLE; + goto out; + } + + result = qp_host_get_user_memory(produce_uva, consume_uva, + entry->produce_q, entry->consume_q); + if (result < VMCI_SUCCESS) + goto out; + + result = qp_host_map_queues(entry->produce_q, entry->consume_q); + if (result < VMCI_SUCCESS) { + qp_host_unregister_user_memory(entry->produce_q, + entry->consume_q); + goto out; + } + + if (entry->state == VMCIQPB_CREATED_NO_MEM) + entry->state = VMCIQPB_CREATED_MEM; + else + entry->state = VMCIQPB_ATTACHED_MEM; + + entry->vmci_page_files = true; + + if (entry->state == VMCIQPB_ATTACHED_MEM) { + result = + qp_notify_peer(true, handle, context_id, entry->create_id); + if (result < VMCI_SUCCESS) { + pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", + entry->create_id, entry->qp.handle.context, + entry->qp.handle.resource); + } + } + + result = VMCI_SUCCESS; + out: + mutex_unlock(&qp_broker_list.mutex); + return result; +} + +/* + * Resets saved queue headers for the given QP broker + * entry. Should be used when guest memory becomes available + * again, or the guest detaches. + */ +static void qp_reset_saved_headers(struct qp_broker_entry *entry) +{ + entry->produce_q->saved_header = NULL; + entry->consume_q->saved_header = NULL; +} + +/* + * The main entry point for detaching from a queue pair registered with the + * queue pair broker. If more than one endpoint is attached to the queue + * pair, the first endpoint will mainly decrement a reference count and + * generate a notification to its peer. The last endpoint will clean up + * the queue pair state registered with the broker. + * + * When a guest endpoint detaches, it will unmap and unregister the guest + * memory backing the queue pair. If the host is still attached, it will + * no longer be able to access the queue pair content. + * + * If the queue pair is already in a state where there is no memory + * registered for the queue pair (any *_NO_MEM state), it will transition to + * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest + * endpoint is the first of two endpoints to detach. If the host endpoint is + * the first out of two to detach, the queue pair will move to the + * VMCIQPB_SHUTDOWN_MEM state. 
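+ *
+ * In summary (illustrative):
+ *
+ *	first detach by guest:  *_MEM or *_NO_MEM -> VMCIQPB_SHUTDOWN_NO_MEM
+ *	first detach by host:   *_MEM -> VMCIQPB_SHUTDOWN_MEM
+ *	last detach:            broker state freed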
+ */ +int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context) +{ + struct qp_broker_entry *entry; + const u32 context_id = vmci_ctx_get_id(context); + u32 peer_id; + bool is_local = false; + int result; + + if (vmci_handle_is_invalid(handle) || !context || + context_id == VMCI_INVALID_ID) { + return VMCI_ERROR_INVALID_ARGS; + } + + mutex_lock(&qp_broker_list.mutex); + + if (!vmci_ctx_qp_exists(context, handle)) { + pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + entry = qp_broker_handle_to_entry(handle); + if (!entry) { + pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + if (context_id != entry->create_id && context_id != entry->attach_id) { + result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; + goto out; + } + + if (context_id == entry->create_id) { + peer_id = entry->attach_id; + entry->create_id = VMCI_INVALID_ID; + } else { + peer_id = entry->create_id; + entry->attach_id = VMCI_INVALID_ID; + } + entry->qp.ref_count--; + + is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; + + if (context_id != VMCI_HOST_CONTEXT_ID) { + bool headers_mapped; + + /* + * Pre NOVMVM vmx'en may detach from a queue pair + * before setting the page store, and in that case + * there is no user memory to detach from. Also, more + * recent VMX'en may detach from a queue pair in the + * quiesced state. + */ + + qp_acquire_queue_mutex(entry->produce_q); + headers_mapped = entry->produce_q->q_header || + entry->consume_q->q_header; + if (QPBROKERSTATE_HAS_MEM(entry)) { + result = + qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID, + entry->produce_q, + entry->consume_q); + if (result < VMCI_SUCCESS) + pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", + handle.context, handle.resource, + result); + + qp_host_unregister_user_memory(entry->produce_q, + entry->consume_q); + + } + + if (!headers_mapped) + qp_reset_saved_headers(entry); + + qp_release_queue_mutex(entry->produce_q); + + if (!headers_mapped && entry->wakeup_cb) + entry->wakeup_cb(entry->client_data); + + } else { + if (entry->wakeup_cb) { + entry->wakeup_cb = NULL; + entry->client_data = NULL; + } + } + + if (entry->qp.ref_count == 0) { + qp_list_remove_entry(&qp_broker_list, &entry->qp); + + if (is_local) + kfree(entry->local_mem); + + qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); + qp_host_free_queue(entry->produce_q, entry->qp.produce_size); + qp_host_free_queue(entry->consume_q, entry->qp.consume_size); + /* Unlink from resource hash table and free callback */ + vmci_resource_remove(&entry->resource); + + kfree(entry); + + vmci_ctx_qp_destroy(context, handle); + } else { + qp_notify_peer(false, handle, context_id, peer_id); + if (context_id == VMCI_HOST_CONTEXT_ID && + QPBROKERSTATE_HAS_MEM(entry)) { + entry->state = VMCIQPB_SHUTDOWN_MEM; + } else { + entry->state = VMCIQPB_SHUTDOWN_NO_MEM; + } + + if (!is_local) + vmci_ctx_qp_destroy(context, handle); + + } + result = VMCI_SUCCESS; + out: + mutex_unlock(&qp_broker_list.mutex); + return result; +} + +/* + * Establishes the necessary mappings for a queue pair given a + * reference to the queue pair guest memory. This is usually + * called when a guest is unquiesced and the VMX is allowed to + * map guest memory once again. 
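+ *
+ * Note that the implementation below performs the *_NO_MEM to *_MEM
+ * transition with a bare entry->state++ (mirrored by entry->state-- in
+ * vmci_qp_broker_unmap), which relies on each *_NO_MEM state value
+ * immediately preceding its *_MEM counterpart in the state enum.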
+ */
+int vmci_qp_broker_map(struct vmci_handle handle,
+		       struct vmci_ctx *context,
+		       u64 guest_mem)
+{
+	struct qp_broker_entry *entry;
+	const u32 context_id = vmci_ctx_get_id(context);
+	int result;
+
+	if (vmci_handle_is_invalid(handle) || !context ||
+	    context_id == VMCI_INVALID_ID)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	mutex_lock(&qp_broker_list.mutex);
+
+	if (!vmci_ctx_qp_exists(context, handle)) {
+		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+			 context_id, handle.context, handle.resource);
+		result = VMCI_ERROR_NOT_FOUND;
+		goto out;
+	}
+
+	entry = qp_broker_handle_to_entry(handle);
+	if (!entry) {
+		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+			 context_id, handle.context, handle.resource);
+		result = VMCI_ERROR_NOT_FOUND;
+		goto out;
+	}
+
+	if (context_id != entry->create_id && context_id != entry->attach_id) {
+		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+		goto out;
+	}
+
+	result = VMCI_SUCCESS;
+
+	if (context_id != VMCI_HOST_CONTEXT_ID &&
+	    !QPBROKERSTATE_HAS_MEM(entry)) {
+		struct vmci_qp_page_store page_store;
+
+		page_store.pages = guest_mem;
+		page_store.len = QPE_NUM_PAGES(entry->qp);
+
+		qp_acquire_queue_mutex(entry->produce_q);
+		qp_reset_saved_headers(entry);
+		result =
+		    qp_host_register_user_memory(&page_store,
+						 entry->produce_q,
+						 entry->consume_q);
+		qp_release_queue_mutex(entry->produce_q);
+		if (result == VMCI_SUCCESS) {
+			/* Move state from *_NO_MEM to *_MEM */
+
+			entry->state++;
+
+			if (entry->wakeup_cb)
+				entry->wakeup_cb(entry->client_data);
+		}
+	}
+
+ out:
+	mutex_unlock(&qp_broker_list.mutex);
+	return result;
+}
+
+/*
+ * Saves a snapshot of the queue headers for the given QP broker
+ * entry. Should be used when guest memory is unmapped.
+ * Results:
+ * VMCI_SUCCESS on success, appropriate error code if guest memory
+ * can't be accessed.
+ */
+static int qp_save_headers(struct qp_broker_entry *entry)
+{
+	int result;
+
+	if (entry->produce_q->saved_header != NULL &&
+	    entry->consume_q->saved_header != NULL) {
+		/*
+		 * If the headers have already been saved, we don't need to do
+		 * it again, and we don't want to map in the headers
+		 * unnecessarily.
+		 */
+
+		return VMCI_SUCCESS;
+	}
+
+	if (NULL == entry->produce_q->q_header ||
+	    NULL == entry->consume_q->q_header) {
+		result = qp_host_map_queues(entry->produce_q, entry->consume_q);
+		if (result < VMCI_SUCCESS)
+			return result;
+	}
+
+	memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
+	       sizeof(entry->saved_produce_q));
+	entry->produce_q->saved_header = &entry->saved_produce_q;
+	memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
+	       sizeof(entry->saved_consume_q));
+	entry->consume_q->saved_header = &entry->saved_consume_q;
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Removes all references to the guest memory of a given queue pair, and
+ * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
+ * called when a VM is being quiesced where access to guest memory should
+ * be avoided.
+ */ +int vmci_qp_broker_unmap(struct vmci_handle handle, + struct vmci_ctx *context, + u32 gid) +{ + struct qp_broker_entry *entry; + const u32 context_id = vmci_ctx_get_id(context); + int result; + + if (vmci_handle_is_invalid(handle) || !context || + context_id == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + mutex_lock(&qp_broker_list.mutex); + + if (!vmci_ctx_qp_exists(context, handle)) { + pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + entry = qp_broker_handle_to_entry(handle); + if (!entry) { + pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + if (context_id != entry->create_id && context_id != entry->attach_id) { + result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; + goto out; + } + + if (context_id != VMCI_HOST_CONTEXT_ID && + QPBROKERSTATE_HAS_MEM(entry)) { + qp_acquire_queue_mutex(entry->produce_q); + result = qp_save_headers(entry); + if (result < VMCI_SUCCESS) + pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", + handle.context, handle.resource, result); + + qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); + + /* + * On hosted, when we unmap queue pairs, the VMX will also + * unmap the guest memory, so we invalidate the previously + * registered memory. If the queue pair is mapped again at a + * later point in time, we will need to reregister the user + * memory with a possibly new user VA. + */ + qp_host_unregister_user_memory(entry->produce_q, + entry->consume_q); + + /* + * Move state from *_MEM to *_NO_MEM. + */ + entry->state--; + + qp_release_queue_mutex(entry->produce_q); + } + + result = VMCI_SUCCESS; + + out: + mutex_unlock(&qp_broker_list.mutex); + return result; +} + +/* + * Destroys all guest queue pair endpoints. If active guest queue + * pairs still exist, hypercalls to attempt detach from these + * queue pairs will be made. Any failure to detach is silently + * ignored. + */ +void vmci_qp_guest_endpoints_exit(void) +{ + struct qp_entry *entry; + struct qp_guest_endpoint *ep; + + mutex_lock(&qp_guest_endpoints.mutex); + + while ((entry = qp_list_get_head(&qp_guest_endpoints))) { + ep = (struct qp_guest_endpoint *)entry; + + /* Don't make a hypercall for local queue_pairs. */ + if (!(entry->flags & VMCI_QPFLAG_LOCAL)) + qp_detatch_hypercall(entry->handle); + + /* We cannot fail the exit, so let's reset ref_count. */ + entry->ref_count = 0; + qp_list_remove_entry(&qp_guest_endpoints, entry); + + qp_guest_endpoint_destroy(ep); + } + + mutex_unlock(&qp_guest_endpoints.mutex); +} + +/* + * Helper routine that will lock the queue pair before subsequent + * operations. + * Note: Non-blocking on the host side is currently only implemented in ESX. + * Since non-blocking isn't yet implemented on the host personality we + * have no reason to acquire a spin lock. So to avoid the use of an + * unnecessary lock only acquire the mutex if we can block. + */ +static void qp_lock(const struct vmci_qp *qpair) +{ + qp_acquire_queue_mutex(qpair->produce_q); +} + +/* + * Helper routine that unlocks the queue pair after calling + * qp_lock. + */ +static void qp_unlock(const struct vmci_qp *qpair) +{ + qp_release_queue_mutex(qpair->produce_q); +} + +/* + * The queue headers may not be mapped at all times. 
If a queue is
+ * currently not mapped, an attempt will be made to map it.
+ */
+static int qp_map_queue_headers(struct vmci_queue *produce_q,
+				struct vmci_queue *consume_q)
+{
+	int result;
+
+	if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
+		result = qp_host_map_queues(produce_q, consume_q);
+		if (result < VMCI_SUCCESS)
+			return (produce_q->saved_header &&
+				consume_q->saved_header) ?
+			    VMCI_ERROR_QUEUEPAIR_NOT_READY :
+			    VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Helper routine that will retrieve the produce and consume
+ * headers of a given queue pair. If the guest memory of the
+ * queue pair is currently not available, the saved queue headers
+ * will be returned, if these are available.
+ */
+static int qp_get_queue_headers(const struct vmci_qp *qpair,
+				struct vmci_queue_header **produce_q_header,
+				struct vmci_queue_header **consume_q_header)
+{
+	int result;
+
+	result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
+	if (result == VMCI_SUCCESS) {
+		*produce_q_header = qpair->produce_q->q_header;
+		*consume_q_header = qpair->consume_q->q_header;
+	} else if (qpair->produce_q->saved_header &&
+		   qpair->consume_q->saved_header) {
+		*produce_q_header = qpair->produce_q->saved_header;
+		*consume_q_header = qpair->consume_q->saved_header;
+		result = VMCI_SUCCESS;
+	}
+
+	return result;
+}
+
+/*
+ * Callback from VMCI queue pair broker indicating that a queue
+ * pair that was previously not ready, now either is ready or
+ * gone forever.
+ */
+static int qp_wakeup_cb(void *client_data)
+{
+	struct vmci_qp *qpair = (struct vmci_qp *)client_data;
+
+	qp_lock(qpair);
+	while (qpair->blocked > 0) {
+		qpair->blocked--;
+		qpair->generation++;
+		wake_up(&qpair->event);
+	}
+	qp_unlock(qpair);
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Makes the calling thread wait for the queue pair to become
+ * ready for host side access. Returns true when thread is
+ * woken up after queue pair state change, false otherwise.
+ */
+static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
+{
+	unsigned int generation;
+
+	qpair->blocked++;
+	generation = qpair->generation;
+	qp_unlock(qpair);
+	wait_event(qpair->event, generation != qpair->generation);
+	qp_lock(qpair);
+
+	return true;
+}
+
+/*
+ * Enqueues a given buffer to the produce queue using the provided
+ * function. As many bytes as possible (space available in the queue)
+ * are enqueued. Assumes the queue->mutex has been acquired. Returns
+ * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
+ * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
+ * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
+ * an error occurred when accessing the buffer,
+ * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
+ * available. Otherwise, the number of bytes written to the queue is
+ * returned. Updates the tail pointer of the produce queue.
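+ *
+ * For example, with produce_q_size = 16, tail = 12 and 7 bytes to
+ * write, the copy below is split into 4 bytes at offset 12 followed
+ * by 3 bytes at offset 0, after which the tail becomes
+ * (12 + 7) mod 16 = 3.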
+ */
+static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
+				 struct vmci_queue *consume_q,
+				 const u64 produce_q_size,
+				 struct iov_iter *from)
+{
+	s64 free_space;
+	u64 tail;
+	size_t buf_size = iov_iter_count(from);
+	size_t written;
+	ssize_t result;
+
+	result = qp_map_queue_headers(produce_q, consume_q);
+	if (unlikely(result != VMCI_SUCCESS))
+		return result;
+
+	free_space = vmci_q_header_free_space(produce_q->q_header,
+					      consume_q->q_header,
+					      produce_q_size);
+	if (free_space == 0)
+		return VMCI_ERROR_QUEUEPAIR_NOSPACE;
+
+	if (free_space < VMCI_SUCCESS)
+		return (ssize_t) free_space;
+
+	written = (size_t) (free_space > buf_size ? buf_size : free_space);
+	tail = vmci_q_header_producer_tail(produce_q->q_header);
+	if (likely(tail + written < produce_q_size)) {
+		result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
+	} else {
+		/* Tail pointer wraps around. */
+
+		const size_t tmp = (size_t) (produce_q_size - tail);
+
+		result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
+		if (result >= VMCI_SUCCESS)
+			result = qp_memcpy_to_queue_iter(produce_q, 0, from,
+							 written - tmp);
+	}
+
+	if (result < VMCI_SUCCESS)
+		return result;
+
+	vmci_q_header_add_producer_tail(produce_q->q_header, written,
+					produce_q_size);
+	return written;
+}
+
+/*
+ * Dequeues data (if available) from the given consume queue. Writes data
+ * to the user provided buffer using the provided function.
+ * Assumes the queue->mutex has been acquired.
+ * Results:
+ * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
+ * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
+ * (as defined by the queue size).
+ * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
+ * Otherwise the number of bytes dequeued is returned.
+ * Side effects:
+ * Updates the head pointer of the consume queue.
+ */
+static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
+				 struct vmci_queue *consume_q,
+				 const u64 consume_q_size,
+				 struct iov_iter *to,
+				 bool update_consumer)
+{
+	size_t buf_size = iov_iter_count(to);
+	s64 buf_ready;
+	u64 head;
+	size_t read;
+	ssize_t result;
+
+	result = qp_map_queue_headers(produce_q, consume_q);
+	if (unlikely(result != VMCI_SUCCESS))
+		return result;
+
+	buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
+					    produce_q->q_header,
+					    consume_q_size);
+	if (buf_ready == 0)
+		return VMCI_ERROR_QUEUEPAIR_NODATA;
+
+	if (buf_ready < VMCI_SUCCESS)
+		return (ssize_t) buf_ready;
+
+	read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
+	head = vmci_q_header_consumer_head(produce_q->q_header);
+	if (likely(head + read < consume_q_size)) {
+		result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
+	} else {
+		/* Head pointer wraps around. */
+
+		const size_t tmp = (size_t) (consume_q_size - head);
+
+		result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
+		if (result >= VMCI_SUCCESS)
+			result = qp_memcpy_from_queue_iter(to, consume_q, 0,
+							   read - tmp);
+
+	}
+
+	if (result < VMCI_SUCCESS)
+		return result;
+
+	if (update_consumer)
+		vmci_q_header_add_consumer_head(produce_q->q_header,
+						read, consume_q_size);
+
+	return read;
+}
+
+/*
+ * vmci_qpair_alloc() - Allocates a queue pair.
+ * @qpair: Pointer for the new vmci_qp struct.
+ * @handle: Handle to track the resource.
+ * @produce_qsize: Desired size of the producer queue.
+ * @consume_qsize: Desired size of the consumer queue.
+ * @peer: ContextID of the peer.
+ * @flags: VMCI flags.
+ * @priv_flags: VMCI privilege flags.
+ *
+ * This is the client interface for allocating the memory for a
+ * vmci_qp structure and then attaching to the underlying
+ * queue. If an error occurs allocating the memory for the
+ * vmci_qp structure no attempt is made to attach. If an
+ * error occurs attaching, then the structure is freed.
+ */
+int vmci_qpair_alloc(struct vmci_qp **qpair,
+		     struct vmci_handle *handle,
+		     u64 produce_qsize,
+		     u64 consume_qsize,
+		     u32 peer,
+		     u32 flags,
+		     u32 priv_flags)
+{
+	struct vmci_qp *my_qpair;
+	int retval;
+	struct vmci_handle src = VMCI_INVALID_HANDLE;
+	struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
+	enum vmci_route route;
+	vmci_event_release_cb wakeup_cb;
+	void *client_data;
+
+	/*
+	 * Restrict the size of a queuepair. The device already
+	 * enforces a limit on the total amount of memory that can be
+	 * allocated to queuepairs for a guest. However, we try to
+	 * allocate this memory before we make the queuepair
+	 * allocation hypercall. On Linux, we allocate each page
+	 * separately, which means rather than fail, the guest will
+	 * thrash while it tries to allocate, and will become
+	 * increasingly unresponsive to the point where it appears to
+	 * be hung. So we place a limit on the size of an individual
+	 * queuepair here, and leave the device to enforce the
+	 * restriction on total queuepair memory. (Note that this
+	 * doesn't prevent all cases; a user with only this much
+	 * physical memory could still get into trouble.) The error
+	 * used by the device is NO_RESOURCES, so use that here too.
+	 */
+
+	if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
+	    produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
+		return VMCI_ERROR_NO_RESOURCES;
+
+	retval = vmci_route(&src, &dst, false, &route);
+	if (retval < VMCI_SUCCESS)
+		route = vmci_guest_code_active() ?
+		    VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
+
+	if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
+		pr_devel("NONBLOCK OR PINNED set");
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
+	if (!my_qpair)
+		return VMCI_ERROR_NO_MEM;
+
+	my_qpair->produce_q_size = produce_qsize;
+	my_qpair->consume_q_size = consume_qsize;
+	my_qpair->peer = peer;
+	my_qpair->flags = flags;
+	my_qpair->priv_flags = priv_flags;
+
+	wakeup_cb = NULL;
+	client_data = NULL;
+
+	if (VMCI_ROUTE_AS_HOST == route) {
+		my_qpair->guest_endpoint = false;
+		if (!(flags & VMCI_QPFLAG_LOCAL)) {
+			my_qpair->blocked = 0;
+			my_qpair->generation = 0;
+			init_waitqueue_head(&my_qpair->event);
+			wakeup_cb = qp_wakeup_cb;
+			client_data = (void *)my_qpair;
+		}
+	} else {
+		my_qpair->guest_endpoint = true;
+	}
+
+	retval = vmci_qp_alloc(handle,
+			       &my_qpair->produce_q,
+			       my_qpair->produce_q_size,
+			       &my_qpair->consume_q,
+			       my_qpair->consume_q_size,
+			       my_qpair->peer,
+			       my_qpair->flags,
+			       my_qpair->priv_flags,
+			       my_qpair->guest_endpoint,
+			       wakeup_cb, client_data);
+
+	if (retval < VMCI_SUCCESS) {
+		kfree(my_qpair);
+		return retval;
+	}
+
+	*qpair = my_qpair;
+	my_qpair->handle = *handle;
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
+
+/*
+ * vmci_qpair_detach() - Detaches the client from a queue pair.
+ * @qpair: Reference of a pointer to the qpair struct.
+ *
+ * This is the client interface for detaching from a VMCIQPair.
+ * Note that this routine will free the memory allocated for the
+ * vmci_qp structure too.
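+ *
+ * A minimal usage sketch of the alloc/detach pair (illustrative
+ * only; error handling elided, "peer_cid" stands for the peer's
+ * context ID, and the 4 KiB sizes are arbitrary):
+ *
+ *	struct vmci_qp *qpair;
+ *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
+ *
+ *	if (vmci_qpair_alloc(&qpair, &handle, 4096, 4096, peer_cid,
+ *			     0, VMCI_NO_PRIVILEGE_FLAGS) == VMCI_SUCCESS)
+ *		vmci_qpair_detach(&qpair);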
+ */ +int vmci_qpair_detach(struct vmci_qp **qpair) +{ + int result; + struct vmci_qp *old_qpair; + + if (!qpair || !(*qpair)) + return VMCI_ERROR_INVALID_ARGS; + + old_qpair = *qpair; + result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint); + + /* + * The guest can fail to detach for a number of reasons, and + * if it does so, it will cleanup the entry (if there is one). + * The host can fail too, but it won't cleanup the entry + * immediately, it will do that later when the context is + * freed. Either way, we need to release the qpair struct + * here; there isn't much the caller can do, and we don't want + * to leak. + */ + + memset(old_qpair, 0, sizeof(*old_qpair)); + old_qpair->handle = VMCI_INVALID_HANDLE; + old_qpair->peer = VMCI_INVALID_ID; + kfree(old_qpair); + *qpair = NULL; + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_detach); + +/* + * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer. + * @qpair: Pointer to the queue pair struct. + * @producer_tail: Reference used for storing producer tail index. + * @consumer_head: Reference used for storing the consumer head index. + * + * This is the client interface for getting the current indexes of the + * QPair from the point of the view of the caller as the producer. + */ +int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair, + u64 *producer_tail, + u64 *consumer_head) +{ + struct vmci_queue_header *produce_q_header; + struct vmci_queue_header *consume_q_header; + int result; + + if (!qpair) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + result = + qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); + if (result == VMCI_SUCCESS) + vmci_q_header_get_pointers(produce_q_header, consume_q_header, + producer_tail, consumer_head); + qp_unlock(qpair); + + if (result == VMCI_SUCCESS && + ((producer_tail && *producer_tail >= qpair->produce_q_size) || + (consumer_head && *consumer_head >= qpair->produce_q_size))) + return VMCI_ERROR_INVALID_SIZE; + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes); + +/* + * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer. + * @qpair: Pointer to the queue pair struct. + * @consumer_tail: Reference used for storing consumer tail index. + * @producer_head: Reference used for storing the producer head index. + * + * This is the client interface for getting the current indexes of the + * QPair from the point of the view of the caller as the consumer. + */ +int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair, + u64 *consumer_tail, + u64 *producer_head) +{ + struct vmci_queue_header *produce_q_header; + struct vmci_queue_header *consume_q_header; + int result; + + if (!qpair) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + result = + qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); + if (result == VMCI_SUCCESS) + vmci_q_header_get_pointers(consume_q_header, produce_q_header, + consumer_tail, producer_head); + qp_unlock(qpair); + + if (result == VMCI_SUCCESS && + ((consumer_tail && *consumer_tail >= qpair->consume_q_size) || + (producer_head && *producer_head >= qpair->consume_q_size))) + return VMCI_ERROR_INVALID_SIZE; + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes); + +/* + * vmci_qpair_produce_free_space() - Retrieves free space in producer queue. + * @qpair: Pointer to the queue pair struct. 
+ * + * This is the client interface for getting the amount of free + * space in the QPair from the point of the view of the caller as + * the producer which is the common case. Returns < 0 if err, else + * available bytes into which data can be enqueued if > 0. + */ +s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair) +{ + struct vmci_queue_header *produce_q_header; + struct vmci_queue_header *consume_q_header; + s64 result; + + if (!qpair) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + result = + qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); + if (result == VMCI_SUCCESS) + result = vmci_q_header_free_space(produce_q_header, + consume_q_header, + qpair->produce_q_size); + else + result = 0; + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space); + +/* + * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue. + * @qpair: Pointer to the queue pair struct. + * + * This is the client interface for getting the amount of free + * space in the QPair from the point of the view of the caller as + * the consumer which is not the common case. Returns < 0 if err, else + * available bytes into which data can be enqueued if > 0. + */ +s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair) +{ + struct vmci_queue_header *produce_q_header; + struct vmci_queue_header *consume_q_header; + s64 result; + + if (!qpair) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + result = + qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); + if (result == VMCI_SUCCESS) + result = vmci_q_header_free_space(consume_q_header, + produce_q_header, + qpair->consume_q_size); + else + result = 0; + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space); + +/* + * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from + * producer queue. + * @qpair: Pointer to the queue pair struct. + * + * This is the client interface for getting the amount of + * enqueued data in the QPair from the point of the view of the + * caller as the producer which is not the common case. Returns < 0 if err, + * else available bytes that may be read. + */ +s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair) +{ + struct vmci_queue_header *produce_q_header; + struct vmci_queue_header *consume_q_header; + s64 result; + + if (!qpair) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + result = + qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); + if (result == VMCI_SUCCESS) + result = vmci_q_header_buf_ready(produce_q_header, + consume_q_header, + qpair->produce_q_size); + else + result = 0; + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready); + +/* + * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from + * consumer queue. + * @qpair: Pointer to the queue pair struct. + * + * This is the client interface for getting the amount of + * enqueued data in the QPair from the point of the view of the + * caller as the consumer which is the normal case. Returns < 0 if err, + * else available bytes that may be read. 
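+ *
+ * A consumer might use it like this before dequeueing (sketch; "buf"
+ * is a hypothetical local buffer):
+ *
+ *	s64 ready = vmci_qpair_consume_buf_ready(qpair);
+ *
+ *	if (ready > 0)
+ *		vmci_qpair_dequeue(qpair, buf,
+ *				   min_t(size_t, ready, sizeof(buf)), 0);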
+ */ +s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair) +{ + struct vmci_queue_header *produce_q_header; + struct vmci_queue_header *consume_q_header; + s64 result; + + if (!qpair) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + result = + qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); + if (result == VMCI_SUCCESS) + result = vmci_q_header_buf_ready(consume_q_header, + produce_q_header, + qpair->consume_q_size); + else + result = 0; + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready); + +/* + * vmci_qpair_enqueue() - Throw data on the queue. + * @qpair: Pointer to the queue pair struct. + * @buf: Pointer to buffer containing data + * @buf_size: Length of buffer. + * @buf_type: Buffer type (Unused). + * + * This is the client interface for enqueueing data into the queue. + * Returns number of bytes enqueued or < 0 on error. + */ +ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair, + const void *buf, + size_t buf_size, + int buf_type) +{ + ssize_t result; + struct iov_iter from; + struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size}; + + if (!qpair || !buf) + return VMCI_ERROR_INVALID_ARGS; + + iov_iter_kvec(&from, WRITE, &v, 1, buf_size); + + qp_lock(qpair); + + do { + result = qp_enqueue_locked(qpair->produce_q, + qpair->consume_q, + qpair->produce_q_size, + &from); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_enqueue); + +/* + * vmci_qpair_dequeue() - Get data from the queue. + * @qpair: Pointer to the queue pair struct. + * @buf: Pointer to buffer for the data + * @buf_size: Length of buffer. + * @buf_type: Buffer type (Unused). + * + * This is the client interface for dequeueing data from the queue. + * Returns number of bytes dequeued or < 0 on error. + */ +ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, + void *buf, + size_t buf_size, + int buf_type) +{ + ssize_t result; + struct iov_iter to; + struct kvec v = {.iov_base = buf, .iov_len = buf_size}; + + if (!qpair || !buf) + return VMCI_ERROR_INVALID_ARGS; + + iov_iter_kvec(&to, READ, &v, 1, buf_size); + + qp_lock(qpair); + + do { + result = qp_dequeue_locked(qpair->produce_q, + qpair->consume_q, + qpair->consume_q_size, + &to, true); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_dequeue); + +/* + * vmci_qpair_peek() - Peek at the data in the queue. + * @qpair: Pointer to the queue pair struct. + * @buf: Pointer to buffer for the data + * @buf_size: Length of buffer. + * @buf_type: Buffer type (Unused on Linux). + * + * This is the client interface for peeking into a queue. (I.e., + * copy data from the queue without updating the head pointer.) + * Returns number of bytes dequeued or < 0 on error. 
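+ *
+ * A peek followed by a dequeue returns the same bytes, since only the
+ * dequeue advances the head pointer. For instance (sketch; "hdr" is a
+ * hypothetical message header buffer):
+ *
+ *	char hdr[8];
+ *
+ *	if (vmci_qpair_peek(qpair, hdr, sizeof(hdr), 0) == sizeof(hdr))
+ *		... parse hdr, then vmci_qpair_dequeue() the full message ...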
+ */
+ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
+			void *buf,
+			size_t buf_size,
+			int buf_type)
+{
+	struct iov_iter to;
+	struct kvec v = {.iov_base = buf, .iov_len = buf_size};
+	ssize_t result;
+
+	if (!qpair || !buf)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	iov_iter_kvec(&to, READ, &v, 1, buf_size);
+
+	qp_lock(qpair);
+
+	do {
+		result = qp_dequeue_locked(qpair->produce_q,
+					   qpair->consume_q,
+					   qpair->consume_q_size,
+					   &to, false);
+
+		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+		    !qp_wait_for_ready_queue(qpair))
+			result = VMCI_ERROR_WOULD_BLOCK;
+
+	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_peek);
+
+/*
+ * vmci_qpair_enquev() - Throw data on the queue using iov.
+ * @qpair: Pointer to the queue pair struct.
+ * @msg: Pointer to msghdr whose iov carries the data.
+ * @iov_size: Length of the data in the iov.
+ * @buf_type: Buffer type (Unused).
+ *
+ * This is the client interface for enqueueing data into the queue.
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes enqueued or < 0 on error.
+ */
+ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
+			  struct msghdr *msg,
+			  size_t iov_size,
+			  int buf_type)
+{
+	ssize_t result;
+
+	if (!qpair)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+
+	do {
+		result = qp_enqueue_locked(qpair->produce_q,
+					   qpair->consume_q,
+					   qpair->produce_q_size,
+					   &msg->msg_iter);
+
+		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+		    !qp_wait_for_ready_queue(qpair))
+			result = VMCI_ERROR_WOULD_BLOCK;
+
+	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
+
+/*
+ * vmci_qpair_dequev() - Get data from the queue using iov.
+ * @qpair: Pointer to the queue pair struct.
+ * @msg: Pointer to msghdr whose iov receives the data.
+ * @iov_size: Length of the buffer in the iov.
+ * @buf_type: Buffer type (Unused).
+ *
+ * This is the client interface for dequeueing data from the queue.
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes dequeued or < 0 on error.
+ */
+ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
+			  struct msghdr *msg,
+			  size_t iov_size,
+			  int buf_type)
+{
+	ssize_t result;
+
+	if (!qpair)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+
+	do {
+		result = qp_dequeue_locked(qpair->produce_q,
+					   qpair->consume_q,
+					   qpair->consume_q_size,
+					   &msg->msg_iter, true);
+
+		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+		    !qp_wait_for_ready_queue(qpair))
+			result = VMCI_ERROR_WOULD_BLOCK;
+
+	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
+
+/*
+ * vmci_qpair_peekv() - Peek at the data in the queue using iov.
+ * @qpair: Pointer to the queue pair struct.
+ * @msg: Pointer to msghdr whose iov receives the data.
+ * @iov_size: Length of the buffer in the iov.
+ * @buf_type: Buffer type (Unused on Linux).
+ *
+ * This is the client interface for peeking into a queue. (I.e.,
+ * copy data from the queue without updating the head pointer.)
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes peeked or < 0 on error.
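+ *
+ * Sketch of iov based use (illustrative; "hdr" and "body" are
+ * hypothetical buffers making up one message):
+ *
+ *	struct kvec vec[2] = { {hdr, sizeof(hdr)}, {body, sizeof(body)} };
+ *	struct msghdr msg = {};
+ *
+ *	iov_iter_kvec(&msg.msg_iter, READ, vec, 2,
+ *		      sizeof(hdr) + sizeof(body));
+ *	vmci_qpair_dequev(qpair, &msg, sizeof(hdr) + sizeof(body), 0);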
+ */ +ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, + struct msghdr *msg, + size_t iov_size, + int buf_type) +{ + ssize_t result; + + if (!qpair) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + + do { + result = qp_dequeue_locked(qpair->produce_q, + qpair->consume_q, + qpair->consume_q_size, + &msg->msg_iter, false); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_peekv); diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h new file mode 100644 index 000000000..00017fc29 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + */ + +#ifndef _VMCI_QUEUE_PAIR_H_ +#define _VMCI_QUEUE_PAIR_H_ + +#include <linux/vmw_vmci_defs.h> +#include <linux/types.h> + +#include "vmci_context.h" + +/* Callback needed for correctly waiting on events. */ +typedef int (*vmci_event_release_cb) (void *client_data); + +/* Guest device port I/O. */ +struct ppn_set { + u64 num_produce_pages; + u64 num_consume_pages; + u64 *produce_ppns; + u64 *consume_ppns; + bool initialized; +}; + +/* VMCIqueue_pairAllocInfo */ +struct vmci_qp_alloc_info { + struct vmci_handle handle; + u32 peer; + u32 flags; + u64 produce_size; + u64 consume_size; + u64 ppn_va; /* Start VA of queue pair PPNs. */ + u64 num_ppns; + s32 result; + u32 version; +}; + +/* VMCIqueue_pairSetVAInfo */ +struct vmci_qp_set_va_info { + struct vmci_handle handle; + u64 va; /* Start VA of queue pair PPNs. */ + u64 num_ppns; + u32 version; + s32 result; +}; + +/* + * For backwards compatibility, here is a version of the + * VMCIqueue_pairPageFileInfo before host support end-points was added. + * Note that the current version of that structure requires VMX to + * pass down the VA of the mapped file. Before host support was added + * there was nothing of the sort. So, when the driver sees the ioctl + * with a parameter that is the sizeof + * VMCIqueue_pairPageFileInfo_NoHostQP then it can infer that the version + * of VMX running can't attach to host end points because it doesn't + * provide the VA of the mapped files. + * + * The Linux driver doesn't get an indication of the size of the + * structure passed down from user space. So, to fix a long standing + * but unfiled bug, the _pad field has been renamed to version. + * Existing versions of VMX always initialize the PageFileInfo + * structure so that _pad, er, version is set to 0. + * + * A version value of 1 indicates that the size of the structure has + * been increased to include two UVA's: produce_uva and consume_uva. + * These UVA's are of the mmap()'d queue contents backing files. + * + * In addition, if when VMX is sending down the + * VMCIqueue_pairPageFileInfo structure it gets an error then it will + * try again with the _NoHostQP version of the file to see if an older + * VMCI kernel module is running. + */ + +/* VMCIqueue_pairPageFileInfo */ +struct vmci_qp_page_file_info { + struct vmci_handle handle; + u64 produce_page_file; /* User VA. */ + u64 consume_page_file; /* User VA. */ + u64 produce_page_file_size; /* Size of the file name array. */ + u64 consume_page_file_size; /* Size of the file name array. */ + s32 result; + u32 version; /* Was _pad. 
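Old VMX builds always set this to 0; see the note above.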
*/
+	u64 produce_va;		/* User VA of the mapped file. */
+	u64 consume_va;		/* User VA of the mapped file. */
+};
+
+/* vmci queuepair detach info */
+struct vmci_qp_dtch_info {
+	struct vmci_handle handle;
+	s32 result;
+	u32 _pad;
+};
+
+/*
+ * struct vmci_qp_page_store describes how the memory of a given queue pair
+ * is backed. When the queue pair is between the host and a guest, the
+ * page store consists of references to the guest pages. On vmkernel,
+ * this is a list of PPNs, and on hosted, it is a user VA where the
+ * queue pair is mapped into the VMX address space.
+ */
+struct vmci_qp_page_store {
+	/* Reference to pages backing the queue pair. */
+	u64 pages;
+	/* Length of pageList/virtual address range (in pages). */
+	u32 len;
+};
+
+/*
+ * This data type contains the information about a queue.
+ * There are two queues (hence, queue pairs) per transaction model between a
+ * pair of end points, A & B. One queue is used by end point A to transmit
+ * commands and responses to B. The other queue is used by B to transmit
+ * commands and responses.
+ *
+ * struct vmci_queue_kern_if is a per-OS defined Queue structure. It contains
+ * either a direct pointer to the linear address of the buffer contents or a
+ * pointer to structures which help the OS locate those data pages. See
+ * vmciKernelIf.c for each platform for its definition.
+ */
+struct vmci_queue {
+	struct vmci_queue_header *q_header;
+	struct vmci_queue_header *saved_header;
+	struct vmci_queue_kern_if *kernel_if;
+};
+
+/*
+ * Utility function that checks whether the fields of the page
+ * store contain valid values.
+ * Result:
+ * true if the page store is well-formed, false otherwise.
+ */
+static inline bool
+VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
+{
+	return page_store->len >= 2;
+}
+
+void vmci_qp_broker_exit(void);
+int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
+			 u32 flags, u32 priv_flags,
+			 u64 produce_size, u64 consume_size,
+			 struct vmci_qp_page_store *page_store,
+			 struct vmci_ctx *context);
+int vmci_qp_broker_set_page_store(struct vmci_handle handle,
+				  u64 produce_uva, u64 consume_uva,
+				  struct vmci_ctx *context);
+int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context);
+
+void vmci_qp_guest_endpoints_exit(void);
+
+int vmci_qp_alloc(struct vmci_handle *handle,
+		  struct vmci_queue **produce_q, u64 produce_size,
+		  struct vmci_queue **consume_q, u64 consume_size,
+		  u32 peer, u32 flags, u32 priv_flags,
+		  bool guest_endpoint, vmci_event_release_cb wakeup_cb,
+		  void *client_data);
+int vmci_qp_broker_map(struct vmci_handle handle,
+		       struct vmci_ctx *context, u64 guest_mem);
+int vmci_qp_broker_unmap(struct vmci_handle handle,
+			 struct vmci_ctx *context, u32 gid);
+
+#endif /* _VMCI_QUEUE_PAIR_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
new file mode 100644
index 000000000..692daa9ef
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/hash.h> +#include <linux/types.h> +#include <linux/rculist.h> +#include <linux/completion.h> + +#include "vmci_resource.h" +#include "vmci_driver.h" + + +#define VMCI_RESOURCE_HASH_BITS 7 +#define VMCI_RESOURCE_HASH_BUCKETS (1 << VMCI_RESOURCE_HASH_BITS) + +struct vmci_hash_table { + spinlock_t lock; + struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS]; +}; + +static struct vmci_hash_table vmci_resource_table = { + .lock = __SPIN_LOCK_UNLOCKED(vmci_resource_table.lock), +}; + +static unsigned int vmci_resource_hash(struct vmci_handle handle) +{ + return hash_32(handle.resource, VMCI_RESOURCE_HASH_BITS); +} + +/* + * Gets a resource (if one exists) matching given handle from the hash table. + */ +static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle, + enum vmci_resource_type type) +{ + struct vmci_resource *r, *resource = NULL; + unsigned int idx = vmci_resource_hash(handle); + + rcu_read_lock(); + hlist_for_each_entry_rcu(r, + &vmci_resource_table.entries[idx], node) { + u32 cid = r->handle.context; + u32 rid = r->handle.resource; + + if (r->type == type && + rid == handle.resource && + (cid == handle.context || cid == VMCI_INVALID_ID || + handle.context == VMCI_INVALID_ID)) { + resource = r; + break; + } + } + rcu_read_unlock(); + + return resource; +} + +/* + * Find an unused resource ID and return it. The first + * VMCI_RESERVED_RESOURCE_ID_MAX are reserved so we start from + * its value + 1. + * Returns VMCI resource id on success, VMCI_INVALID_ID on failure. + */ +static u32 vmci_resource_find_id(u32 context_id, + enum vmci_resource_type resource_type) +{ + static u32 resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1; + u32 old_rid = resource_id; + u32 current_rid; + + /* + * Generate a unique resource ID. Keep on trying until we wrap around + * in the RID space. + */ + do { + struct vmci_handle handle; + + current_rid = resource_id; + resource_id++; + if (unlikely(resource_id == VMCI_INVALID_ID)) { + /* Skip the reserved rids. */ + resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1; + } + + handle = vmci_make_handle(context_id, current_rid); + if (!vmci_resource_lookup(handle, resource_type)) + return current_rid; + } while (resource_id != old_rid); + + return VMCI_INVALID_ID; +} + + +int vmci_resource_add(struct vmci_resource *resource, + enum vmci_resource_type resource_type, + struct vmci_handle handle) + +{ + unsigned int idx; + int result; + + spin_lock(&vmci_resource_table.lock); + + if (handle.resource == VMCI_INVALID_ID) { + handle.resource = vmci_resource_find_id(handle.context, + resource_type); + if (handle.resource == VMCI_INVALID_ID) { + result = VMCI_ERROR_NO_HANDLE; + goto out; + } + } else if (vmci_resource_lookup(handle, resource_type)) { + result = VMCI_ERROR_ALREADY_EXISTS; + goto out; + } + + resource->handle = handle; + resource->type = resource_type; + INIT_HLIST_NODE(&resource->node); + kref_init(&resource->kref); + init_completion(&resource->done); + + idx = vmci_resource_hash(resource->handle); + hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]); + + result = VMCI_SUCCESS; + +out: + spin_unlock(&vmci_resource_table.lock); + return result; +} + +void vmci_resource_remove(struct vmci_resource *resource) +{ + struct vmci_handle handle = resource->handle; + unsigned int idx = vmci_resource_hash(handle); + struct vmci_resource *r; + + /* Remove resource from hash table. 
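Once the synchronize_rcu() below has returned, lookups can no longer take a new reference on the entry, so the final wait_for_completion() returns as soon as the last outstanding reference is dropped.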
*/ + spin_lock(&vmci_resource_table.lock); + + hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) { + if (vmci_handle_is_equal(r->handle, resource->handle)) { + hlist_del_init_rcu(&r->node); + break; + } + } + + spin_unlock(&vmci_resource_table.lock); + synchronize_rcu(); + + vmci_resource_put(resource); + wait_for_completion(&resource->done); +} + +struct vmci_resource * +vmci_resource_by_handle(struct vmci_handle resource_handle, + enum vmci_resource_type resource_type) +{ + struct vmci_resource *r, *resource = NULL; + + rcu_read_lock(); + + r = vmci_resource_lookup(resource_handle, resource_type); + if (r && + (resource_type == r->type || + resource_type == VMCI_RESOURCE_TYPE_ANY)) { + resource = vmci_resource_get(r); + } + + rcu_read_unlock(); + + return resource; +} + +/* + * Get a reference to given resource. + */ +struct vmci_resource *vmci_resource_get(struct vmci_resource *resource) +{ + kref_get(&resource->kref); + + return resource; +} + +static void vmci_release_resource(struct kref *kref) +{ + struct vmci_resource *resource = + container_of(kref, struct vmci_resource, kref); + + /* Verify the resource has been unlinked from hash table */ + WARN_ON(!hlist_unhashed(&resource->node)); + + /* Signal that container of this resource can now be destroyed */ + complete(&resource->done); +} + +/* + * Resource's release function will get called if last reference. + * If it is the last reference, then we are sure that nobody else + * can increment the count again (it's gone from the resource hash + * table), so there's no need for locking here. + */ +int vmci_resource_put(struct vmci_resource *resource) +{ + /* + * We propagate the information back to caller in case it wants to know + * whether entry was freed. + */ + return kref_put(&resource->kref, vmci_release_resource) ? + VMCI_SUCCESS_ENTRY_DEAD : VMCI_SUCCESS; +} + +struct vmci_handle vmci_resource_handle(struct vmci_resource *resource) +{ + return resource->handle; +} diff --git a/drivers/misc/vmw_vmci/vmci_resource.h b/drivers/misc/vmw_vmci/vmci_resource.h new file mode 100644 index 000000000..02ae18506 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_resource.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. 
+ */ + +#ifndef _VMCI_RESOURCE_H_ +#define _VMCI_RESOURCE_H_ + +#include <linux/vmw_vmci_defs.h> +#include <linux/types.h> + +#include "vmci_context.h" + + +enum vmci_resource_type { + VMCI_RESOURCE_TYPE_ANY, + VMCI_RESOURCE_TYPE_API, + VMCI_RESOURCE_TYPE_GROUP, + VMCI_RESOURCE_TYPE_DATAGRAM, + VMCI_RESOURCE_TYPE_DOORBELL, + VMCI_RESOURCE_TYPE_QPAIR_GUEST, + VMCI_RESOURCE_TYPE_QPAIR_HOST +}; + +struct vmci_resource { + struct vmci_handle handle; + enum vmci_resource_type type; + struct hlist_node node; + struct kref kref; + struct completion done; +}; + + +int vmci_resource_add(struct vmci_resource *resource, + enum vmci_resource_type resource_type, + struct vmci_handle handle); + +void vmci_resource_remove(struct vmci_resource *resource); + +struct vmci_resource * +vmci_resource_by_handle(struct vmci_handle resource_handle, + enum vmci_resource_type resource_type); + +struct vmci_resource *vmci_resource_get(struct vmci_resource *resource); +int vmci_resource_put(struct vmci_resource *resource); + +struct vmci_handle vmci_resource_handle(struct vmci_resource *resource); + +#endif /* _VMCI_RESOURCE_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_route.c b/drivers/misc/vmw_vmci/vmci_route.c new file mode 100644 index 000000000..8b91bfa53 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_route.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> + +#include "vmci_context.h" +#include "vmci_driver.h" +#include "vmci_route.h" + +/* + * Make a routing decision for the given source and destination handles. + * This will try to determine the route using the handles and the available + * devices. Will set the source context if it is invalid. + */ +int vmci_route(struct vmci_handle *src, + const struct vmci_handle *dst, + bool from_guest, + enum vmci_route *route) +{ + bool has_host_device = vmci_host_code_active(); + bool has_guest_device = vmci_guest_code_active(); + + *route = VMCI_ROUTE_NONE; + + /* + * "from_guest" is only ever set to true by + * IOCTL_VMCI_DATAGRAM_SEND (or by the vmkernel equivalent), + * which comes from the VMX, so we know it is coming from a + * guest. + * + * To avoid inconsistencies, test these once. We will test + * them again when we do the actual send to ensure that we do + * not touch a non-existent device. + */ + + /* Must have a valid destination context. */ + if (VMCI_INVALID_ID == dst->context) + return VMCI_ERROR_INVALID_ARGS; + + /* Anywhere to hypervisor. */ + if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) { + + /* + * If this message already came from a guest then we + * cannot send it to the hypervisor. It must come + * from a local client. + */ + if (from_guest) + return VMCI_ERROR_DST_UNREACHABLE; + + /* + * We must be acting as a guest in order to send to + * the hypervisor. + */ + if (!has_guest_device) + return VMCI_ERROR_DEVICE_NOT_FOUND; + + /* And we cannot send if the source is the host context. */ + if (VMCI_HOST_CONTEXT_ID == src->context) + return VMCI_ERROR_INVALID_ARGS; + + /* + * If the client passed the ANON source handle then + * respect it (both context and resource are invalid). + * However, if they passed only an invalid context, + * then they probably mean ANY, in which case we + * should set the real context here before passing it + * down. 
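+ * For example, the ANON handle (VMCI_INVALID_ID, VMCI_INVALID_ID)
+ * is passed through untouched, while a source of
+ * (VMCI_INVALID_ID, <valid resource>) has its context filled in
+ * from vmci_get_context_id() below.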
+ */ + if (VMCI_INVALID_ID == src->context && + VMCI_INVALID_ID != src->resource) + src->context = vmci_get_context_id(); + + /* Send from local client down to the hypervisor. */ + *route = VMCI_ROUTE_AS_GUEST; + return VMCI_SUCCESS; + } + + /* Anywhere to local client on host. */ + if (VMCI_HOST_CONTEXT_ID == dst->context) { + /* + * If it is not from a guest but we are acting as a + * guest, then we need to send it down to the host. + * Note that if we are also acting as a host then this + * will prevent us from sending from local client to + * local client, but we accept that restriction as a + * way to remove any ambiguity from the host context. + */ + if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) { + /* + * If the hypervisor is the source, this is + * host local communication. The hypervisor + * may send vmci event datagrams to the host + * itself, but it will never send datagrams to + * an "outer host" through the guest device. + */ + + if (has_host_device) { + *route = VMCI_ROUTE_AS_HOST; + return VMCI_SUCCESS; + } else { + return VMCI_ERROR_DEVICE_NOT_FOUND; + } + } + + if (!from_guest && has_guest_device) { + /* If no source context then use the current. */ + if (VMCI_INVALID_ID == src->context) + src->context = vmci_get_context_id(); + + /* Send it from local client down to the host. */ + *route = VMCI_ROUTE_AS_GUEST; + return VMCI_SUCCESS; + } + + /* + * Otherwise we already received it from a guest and + * it is destined for a local client on this host, or + * it is from another local client on this host. We + * must be acting as a host to service it. + */ + if (!has_host_device) + return VMCI_ERROR_DEVICE_NOT_FOUND; + + if (VMCI_INVALID_ID == src->context) { + /* + * If it came from a guest then it must have a + * valid context. Otherwise we can use the + * host context. + */ + if (from_guest) + return VMCI_ERROR_INVALID_ARGS; + + src->context = VMCI_HOST_CONTEXT_ID; + } + + /* Route to local client. */ + *route = VMCI_ROUTE_AS_HOST; + return VMCI_SUCCESS; + } + + /* + * If we are acting as a host then this might be destined for + * a guest. + */ + if (has_host_device) { + /* It will have a context if it is meant for a guest. */ + if (vmci_ctx_exists(dst->context)) { + if (VMCI_INVALID_ID == src->context) { + /* + * If it came from a guest then it + * must have a valid context. + * Otherwise we can use the host + * context. + */ + + if (from_guest) + return VMCI_ERROR_INVALID_ARGS; + + src->context = VMCI_HOST_CONTEXT_ID; + } else if (VMCI_CONTEXT_IS_VM(src->context) && + src->context != dst->context) { + /* + * VM to VM communication is not + * allowed. Since we catch all + * communication destined for the host + * above, this must be destined for a + * VM since there is a valid context. + */ + + return VMCI_ERROR_DST_UNREACHABLE; + } + + /* Pass it up to the guest. */ + *route = VMCI_ROUTE_AS_HOST; + return VMCI_SUCCESS; + } else if (!has_guest_device) { + /* + * The host is attempting to reach a CID + * without an active context, and we can't + * send it down, since we have no guest + * device. + */ + + return VMCI_ERROR_DST_UNREACHABLE; + } + } + + /* + * We must be a guest trying to send to another guest, which means + * we need to send it down to the host. We do not filter out VM to + * VM communication here, since we want to be able to use the guest + * driver on older versions that do support VM to VM communication. + */ + if (!has_guest_device) { + /* + * Ending up here means we have neither guest nor host + * device. 
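+ * Returning VMCI_ERROR_DEVICE_NOT_FOUND here (rather than
+ * VMCI_ERROR_DST_UNREACHABLE) lets the caller tell a missing
+ * device apart from an unroutable destination.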
+ */ + return VMCI_ERROR_DEVICE_NOT_FOUND; + } + + /* If no source context then use the current context. */ + if (VMCI_INVALID_ID == src->context) + src->context = vmci_get_context_id(); + + /* + * Send it from local client down to the host, which will + * route it to the other guest for us. + */ + *route = VMCI_ROUTE_AS_GUEST; + return VMCI_SUCCESS; +} diff --git a/drivers/misc/vmw_vmci/vmci_route.h b/drivers/misc/vmw_vmci/vmci_route.h new file mode 100644 index 000000000..040dbfb96 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_route.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + */ + +#ifndef _VMCI_ROUTE_H_ +#define _VMCI_ROUTE_H_ + +#include <linux/vmw_vmci_defs.h> + +enum vmci_route { + VMCI_ROUTE_NONE, + VMCI_ROUTE_AS_HOST, + VMCI_ROUTE_AS_GUEST, +}; + +int vmci_route(struct vmci_handle *src, const struct vmci_handle *dst, + bool from_guest, enum vmci_route *route); + +#endif /* _VMCI_ROUTE_H_ */ diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c new file mode 100644 index 000000000..922912927 --- /dev/null +++ b/drivers/misc/xilinx_sdfec.c @@ -0,0 +1,1490 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Xilinx SDFEC + * + * Copyright (C) 2019 Xilinx, Inc. + * + * Description: + * This driver is developed for SDFEC16 (Soft Decision FEC 16nm) + * IP. It exposes a char device which supports file operations + * like open(), close() and ioctl(). + */ + +#include <linux/miscdevice.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/compat.h> +#include <linux/highmem.h> + +#include <uapi/misc/xilinx_sdfec.h> + +#define DEV_NAME_LEN 12 + +static DEFINE_IDA(dev_nrs); + +/* Xilinx SDFEC Register Map */ +/* CODE_WRI_PROTECT Register */ +#define XSDFEC_CODE_WR_PROTECT_ADDR (0x4) + +/* ACTIVE Register */ +#define XSDFEC_ACTIVE_ADDR (0x8) +#define XSDFEC_IS_ACTIVITY_SET (0x1) + +/* AXIS_WIDTH Register */ +#define XSDFEC_AXIS_WIDTH_ADDR (0xC) +#define XSDFEC_AXIS_DOUT_WORDS_LSB (5) +#define XSDFEC_AXIS_DOUT_WIDTH_LSB (3) +#define XSDFEC_AXIS_DIN_WORDS_LSB (2) +#define XSDFEC_AXIS_DIN_WIDTH_LSB (0) + +/* AXIS_ENABLE Register */ +#define XSDFEC_AXIS_ENABLE_ADDR (0x10) +#define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38) +#define XSDFEC_AXIS_IN_ENABLE_MASK (0x7) +#define XSDFEC_AXIS_ENABLE_MASK \ + (XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK) + +/* FEC_CODE Register */ +#define XSDFEC_FEC_CODE_ADDR (0x14) + +/* ORDER Register Map */ +#define XSDFEC_ORDER_ADDR (0x18) + +/* Interrupt Status Register */ +#define XSDFEC_ISR_ADDR (0x1C) +/* Interrupt Status Register Bit Mask */ +#define XSDFEC_ISR_MASK (0x3F) + +/* Write Only - Interrupt Enable Register */ +#define XSDFEC_IER_ADDR (0x20) +/* Write Only - Interrupt Disable Register */ +#define XSDFEC_IDR_ADDR (0x24) +/* Read Only - Interrupt Mask Register */ +#define XSDFEC_IMR_ADDR (0x28) + +/* ECC Interrupt Status Register */ +#define XSDFEC_ECC_ISR_ADDR (0x2C) +/* Single Bit Errors */ +#define XSDFEC_ECC_ISR_SBE_MASK (0x7FF) +/* PL Initialize Single Bit Errors */ +#define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000) +/* Multi Bit Errors */ +#define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800) +/* PL Initialize Multi Bit Errors */ +#define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000) +/* Multi Bit Error to Event Shift */ +#define 
XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11)
+/* PL Initialize Multi Bit Error to Event Shift */
+#define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4)
+/* ECC Interrupt Status Bit Mask */
+#define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK)
+/* ECC Interrupt Status PL Initialize Bit Mask */
+#define XSDFEC_PL_INIT_ECC_ISR_MASK \
+	(XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
+/* ECC Interrupt Status All Bit Mask */
+#define XSDFEC_ALL_ECC_ISR_MASK \
+	(XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK)
+/* ECC Interrupt Status Single Bit Errors Mask */
+#define XSDFEC_ALL_ECC_ISR_SBE_MASK \
+	(XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK)
+/* ECC Interrupt Status Multi Bit Errors Mask */
+#define XSDFEC_ALL_ECC_ISR_MBE_MASK \
+	(XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
+
+/* Write Only - ECC Interrupt Enable Register */
+#define XSDFEC_ECC_IER_ADDR (0x30)
+/* Write Only - ECC Interrupt Disable Register */
+#define XSDFEC_ECC_IDR_ADDR (0x34)
+/* Read Only - ECC Interrupt Mask Register */
+#define XSDFEC_ECC_IMR_ADDR (0x38)
+
+/* BYPASS Register */
+#define XSDFEC_BYPASS_ADDR (0x3C)
+
+/* Turbo Code Register */
+#define XSDFEC_TURBO_ADDR (0x100)
+#define XSDFEC_TURBO_SCALE_MASK (0xFFF)
+#define XSDFEC_TURBO_SCALE_BIT_POS (8)
+#define XSDFEC_TURBO_SCALE_MAX (15)
+
+/* REG0 Register */
+#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000)
+#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0)
+#define XSDFEC_REG0_N_MIN (4)
+#define XSDFEC_REG0_N_MAX (32768)
+#define XSDFEC_REG0_N_MUL_P (256)
+#define XSDFEC_REG0_N_LSB (0)
+#define XSDFEC_REG0_K_MIN (2)
+#define XSDFEC_REG0_K_MAX (32766)
+#define XSDFEC_REG0_K_MUL_P (256)
+#define XSDFEC_REG0_K_LSB (16)
+
+/* REG1 Register */
+#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004)
+#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27f4)
+#define XSDFEC_REG1_PSIZE_MIN (2)
+#define XSDFEC_REG1_PSIZE_MAX (512)
+#define XSDFEC_REG1_NO_PACKING_MASK (0x400)
+#define XSDFEC_REG1_NO_PACKING_LSB (10)
+#define XSDFEC_REG1_NM_MASK (0xFF800)
+#define XSDFEC_REG1_NM_LSB (11)
+#define XSDFEC_REG1_BYPASS_MASK (0x100000)
+
+/* REG2 Register */
+#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008)
+#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27f8)
+#define XSDFEC_REG2_NLAYERS_MIN (1)
+#define XSDFEC_REG2_NLAYERS_MAX (256)
+#define XSDFEC_REG2_NNMQC_MASK (0xFFE00)
+#define XSDFEC_REG2_NMQC_LSB (9)
+#define XSDFEC_REG2_NORM_TYPE_MASK (0x100000)
+#define XSDFEC_REG2_NORM_TYPE_LSB (20)
+#define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000)
+#define XSDFEC_REG2_SPECIAL_QC_LSB (21)
+#define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000)
+#define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
+#define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000)
+#define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)
+
+/* REG3 Register */
+#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C)
+#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC)
+#define XSDFEC_REG3_LA_OFF_LSB (8)
+#define XSDFEC_REG3_QC_OFF_LSB (16)
+
+#define XSDFEC_LDPC_REG_JUMP (0x10)
+#define XSDFEC_REG_WIDTH_JUMP (4)
+
+/* The maximum number of pinned pages */
+#define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1)
+
+/**
+ * struct xsdfec_clks - For managing SD-FEC clocks
+ * @core_clk: Main processing clock for core
+ * @axi_clk: AXI4-Lite memory-mapped clock
+ * @din_words_clk: DIN Words AXI4-Stream Slave clock
+ * @din_clk: DIN AXI4-Stream Slave clock
+ * @dout_clk: DOUT AXI4-Stream Slave clock
+ * @dout_words_clk: DOUT Words AXI4-Stream Slave clock
+ * @ctrl_clk: Control AXI4-Stream Slave
clock + * @status_clk: Status AXI4-Stream Slave clock + */ +struct xsdfec_clks { + struct clk *core_clk; + struct clk *axi_clk; + struct clk *din_words_clk; + struct clk *din_clk; + struct clk *dout_clk; + struct clk *dout_words_clk; + struct clk *ctrl_clk; + struct clk *status_clk; +}; + +/** + * struct xsdfec_dev - Driver data for SDFEC + * @miscdev: Misc device handle + * @clks: Clocks managed by the SDFEC driver + * @waitq: Driver wait queue + * @config: Configuration of the SDFEC device + * @dev_name: Device name + * @flags: spinlock flags + * @regs: device physical base address + * @dev: pointer to device struct + * @state: State of the SDFEC device + * @error_data_lock: Error counter and states spinlock + * @dev_id: Device ID + * @isr_err_count: Count of ISR errors + * @cecc_count: Count of Correctable ECC errors (SBE) + * @uecc_count: Count of Uncorrectable ECC errors (MBE) + * @irq: IRQ number + * @state_updated: indicates State updated by interrupt handler + * @stats_updated: indicates Stats updated by interrupt handler + * @intr_enabled: indicates IRQ enabled + * + * This structure contains necessary state for SDFEC driver to operate + */ +struct xsdfec_dev { + struct miscdevice miscdev; + struct xsdfec_clks clks; + wait_queue_head_t waitq; + struct xsdfec_config config; + char dev_name[DEV_NAME_LEN]; + unsigned long flags; + void __iomem *regs; + struct device *dev; + enum xsdfec_state state; + /* Spinlock to protect state_updated and stats_updated */ + spinlock_t error_data_lock; + int dev_id; + u32 isr_err_count; + u32 cecc_count; + u32 uecc_count; + int irq; + bool state_updated; + bool stats_updated; + bool intr_enabled; +}; + +static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr, + u32 value) +{ + dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr); + iowrite32(value, xsdfec->regs + addr); +} + +static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr) +{ + u32 rval; + + rval = ioread32(xsdfec->regs + addr); + dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr); + return rval; +} + +static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec, + u32 reg_offset, u32 bit_num, + char *config_value) +{ + u32 reg_val; + u32 bit_mask = 1 << bit_num; + + reg_val = xsdfec_regread(xsdfec, reg_offset); + *config_value = (reg_val & bit_mask) > 0; +} + +static void update_config_from_hw(struct xsdfec_dev *xsdfec) +{ + u32 reg_value; + bool sdfec_started; + + /* Update the Order */ + reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR); + xsdfec->config.order = reg_value; + + update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR, + 0, /* Bit Number, maybe change to mask */ + &xsdfec->config.bypass); + + update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR, + 0, /* Bit Number */ + &xsdfec->config.code_wr_protect); + + reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR); + xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0; + + reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR); + xsdfec->config.irq.enable_ecc_isr = + (reg_value & XSDFEC_ECC_ISR_MASK) > 0; + + reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR); + sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0; + if (sdfec_started) + xsdfec->state = XSDFEC_STARTED; + else + xsdfec->state = XSDFEC_STOPPED; +} + +static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg) +{ + struct xsdfec_status status; + int err; + + memset(&status, 0, sizeof(status)); + spin_lock_irqsave(&xsdfec->error_data_lock, 
xsdfec->flags); + status.state = xsdfec->state; + xsdfec->state_updated = false; + spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); + status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) & + XSDFEC_IS_ACTIVITY_SET); + + err = copy_to_user(arg, &status, sizeof(status)); + if (err) + err = -EFAULT; + + return err; +} + +static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg) +{ + int err; + + err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config)); + if (err) + err = -EFAULT; + + return err; +} + +static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable) +{ + u32 mask_read; + + if (enable) { + /* Enable */ + xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK); + mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR); + if (mask_read & XSDFEC_ISR_MASK) { + dev_dbg(xsdfec->dev, + "SDFEC enabling irq with IER failed"); + return -EIO; + } + } else { + /* Disable */ + xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK); + mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR); + if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) { + dev_dbg(xsdfec->dev, + "SDFEC disabling irq with IDR failed"); + return -EIO; + } + } + return 0; +} + +static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable) +{ + u32 mask_read; + + if (enable) { + /* Enable */ + xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR, + XSDFEC_ALL_ECC_ISR_MASK); + mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR); + if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) { + dev_dbg(xsdfec->dev, + "SDFEC enabling ECC irq with ECC IER failed"); + return -EIO; + } + } else { + /* Disable */ + xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR, + XSDFEC_ALL_ECC_ISR_MASK); + mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR); + if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) == + XSDFEC_ECC_ISR_MASK) || + ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) == + XSDFEC_PL_INIT_ECC_ISR_MASK))) { + dev_dbg(xsdfec->dev, + "SDFEC disable ECC irq with ECC IDR failed"); + return -EIO; + } + } + return 0; +} + +static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg) +{ + struct xsdfec_irq irq; + int err; + int isr_err; + int ecc_err; + + err = copy_from_user(&irq, arg, sizeof(irq)); + if (err) + return -EFAULT; + + /* Setup tlast related IRQ */ + isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr); + if (!isr_err) + xsdfec->config.irq.enable_isr = irq.enable_isr; + + /* Setup ECC related IRQ */ + ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr); + if (!ecc_err) + xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr; + + if (isr_err < 0 || ecc_err < 0) + err = -EIO; + + return err; +} + +static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg) +{ + struct xsdfec_turbo turbo; + int err; + u32 turbo_write; + + err = copy_from_user(&turbo, arg, sizeof(turbo)); + if (err) + return -EFAULT; + + if (turbo.alg >= XSDFEC_TURBO_ALG_MAX) + return -EINVAL; + + if (turbo.scale > XSDFEC_TURBO_SCALE_MAX) + return -EINVAL; + + /* Check to see what device tree says about the FEC codes */ + if (xsdfec->config.code == XSDFEC_LDPC_CODE) + return -EIO; + + turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK) + << XSDFEC_TURBO_SCALE_BIT_POS) | + turbo.alg; + xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write); + return err; +} + +static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg) +{ + u32 reg_value; + struct xsdfec_turbo turbo_params; + int err; + + if (xsdfec->config.code == XSDFEC_LDPC_CODE) + return -EIO; + + memset(&turbo_params, 0, 
sizeof(turbo_params));
+	reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
+
+	turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
+			     XSDFEC_TURBO_SCALE_BIT_POS;
+	turbo_params.alg = reg_value & 0x1;
+
+	err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
+	if (err)
+		err = -EFAULT;
+
+	return err;
+}
+
+static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
+			     u32 offset)
+{
+	u32 wdata;
+
+	if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || psize == 0 ||
+	    (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) {
+		dev_dbg(xsdfec->dev, "N value is not in range");
+		return -EINVAL;
+	}
+	n <<= XSDFEC_REG0_N_LSB;
+
+	if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX ||
+	    (k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) {
+		dev_dbg(xsdfec->dev, "K value is not in range");
+		return -EINVAL;
+	}
+	k = k << XSDFEC_REG0_K_LSB;
+	wdata = k | n;
+
+	if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
+	    XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
+		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
+			XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
+				(offset * XSDFEC_LDPC_REG_JUMP));
+		return -EINVAL;
+	}
+	xsdfec_regwrite(xsdfec,
+			XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
+				(offset * XSDFEC_LDPC_REG_JUMP),
+			wdata);
+	return 0;
+}
+
+static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
+			     u32 no_packing, u32 nm, u32 offset)
+{
+	u32 wdata;
+
+	if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) {
+		dev_dbg(xsdfec->dev, "Psize is not in range");
+		return -EINVAL;
+	}
+
+	if (no_packing != 0 && no_packing != 1)
+		dev_dbg(xsdfec->dev, "No-packing bit register invalid");
+	no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
+		      XSDFEC_REG1_NO_PACKING_MASK);
+
+	if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
+		dev_dbg(xsdfec->dev, "NM is beyond 10 bits");
+	nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;
+
+	wdata = nm | no_packing | psize;
+	if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
+	    XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
+		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x",
+			XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
+				(offset * XSDFEC_LDPC_REG_JUMP));
+		return -EINVAL;
+	}
+	xsdfec_regwrite(xsdfec,
+			XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
+				(offset * XSDFEC_LDPC_REG_JUMP),
+			wdata);
+	return 0;
+}
+
+static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
+			     u32 norm_type, u32 special_qc, u32 no_final_parity,
+			     u32 max_schedule, u32 offset)
+{
+	u32 wdata;
+
+	if (nlayers < XSDFEC_REG2_NLAYERS_MIN ||
+	    nlayers > XSDFEC_REG2_NLAYERS_MAX) {
+		dev_dbg(xsdfec->dev, "Nlayers is not in range");
+		return -EINVAL;
+	}
+
+	if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
+		dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits");
+	nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;
+
+	if (norm_type > 1)
+		dev_dbg(xsdfec->dev, "Norm type is invalid");
+	norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
+		     XSDFEC_REG2_NORM_TYPE_MASK);
+	if (special_qc > 1)
+		dev_dbg(xsdfec->dev, "Special QC is invalid");
+	special_qc = ((special_qc << XSDFEC_REG2_SPECIAL_QC_LSB) &
+		      XSDFEC_REG2_SPECIAL_QC_MASK);
+
+	if (no_final_parity > 1)
+		dev_dbg(xsdfec->dev, "No final parity check invalid");
+	no_final_parity =
+		((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
+		 XSDFEC_REG2_NO_FINAL_PARITY_MASK);
+	if (max_schedule &
+	    ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB))
+		dev_dbg(xsdfec->dev, "Max Schedule
exceeds 2 bits"); + max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) & + XSDFEC_REG2_MAX_SCHEDULE_MASK); + + wdata = (max_schedule | no_final_parity | special_qc | norm_type | + nmqc | nlayers); + + if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) > + XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) { + dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x", + XSDFEC_LDPC_CODE_REG2_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP)); + return -EINVAL; + } + xsdfec_regwrite(xsdfec, + XSDFEC_LDPC_CODE_REG2_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP), + wdata); + return 0; +} + +static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off, + u16 qc_off, u32 offset) +{ + u32 wdata; + + wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) | + (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off); + if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) > + XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) { + dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x", + XSDFEC_LDPC_CODE_REG3_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP)); + return -EINVAL; + } + xsdfec_regwrite(xsdfec, + XSDFEC_LDPC_CODE_REG3_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP), + wdata); + return 0; +} + +static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset, + u32 *src_ptr, u32 len, const u32 base_addr, + const u32 depth) +{ + u32 reg = 0; + int res, i, nr_pages; + u32 n; + u32 *addr = NULL; + struct page *pages[MAX_NUM_PAGES]; + + /* + * Writes that go beyond the length of + * Shared Scale(SC) table should fail + */ + if (offset > depth / XSDFEC_REG_WIDTH_JUMP || + len > depth / XSDFEC_REG_WIDTH_JUMP || + offset + len > depth / XSDFEC_REG_WIDTH_JUMP) { + dev_dbg(xsdfec->dev, "Write exceeds SC table length"); + return -EINVAL; + } + + n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE; + if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE) + n += 1; + + if (WARN_ON_ONCE(n > INT_MAX)) + return -EINVAL; + + nr_pages = n; + + res = pin_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages); + if (res < nr_pages) { + if (res > 0) + unpin_user_pages(pages, res); + + return -EINVAL; + } + + for (i = 0; i < nr_pages; i++) { + addr = kmap(pages[i]); + do { + xsdfec_regwrite(xsdfec, + base_addr + ((offset + reg) * + XSDFEC_REG_WIDTH_JUMP), + addr[reg]); + reg++; + } while ((reg < len) && + ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)); + unpin_user_page(pages[i]); + } + return 0; +} + +static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg) +{ + struct xsdfec_ldpc_params *ldpc; + int ret, n; + + ldpc = memdup_user(arg, sizeof(*ldpc)); + if (IS_ERR(ldpc)) + return PTR_ERR(ldpc); + + if (xsdfec->config.code == XSDFEC_TURBO_CODE) { + ret = -EIO; + goto err_out; + } + + /* Verify Device has not started */ + if (xsdfec->state == XSDFEC_STARTED) { + ret = -EIO; + goto err_out; + } + + if (xsdfec->config.code_wr_protect) { + ret = -EIO; + goto err_out; + } + + /* Write Reg 0 */ + ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize, + ldpc->code_id); + if (ret) + goto err_out; + + /* Write Reg 1 */ + ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm, + ldpc->code_id); + if (ret) + goto err_out; + + /* Write Reg 2 */ + ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc, + ldpc->norm_type, ldpc->special_qc, + ldpc->no_final_parity, ldpc->max_schedule, + ldpc->code_id); + if (ret) + goto err_out; + + /* Write Reg 3 */ + ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off, + ldpc->qc_off, ldpc->code_id); + if (ret) + goto err_out; + + /* Write 
Shared Codes */ + n = ldpc->nlayers / 4; + if (ldpc->nlayers % 4) + n++; + + ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n, + XSDFEC_LDPC_SC_TABLE_ADDR_BASE, + XSDFEC_SC_TABLE_DEPTH); + if (ret < 0) + goto err_out; + + ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table, + ldpc->nlayers, XSDFEC_LDPC_LA_TABLE_ADDR_BASE, + XSDFEC_LA_TABLE_DEPTH); + if (ret < 0) + goto err_out; + + ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table, + ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE, + XSDFEC_QC_TABLE_DEPTH); +err_out: + kfree(ldpc); + return ret; +} + +static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg) +{ + bool order_invalid; + enum xsdfec_order order; + int err; + + err = get_user(order, (enum xsdfec_order __user *)arg); + if (err) + return -EFAULT; + + order_invalid = (order != XSDFEC_MAINTAIN_ORDER) && + (order != XSDFEC_OUT_OF_ORDER); + if (order_invalid) + return -EINVAL; + + /* Verify Device has not started */ + if (xsdfec->state == XSDFEC_STARTED) + return -EIO; + + xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order); + + xsdfec->config.order = order; + + return 0; +} + +static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg) +{ + bool bypass; + int err; + + err = get_user(bypass, arg); + if (err) + return -EFAULT; + + /* Verify Device has not started */ + if (xsdfec->state == XSDFEC_STARTED) + return -EIO; + + if (bypass) + xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1); + else + xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0); + + xsdfec->config.bypass = bypass; + + return 0; +} + +static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg) +{ + u32 reg_value; + bool is_active; + int err; + + reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR); + /* using a double ! 
operator instead of casting */ + is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET); + err = put_user(is_active, arg); + if (err) + return -EFAULT; + + return err; +} + +static u32 +xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg) +{ + u32 axis_width_field = 0; + + switch (axis_width_cfg) { + case XSDFEC_1x128b: + axis_width_field = 0; + break; + case XSDFEC_2x128b: + axis_width_field = 1; + break; + case XSDFEC_4x128b: + axis_width_field = 2; + break; + } + + return axis_width_field; +} + +static u32 xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include + axis_word_inc_cfg) +{ + u32 axis_words_field = 0; + + if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE || + axis_word_inc_cfg == XSDFEC_IN_BLOCK) + axis_words_field = 0; + else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION) + axis_words_field = 1; + + return axis_words_field; +} + +static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec) +{ + u32 reg_value; + u32 dout_words_field; + u32 dout_width_field; + u32 din_words_field; + u32 din_width_field; + struct xsdfec_config *config = &xsdfec->config; + + /* translate config info to register values */ + dout_words_field = + xsdfec_translate_axis_words_cfg_val(config->dout_word_include); + dout_width_field = + xsdfec_translate_axis_width_cfg_val(config->dout_width); + din_words_field = + xsdfec_translate_axis_words_cfg_val(config->din_word_include); + din_width_field = + xsdfec_translate_axis_width_cfg_val(config->din_width); + + reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB; + reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB; + reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB; + reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB; + + xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value); + + return 0; +} + +static int xsdfec_dev_open(struct inode *iptr, struct file *fptr) +{ + return 0; +} + +static int xsdfec_dev_release(struct inode *iptr, struct file *fptr) +{ + return 0; +} + +static int xsdfec_start(struct xsdfec_dev *xsdfec) +{ + u32 regread; + + regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR); + regread &= 0x1; + if (regread != xsdfec->config.code) { + dev_dbg(xsdfec->dev, + "%s SDFEC HW code does not match driver code, reg %d, code %d", + __func__, regread, xsdfec->config.code); + return -EINVAL; + } + + /* Set AXIS enable */ + xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, + XSDFEC_AXIS_ENABLE_MASK); + /* Done */ + xsdfec->state = XSDFEC_STARTED; + return 0; +} + +static int xsdfec_stop(struct xsdfec_dev *xsdfec) +{ + u32 regread; + + if (xsdfec->state != XSDFEC_STARTED) + dev_dbg(xsdfec->dev, "Device not started correctly"); + /* Disable AXIS_ENABLE Input interfaces only */ + regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR); + regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK); + xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread); + /* Stop */ + xsdfec->state = XSDFEC_STOPPED; + return 0; +} + +static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec) +{ + spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); + xsdfec->isr_err_count = 0; + xsdfec->uecc_count = 0; + xsdfec->cecc_count = 0; + spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); + + return 0; +} + +static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg) +{ + int err; + struct xsdfec_stats user_stats; + + spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); + user_stats.isr_err_count = xsdfec->isr_err_count; + user_stats.cecc_count = xsdfec->cecc_count; + user_stats.uecc_count 
= xsdfec->uecc_count; + xsdfec->stats_updated = false; + spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); + + err = copy_to_user(arg, &user_stats, sizeof(user_stats)); + if (err) + err = -EFAULT; + + return err; +} + +static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec) +{ + /* Ensure registers are aligned with core configuration */ + xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code); + xsdfec_cfg_axi_streams(xsdfec); + update_config_from_hw(xsdfec); + + return 0; +} + +static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, + unsigned long data) +{ + struct xsdfec_dev *xsdfec; + void __user *arg = NULL; + int rval = -EINVAL; + + xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev); + + /* In failed state allow only reset and get status IOCTLs */ + if (xsdfec->state == XSDFEC_NEEDS_RESET && + (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS && + cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) { + return -EPERM; + } + + if (_IOC_TYPE(cmd) != XSDFEC_MAGIC) + return -ENOTTY; + + /* check if ioctl argument is present and valid */ + if (_IOC_DIR(cmd) != _IOC_NONE) { + arg = (void __user *)data; + if (!arg) + return rval; + } + + switch (cmd) { + case XSDFEC_START_DEV: + rval = xsdfec_start(xsdfec); + break; + case XSDFEC_STOP_DEV: + rval = xsdfec_stop(xsdfec); + break; + case XSDFEC_CLEAR_STATS: + rval = xsdfec_clear_stats(xsdfec); + break; + case XSDFEC_GET_STATS: + rval = xsdfec_get_stats(xsdfec, arg); + break; + case XSDFEC_GET_STATUS: + rval = xsdfec_get_status(xsdfec, arg); + break; + case XSDFEC_GET_CONFIG: + rval = xsdfec_get_config(xsdfec, arg); + break; + case XSDFEC_SET_DEFAULT_CONFIG: + rval = xsdfec_set_default_config(xsdfec); + break; + case XSDFEC_SET_IRQ: + rval = xsdfec_set_irq(xsdfec, arg); + break; + case XSDFEC_SET_TURBO: + rval = xsdfec_set_turbo(xsdfec, arg); + break; + case XSDFEC_GET_TURBO: + rval = xsdfec_get_turbo(xsdfec, arg); + break; + case XSDFEC_ADD_LDPC_CODE_PARAMS: + rval = xsdfec_add_ldpc(xsdfec, arg); + break; + case XSDFEC_SET_ORDER: + rval = xsdfec_set_order(xsdfec, arg); + break; + case XSDFEC_SET_BYPASS: + rval = xsdfec_set_bypass(xsdfec, arg); + break; + case XSDFEC_IS_ACTIVE: + rval = xsdfec_is_active(xsdfec, (bool __user *)arg); + break; + default: + /* Should not get here */ + break; + } + return rval; +} + +#ifdef CONFIG_COMPAT +static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long data) +{ + return xsdfec_dev_ioctl(file, cmd, (unsigned long)compat_ptr(data)); +} +#endif + +static __poll_t xsdfec_poll(struct file *file, poll_table *wait) +{ + __poll_t mask = 0; + struct xsdfec_dev *xsdfec; + + xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev); + + if (!xsdfec) + return EPOLLNVAL | EPOLLHUP; + + poll_wait(file, &xsdfec->waitq, wait); + + /* XSDFEC ISR detected an error */ + spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); + if (xsdfec->state_updated) + mask |= EPOLLIN | EPOLLPRI; + + if (xsdfec->stats_updated) + mask |= EPOLLIN | EPOLLRDNORM; + spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); + + return mask; +} + +static const struct file_operations xsdfec_fops = { + .owner = THIS_MODULE, + .open = xsdfec_dev_open, + .release = xsdfec_dev_release, + .unlocked_ioctl = xsdfec_dev_ioctl, + .poll = xsdfec_poll, +#ifdef CONFIG_COMPAT + .compat_ioctl = xsdfec_dev_compat_ioctl, +#endif +}; + +static int xsdfec_parse_of(struct xsdfec_dev *xsdfec) +{ + struct device *dev = xsdfec->dev; + 
struct device_node *node = dev->of_node; + int rval; + const char *fec_code; + u32 din_width; + u32 din_word_include; + u32 dout_width; + u32 dout_word_include; + + rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code); + if (rval < 0) + return rval; + + if (!strcasecmp(fec_code, "ldpc")) + xsdfec->config.code = XSDFEC_LDPC_CODE; + else if (!strcasecmp(fec_code, "turbo")) + xsdfec->config.code = XSDFEC_TURBO_CODE; + else + return -EINVAL; + + rval = of_property_read_u32(node, "xlnx,sdfec-din-words", + &din_word_include); + if (rval < 0) + return rval; + + if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX) + xsdfec->config.din_word_include = din_word_include; + else + return -EINVAL; + + rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width); + if (rval < 0) + return rval; + + switch (din_width) { + /* Fall through and set for valid values */ + case XSDFEC_1x128b: + case XSDFEC_2x128b: + case XSDFEC_4x128b: + xsdfec->config.din_width = din_width; + break; + default: + return -EINVAL; + } + + rval = of_property_read_u32(node, "xlnx,sdfec-dout-words", + &dout_word_include); + if (rval < 0) + return rval; + + if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX) + xsdfec->config.dout_word_include = dout_word_include; + else + return -EINVAL; + + rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width); + if (rval < 0) + return rval; + + switch (dout_width) { + /* Fall through and set for valid values */ + case XSDFEC_1x128b: + case XSDFEC_2x128b: + case XSDFEC_4x128b: + xsdfec->config.dout_width = dout_width; + break; + default: + return -EINVAL; + } + + /* Write LDPC to CODE Register */ + xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code); + + xsdfec_cfg_axi_streams(xsdfec); + + return 0; +} + +static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id) +{ + struct xsdfec_dev *xsdfec = dev_id; + irqreturn_t ret = IRQ_HANDLED; + u32 ecc_err; + u32 isr_err; + u32 uecc_count; + u32 cecc_count; + u32 isr_err_count; + u32 aecc_count; + u32 tmp; + + WARN_ON(xsdfec->irq != irq); + + /* Mask Interrupts */ + xsdfec_isr_enable(xsdfec, false); + xsdfec_ecc_isr_enable(xsdfec, false); + /* Read ISR */ + ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR); + isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR); + /* Clear the interrupts */ + xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err); + xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err); + + tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK; + /* Count uncorrectable 2-bit errors */ + uecc_count = hweight32(tmp); + /* Count all ECC errors */ + aecc_count = hweight32(ecc_err); + /* Number of correctable 1-bit ECC error */ + cecc_count = aecc_count - 2 * uecc_count; + /* Count ISR errors */ + isr_err_count = hweight32(isr_err); + dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp, + uecc_count, aecc_count, cecc_count, isr_err_count); + dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count, + xsdfec->cecc_count, xsdfec->isr_err_count); + + spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); + /* Add new errors to a 2-bits counter */ + if (uecc_count) + xsdfec->uecc_count += uecc_count; + /* Add new errors to a 1-bits counter */ + if (cecc_count) + xsdfec->cecc_count += cecc_count; + /* Add new errors to a ISR counter */ + if (isr_err_count) + xsdfec->isr_err_count += isr_err_count; + + /* Update state/stats flag */ + if (uecc_count) { + if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK) + xsdfec->state = XSDFEC_NEEDS_RESET; + else if (ecc_err & 
XSDFEC_PL_INIT_ECC_ISR_MBE_MASK) + xsdfec->state = XSDFEC_PL_RECONFIGURE; + xsdfec->stats_updated = true; + xsdfec->state_updated = true; + } + + if (cecc_count) + xsdfec->stats_updated = true; + + if (isr_err_count) { + xsdfec->state = XSDFEC_NEEDS_RESET; + xsdfec->stats_updated = true; + xsdfec->state_updated = true; + } + + spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); + dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated, + xsdfec->stats_updated); + + /* Enable another polling */ + if (xsdfec->state_updated || xsdfec->stats_updated) + wake_up_interruptible(&xsdfec->waitq); + else + ret = IRQ_NONE; + + /* Unmask Interrupts */ + xsdfec_isr_enable(xsdfec, true); + xsdfec_ecc_isr_enable(xsdfec, true); + + return ret; +} + +static int xsdfec_clk_init(struct platform_device *pdev, + struct xsdfec_clks *clks) +{ + int err; + + clks->core_clk = devm_clk_get(&pdev->dev, "core_clk"); + if (IS_ERR(clks->core_clk)) { + dev_err(&pdev->dev, "failed to get core_clk"); + return PTR_ERR(clks->core_clk); + } + + clks->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk"); + if (IS_ERR(clks->axi_clk)) { + dev_err(&pdev->dev, "failed to get axi_clk"); + return PTR_ERR(clks->axi_clk); + } + + clks->din_words_clk = devm_clk_get(&pdev->dev, "s_axis_din_words_aclk"); + if (IS_ERR(clks->din_words_clk)) { + if (PTR_ERR(clks->din_words_clk) != -ENOENT) { + err = PTR_ERR(clks->din_words_clk); + return err; + } + clks->din_words_clk = NULL; + } + + clks->din_clk = devm_clk_get(&pdev->dev, "s_axis_din_aclk"); + if (IS_ERR(clks->din_clk)) { + if (PTR_ERR(clks->din_clk) != -ENOENT) { + err = PTR_ERR(clks->din_clk); + return err; + } + clks->din_clk = NULL; + } + + clks->dout_clk = devm_clk_get(&pdev->dev, "m_axis_dout_aclk"); + if (IS_ERR(clks->dout_clk)) { + if (PTR_ERR(clks->dout_clk) != -ENOENT) { + err = PTR_ERR(clks->dout_clk); + return err; + } + clks->dout_clk = NULL; + } + + clks->dout_words_clk = + devm_clk_get(&pdev->dev, "s_axis_dout_words_aclk"); + if (IS_ERR(clks->dout_words_clk)) { + if (PTR_ERR(clks->dout_words_clk) != -ENOENT) { + err = PTR_ERR(clks->dout_words_clk); + return err; + } + clks->dout_words_clk = NULL; + } + + clks->ctrl_clk = devm_clk_get(&pdev->dev, "s_axis_ctrl_aclk"); + if (IS_ERR(clks->ctrl_clk)) { + if (PTR_ERR(clks->ctrl_clk) != -ENOENT) { + err = PTR_ERR(clks->ctrl_clk); + return err; + } + clks->ctrl_clk = NULL; + } + + clks->status_clk = devm_clk_get(&pdev->dev, "m_axis_status_aclk"); + if (IS_ERR(clks->status_clk)) { + if (PTR_ERR(clks->status_clk) != -ENOENT) { + err = PTR_ERR(clks->status_clk); + return err; + } + clks->status_clk = NULL; + } + + err = clk_prepare_enable(clks->core_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable core_clk (%d)", err); + return err; + } + + err = clk_prepare_enable(clks->axi_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable axi_clk (%d)", err); + goto err_disable_core_clk; + } + + err = clk_prepare_enable(clks->din_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable din_clk (%d)", err); + goto err_disable_axi_clk; + } + + err = clk_prepare_enable(clks->din_words_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable din_words_clk (%d)", err); + goto err_disable_din_clk; + } + + err = clk_prepare_enable(clks->dout_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable dout_clk (%d)", err); + goto err_disable_din_words_clk; + } + + err = clk_prepare_enable(clks->dout_words_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable dout_words_clk (%d)", + err); + goto 
err_disable_dout_clk; + } + + err = clk_prepare_enable(clks->ctrl_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable ctrl_clk (%d)", err); + goto err_disable_dout_words_clk; + } + + err = clk_prepare_enable(clks->status_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable status_clk (%d)\n", err); + goto err_disable_ctrl_clk; + } + + return err; + +err_disable_ctrl_clk: + clk_disable_unprepare(clks->ctrl_clk); +err_disable_dout_words_clk: + clk_disable_unprepare(clks->dout_words_clk); +err_disable_dout_clk: + clk_disable_unprepare(clks->dout_clk); +err_disable_din_words_clk: + clk_disable_unprepare(clks->din_words_clk); +err_disable_din_clk: + clk_disable_unprepare(clks->din_clk); +err_disable_axi_clk: + clk_disable_unprepare(clks->axi_clk); +err_disable_core_clk: + clk_disable_unprepare(clks->core_clk); + + return err; +} + +static void xsdfec_disable_all_clks(struct xsdfec_clks *clks) +{ + clk_disable_unprepare(clks->status_clk); + clk_disable_unprepare(clks->ctrl_clk); + clk_disable_unprepare(clks->dout_words_clk); + clk_disable_unprepare(clks->dout_clk); + clk_disable_unprepare(clks->din_words_clk); + clk_disable_unprepare(clks->din_clk); + clk_disable_unprepare(clks->core_clk); + clk_disable_unprepare(clks->axi_clk); +} + +static int xsdfec_probe(struct platform_device *pdev) +{ + struct xsdfec_dev *xsdfec; + struct device *dev; + struct resource *res; + int err; + bool irq_enabled = true; + + xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL); + if (!xsdfec) + return -ENOMEM; + + xsdfec->dev = &pdev->dev; + spin_lock_init(&xsdfec->error_data_lock); + + err = xsdfec_clk_init(pdev, &xsdfec->clks); + if (err) + return err; + + dev = xsdfec->dev; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xsdfec->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(xsdfec->regs)) { + err = PTR_ERR(xsdfec->regs); + goto err_xsdfec_dev; + } + + xsdfec->irq = platform_get_irq(pdev, 0); + if (xsdfec->irq < 0) { + dev_dbg(dev, "platform_get_irq failed"); + irq_enabled = false; + } + + err = xsdfec_parse_of(xsdfec); + if (err < 0) + goto err_xsdfec_dev; + + update_config_from_hw(xsdfec); + + /* Save driver private data */ + platform_set_drvdata(pdev, xsdfec); + + if (irq_enabled) { + init_waitqueue_head(&xsdfec->waitq); + /* Register IRQ thread */ + err = devm_request_threaded_irq(dev, xsdfec->irq, NULL, + xsdfec_irq_thread, IRQF_ONESHOT, + "xilinx-sdfec16", xsdfec); + if (err < 0) { + dev_err(dev, "unable to request IRQ%d", xsdfec->irq); + goto err_xsdfec_dev; + } + } + + err = ida_alloc(&dev_nrs, GFP_KERNEL); + if (err < 0) + goto err_xsdfec_dev; + xsdfec->dev_id = err; + + snprintf(xsdfec->dev_name, DEV_NAME_LEN, "xsdfec%d", xsdfec->dev_id); + xsdfec->miscdev.minor = MISC_DYNAMIC_MINOR; + xsdfec->miscdev.name = xsdfec->dev_name; + xsdfec->miscdev.fops = &xsdfec_fops; + xsdfec->miscdev.parent = dev; + err = misc_register(&xsdfec->miscdev); + if (err) { + dev_err(dev, "error:%d. 
Unable to register device", err);
+		goto err_xsdfec_ida;
+	}
+	return 0;
+
+err_xsdfec_ida:
+	ida_free(&dev_nrs, xsdfec->dev_id);
+err_xsdfec_dev:
+	xsdfec_disable_all_clks(&xsdfec->clks);
+	return err;
+}
+
+static int xsdfec_remove(struct platform_device *pdev)
+{
+	struct xsdfec_dev *xsdfec;
+
+	xsdfec = platform_get_drvdata(pdev);
+	misc_deregister(&xsdfec->miscdev);
+	ida_free(&dev_nrs, xsdfec->dev_id);
+	xsdfec_disable_all_clks(&xsdfec->clks);
+	return 0;
+}
+
+static const struct of_device_id xsdfec_of_match[] = {
+	{
+		.compatible = "xlnx,sd-fec-1.1",
+	},
+	{ /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, xsdfec_of_match);
+
+static struct platform_driver xsdfec_driver = {
+	.driver = {
+		.name = "xilinx-sdfec",
+		.of_match_table = xsdfec_of_match,
+	},
+	.probe = xsdfec_probe,
+	.remove = xsdfec_remove,
+};
+
+module_platform_driver(xsdfec_driver);
+
+MODULE_AUTHOR("Xilinx, Inc");
+MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
+MODULE_LICENSE("GPL");